Import clang-tools-extra 8.0.1

The clang-tools-extra suite contains helpful developer tools built on Clang's
tooling API.

This port is more or less a copy of devel/llvm; it packages the clang extra
tools without increasing the complexity of devel/llvm.

Requested by many. Tested by otto@. Tweaks and OK jca@ (Thanks a lot!)
rsadowski committed on 2019-11-06 10:07:55 +00:00
parent 6f932cac84, commit 0626c5f973
84 changed files with 4689 additions and 0 deletions
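
The tools in question (clangd, clang-tidy, clang-include-fixer, and friends) are
libTooling programs. For context, a minimal standalone tool against the Clang 8
tooling API looks roughly like the sketch below (an illustration only, not part
of this commit; the tool name and option category are made up):

// mytool.cpp - minimal libTooling driver sketch (Clang 8 era API).
// Runs a syntax-only action over the files named on the command line.
#include "clang/Frontend/FrontendActions.h"
#include "clang/Tooling/CommonOptionsParser.h"
#include "clang/Tooling/Tooling.h"
#include "llvm/Support/CommandLine.h"

static llvm::cl::OptionCategory ToolCategory("mytool options");

int main(int argc, const char **argv) {
  // Parses the common tooling flags (-p <builddir> for the compilation
  // database, plus the list of source files to process).
  clang::tooling::CommonOptionsParser Options(argc, argv, ToolCategory);
  clang::tooling::ClangTool Tool(Options.getCompilations(),
                                 Options.getSourcePathList());
  return Tool.run(
      clang::tooling::newFrontendActionFactory<clang::SyntaxOnlyAction>()
          .get());
}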

@@ -0,0 +1,92 @@
# $OpenBSD: Makefile,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $

# XXX
# This port is more or less a copy of devel/llvm and it tries to
# package the clang extra tools without increasing the complexity of
# devel/llvm.
#
# Please keep patches in sync with devel/llvm and just remove the lld and lldb
# patches: rm patches/patch-*lld*

ONLY_FOR_ARCHS =	${LLVM_ARCHS}
DPB_PROPERTIES =	parallel

COMMENT =		Clang extra tools

LLVM_V =		8.0.1
DISTNAME =		llvm-${LLVM_V}.src
PKGNAME =		clang-tools-extra-${LLVM_V}

CATEGORIES =		devel
HOMEPAGE =		https://clang.llvm.org/extra/index.html
MAINTAINER =		Rafael Sadowski <rsadowski@openbsd.org>

# BSD
PERMIT_PACKAGE =	Yes

WANTLIB +=		${COMPILER_LIBCXX} c curses edit m z

MASTER_SITES =		https://github.com/llvm/llvm-project/releases/download/llvmorg-${LLVM_V}/
EXTRACT_SUFX =		.tar.xz
DISTFILES =		llvm-${LLVM_V}.src${EXTRACT_SUFX} \
			cfe-${LLVM_V}.src${EXTRACT_SUFX} \
			clang-tools-extra-${LLVM_V}.src${EXTRACT_SUFX}

COMPILER =		base-clang ports-gcc

MODULES =		devel/cmake \
			lang/python

RUN_DEPENDS =		devel/llvm

CONFIGURE_STYLE =	cmake

# Disable most stuff to speed up the whole build step
CONFIGURE_ARGS =	-DLLVM_ENABLE_FFI=OFF \
			-DLLVM_ENABLE_TERMINFO=ON \
			-DLLVM_ENABLE_RTTI=ON \
			-DCMAKE_DISABLE_FIND_PACKAGE_LibXml2=ON \
			-DLLVM_LINK_LLVM_DYLIB=OFF \
			-DLLVM_BUILD_LLVM_DYLIB=OFF \
			-DGO_EXECUTABLE=GO_EXECUTABLE-NOTFOUND \
			-DBacktrace_LIBRARY=''
CONFIGURE_ARGS +=	-DCLANG_ENABLE_STATIC_ANALYZER=ON \
			-DCLANG_INCLUDE_TESTS=OFF \
			-DLLVM_BUILD_TESTS=OFF \
			-DLLVM_BUILD_BENCHMARKS=OFF \
			-DLLVM_ENABLE_DOXYGEN=OFF \
			-DLLVM_ENABLE_SPHINX=OFF \
			-DLLVM_INCLUDE_EXAMPLES=OFF \
			-DLLVM_INCLUDE_TESTS=OFF \
			-DLLVM_INCLUDE_BENCHMARKS=OFF

GCC_VER =		8.3.0
.if ${MACHINE_ARCH} == "amd64"
GCC_CONFIG =		x86_64-unknown-openbsd${OSREV}
.else
GCC_CONFIG =		${MACHINE_ARCH}-unknown-openbsd${OSREV}
.endif

CLANG_INCLUDE_PATH =	lib/clang/${LLVM_V}/include
SUBST_VARS +=		CLANG_INCLUDE_PATH LLVM_V GCC_VER GCC_CONFIG

ALL_TARGET =		tools/clang/tools/extra/all
FAKE_TARGET =		tools/clang/tools/extra/install

post-extract:
	mv ${WRKDIR}/cfe-${LLVM_V}.src ${WRKSRC}/tools/clang
	mv ${WRKDIR}/clang-tools-extra-${LLVM_V}.src ${WRKSRC}/tools/clang/tools/extra

pre-configure:
	@${SUBST_CMD} ${WRKSRC}/tools/clang/lib/Driver/ToolChains/OpenBSD.cpp
	-@ln -s ${MODPY_BIN} ${WRKDIR}/bin/python

post-install:
	${MODPY_BIN} ${MODPY_LIBDIR}/compileall.py \
		${PREFIX}/share/clang

.include <bsd.port.mk>

@@ -0,0 +1,131 @@
# $OpenBSD: Makefile.rsadowski,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $

# XXX
# This port is more or less a copy of devel/llvm and it tries to
# package the clang extra tools without increasing the complexity of
# devel/llvm.
#
# Please keep patches in sync with devel/llvm and just remove the lld and lldb
# patches: rm patches/patch-*lld*

ONLY_FOR_ARCHS =	${LLVM_ARCHS}
DPB_PROPERTIES =	parallel

COMMENT =		Clang extra tools

LLVM_V =		8.0.1
DISTNAME =		llvm-${LLVM_V}.src
PKGNAME =		clang-tools-extra-${LLVM_V}

CATEGORIES =		devel
HOMEPAGE =		https://clang.llvm.org/extra/index.html
MAINTAINER =		Rafael Sadowski <rsadowski@openbsd.org>

# BSD
PERMIT_PACKAGE =	Yes

MASTER_SITES =		https://github.com/llvm/llvm-project/releases/download/llvmorg-${LLVM_V}/
EXTRACT_SUFX =		.tar.xz
DISTFILES =		llvm-${LLVM_V}.src${EXTRACT_SUFX} \
			cfe-${LLVM_V}.src${EXTRACT_SUFX} \
			clang-tools-extra-${LLVM_V}.src${EXTRACT_SUFX}

COMPILER =		base-clang ports-gcc

MODULES =		devel/cmake \
			lang/python

BUILD_DEPENDS +=	devel/swig \
			textproc/py-recommonmark
RUN_DEPENDS +=		devel/gtest \
			devel/llvm

.include <bsd.port.arch.mk>

.if !${PROPERTIES:Mclang}
TEST_DEPENDS +=		lang/gcc/${MODGCC4_VERSION},-c++
RUN_DEPENDS +=		lang/gcc/${MODGCC4_VERSION},-c++
.endif

CONFIGURE_STYLE =	cmake

# Disable most stuff to speed up the whole build step
CONFIGURE_ARGS =	-DLLVM_ENABLE_FFI=OFF \
			-DLLVM_ENABLE_TERMINFO=ON \
			-DLLVM_ENABLE_RTTI=ON \
			-DCMAKE_DISABLE_FIND_PACKAGE_LibXml2=ON \
			-DLLVM_LINK_LLVM_DYLIB=OFF \
			-DLLVM_BUILD_LLVM_DYLIB=OFF \
			-DGO_EXECUTABLE=GO_EXECUTABLE-NOTFOUND \
			-DBacktrace_LIBRARY=''
CONFIGURE_ARGS +=	-DCLANG_ENABLE_STATIC_ANALYZER=ON \
			-DCLANG_INCLUDE_TESTS=OFF \
			-DLLVM_BUILD_TESTS=OFF \
			-DLLVM_BUILD_BENCHMARKS=OFF \
			-DLLVM_ENABLE_DOXYGEN=OFF \
			-DLLVM_ENABLE_SPHINX=OFF \
			-DLLVM_INCLUDE_EXAMPLES=OFF \
			-DLLVM_INCLUDE_TESTS=OFF \
			-DLLVM_INCLUDE_BENCHMARKS=OFF

# Disable some protections in the compiler to regain performance
.if ${MACHINE_ARCH} == "aarch64" || ${MACHINE_ARCH} == "amd64" || \
    ${MACHINE_ARCH} == "i386"
CXXFLAGS +=		-fno-ret-protector
.endif
.if ${MACHINE_ARCH} == "amd64" || ${MACHINE_ARCH} == "i386"
CXXFLAGS +=		-mno-retpoline
.endif

# Workaround relocation overflow
.if ${MACHINE_ARCH} == "powerpc"
# As a workaround for the size, only build the PowerPC backend
CONFIGURE_ARGS +=	-DLLVM_TARGETS_TO_BUILD="PowerPC"
.elif ${MACHINE_ARCH} == "arm"
CFLAGS +=		-mlong-calls
CXXFLAGS +=		-mlong-calls
.endif

.if ${MACHINE_ARCH} == "powerpc"
PKG_ARGS +=		-Dpowerpc=1
.else
PKG_ARGS +=		-Dpowerpc=0
.endif

GCC_VER =		8.3.0
.if ${MACHINE_ARCH} == "amd64"
GCC_CONFIG =		x86_64-unknown-openbsd${OSREV}
.else
GCC_CONFIG =		${MACHINE_ARCH}-unknown-openbsd${OSREV}
.endif

CLANG_INCLUDE_PATH =	lib/clang/${LLVM_V}/include
SUBST_VARS +=		CLANG_INCLUDE_PATH LLVM_V GCC_VER GCC_CONFIG

post-extract:
	mv ${WRKDIR}/cfe-${LLVM_V}.src ${WRKSRC}/tools/clang
	mv ${WRKDIR}/clang-tools-extra-${LLVM_V}.src ${WRKSRC}/tools/clang/tools/extra

pre-configure:
	@${SUBST_CMD} ${WRKSRC}/tools/clang/lib/Driver/ToolChains/OpenBSD.cpp
	-@ln -s ${MODPY_BIN} ${WRKDIR}/bin/python

post-install:
	${MODPY_BIN} ${MODPY_LIBDIR}/compileall.py \
		${PREFIX}/share/clang

# LLVM/Clang use the OpenBSD shared library style so.X.Y, so we need this
# helper environment.
.for _n _v in clang 8.0 lldb 1.0 LTO 5.0
CONFIGURE_ENV +=	LIB${_n}_VERSION=${_v}
MAKE_ENV +=		LIB${_n}_VERSION=${_v}
.endfor

FAKE_TARGET =		tools/clang/tools/extra/install

.include <bsd.port.mk>

@@ -0,0 +1,6 @@
SHA256 (cfe-8.0.1.src.tar.xz) = cO/9afeoqySfZrCmirqLCK9Sqiq3EN+4oPuhAmhbFkY=
SHA256 (clang-tools-extra-8.0.1.src.tar.xz) = GHF5thfk8Hu2BcwhXaBSfmSZC0p91cvMRSoWtk4Cw+E=
SHA256 (llvm-8.0.1.src.tar.xz) = RHh6bQL3FA8UXiJQ1WyfhJM04R+a43mCdRDtcvErdec=
SIZE (cfe-8.0.1.src.tar.xz) = 12810056
SIZE (clang-tools-extra-8.0.1.src.tar.xz) = 1994068
SIZE (llvm-8.0.1.src.tar.xz) = 30477608

@@ -0,0 +1,19 @@
$OpenBSD: patch-cmake_modules_HandleLLVMOptions_cmake,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
Fix CMake Invalid Escape Sequence
https://github.com/llvm-mirror/llvm/commit/b8b62917a1566bfc6d8706d06042171aa2306bbc
+ https://github.com/llvm-mirror/llvm/commit/614d096016667810cc777db37d180accb2390cfb
Index: cmake/modules/HandleLLVMOptions.cmake
--- cmake/modules/HandleLLVMOptions.cmake.orig
+++ cmake/modules/HandleLLVMOptions.cmake
@@ -11,7 +11,7 @@ include(HandleLLVMStdlib)
include(CheckCCompilerFlag)
include(CheckCXXCompilerFlag)
-if(CMAKE_LINKER MATCHES "lld-link\.exe" OR (WIN32 AND LLVM_USE_LINKER STREQUAL "lld") OR LLVM_ENABLE_LLD)
+if(CMAKE_LINKER MATCHES "lld-link" OR (WIN32 AND LLVM_USE_LINKER STREQUAL "lld") OR LLVM_ENABLE_LLD)
set(LINKER_IS_LLD_LINK TRUE)
else()
set(LINKER_IS_LLD_LINK FALSE)
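
The underlying issue: inside a CMake quoted string, \. is an invalid escape
sequence, and upstream's fix simply drops the ".exe" suffix from the match.
The same trade-off in miniature, as a plain C++ std::regex sketch (an
illustration with made-up paths, not code from this commit):

// lld-match.cpp - substring matching "lld-link" is both simpler and safe:
// it still matches "lld-link.exe" and still rejects other linkers.
#include <cstdio>
#include <regex>
#include <string>

int main() {
  const std::regex loose("lld-link");        // the upstream fix
  const std::regex strict("lld-link\\.exe"); // the old, suffix-pinned pattern
  std::string win = "C:/LLVM/bin/lld-link.exe"; // hypothetical paths
  std::string unixish = "/usr/bin/ld.lld";
  printf("loose on lld-link.exe: %d\n", (int)std::regex_search(win, loose));   // 1
  printf("strict on lld-link.exe: %d\n", (int)std::regex_search(win, strict)); // 1
  printf("loose on ld.lld: %d\n", (int)std::regex_search(unixish, loose));     // 0
}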

@@ -0,0 +1,16 @@
$OpenBSD: patch-include_llvm_BinaryFormat_Dwarf_def,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
implement -msave-args in clang/llvm, like Sun did for gcc
Index: include/llvm/BinaryFormat/Dwarf.def
--- include/llvm/BinaryFormat/Dwarf.def.orig
+++ include/llvm/BinaryFormat/Dwarf.def
@@ -365,6 +365,8 @@ HANDLE_DW_AT(0x2133, GNU_addr_base, 0, GNU)
HANDLE_DW_AT(0x2134, GNU_pubnames, 0, GNU)
HANDLE_DW_AT(0x2135, GNU_pubtypes, 0, GNU)
HANDLE_DW_AT(0x2136, GNU_discriminator, 0, GNU)
+// Sun Extension
+HANDLE_DW_AT(0x2224, SUN_amd64_parmdump, 0, GNU)
// Borland extensions.
HANDLE_DW_AT(0x3b11, BORLAND_property_read, 0, BORLAND)
HANDLE_DW_AT(0x3b12, BORLAND_property_write, 0, BORLAND)

@@ -0,0 +1,31 @@
$OpenBSD: patch-include_llvm_CodeGen_AsmPrinter_h,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
Use int3 trap padding between functions instead of trapsleds with a leading jump.
Index: include/llvm/CodeGen/AsmPrinter.h
--- include/llvm/CodeGen/AsmPrinter.h.orig
+++ include/llvm/CodeGen/AsmPrinter.h
@@ -348,6 +348,11 @@ class AsmPrinter : public MachineFunctionPass { (publi
/// correctness.
void EmitAlignment(unsigned NumBits, const GlobalObject *GV = nullptr) const;
+ /// Emit an alignment directive to the specified power of two boundary,
+ /// like EmitAlignment, but call EmitTrapToAlignment to fill with
+ /// trap instructions instead of NOPs.
+ void EmitTrapAlignment(unsigned NumBits, const GlobalObject *GO = nullptr) const;
+
/// Lower the specified LLVM Constant to an MCExpr.
virtual const MCExpr *lowerConstant(const Constant *CV);
@@ -408,6 +413,11 @@ class AsmPrinter : public MachineFunctionPass { (publi
virtual void EmitInstruction(const MachineInstr *) {
llvm_unreachable("EmitInstruction not implemented");
}
+
+ /// Emit an alignment directive to the specified power
+ /// of two boundary, but use Trap instructions for alignment
+ /// sections that should never be executed.
+ virtual void EmitTrapToAlignment(unsigned NumBits) const;
/// Return the symbol for the specified constant pool entry.
virtual MCSymbol *GetCPISymbol(unsigned CPID) const;
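
To make the padding idea concrete: aligning to a 2^NumBits boundary emits
(-addr mod 2^NumBits) filler bytes, and these patches fill them with traps
rather than NOPs. A small standalone sketch of the arithmetic (0xCC is the
x86 int3 encoding; this is an illustration, not LLVM code):

// trap-pad.cpp - how many pad bytes reach the next 2^NumBits boundary,
// and the trap filler used on x86 where EmitAlignment would emit NOPs.
#include <cstdint>
#include <cstdio>
#include <vector>

static std::vector<uint8_t> trapPadding(uint64_t addr, unsigned NumBits) {
  const uint64_t align = 1ULL << NumBits;
  const uint64_t pad = (align - (addr & (align - 1))) & (align - 1);
  return std::vector<uint8_t>(pad, 0xCC); // int3 traps instead of NOPs
}

int main() {
  auto pad = trapPadding(0x401003, 4); // next 16-byte boundary is 0x401010
  printf("%zu trap bytes\n", pad.size()); // prints: 13 trap bytes
}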

@@ -0,0 +1,58 @@
$OpenBSD: patch-include_llvm_CodeGen_MachineFrameInfo_h,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
- Add RETGUARD to clang for amd64. This security mechanism uses per-function
random cookies to protect access to function return instructions, with the
effect that the integrity of the return address is protected, and function
return instructions are harder to use in ROP gadgets.
On function entry the return address is combined with a per-function random
cookie and stored in the stack frame. The integrity of this value is verified
before function return, and if this check fails, the program aborts. In this way
RETGUARD is an improved stack protector, since the cookies are per-function. The
verification routine is constructed such that the binary space immediately
before each ret instruction is padded with int03 instructions, which makes these
return instructions difficult to use in ROP gadgets. In the kernel, this has the
effect of removing approximately 50% of total ROP gadgets, and 15% of unique
ROP gadgets compared to the 6.3 release kernel. Function epilogues are
essentially gadget free, leaving only the polymorphic gadgets that result from
jumping into the instruction stream partway through other instructions. Work to
remove these gadgets will continue through other mechanisms.
- Refactor retguard to make adding additional arches easier.
Index: include/llvm/CodeGen/MachineFrameInfo.h
--- include/llvm/CodeGen/MachineFrameInfo.h.orig
+++ include/llvm/CodeGen/MachineFrameInfo.h
@@ -274,6 +274,15 @@ class MachineFrameInfo { (private)
/// The frame index for the stack protector.
int StackProtectorIdx = -1;
+ struct ReturnProtector {
+ /// The register to use for return protector calculations
+ unsigned Register = 0;
+ /// Set to true if this function needs return protectors
+ bool Needed = false;
+ /// Does the return protector cookie need to be stored in frame
+ bool NeedsStore = true;
+ } RPI;
+
/// The frame index for the function context. Used for SjLj exceptions.
int FunctionContextIdx = -1;
@@ -354,6 +363,17 @@ class MachineFrameInfo { (private)
int getStackProtectorIndex() const { return StackProtectorIdx; }
void setStackProtectorIndex(int I) { StackProtectorIdx = I; }
bool hasStackProtectorIndex() const { return StackProtectorIdx != -1; }
+
+ /// Get / Set return protector calculation register
+ unsigned getReturnProtectorRegister() const { return RPI.Register; }
+ void setReturnProtectorRegister(unsigned I) { RPI.Register = I; }
+ bool hasReturnProtectorRegister() const { return RPI.Register != 0; }
+ /// Get / Set if this frame needs a return protector
+ void setReturnProtectorNeeded(bool I) { RPI.Needed = I; }
+ bool getReturnProtectorNeeded() const { return RPI.Needed; }
+ /// Get / Set if the return protector cookie needs to be stored in frame
+ void setReturnProtectorNeedsStore(bool I) { RPI.NeedsStore = I; }
+ bool getReturnProtectorNeedsStore() const { return RPI.NeedsStore; }
/// Return the index for the function context object.
/// This object is used for SjLj exceptions.
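
The scheme the description above outlines can be modeled in a few lines of
plain C++ (a userspace sketch only: in the real mechanism the cookie lives in
a randomdata section filled at load time, and the failure path lands on int3
traps rather than abort()):

// retguard-model.cpp - illustrative model of the RETGUARD check.
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Per-function random cookie; stands in for a __retguard_* symbol.
static uintptr_t __retguard_model = 0x5aa5c3d2e1f00f1eULL;

// Prologue: combine the return address with the cookie; the result is
// what gets kept in the stack frame.
static uintptr_t rg_prologue(uintptr_t retaddr) {
  return __retguard_model ^ retaddr;
}

// Epilogue: recombine and verify; any tampering with the return address
// breaks the equality and the program aborts.
static void rg_epilogue(uintptr_t slot, uintptr_t retaddr) {
  if ((slot ^ retaddr) != __retguard_model)
    abort();
}

int main() {
  uintptr_t ra = 0x401000;          // pretend return address
  uintptr_t slot = rg_prologue(ra);
  rg_epilogue(slot, ra);            // intact return address: check passes
  puts("retguard check passed");
  // rg_epilogue(slot, ra + 8);     // corrupted return address: would abort()
}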

@@ -0,0 +1,33 @@
$OpenBSD: patch-include_llvm_CodeGen_Passes_h,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
Add RETGUARD to clang for amd64. This security mechanism uses per-function
random cookies to protect access to function return instructions, with the
effect that the integrity of the return address is protected, and function
return instructions are harder to use in ROP gadgets.
On function entry the return address is combined with a per-function random
cookie and stored in the stack frame. The integrity of this value is verified
before function return, and if this check fails, the program aborts. In this way
RETGUARD is an improved stack protector, since the cookies are per-function. The
verification routine is constructed such that the binary space immediately
before each ret instruction is padded with int03 instructions, which makes these
return instructions difficult to use in ROP gadgets. In the kernel, this has the
effect of removing approximately 50% of total ROP gadgets, and 15% of unique
ROP gadgets compared to the 6.3 release kernel. Function epilogues are
essentially gadget free, leaving only the polymorphic gadgets that result from
jumping into the instruction stream partway through other instructions. Work to
remove these gadgets will continue through other mechanisms.
Index: include/llvm/CodeGen/Passes.h
--- include/llvm/CodeGen/Passes.h.orig
+++ include/llvm/CodeGen/Passes.h
@@ -318,6 +318,9 @@ namespace llvm {
///
FunctionPass *createStackProtectorPass();
+ // createReturnProtectorPass - This pass adds return protectors to functions.
+ FunctionPass *createReturnProtectorPass();
+
/// createMachineVerifierPass - This pass verifies cenerated machine code
/// instructions for correctness.
///

@@ -0,0 +1,90 @@
$OpenBSD: patch-include_llvm_CodeGen_ReturnProtectorLowering_h,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
- Refactor retguard to make adding additional arches easier.
- Do not store the retguard cookie in frame in leaf functions if possible.
Makes things slightly faster and also improves security in these functions,
since the retguard cookie can't leak via the stack.
Index: include/llvm/CodeGen/ReturnProtectorLowering.h
--- include/llvm/CodeGen/ReturnProtectorLowering.h.orig
+++ include/llvm/CodeGen/ReturnProtectorLowering.h
@@ -0,0 +1,79 @@
+//===-- llvm/CodeGen/ReturnProtectorLowering.h ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A class to insert and lower the return protector instrumentation
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_RETURNPROTECTORLOWERING_H
+#define LLVM_CODEGEN_RETURNPROTECTORLOWERING_H
+
+#include "llvm/ADT/SmallVector.h"
+
+#include <utility>
+#include <vector>
+
+namespace llvm {
+class CalleeSavedInfo;
+class GlobalVariable;
+class MachineBasicBlock;
+class MachineFunction;
+class MachineInstr;
+
+class ReturnProtectorLowering {
+public:
+ virtual ~ReturnProtectorLowering() {}
+ /// Subclass interface - subclasses need to implement these functions.
+
+ /// insertReturnProtectorPrologue/Epilogue - insert return protector
+ /// instrumentation in prologue or epilogue.
+ virtual void insertReturnProtectorPrologue(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ GlobalVariable *cookie) const {}
+ virtual void insertReturnProtectorEpilogue(MachineFunction &MF,
+ MachineInstr &MI,
+ GlobalVariable *cookie) const {}
+
+ /// opcodeIsReturn - Return true if the given opcode is a return
+ /// instruction needing return protection, false otherwise.
+ virtual bool opcodeIsReturn(unsigned opcode) const { return false; }
+
+ /// fillTempRegisters - Fill the list of available temp registers we can
+ /// use as a CalculationRegister.
+ virtual void fillTempRegisters(MachineFunction &MF,
+ std::vector<unsigned> &TempRegs) const {}
+
+ /// Generic public interface used by llvm
+
+ /// setupReturnProtector - Checks the function for ROP friendly return
+ /// instructions and sets ReturnProtectorNeeded in the frame if found.
+ virtual void setupReturnProtector(MachineFunction &MF) const;
+
+ /// saveReturnProtectorRegister - Allows the target to save the
+ /// CalculationRegister in the CalleeSavedInfo vector if needed.
+ virtual void
+ saveReturnProtectorRegister(MachineFunction &MF,
+ std::vector<CalleeSavedInfo> &CSI) const;
+
+ /// determineReturnProtectorRegister - Find a register that can be used
+ /// during function prologue / epilogue to store the return protector cookie.
+ /// Returns false if a register is needed but could not be found,
+ /// otherwise returns true.
+ virtual bool determineReturnProtectorRegister(
+ MachineFunction &MF,
+ const SmallVector<MachineBasicBlock *, 4> &SaveBlocks,
+ const SmallVector<MachineBasicBlock *, 4> &RestoreBlocks) const;
+
+ /// insertReturnProtectors - insert return protector instrumentation.
+ virtual void insertReturnProtectors(MachineFunction &MF) const;
+};
+
+} // namespace llvm
+
+#endif

@@ -0,0 +1,43 @@
$OpenBSD: patch-include_llvm_CodeGen_TargetFrameLowering_h,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
- Add RETGUARD to clang for amd64. This security mechanism uses per-function
random cookies to protect access to function return instructions, with the
effect that the integrity of the return address is protected, and function
return instructions are harder to use in ROP gadgets.
On function entry the return address is combined with a per-function random
cookie and stored in the stack frame. The integrity of this value is verified
before function return, and if this check fails, the program aborts. In this way
RETGUARD is an improved stack protector, since the cookies are per-function. The
verification routine is constructed such that the binary space immediately
before each ret instruction is padded with int03 instructions, which makes these
return instructions difficult to use in ROP gadgets. In the kernel, this has the
effect of removing approximately 50% of total ROP gadgets, and 15% of unique
ROP gadgets compared to the 6.3 release kernel. Function epilogues are
essentially gadget free, leaving only the polymorphic gadgets that result from
jumping into the instruction stream partway through other instructions. Work to
remove these gadgets will continue through other mechanisms.
- Refactor retguard to make adding additional arches easier.
Index: include/llvm/CodeGen/TargetFrameLowering.h
--- include/llvm/CodeGen/TargetFrameLowering.h.orig
+++ include/llvm/CodeGen/TargetFrameLowering.h
@@ -15,6 +15,7 @@
#define LLVM_CODEGEN_TARGETFRAMELOWERING_H
#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/ReturnProtectorLowering.h"
#include <utility>
#include <vector>
@@ -168,6 +169,10 @@ class TargetFrameLowering { (public)
MachineBasicBlock &MBB) const = 0;
virtual void emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const = 0;
+
+ virtual const ReturnProtectorLowering *getReturnProtector() const {
+ return nullptr;
+ }
/// Replace a StackProbe stub (if any) with the actual probe code inline
virtual void inlineStackProbe(MachineFunction &MF,

@@ -0,0 +1,28 @@
$OpenBSD: patch-include_llvm_Config_llvm-config_h_cmake,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
When bsd.lib.mk builds shared libraries it builds with -DPIC which
causes problems in the following files which use PIC as a variable name.
Undefine PIC in llvm-config.h to minimise the diff to upstream LLVM.
Index: include/llvm/Config/llvm-config.h.cmake
--- include/llvm/Config/llvm-config.h.cmake.orig
+++ include/llvm/Config/llvm-config.h.cmake
@@ -14,6 +14,18 @@
#ifndef LLVM_CONFIG_H
#define LLVM_CONFIG_H
+/*
+ * When bsd.lib.mk builds shared libraries it builds with -DPIC which
+ * causes problems in the following files which use PIC as a variable name.
+ * undefine PIC here to minimise the diff to upstream LLVM
+ *
+ * include/llvm/MC/MCObjectFileInfo.h
+ * lib/MC/MCObjectFileInfo.cpp
+ * lib/Transforms/Scalar/LICM.cpp
+ * lib/Transforms/Utils/PredicateInfo.cpp
+ */
+#undef PIC
+
/* Define if LLVM_ENABLE_DUMP is enabled */
#cmakedefine LLVM_ENABLE_DUMP
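
A two-line reproduction of the clash this patch avoids (a hypothetical file,
not part of the port; try it with and without the #undef):

// pic-clash.cpp - simulate bsd.lib.mk passing -DPIC while LLVM sources use
// PIC as an ordinary identifier. With the macro left defined, the local
// declaration below expands to "bool 1 = true;" and fails to compile.
#define PIC 1 // what bsd.lib.mk's -DPIC effectively does
#undef PIC    // the fix applied by the patch above

int main() {
  bool PIC = true; // e.g. lib/MC/MCObjectFileInfo.cpp uses PIC as a variable
  return PIC ? 0 : 1;
}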

@@ -0,0 +1,31 @@
$OpenBSD: patch-include_llvm_InitializePasses_h,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
Add RETGUARD to clang for amd64. This security mechanism uses per-function
random cookies to protect access to function return instructions, with the
effect that the integrity of the return address is protected, and function
return instructions are harder to use in ROP gadgets.
On function entry the return address is combined with a per-function random
cookie and stored in the stack frame. The integrity of this value is verified
before function return, and if this check fails, the program aborts. In this way
RETGUARD is an improved stack protector, since the cookies are per-function. The
verification routine is constructed such that the binary space immediately
before each ret instruction is padded with int03 instructions, which makes these
return instructions difficult to use in ROP gadgets. In the kernel, this has the
effect of removing approximately 50% of total ROP gadgets, and 15% of unique
ROP gadgets compared to the 6.3 release kernel. Function epilogues are
essentially gadget free, leaving only the polymorphic gadgets that result from
jumping into the instruction stream partway through other instructions. Work to
remove these gadgets will continue through other mechanisms.
Index: include/llvm/InitializePasses.h
--- include/llvm/InitializePasses.h.orig
+++ include/llvm/InitializePasses.h
@@ -345,6 +345,7 @@ void initializeRegionViewerPass(PassRegistry&);
void initializeRegisterCoalescerPass(PassRegistry&);
void initializeRenameIndependentSubregsPass(PassRegistry&);
void initializeResetMachineFunctionPass(PassRegistry&);
+void initializeReturnProtectorPass(PassRegistry&);
void initializeReversePostOrderFunctionAttrsLegacyPassPass(PassRegistry&);
void initializeRewriteStatepointsForGCLegacyPassPass(PassRegistry &);
void initializeRewriteSymbolsLegacyPassPass(PassRegistry&);

@@ -0,0 +1,16 @@
$OpenBSD: patch-include_llvm_MC_MCAsmInfoELF_h,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
Do not use nonexec stack segment header hints.
Index: include/llvm/MC/MCAsmInfoELF.h
--- include/llvm/MC/MCAsmInfoELF.h.orig
+++ include/llvm/MC/MCAsmInfoELF.h
@@ -21,7 +21,7 @@ class MCAsmInfoELF : public MCAsmInfo {
protected:
/// Targets which have non-executable stacks by default can set this to false
/// to disable the special section which requests a non-executable stack.
- bool UsesNonexecutableStackSection = true;
+ bool UsesNonexecutableStackSection = false;
MCAsmInfoELF();
};

@@ -0,0 +1,72 @@
$OpenBSD: patch-lib_CodeGen_AsmPrinter_AsmPrinter_cpp,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
- Use int3 trap padding between functions instead of trapsleds with a leading jump.
- Emit trap alignment between basic blocks that are unreachable via
fallthrough. Avoids unnecessary jmp instructions in the middle
of functions and makes disassembly nicer to read.
Index: lib/CodeGen/AsmPrinter/AsmPrinter.cpp
--- lib/CodeGen/AsmPrinter/AsmPrinter.cpp.orig
+++ lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -653,7 +653,7 @@ void AsmPrinter::EmitFunctionHeader() {
EmitLinkage(&F, CurrentFnSym);
if (MAI->hasFunctionAlignment())
- EmitAlignment(MF->getAlignment(), &F);
+ EmitTrapAlignment(MF->getAlignment(), &F);
if (MAI->hasDotTypeDotSizeDirective())
OutStreamer->EmitSymbolAttribute(CurrentFnSym, MCSA_ELF_TypeFunction);
@@ -2112,6 +2112,33 @@ void AsmPrinter::EmitAlignment(unsigned NumBits, const
}
//===----------------------------------------------------------------------===//
+/// EmitTrapAlignment - Emit an alignment directive to the specified power of
+/// two boundary, but call EmitTrapToAlignment to fill with Trap instructions
+/// if the Target implements EmitTrapToAlignment.
+void AsmPrinter::EmitTrapAlignment(unsigned NumBits, const GlobalObject *GV) const {
+ if (GV)
+ NumBits = getGVAlignmentLog2(GV, GV->getParent()->getDataLayout(), NumBits);
+
+ if (NumBits == 0) return; // 1-byte aligned: no need to emit alignment.
+
+ assert(NumBits <
+ static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
+ "undefined behavior");
+ EmitTrapToAlignment(NumBits);
+}
+
+//===----------------------------------------------------------------------===//
+/// EmitTrapToAlignment - Emit an alignment directive to the specified power
+/// of two boundary. This default implementation calls EmitCodeAlignment on
+/// the OutStreamer, but can be overridden by Target implementations.
+void AsmPrinter::EmitTrapToAlignment(unsigned NumBits) const {
+ if (NumBits == 0) return;
+ OutStreamer->EmitCodeAlignment(1u << NumBits);
+}
+
+
+
+//===----------------------------------------------------------------------===//
// Constant emission.
//===----------------------------------------------------------------------===//
@@ -2879,11 +2906,15 @@ void AsmPrinter::EmitBasicBlockStart(const MachineBasi
}
}
- // Emit an alignment directive for this block, if needed.
- if (unsigned Align = MBB.getAlignment())
- EmitAlignment(Align);
MCCodePaddingContext Context;
setupCodePaddingContext(MBB, Context);
+ // Emit an alignment directive for this block, if needed.
+ if (unsigned Align = MBB.getAlignment()) {
+ if (Context.IsBasicBlockReachableViaFallthrough)
+ EmitAlignment(Align);
+ else
+ EmitTrapAlignment(Align);
+ }
OutStreamer->EmitCodePaddingBasicBlockStart(Context);
// If the block has its address taken, emit any labels that were used to

@@ -0,0 +1,33 @@
$OpenBSD: patch-lib_CodeGen_CMakeLists_txt,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
- Add RETGUARD to clang for amd64. This security mechanism uses per-function
random cookies to protect access to function return instructions, with the
effect that the integrity of the return address is protected, and function
return instructions are harder to use in ROP gadgets.
On function entry the return address is combined with a per-function random
cookie and stored in the stack frame. The integrity of this value is verified
before function return, and if this check fails, the program aborts. In this way
RETGUARD is an improved stack protector, since the cookies are per-function. The
verification routine is constructed such that the binary space immediately
before each ret instruction is padded with int03 instructions, which makes these
return instructions difficult to use in ROP gadgets. In the kernel, this has the
effect of removing approximately 50% of total ROP gadgets, and 15% of unique
ROP gadgets compared to the 6.3 release kernel. Function epilogues are
essentially gadget free, leaving only the polymorphic gadgets that result from
jumping into the instruction stream partway through other instructions. Work to
remove these gadgets will continue through other mechanisms.
- Refactor retguard to make adding additional arches easier.
Index: lib/CodeGen/CMakeLists.txt
--- lib/CodeGen/CMakeLists.txt.orig
+++ lib/CodeGen/CMakeLists.txt
@@ -124,6 +124,8 @@ add_llvm_library(LLVMCodeGen
RegUsageInfoCollector.cpp
RegUsageInfoPropagate.cpp
ResetMachineFunctionPass.cpp
+ ReturnProtectorLowering.cpp
+ ReturnProtectorPass.cpp
SafeStack.cpp
SafeStackColoring.cpp
SafeStackLayout.cpp

@@ -0,0 +1,99 @@
$OpenBSD: patch-lib_CodeGen_PrologEpilogInserter_cpp,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
- Add RETGUARD to clang for amd64. This security mechanism uses per-function
random cookies to protect access to function return instructions, with the
effect that the integrity of the return address is protected, and function
return instructions are harder to use in ROP gadgets.
On function entry the return address is combined with a per-function random
cookie and stored in the stack frame. The integrity of this value is verified
before function return, and if this check fails, the program aborts. In this way
RETGUARD is an improved stack protector, since the cookies are per-function. The
verification routine is constructed such that the binary space immediately
before each ret instruction is padded with int03 instructions, which makes these
return instructions difficult to use in ROP gadgets. In the kernel, this has the
effect of removing approximately 50% of total ROP gadgets, and 15% of unique
ROP gadgets compared to the 6.3 release kernel. Function epilogues are
essentially gadget free, leaving only the polymorphic gadgets that result from
jumping into the instruction stream partway through other instructions. Work to
remove these gadgets will continue through other mechanisms.
- Refactor retguard to make adding additional arches easier.
Index: lib/CodeGen/PrologEpilogInserter.cpp
--- lib/CodeGen/PrologEpilogInserter.cpp.orig
+++ lib/CodeGen/PrologEpilogInserter.cpp
@@ -176,7 +176,11 @@ bool PEI::runOnMachineFunction(MachineFunction &MF) {
const Function &F = MF.getFunction();
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
+ const ReturnProtectorLowering *RPL = TFI->getReturnProtector();
+ if (RPL)
+ RPL->setupReturnProtector(MF);
+
RS = TRI->requiresRegisterScavenging(MF) ? new RegScavenger() : nullptr;
FrameIndexVirtualScavenging = TRI->requiresFrameIndexScavenging(MF);
FrameIndexEliminationScavenging = (RS && !FrameIndexVirtualScavenging) ||
@@ -211,6 +215,10 @@ bool PEI::runOnMachineFunction(MachineFunction &MF) {
if (!F.hasFnAttribute(Attribute::Naked))
insertPrologEpilogCode(MF);
+ // Add Return Protectors if using them
+ if (RPL)
+ RPL->insertReturnProtectors(MF);
+
// Replace all MO_FrameIndex operands with physical register references
// and actual offsets.
//
@@ -301,7 +309,9 @@ void PEI::calculateCallFrameInfo(MachineFunction &MF)
/// Compute the sets of entry and return blocks for saving and restoring
/// callee-saved registers, and placing prolog and epilog code.
void PEI::calculateSaveRestoreBlocks(MachineFunction &MF) {
- const MachineFrameInfo &MFI = MF.getFrameInfo();
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
+ const ReturnProtectorLowering *RPL = TFI->getReturnProtector();
// Even when we do not change any CSR, we still want to insert the
// prologue and epilogue of the function.
@@ -317,7 +327,18 @@ void PEI::calculateSaveRestoreBlocks(MachineFunction &
// epilogue.
if (!RestoreBlock->succ_empty() || RestoreBlock->isReturnBlock())
RestoreBlocks.push_back(RestoreBlock);
- return;
+
+ // If we are adding return protectors ensure we can find a free register
+ if (RPL &&
+ !RPL->determineReturnProtectorRegister(MF, SaveBlocks, RestoreBlocks)) {
+ // Shrinkwrapping will prevent finding a free register
+ SaveBlocks.clear();
+ RestoreBlocks.clear();
+ MFI.setSavePoint(nullptr);
+ MFI.setRestorePoint(nullptr);
+ } else {
+ return;
+ }
}
// Save refs to entry and return blocks.
@@ -328,6 +349,9 @@ void PEI::calculateSaveRestoreBlocks(MachineFunction &
if (MBB.isReturnBlock())
RestoreBlocks.push_back(&MBB);
}
+
+ if (RPL)
+ RPL->determineReturnProtectorRegister(MF, SaveBlocks, RestoreBlocks);
}
static void assignCalleeSavedSpillSlots(MachineFunction &F,
@@ -349,6 +373,10 @@ static void assignCalleeSavedSpillSlots(MachineFunctio
const TargetFrameLowering *TFI = F.getSubtarget().getFrameLowering();
MachineFrameInfo &MFI = F.getFrameInfo();
+
+ if (TFI->getReturnProtector())
+ TFI->getReturnProtector()->saveReturnProtectorRegister(F, CSI);
+
if (!TFI->assignCalleeSavedSpillSlots(F, RegInfo, CSI)) {
// If target doesn't implement this, use generic code.

@@ -0,0 +1,240 @@
$OpenBSD: patch-lib_CodeGen_ReturnProtectorLowering_cpp,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
- Refactor retguard to make adding additional arches easier.
- Do not store the retguard cookie in frame in leaf functions if possible.
Makes things slightly faster and also improves security in these functions,
since the retguard cookie can't leak via the stack.
Index: lib/CodeGen/ReturnProtectorLowering.cpp
--- lib/CodeGen/ReturnProtectorLowering.cpp.orig
+++ lib/CodeGen/ReturnProtectorLowering.cpp
@@ -0,0 +1,229 @@
+//===- ReturnProtectorLowering.cpp - ---------------------------------------==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements common routines for return protector support.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/ReturnProtectorLowering.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetFrameLowering.h"
+#include "llvm/IR/Function.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+
+using namespace llvm;
+
+static void markUsedRegsInSuccessors(MachineBasicBlock &MBB,
+ SmallSet<unsigned, 16> &Used,
+ SmallSet<int, 24> &Visited) {
+ int BBNum = MBB.getNumber();
+ if (Visited.count(BBNum))
+ return;
+
+ // Mark all the registers used
+ for (auto &MBBI : MBB.instrs()) {
+ for (auto &MBBIOp : MBBI.operands()) {
+ if (MBBIOp.isReg())
+ Used.insert(MBBIOp.getReg());
+ }
+ }
+
+ // Mark this MBB as visited
+ Visited.insert(BBNum);
+ // Recurse over all successors
+ for (auto &SuccMBB : MBB.successors())
+ markUsedRegsInSuccessors(*SuccMBB, Used, Visited);
+}
+
+/// setupReturnProtector - Checks the function for ROP friendly return
+/// instructions and sets ReturnProtectorNeeded if found.
+void ReturnProtectorLowering::setupReturnProtector(MachineFunction &MF) const {
+ if (MF.getFunction().hasFnAttribute("ret-protector")) {
+ for (auto &MBB : MF) {
+ for (auto &T : MBB.terminators()) {
+ if (opcodeIsReturn(T.getOpcode())) {
+ MF.getFrameInfo().setReturnProtectorNeeded(true);
+ return;
+ }
+ }
+ }
+ }
+}
+
+/// saveReturnProtectorRegister - Allows the target to save the
+/// ReturnProtectorRegister in the CalleeSavedInfo vector if needed.
+void ReturnProtectorLowering::saveReturnProtectorRegister(
+ MachineFunction &MF, std::vector<CalleeSavedInfo> &CSI) const {
+ const MachineFrameInfo &MFI = MF.getFrameInfo();
+ if (!MFI.getReturnProtectorNeeded())
+ return;
+
+ if (!MFI.hasReturnProtectorRegister())
+ llvm_unreachable("Saving unset return protector register");
+
+ unsigned Reg = MFI.getReturnProtectorRegister();
+ if (MFI.getReturnProtectorNeedsStore())
+ CSI.push_back(CalleeSavedInfo(Reg));
+ else {
+ for (auto &MBB : MF) {
+ if (!MBB.isLiveIn(Reg))
+ MBB.addLiveIn(Reg);
+ }
+ }
+}
+
+/// determineReturnProtectorRegister - Find a register that can be used
+/// during function prologue / epilogue to store the return protector cookie.
+/// Returns false if a register is needed but could not be found,
+/// otherwise returns true.
+bool ReturnProtectorLowering::determineReturnProtectorRegister(
+ MachineFunction &MF, const SmallVector<MachineBasicBlock *, 4> &SaveBlocks,
+ const SmallVector<MachineBasicBlock *, 4> &RestoreBlocks) const {
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ if (!MFI.getReturnProtectorNeeded())
+ return true;
+
+ const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+
+ std::vector<unsigned> TempRegs;
+ fillTempRegisters(MF, TempRegs);
+
+ // For leaf functions, try to find a free register that is available
+ // in every BB, so we do not need to store it in the frame at all.
+ // We walk the entire function here because MFI.hasCalls() is unreliable.
+ bool hasCalls = false;
+ for (auto &MBB : MF) {
+ for (auto &MI : MBB) {
+ if (MI.isCall() && !MI.isReturn()) {
+ hasCalls = true;
+ break;
+ }
+ }
+ if (hasCalls)
+ break;
+ }
+
+ if (!hasCalls) {
+ SmallSet<unsigned, 16> LeafUsed;
+ SmallSet<int, 24> LeafVisited;
+ markUsedRegsInSuccessors(MF.front(), LeafUsed, LeafVisited);
+ for (unsigned Reg : TempRegs) {
+ bool canUse = true;
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
+ if (LeafUsed.count(*AI)) {
+ canUse = false;
+ break;
+ }
+ }
+ if (canUse) {
+ MFI.setReturnProtectorRegister(Reg);
+ MFI.setReturnProtectorNeedsStore(false);
+ return true;
+ }
+ }
+ }
+
+ // For non-leaf functions, we only need to search save / restore blocks
+ SmallSet<unsigned, 16> Used;
+ SmallSet<int, 24> Visited;
+
+ // CSR spills happen at the beginning of this block
+ // so we can mark it as visited because anything past it is safe
+ for (auto &SB : SaveBlocks)
+ Visited.insert(SB->getNumber());
+
+ // CSR Restores happen at the end of restore blocks, before any terminators,
+ // so we need to search restores for MBB terminators, and any successor BBs.
+ for (auto &RB : RestoreBlocks) {
+ for (auto &RBI : RB->terminators()) {
+ for (auto &RBIOp : RBI.operands()) {
+ if (RBIOp.isReg())
+ Used.insert(RBIOp.getReg());
+ }
+ }
+ for (auto &SuccMBB : RB->successors())
+ markUsedRegsInSuccessors(*SuccMBB, Used, Visited);
+ }
+
+ // Now we iterate from the front to find code paths that
+ // bypass save blocks and land on return blocks
+ markUsedRegsInSuccessors(MF.front(), Used, Visited);
+
+ // Now we have gathered all the regs used outside the frame save / restore,
+ // so we can see if we have a free reg to use for the retguard cookie.
+ for (unsigned Reg : TempRegs) {
+ bool canUse = true;
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
+ if (Used.count(*AI)) {
+ // Reg is used somewhere, so we cannot use it
+ canUse = false;
+ break;
+ }
+ }
+ if (canUse) {
+ MFI.setReturnProtectorRegister(Reg);
+ break;
+ }
+ }
+
+ return MFI.hasReturnProtectorRegister();
+}
+
+/// insertReturnProtectors - insert return protector instrumentation.
+void ReturnProtectorLowering::insertReturnProtectors(
+ MachineFunction &MF) const {
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+
+ if (!MFI.getReturnProtectorNeeded())
+ return;
+
+ if (!MFI.hasReturnProtectorRegister())
+ llvm_unreachable("Inconsistent return protector state.");
+
+ const Function &Fn = MF.getFunction();
+ const Module *M = Fn.getParent();
+ GlobalVariable *cookie =
+ dyn_cast_or_null<GlobalVariable>(M->getGlobalVariable(
+ Fn.getFnAttribute("ret-protector-cookie").getValueAsString(),
+ Type::getInt8PtrTy(M->getContext())));
+
+ if (!cookie)
+ llvm_unreachable("Function needs return protector but no cookie assigned");
+
+ unsigned Reg = MFI.getReturnProtectorRegister();
+
+ std::vector<MachineInstr *> returns;
+ for (auto &MBB : MF) {
+ if (MBB.isReturnBlock()) {
+ for (auto &MI : MBB.terminators()) {
+ if (opcodeIsReturn(MI.getOpcode())) {
+ returns.push_back(&MI);
+ if (!MBB.isLiveIn(Reg))
+ MBB.addLiveIn(Reg);
+ }
+ }
+ }
+ }
+
+ if (returns.empty())
+ return;
+
+ for (auto &MI : returns)
+ insertReturnProtectorEpilogue(MF, *MI, cookie);
+
+ insertReturnProtectorPrologue(MF, MF.front(), cookie);
+
+ if (!MF.front().isLiveIn(Reg))
+ MF.front().addLiveIn(Reg);
+}

@@ -0,0 +1,87 @@
$OpenBSD: patch-lib_CodeGen_ReturnProtectorPass_cpp,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
- Add RETGUARD to clang for amd64. This security mechanism uses per-function
random cookies to protect access to function return instructions, with the
effect that the integrity of the return address is protected, and function
return instructions are harder to use in ROP gadgets.
On function entry the return address is combined with a per-function random
cookie and stored in the stack frame. The integrity of this value is verified
before function return, and if this check fails, the program aborts. In this way
RETGUARD is an improved stack protector, since the cookies are per-function. The
verification routine is constructed such that the binary space immediately
before each ret instruction is padded with int03 instructions, which makes these
return instructions difficult to use in ROP gadgets. In the kernel, this has the
effect of removing approximately 50% of total ROP gadgets, and 15% of unique
ROP gadgets compared to the 6.3 release kernel. Function epilogues are
essentially gadget free, leaving only the polymorphic gadgets that result from
jumping into the instruction stream partway through other instructions. Work to
remove these gadgets will continue through other mechanisms.
- Put the new retguard symbols in their own section,
'.openbsd.randomdata.retguard', to make them easier to work with in the
kernel hibernate code.
Index: lib/CodeGen/ReturnProtectorPass.cpp
--- lib/CodeGen/ReturnProtectorPass.cpp.orig
+++ lib/CodeGen/ReturnProtectorPass.cpp
@@ -0,0 +1,60 @@
+//===- ReturnProtectorPass.cpp - Set up return protectors -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass sets up functions for return protectors.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "return-protector"
+
+STATISTIC(NumSymbols, "Counts number of cookie symbols added");
+
+namespace {
+ struct ReturnProtector : public FunctionPass {
+ static char ID;
+ ReturnProtector() : FunctionPass(ID) {}
+
+ bool runOnFunction(Function &F) override {
+ if (F.hasFnAttribute("ret-protector")) {
+ // Create a symbol for the cookie
+ Module *M = F.getParent();
+ std::hash<std::string> hasher;
+ std::string cookiename = "__retguard_" + std::to_string(hasher((M->getName() + F.getName()).str()) % 4000);
+ Type *cookietype = Type::getInt8PtrTy(M->getContext());
+ GlobalVariable *cookie = dyn_cast_or_null<GlobalVariable>(
+ M->getOrInsertGlobal(cookiename, cookietype));
+ cookie->setInitializer(Constant::getNullValue(cookietype));
+ cookie->setLinkage(GlobalVariable::WeakAnyLinkage);
+ cookie->setVisibility(GlobalValue::HiddenVisibility);
+ cookie->setSection(".openbsd.randomdata.retguard");
+ cookie->setExternallyInitialized(true);
+ F.addFnAttr("ret-protector-cookie", cookiename);
+ NumSymbols++;
+ }
+ return false;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ }
+ };
+}
+
+char ReturnProtector::ID = 0;
+INITIALIZE_PASS(ReturnProtector, "return-protector", "Return Protector Pass",
+ false, false)
+FunctionPass *llvm::createReturnProtectorPass() { return new ReturnProtector(); }
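
The cookie naming scheme above is easy to reproduce outside the pass; note
that the % 4000 caps the number of distinct cookie symbols per module, so
functions can share a cookie. A sketch (std::hash is implementation-defined,
so the exact number differs between standard libraries; the inputs here are
made up):

// cookie-name.cpp - mirrors ReturnProtectorPass's __retguard_<n> naming.
#include <cstdio>
#include <functional>
#include <string>

int main() {
  std::string mod = "libc.so.95.1", fn = "strlcpy"; // hypothetical inputs
  std::hash<std::string> hasher;
  std::string cookie =
      "__retguard_" + std::to_string(hasher(mod + fn) % 4000);
  printf("%s\n", cookie.c_str()); // e.g. __retguard_2187 (impl-defined)
}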

@@ -0,0 +1,21 @@
$OpenBSD: patch-lib_CodeGen_TargetLoweringBase_cpp,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
Restore setting the visibility of __guard_local to hidden for better
code generation. Use dyn_cast_or_null instead of a static cast to
solve the crashes in the previous code.
Index: lib/CodeGen/TargetLoweringBase.cpp
--- lib/CodeGen/TargetLoweringBase.cpp.orig
+++ lib/CodeGen/TargetLoweringBase.cpp
@@ -1636,7 +1636,10 @@ Value *TargetLoweringBase::getIRStackGuard(IRBuilder<>
if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
PointerType *PtrTy = Type::getInt8PtrTy(M.getContext());
- return M.getOrInsertGlobal("__guard_local", PtrTy);
+ Constant *C = M.getOrInsertGlobal("__guard_local", PtrTy);
+ if (GlobalVariable *G = dyn_cast_or_null<GlobalVariable>(C))
+ G->setVisibility(GlobalValue::HiddenVisibility);
+ return C;
}
return nullptr;
}
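
The crash/fix pattern described above, in miniature (a plain C++ analogy,
with dynamic_cast playing the role of LLVM's dyn_cast_or_null; the type names
are mock-ups, not LLVM's):

// checked-cast.cpp - getOrInsertGlobal() may hand back a ConstantExpr cast
// instead of a GlobalVariable; an unchecked cast then misbehaves, while a
// checked cast fails gracefully and the visibility tweak is simply skipped.
#include <cstdio>

struct Constant { virtual ~Constant() = default; };
struct GlobalVariable : Constant {
  void setHiddenVisibility() { puts("visibility: hidden"); }
};
struct ConstantExpr : Constant {}; // what comes back on a type mismatch

static void setGuardLocalHidden(Constant *C) {
  if (auto *G = dynamic_cast<GlobalVariable *>(C)) // dyn_cast_or_null analogue
    G->setHiddenVisibility();
}

int main() {
  GlobalVariable GV;
  ConstantExpr CE;
  setGuardLocalHidden(&GV); // prints: visibility: hidden
  setGuardLocalHidden(&CE); // no crash: checked cast yields nullptr
}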

@@ -0,0 +1,32 @@
$OpenBSD: patch-lib_CodeGen_TargetPassConfig_cpp,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
Add RETGUARD to clang for amd64. This security mechanism uses per-function
random cookies to protect access to function return instructions, with the
effect that the integrity of the return address is protected, and function
return instructions are harder to use in ROP gadgets.
On function entry the return address is combined with a per-function random
cookie and stored in the stack frame. The integrity of this value is verified
before function return, and if this check fails, the program aborts. In this way
RETGUARD is an improved stack protector, since the cookies are per-function. The
verification routine is constructed such that the binary space immediately
before each ret instruction is padded with int03 instructions, which makes these
return instructions difficult to use in ROP gadgets. In the kernel, this has the
effect of removing approximately 50% of total ROP gadgets, and 15% of unique
ROP gadgets compared to the 6.3 release kernel. Function epilogues are
essentially gadget free, leaving only the polymorphic gadgets that result from
jumping into the instruction stream partway through other instructions. Work to
remove these gadgets will continue through other mechanisms.
Index: lib/CodeGen/TargetPassConfig.cpp
--- lib/CodeGen/TargetPassConfig.cpp.orig
+++ lib/CodeGen/TargetPassConfig.cpp
@@ -737,6 +737,8 @@ void TargetPassConfig::addISelPrepare() {
if (requiresCodeGenSCCOrder())
addPass(new DummyCGSCCPass);
+ addPass(createReturnProtectorPass());
+
// Add both the safe stack and the stack protection passes: each of them will
// only protect functions that have corresponding attributes.
addPass(createSafeStackPass());

@@ -0,0 +1,16 @@
$OpenBSD: patch-lib_MC_MCAsmInfoELF_cpp,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
Do not use ident.
Index: lib/MC/MCAsmInfoELF.cpp
--- lib/MC/MCAsmInfoELF.cpp.orig
+++ lib/MC/MCAsmInfoELF.cpp
@@ -28,7 +28,7 @@ MCSection *MCAsmInfoELF::getNonexecutableStackSection(
}
MCAsmInfoELF::MCAsmInfoELF() {
- HasIdentDirective = true;
+ HasIdentDirective = false;
WeakRefDirective = "\t.weak\t";
PrivateGlobalPrefix = ".L";
PrivateLabelPrefix = ".L";

@@ -0,0 +1,19 @@
$OpenBSD: patch-lib_MC_MCELFStreamer_cpp,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
Index: lib/MC/MCELFStreamer.cpp
--- lib/MC/MCELFStreamer.cpp.orig
+++ lib/MC/MCELFStreamer.cpp
@@ -93,8 +93,11 @@ void MCELFStreamer::InitSections(bool NoExecStack) {
SwitchSection(Ctx.getObjectFileInfo()->getTextSection());
EmitCodeAlignment(4);
- if (NoExecStack)
- SwitchSection(Ctx.getAsmInfo()->getNonexecutableStackSection(Ctx));
+ if (NoExecStack) {
+ MCSection *s = Ctx.getAsmInfo()->getNonexecutableStackSection(Ctx);
+ if (s)
+ SwitchSection(s);
+ }
}
void MCELFStreamer::EmitLabel(MCSymbol *S, SMLoc Loc) {

@@ -0,0 +1,24 @@
$OpenBSD: patch-lib_MC_MCParser_AsmParser_cpp,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
make clang include a FILE symbol for .(s|S) files
This is mostly needed by syspatch at the moment, to be able to re-link
objects in the same order as the original libraries were linked, relying
on readelf(1) output; without this, .(s|S) assembly files were not
getting a file directive.
Index: lib/MC/MCParser/AsmParser.cpp
--- lib/MC/MCParser/AsmParser.cpp.orig
+++ lib/MC/MCParser/AsmParser.cpp
@@ -878,6 +878,10 @@ bool AsmParser::Run(bool NoInitialTextSection, bool No
(void)InsertResult;
}
+ StringRef Filename = getContext().getMainFileName();
+ if (!Filename.empty() && (Filename.compare(StringRef("-")) != 0))
+ Out.EmitFileDirective(Filename);
+
// While we have input, parse each statement.
while (Lexer.isNot(AsmToken::Eof)) {
ParseStatementInfo Info(&AsmStrRewrites);

@@ -0,0 +1,27 @@
$OpenBSD: patch-lib_Target_AArch64_AArch64AsmPrinter_cpp,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
Add retguard for arm64.
Index: lib/Target/AArch64/AArch64AsmPrinter.cpp
--- lib/Target/AArch64/AArch64AsmPrinter.cpp.orig
+++ lib/Target/AArch64/AArch64AsmPrinter.cpp
@@ -986,6 +986,19 @@ void AArch64AsmPrinter::EmitInstruction(const MachineI
case AArch64::SEH_EpilogEnd:
TS->EmitARM64WinCFIEpilogEnd();
return;
+
+ case AArch64::RETGUARD_JMP_TRAP:
+ {
+ MCSymbol *RGSuccSym = OutContext.createTempSymbol();
+ /* Compare and branch */
+ EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::CBZX)
+ .addReg(MI->getOperand(0).getReg())
+ .addExpr(MCSymbolRefExpr::create(RGSuccSym, OutContext)));
+ EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BRK).addImm(1));
+ OutStreamer->EmitLabel(RGSuccSym);
+ return;
+ }
+
}
// Finally, do the automated lowerings for everything else.

@@ -0,0 +1,55 @@
$OpenBSD: patch-lib_Target_AArch64_AArch64FrameLowering_cpp,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
Add retguard for arm64.
Index: lib/Target/AArch64/AArch64FrameLowering.cpp
--- lib/Target/AArch64/AArch64FrameLowering.cpp.orig
+++ lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -95,6 +95,7 @@
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64RegisterInfo.h"
+#include "AArch64ReturnProtectorLowering.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
@@ -1975,6 +1976,30 @@ void AArch64FrameLowering::determineCalleeSaves(Machin
? RegInfo->getBaseRegister()
: (unsigned)AArch64::NoRegister;
+ unsigned SpillEstimate = SavedRegs.count();
+ for (unsigned i = 0; CSRegs[i]; ++i) {
+ unsigned Reg = CSRegs[i];
+ unsigned PairedReg = CSRegs[i ^ 1];
+ if (Reg == BasePointerReg)
+ SpillEstimate++;
+ if (produceCompactUnwindFrame(MF) && !SavedRegs.test(PairedReg))
+ SpillEstimate++;
+ }
+
+ if (MFI.hasReturnProtectorRegister() && MFI.getReturnProtectorNeedsStore()) {
+ SavedRegs.set(MFI.getReturnProtectorRegister());
+ SpillEstimate++;
+ }
+
+ SpillEstimate += 2; // Conservatively include FP+LR in the estimate
+ unsigned StackEstimate = MFI.estimateStackSize(MF) + 8 * SpillEstimate;
+
+ // The frame record needs to be created by saving the appropriate registers
+ if (hasFP(MF) || windowsRequiresStackProbe(MF, StackEstimate)) {
+ SavedRegs.set(AArch64::FP);
+ SavedRegs.set(AArch64::LR);
+ }
+
unsigned ExtraCSSpill = 0;
// Figure out which callee-saved registers to save/restore.
for (unsigned i = 0; CSRegs[i]; ++i) {
@@ -2156,4 +2181,8 @@ unsigned AArch64FrameLowering::getWinEHFuncletFrameSiz
// This is the amount of stack a funclet needs to allocate.
return alignTo(CSSize + MF.getFrameInfo().getMaxCallFrameSize(),
getStackAlignment());
+}
+
+const ReturnProtectorLowering *AArch64FrameLowering::getReturnProtector() const {
+ return &RPL;
}

@@ -0,0 +1,37 @@
$OpenBSD: patch-lib_Target_AArch64_AArch64FrameLowering_h,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
Add retguard for arm64.
Index: lib/Target/AArch64/AArch64FrameLowering.h
--- lib/Target/AArch64/AArch64FrameLowering.h.orig
+++ lib/Target/AArch64/AArch64FrameLowering.h
@@ -14,15 +14,19 @@
#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64FRAMELOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64FRAMELOWERING_H
+#include "AArch64ReturnProtectorLowering.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
namespace llvm {
class AArch64FrameLowering : public TargetFrameLowering {
public:
+
+ const AArch64ReturnProtectorLowering RPL;
+
explicit AArch64FrameLowering()
: TargetFrameLowering(StackGrowsDown, 16, 0, 16,
- true /*StackRealignable*/) {}
+ true /*StackRealignable*/), RPL() {}
void emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI) const;
@@ -35,6 +39,8 @@ class AArch64FrameLowering : public TargetFrameLowerin
/// the function.
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+
+ const ReturnProtectorLowering *getReturnProtector() const override;
bool canUseAsPrologue(const MachineBasicBlock &MBB) const override;

@@ -0,0 +1,19 @@
$OpenBSD: patch-lib_Target_AArch64_AArch64ISelLowering_cpp,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
Disable the Load Stack Guard for OpenBSD on AArch64. We don't use it
on any other platform and it causes a segfault in combination with our
IR Stack Guard.
Index: lib/Target/AArch64/AArch64ISelLowering.cpp
--- lib/Target/AArch64/AArch64ISelLowering.cpp.orig
+++ lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -11559,7 +11559,8 @@ void AArch64TargetLowering::ReplaceNodeResults(
}
bool AArch64TargetLowering::useLoadStackGuardNode() const {
- if (Subtarget->isTargetAndroid() || Subtarget->isTargetFuchsia())
+ if (Subtarget->isTargetAndroid() || Subtarget->isTargetFuchsia() ||
+ Subtarget->isTargetOpenBSD())
return TargetLowering::useLoadStackGuardNode();
return true;
}

@@ -0,0 +1,20 @@
$OpenBSD: patch-lib_Target_AArch64_AArch64InstrInfo_td,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
Add retguard for arm64.
Index: lib/Target/AArch64/AArch64InstrInfo.td
--- lib/Target/AArch64/AArch64InstrInfo.td.orig
+++ lib/Target/AArch64/AArch64InstrInfo.td
@@ -491,6 +491,12 @@ def ADDlowTLS
} // isReMaterializable, isCodeGenOnly
+//===----------------------------------------------------------------------===//
+// Pseudo instruction used by retguard
+let isCodeGenOnly = 1, hasNoSchedulingInfo = 1 in {
+ def RETGUARD_JMP_TRAP: Pseudo<(outs), (ins GPR64:$reg), []>;
+}
+
def : Pat<(AArch64LOADgot tglobaltlsaddr:$addr),
(LOADgot tglobaltlsaddr:$addr)>;

@@ -0,0 +1,141 @@
$OpenBSD: patch-lib_Target_AArch64_AArch64ReturnProtectorLowering_cpp,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
- Add retguard for arm64.
- Do not store the retguard cookie in frame in leaf functions if possible.
Makes things slightly faster and also improves security in these functions,
since the retguard cookie can't leak via the stack.
Index: lib/Target/AArch64/AArch64ReturnProtectorLowering.cpp
--- lib/Target/AArch64/AArch64ReturnProtectorLowering.cpp.orig
+++ lib/Target/AArch64/AArch64ReturnProtectorLowering.cpp
@@ -0,0 +1,130 @@
+//===-- AArch64ReturnProtectorLowering.cpp --------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the AArch64 implementation of ReturnProtectorLowering
+// class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AArch64InstrInfo.h"
+#include "AArch64MachineFunctionInfo.h"
+#include "AArch64RegisterInfo.h"
+#include "AArch64ReturnProtectorLowering.h"
+#include "AArch64Subtarget.h"
+#include "AArch64TargetMachine.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/Function.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetOptions.h"
+#include <cstdlib>
+
+using namespace llvm;
+
+void AArch64ReturnProtectorLowering::insertReturnProtectorPrologue(
+ MachineFunction &MF, MachineBasicBlock &MBB, GlobalVariable *cookie) const {
+
+ MachineBasicBlock::instr_iterator MI = MBB.instr_begin();
+ DebugLoc MBBDL = MBB.findDebugLoc(MI);
+ const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
+ unsigned REG = MF.getFrameInfo().getReturnProtectorRegister();
+
+ BuildMI(MBB, MI, MBBDL, TII->get(AArch64::ADRP), REG)
+ .addGlobalAddress(cookie, 0, AArch64II::MO_PAGE);
+ BuildMI(MBB, MI, MBBDL, TII->get(AArch64::LDRXui), REG)
+ .addReg(REG)
+ .addGlobalAddress(cookie, 0, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
+ BuildMI(MBB, MI, MBBDL, TII->get(AArch64::EORXrr), REG)
+ .addReg(REG)
+ .addReg(AArch64::LR);
+}
+
+void AArch64ReturnProtectorLowering::insertReturnProtectorEpilogue(
+ MachineFunction &MF, MachineInstr &MI, GlobalVariable *cookie) const {
+
+ MachineBasicBlock &MBB = *MI.getParent();
+ DebugLoc MBBDL = MI.getDebugLoc();
+ const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
+ unsigned REG = MF.getFrameInfo().getReturnProtectorRegister();
+
+ MBB.addLiveIn(AArch64::X9);
+ // REG holds the cookie we calculated in prologue. We use X9 as a
+ // scratch reg to pull the random data. XOR REG with LR should yield
+ // the random data again. Compare REG with X9 to check.
+ BuildMI(MBB, MI, MBBDL, TII->get(AArch64::EORXrr), REG)
+ .addReg(REG)
+ .addReg(AArch64::LR);
+ BuildMI(MBB, MI, MBBDL, TII->get(AArch64::ADRP), AArch64::X9)
+ .addGlobalAddress(cookie, 0, AArch64II::MO_PAGE);
+ BuildMI(MBB, MI, MBBDL, TII->get(AArch64::LDRXui), AArch64::X9)
+ .addReg(AArch64::X9)
+ .addGlobalAddress(cookie, 0, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
+ BuildMI(MBB, MI, MBBDL, TII->get(AArch64::SUBSXrr), REG)
+ .addReg(REG)
+ .addReg(AArch64::X9);
+ BuildMI(MBB, MI, MBBDL, TII->get(AArch64::RETGUARD_JMP_TRAP)).addReg(REG);
+}
+
+bool AArch64ReturnProtectorLowering::opcodeIsReturn(unsigned opcode) const {
+ switch (opcode) {
+ case AArch64::RET:
+ case AArch64::RET_ReallyLR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void AArch64ReturnProtectorLowering::fillTempRegisters(
+ MachineFunction &MF, std::vector<unsigned> &TempRegs) const {
+
+ TempRegs.push_back(AArch64::X15);
+ TempRegs.push_back(AArch64::X14);
+ TempRegs.push_back(AArch64::X13);
+ TempRegs.push_back(AArch64::X12);
+ TempRegs.push_back(AArch64::X11);
+ TempRegs.push_back(AArch64::X10);
+}
+
+void AArch64ReturnProtectorLowering::saveReturnProtectorRegister(
+ MachineFunction &MF, std::vector<CalleeSavedInfo> &CSI) const {
+
+ const MachineFrameInfo &MFI = MF.getFrameInfo();
+ if (!MFI.getReturnProtectorNeeded())
+ return;
+
+ if (!MFI.hasReturnProtectorRegister())
+ llvm_unreachable("Saving unset return protector register");
+
+ unsigned Reg = MFI.getReturnProtectorRegister();
+ if (!MFI.getReturnProtectorNeedsStore()) {
+ for (auto &MBB : MF) {
+ if (!MBB.isLiveIn(Reg))
+ MBB.addLiveIn(Reg);
+ }
+ return;
+ }
+
+ // Put the temp reg after FP and LR to avoid layout issues
+ // with the D registers later.
+ bool added = false;
+ for (auto CSRI = CSI.begin(); CSRI != CSI.end(); CSRI++) {
+ if (CSRI->getReg() != AArch64::FP && CSRI->getReg() != AArch64::LR) {
+ CSI.insert(CSRI, CalleeSavedInfo(MFI.getReturnProtectorRegister()));
+ added = true;
+ break;
+ }
+ }
+ if (!added)
+ CSI.push_back(CalleeSavedInfo(MFI.getReturnProtectorRegister()));
+}
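
A minimal standalone sketch (not part of the patch; the cookie and return
address values below are made up) of the XOR round trip the prologue and
epilogue above implement:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint64_t cookie = 0x243f6a8885a308d3; // per-function random value (made up)
    uint64_t lr     = 0xffffaaaabbbb1234; // hypothetical return address in LR
    uint64_t reg    = cookie ^ lr;        // prologue: ADRP/LDR cookie, EOR with LR
    assert((reg ^ lr) == cookie);         // epilogue: EOR again, SUBS sees zero
    uint64_t smashed = lr + 8;            // overwritten return address
    assert((reg ^ smashed) != cookie);    // SUBS is non-zero, RETGUARD_JMP_TRAP fires
    return 0;
  }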

View File

@ -0,0 +1,63 @@
$OpenBSD: patch-lib_Target_AArch64_AArch64ReturnProtectorLowering_h,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
- Add retguard for arm64.
- Do not store the retguard cookie in the frame in leaf functions if possible.
Makes things slightly faster and also improves security in these functions,
since the retguard cookie can't leak via the stack.
Index: lib/Target/AArch64/AArch64ReturnProtectorLowering.h
--- lib/Target/AArch64/AArch64ReturnProtectorLowering.h.orig
+++ lib/Target/AArch64/AArch64ReturnProtectorLowering.h
@@ -0,0 +1,52 @@
+//===-- AArch64ReturnProtectorLowering.h - --------------------- -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the AArch64 implementation of ReturnProtectorLowering
+// class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64RETURNPROTECTORLOWERING_H
+#define LLVM_LIB_TARGET_AARCH64_AARCH64RETURNPROTECTORLOWERING_H
+
+#include "llvm/CodeGen/ReturnProtectorLowering.h"
+
+namespace llvm {
+
+class AArch64ReturnProtectorLowering : public ReturnProtectorLowering {
+public:
+ /// insertReturnProtectorPrologue/Epilogue - insert return protector
+ /// instrumentation in prologue or epilogue.
+ virtual void
+ insertReturnProtectorPrologue(MachineFunction &MF, MachineBasicBlock &MBB,
+ GlobalVariable *cookie) const override;
+ virtual void
+ insertReturnProtectorEpilogue(MachineFunction &MF, MachineInstr &MI,
+ GlobalVariable *cookie) const override;
+
+ /// opcodeIsReturn - Return true if the given opcode is a return
+ /// instruction needing return protection, false otherwise.
+ virtual bool opcodeIsReturn(unsigned opcode) const override;
+
+ /// fillTempRegisters - Fill the list of available temp registers we can
+ /// use as a return protector register.
+ virtual void
+ fillTempRegisters(MachineFunction &MF,
+ std::vector<unsigned> &TempRegs) const override;
+
+ /// saveReturnProtectorRegister - Allows the target to save the
+ /// CalculationRegister in the CalleeSavedInfo vector if needed.
+ virtual void
+ saveReturnProtectorRegister(MachineFunction &MF,
+ std::vector<CalleeSavedInfo> &CSI) const override;
+};
+
+} // namespace llvm
+
+#endif

View File

@ -0,0 +1,17 @@
$OpenBSD: patch-lib_Target_AArch64_AArch64Subtarget_h,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
Disable the Load Stack Guard for OpenBSD on AArch64. We don't use it
on any other platform and it causes a segfault in combination with our
IR Stack Guard.
Index: lib/Target/AArch64/AArch64Subtarget.h
--- lib/Target/AArch64/AArch64Subtarget.h.orig
+++ lib/Target/AArch64/AArch64Subtarget.h
@@ -371,6 +371,7 @@ class AArch64Subtarget final : public AArch64GenSubtar
bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }
bool isTargetIOS() const { return TargetTriple.isiOS(); }
bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
+ bool isTargetOpenBSD() const { return TargetTriple.isOSOpenBSD(); }
bool isTargetWindows() const { return TargetTriple.isOSWindows(); }
bool isTargetAndroid() const { return TargetTriple.isAndroid(); }
bool isTargetFuchsia() const { return TargetTriple.isOSFuchsia(); }

View File

@ -0,0 +1,15 @@
$OpenBSD: patch-lib_Target_AArch64_CMakeLists_txt,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
Add retguard for arm64.
Index: lib/Target/AArch64/CMakeLists.txt
--- lib/Target/AArch64/CMakeLists.txt.orig
+++ lib/Target/AArch64/CMakeLists.txt
@@ -51,6 +51,7 @@ add_llvm_target(AArch64CodeGen
AArch64PBQPRegAlloc.cpp
AArch64RegisterBankInfo.cpp
AArch64RegisterInfo.cpp
+ AArch64ReturnProtectorLowering.cpp
AArch64SelectionDAGInfo.cpp
AArch64SpeculationHardening.cpp
AArch64StorePairSuppress.cpp

View File

@ -0,0 +1,268 @@
$OpenBSD: patch-lib_Target_Mips_AsmParser_MipsAsmParser_cpp,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
- Fix a bug in memory operand handling. If a load or store uses a symbol
as a memory operand, the assembler generates incorrect relocations in
PIC mode. As a simple fix, expand the instruction into an address load
sequence, which works, followed by the actual memory instruction.
Note that the generated sequence is not always optimal. If the symbol
has a small offset, the offset could be fused with the memory
instruction. The fix does not achieve that, however. A symbol offset
adds an extra instruction.
- Implement SGE pseudo-instructions. Needed when building libcrypto.
- Implement .cplocal directive. Needed when building libcrypto.
Index: lib/Target/Mips/AsmParser/MipsAsmParser.cpp
--- lib/Target/Mips/AsmParser/MipsAsmParser.cpp.orig
+++ lib/Target/Mips/AsmParser/MipsAsmParser.cpp
@@ -145,6 +145,7 @@ class MipsAsmParser : public MCTargetAsmParser {
bool IsPicEnabled;
bool IsCpRestoreSet;
int CpRestoreOffset;
+ unsigned GPRegister;
unsigned CpSaveLocation;
/// If true, then CpSaveLocation is a register, otherwise it's an offset.
bool CpSaveLocationIsRegister;
@@ -307,6 +308,11 @@ class MipsAsmParser : public MCTargetAsmParser {
bool expandSeqI(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
+ bool expandSGE(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI);
+ bool expandSGEImm(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI);
+
bool expandMXTRAlias(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
const MCSubtargetInfo *STI);
@@ -321,6 +327,7 @@ class MipsAsmParser : public MCTargetAsmParser {
bool parseSetFeature(uint64_t Feature);
bool isPicAndNotNxxAbi(); // Used by .cpload, .cprestore, and .cpsetup.
bool parseDirectiveCpLoad(SMLoc Loc);
+ bool parseDirectiveCpLocal(SMLoc Loc);
bool parseDirectiveCpRestore(SMLoc Loc);
bool parseDirectiveCPSetup();
bool parseDirectiveCPReturn();
@@ -514,6 +521,7 @@ class MipsAsmParser : public MCTargetAsmParser {
IsCpRestoreSet = false;
CpRestoreOffset = -1;
+ GPRegister = ABI.GetGlobalPtr();
const Triple &TheTriple = sti.getTargetTriple();
IsLittleEndian = TheTriple.isLittleEndian();
@@ -2054,7 +2062,7 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, S
MipsMCExpr::create(MipsMCExpr::MEK_GOT_DISP, JalExpr, getContext());
TOut.emitRRX(ABI.ArePtrs64bit() ? Mips::LD : Mips::LW, Mips::T9,
- Mips::GP, MCOperand::createExpr(GotDispRelocExpr), IDLoc,
+ GPRegister, MCOperand::createExpr(GotDispRelocExpr), IDLoc,
STI);
}
} else {
@@ -2065,7 +2073,8 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, S
const MCExpr *Call16RelocExpr =
MipsMCExpr::create(MipsMCExpr::MEK_GOT_CALL, JalExpr, getContext());
- TOut.emitRRX(ABI.ArePtrs64bit() ? Mips::LD : Mips::LW, Mips::T9, Mips::GP,
+ TOut.emitRRX(ABI.ArePtrs64bit() ? Mips::LD : Mips::LW, Mips::T9,
+ GPRegister,
MCOperand::createExpr(Call16RelocExpr), IDLoc, STI);
}
@@ -2482,6 +2491,14 @@ MipsAsmParser::tryExpandInstruction(MCInst &Inst, SMLo
case Mips::NORImm:
case Mips::NORImm64:
return expandAliasImmediate(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
+ case Mips::SGE:
+ case Mips::SGEU:
+ return expandSGE(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
+ case Mips::SGEImm:
+ case Mips::SGEImm64:
+ case Mips::SGEUImm:
+ case Mips::SGEUImm64:
+ return expandSGEImm(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
case Mips::SLTImm64:
if (isInt<16>(Inst.getOperand(2).getImm())) {
Inst.setOpcode(Mips::SLTi64);
@@ -2876,7 +2893,7 @@ bool MipsAsmParser::loadAndAddSymbolAddress(const MCEx
ELF::STB_LOCAL))) {
const MCExpr *CallExpr =
MipsMCExpr::create(MipsMCExpr::MEK_GOT_CALL, SymExpr, getContext());
- TOut.emitRRX(Mips::LW, DstReg, ABI.GetGlobalPtr(),
+ TOut.emitRRX(Mips::LW, DstReg, GPRegister,
MCOperand::createExpr(CallExpr), IDLoc, STI);
return false;
}
@@ -2916,7 +2933,7 @@ bool MipsAsmParser::loadAndAddSymbolAddress(const MCEx
TmpReg = ATReg;
}
- TOut.emitRRX(Mips::LW, TmpReg, ABI.GetGlobalPtr(),
+ TOut.emitRRX(Mips::LW, TmpReg, GPRegister,
MCOperand::createExpr(GotExpr), IDLoc, STI);
if (LoExpr)
@@ -2952,7 +2969,7 @@ bool MipsAsmParser::loadAndAddSymbolAddress(const MCEx
ELF::STB_LOCAL))) {
const MCExpr *CallExpr =
MipsMCExpr::create(MipsMCExpr::MEK_GOT_CALL, SymExpr, getContext());
- TOut.emitRRX(Mips::LD, DstReg, ABI.GetGlobalPtr(),
+ TOut.emitRRX(Mips::LD, DstReg, GPRegister,
MCOperand::createExpr(CallExpr), IDLoc, STI);
return false;
}
@@ -2995,7 +3012,7 @@ bool MipsAsmParser::loadAndAddSymbolAddress(const MCEx
TmpReg = ATReg;
}
- TOut.emitRRX(Mips::LD, TmpReg, ABI.GetGlobalPtr(),
+ TOut.emitRRX(Mips::LD, TmpReg, GPRegister,
MCOperand::createExpr(GotExpr), IDLoc, STI);
if (LoExpr)
@@ -3226,10 +3243,10 @@ bool MipsAsmParser::emitPartialAddress(MipsTargetStrea
MipsMCExpr::create(MipsMCExpr::MEK_GOT, GotSym, getContext());
if(isABI_O32() || isABI_N32()) {
- TOut.emitRRX(Mips::LW, ATReg, Mips::GP, MCOperand::createExpr(GotExpr),
+ TOut.emitRRX(Mips::LW, ATReg, GPRegister, MCOperand::createExpr(GotExpr),
IDLoc, STI);
} else { //isABI_N64()
- TOut.emitRRX(Mips::LD, ATReg, Mips::GP, MCOperand::createExpr(GotExpr),
+ TOut.emitRRX(Mips::LD, ATReg, GPRegister, MCOperand::createExpr(GotExpr),
IDLoc, STI);
}
} else { //!IsPicEnabled
@@ -3605,6 +3622,10 @@ void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc
TOut.emitRRR(isGP64bit() ? Mips::DADDu : Mips::ADDu, TmpReg, TmpReg,
BaseReg, IDLoc, STI);
TOut.emitRRI(Inst.getOpcode(), DstReg, TmpReg, LoOffset, IDLoc, STI);
+ } else if (inPicMode()) {
+ expandLoadAddress(TmpReg, Mips::NoRegister, OffsetOp, !ABI.ArePtrs64bit(),
+ IDLoc, Out, STI);
+ TOut.emitRRI(Inst.getOpcode(), DstReg, TmpReg, 0, IDLoc, STI);
} else {
assert(OffsetOp.isExpr() && "expected expression operand kind");
const MCExpr *ExprOffset = OffsetOp.getExpr();
@@ -4934,6 +4955,72 @@ bool MipsAsmParser::expandSeqI(MCInst &Inst, SMLoc IDL
return false;
}
+bool MipsAsmParser::expandSGE(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
+ unsigned DReg = Inst.getOperand(0).getReg();
+ unsigned SReg = Inst.getOperand(1).getReg();
+ unsigned TReg = Inst.getOperand(2).getReg();
+ unsigned OpCode;
+
+ warnIfNoMacro(IDLoc);
+
+ /* "$sr >= $tr" is equivalent to "not ($sr < $tr)". */
+ switch (Inst.getOpcode()) {
+ case Mips::SGE:
+ OpCode = Mips::SLT;
+ break;
+ case Mips::SGEU:
+ OpCode = Mips::SLTu;
+ break;
+ default:
+ llvm_unreachable("unexpected 'sge' opcode");
+ }
+ TOut.emitRRR(OpCode, DReg, SReg, TReg, IDLoc, STI);
+ TOut.emitRRI(Mips::XORi, DReg, DReg, 1, IDLoc, STI);
+
+ return false;
+}
+
+bool MipsAsmParser::expandSGEImm(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
+ const MCSubtargetInfo *STI) {
+ MipsTargetStreamer &TOut = getTargetStreamer();
+ unsigned DReg = Inst.getOperand(0).getReg();
+ unsigned SReg = Inst.getOperand(1).getReg();
+ int64_t ImmVal = Inst.getOperand(2).getImm();
+ unsigned OpCode, OpiCode;
+
+ warnIfNoMacro(IDLoc);
+
+ /* "$sr >= $imm" is equivalent to "not ($sr < $imm)". */
+ switch (Inst.getOpcode()) {
+ case Mips::SGEImm:
+ case Mips::SGEImm64:
+ OpCode = Mips::SLT;
+ OpiCode = Mips::SLTi;
+ break;
+ case Mips::SGEUImm:
+ case Mips::SGEUImm64:
+ OpCode = Mips::SLTu;
+ OpiCode = Mips::SLTiu;
+ break;
+ default:
+ llvm_unreachable("unexpected 'sge' opcode with immediate");
+ }
+
+ if (isInt<16>(ImmVal)) {
+ TOut.emitRRI(OpiCode, DReg, SReg, ImmVal, IDLoc, STI);
+ } else {
+ if (loadImmediate(ImmVal, DReg, Mips::NoRegister, isInt<32>(ImmVal), false,
+ IDLoc, Out, STI))
+ return true;
+ TOut.emitRRR(OpCode, DReg, SReg, DReg, IDLoc, STI);
+ }
+ TOut.emitRRI(Mips::XORi, DReg, DReg, 1, IDLoc, STI);
+
+ return false;
+}
+
// Map the DSP accumulator and control register to the corresponding gpr
// operand. Unlike the other alias, the m(f|t)t(lo|hi|acx) instructions
// do not map the DSP registers contigously to gpr registers.
@@ -7044,6 +7131,36 @@ bool MipsAsmParser::parseDirectiveCpLoad(SMLoc Loc) {
return false;
}
+bool MipsAsmParser::parseDirectiveCpLocal(SMLoc Loc) {
+ if (!isABI_N32() && !isABI_N64()) {
+ reportParseError(".cplocal is allowed only in N32 or N64 mode");
+ return false;
+ }
+
+ SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Reg;
+ OperandMatchResultTy ResTy = parseAnyRegister(Reg);
+ if (ResTy == MatchOperand_NoMatch || ResTy == MatchOperand_ParseFail) {
+ reportParseError("expected register containing function address");
+ return false;
+ }
+
+ MipsOperand &RegOpnd = static_cast<MipsOperand &>(*Reg[0]);
+ if (!RegOpnd.isGPRAsmReg()) {
+ reportParseError(RegOpnd.getStartLoc(), "invalid register");
+ return false;
+ }
+
+ // If this is not the end of the statement, report an error.
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ reportParseError("unexpected token, expected end of statement");
+ return false;
+ }
+
+ GPRegister = RegOpnd.getGPR32Reg();
+ getTargetStreamer().setGPReg(GPRegister);
+ return false;
+}
+
bool MipsAsmParser::parseDirectiveCpRestore(SMLoc Loc) {
MCAsmParser &Parser = getParser();
@@ -7888,6 +8005,10 @@ bool MipsAsmParser::ParseDirective(AsmToken DirectiveI
if (IDVal == ".cpload") {
parseDirectiveCpLoad(DirectiveID.getLoc());
+ return false;
+ }
+ if (IDVal == ".cplocal") {
+ parseDirectiveCpLocal(DirectiveID.getLoc());
return false;
}
if (IDVal == ".cprestore") {
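
The sge/sgeu expansion above relies on a simple identity; here is a short
host-side sketch (hypothetical values, C++ standing in for the emitted
slt/xori pair):

  #include <cassert>
  #include <cstdint>

  // sge $d, $s, $t expands to: slt $d, $s, $t; xori $d, $d, 1
  uint32_t sge(int32_t s, int32_t t) {
    uint32_t d = (s < t) ? 1 : 0; // slt $d, $s, $t
    d ^= 1;                       // xori: ($s >= $t) == !($s < $t)
    return d;
  }

  int main() {
    assert(sge(5, 3) == 1 && sge(3, 5) == 0 && sge(4, 4) == 1);
    return 0;
  }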

View File

@ -0,0 +1,124 @@
$OpenBSD: patch-lib_Target_Mips_MCTargetDesc_MipsTargetStreamer_cpp,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
- Implement .cplocal directive. Needed when building libcrypto.
Index: lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
--- lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp.orig
+++ lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
@@ -38,6 +38,7 @@ static cl::opt<bool> RoundSectionSizes(
MipsTargetStreamer::MipsTargetStreamer(MCStreamer &S)
: MCTargetStreamer(S), ModuleDirectiveAllowed(true) {
GPRInfoSet = FPRInfoSet = FrameInfoSet = false;
+ GPReg = Mips::GP;
}
void MipsTargetStreamer::emitDirectiveSetMicroMips() {}
void MipsTargetStreamer::emitDirectiveSetNoMicroMips() {}
@@ -258,8 +259,7 @@ void MipsTargetStreamer::emitNop(SMLoc IDLoc, const MC
/// Emit the $gp restore operation for .cprestore.
void MipsTargetStreamer::emitGPRestore(int Offset, SMLoc IDLoc,
const MCSubtargetInfo *STI) {
- emitLoadWithImmOffset(Mips::LW, Mips::GP, Mips::SP, Offset, Mips::GP, IDLoc,
- STI);
+ emitLoadWithImmOffset(Mips::LW, GPReg, Mips::SP, Offset, GPReg, IDLoc, STI);
}
/// Emit a store instruction with an immediate offset.
@@ -1136,7 +1136,7 @@ void MipsTargetELFStreamer::emitDirectiveCpLoad(unsign
MCInst TmpInst;
TmpInst.setOpcode(Mips::LUi);
- TmpInst.addOperand(MCOperand::createReg(Mips::GP));
+ TmpInst.addOperand(MCOperand::createReg(GPReg));
const MCExpr *HiSym = MipsMCExpr::create(
MipsMCExpr::MEK_HI,
MCSymbolRefExpr::create("_gp_disp", MCSymbolRefExpr::VK_None,
@@ -1148,8 +1148,8 @@ void MipsTargetELFStreamer::emitDirectiveCpLoad(unsign
TmpInst.clear();
TmpInst.setOpcode(Mips::ADDiu);
- TmpInst.addOperand(MCOperand::createReg(Mips::GP));
- TmpInst.addOperand(MCOperand::createReg(Mips::GP));
+ TmpInst.addOperand(MCOperand::createReg(GPReg));
+ TmpInst.addOperand(MCOperand::createReg(GPReg));
const MCExpr *LoSym = MipsMCExpr::create(
MipsMCExpr::MEK_LO,
MCSymbolRefExpr::create("_gp_disp", MCSymbolRefExpr::VK_None,
@@ -1161,8 +1161,8 @@ void MipsTargetELFStreamer::emitDirectiveCpLoad(unsign
TmpInst.clear();
TmpInst.setOpcode(Mips::ADDu);
- TmpInst.addOperand(MCOperand::createReg(Mips::GP));
- TmpInst.addOperand(MCOperand::createReg(Mips::GP));
+ TmpInst.addOperand(MCOperand::createReg(GPReg));
+ TmpInst.addOperand(MCOperand::createReg(GPReg));
TmpInst.addOperand(MCOperand::createReg(RegNo));
getStreamer().EmitInstruction(TmpInst, STI);
@@ -1185,7 +1185,7 @@ bool MipsTargetELFStreamer::emitDirectiveCpRestore(
return true;
// Store the $gp on the stack.
- emitStoreWithImmOffset(Mips::SW, Mips::GP, Mips::SP, Offset, GetATReg, IDLoc,
+ emitStoreWithImmOffset(Mips::SW, GPReg, Mips::SP, Offset, GetATReg, IDLoc,
STI);
return true;
}
@@ -1206,10 +1206,10 @@ void MipsTargetELFStreamer::emitDirectiveCpsetup(unsig
// Either store the old $gp in a register or on the stack
if (IsReg) {
// move $save, $gpreg
- emitRRR(Mips::OR64, RegOrOffset, Mips::GP, Mips::ZERO, SMLoc(), &STI);
+ emitRRR(Mips::OR64, RegOrOffset, GPReg, Mips::ZERO, SMLoc(), &STI);
} else {
// sd $gpreg, offset($sp)
- emitRRI(Mips::SD, Mips::GP, Mips::SP, RegOrOffset, SMLoc(), &STI);
+ emitRRI(Mips::SD, GPReg, Mips::SP, RegOrOffset, SMLoc(), &STI);
}
if (getABI().IsN32()) {
@@ -1222,10 +1222,10 @@ void MipsTargetELFStreamer::emitDirectiveCpsetup(unsig
MCA.getContext());
// lui $gp, %hi(__gnu_local_gp)
- emitRX(Mips::LUi, Mips::GP, MCOperand::createExpr(HiExpr), SMLoc(), &STI);
+ emitRX(Mips::LUi, GPReg, MCOperand::createExpr(HiExpr), SMLoc(), &STI);
// addiu $gp, $gp, %lo(__gnu_local_gp)
- emitRRX(Mips::ADDiu, Mips::GP, Mips::GP, MCOperand::createExpr(LoExpr),
+ emitRRX(Mips::ADDiu, GPReg, GPReg, MCOperand::createExpr(LoExpr),
SMLoc(), &STI);
return;
@@ -1239,14 +1239,14 @@ void MipsTargetELFStreamer::emitDirectiveCpsetup(unsig
MCA.getContext());
// lui $gp, %hi(%neg(%gp_rel(funcSym)))
- emitRX(Mips::LUi, Mips::GP, MCOperand::createExpr(HiExpr), SMLoc(), &STI);
+ emitRX(Mips::LUi, GPReg, MCOperand::createExpr(HiExpr), SMLoc(), &STI);
// addiu $gp, $gp, %lo(%neg(%gp_rel(funcSym)))
- emitRRX(Mips::ADDiu, Mips::GP, Mips::GP, MCOperand::createExpr(LoExpr),
+ emitRRX(Mips::ADDiu, GPReg, GPReg, MCOperand::createExpr(LoExpr),
SMLoc(), &STI);
// daddu $gp, $gp, $funcreg
- emitRRR(Mips::DADDu, Mips::GP, Mips::GP, RegNo, SMLoc(), &STI);
+ emitRRR(Mips::DADDu, GPReg, GPReg, RegNo, SMLoc(), &STI);
}
void MipsTargetELFStreamer::emitDirectiveCpreturn(unsigned SaveLocation,
@@ -1259,12 +1259,12 @@ void MipsTargetELFStreamer::emitDirectiveCpreturn(unsi
// Either restore the old $gp from a register or on the stack
if (SaveLocationIsRegister) {
Inst.setOpcode(Mips::OR);
- Inst.addOperand(MCOperand::createReg(Mips::GP));
+ Inst.addOperand(MCOperand::createReg(GPReg));
Inst.addOperand(MCOperand::createReg(SaveLocation));
Inst.addOperand(MCOperand::createReg(Mips::ZERO));
} else {
Inst.setOpcode(Mips::LD);
- Inst.addOperand(MCOperand::createReg(Mips::GP));
+ Inst.addOperand(MCOperand::createReg(GPReg));
Inst.addOperand(MCOperand::createReg(Mips::SP));
Inst.addOperand(MCOperand::createImm(SaveLocation));
}

View File

@ -0,0 +1,31 @@
$OpenBSD: patch-lib_Target_Mips_Mips64InstrInfo_td,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
- Implement SGE pseudo-instructions. Needed when building libcrypto.
- Fix instruction guard. This prevents the compiler from using
the MIPS64 mul instruction on pre-MIPS64 subtargets.
Index: lib/Target/Mips/Mips64InstrInfo.td
--- lib/Target/Mips/Mips64InstrInfo.td.orig
+++ lib/Target/Mips/Mips64InstrInfo.td
@@ -845,7 +845,7 @@ def : MipsPat<(i64 (sext (i32 (sub GPR32:$src, GPR32:$
(SUBu GPR32:$src, GPR32:$src2), sub_32)>;
def : MipsPat<(i64 (sext (i32 (mul GPR32:$src, GPR32:$src2)))),
(INSERT_SUBREG (i64 (IMPLICIT_DEF)),
- (MUL GPR32:$src, GPR32:$src2), sub_32)>, ISA_MIPS3_NOT_32R6_64R6;
+ (MUL GPR32:$src, GPR32:$src2), sub_32)>, ISA_MIPS32_NOT_32R6_64R6;
def : MipsPat<(i64 (sext (i32 (MipsMFHI ACC64:$src)))),
(INSERT_SUBREG (i64 (IMPLICIT_DEF)),
(PseudoMFHI ACC64:$src), sub_32)>;
@@ -1136,6 +1136,12 @@ let AdditionalPredicates = [NotInMicroMips] in {
def NORImm64 : NORIMM_DESC_BASE<GPR64Opnd, imm64>, GPR_64;
def : MipsInstAlias<"nor\t$rs, $imm", (NORImm64 GPR64Opnd:$rs, GPR64Opnd:$rs,
imm64:$imm)>, GPR_64;
+def SGEImm64 : MipsAsmPseudoInst<(outs GPR64Opnd:$rd),
+ (ins GPR64Opnd:$rs, imm64:$imm),
+ "sge\t$rd, $rs, $imm">, GPR_64;
+def SGEUImm64 : MipsAsmPseudoInst<(outs GPR64Opnd:$rd),
+ (ins GPR64Opnd:$rs, imm64:$imm),
+ "sgeu\t$rd, $rs, $imm">, GPR_64;
def SLTImm64 : MipsAsmPseudoInst<(outs GPR64Opnd:$rs),
(ins GPR64Opnd:$rt, imm64:$imm),
"slt\t$rs, $rt, $imm">, GPR_64;

View File

@ -0,0 +1,23 @@
$OpenBSD: patch-lib_Target_Mips_MipsAsmPrinter_cpp,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
- Restore previous section after setting the MIPS ABI marker. This keeps
the .text section in use after the file header, improving compatibility
with gcc. Without this change, module-level inline assembly blocks could
end up in the wrong section.
Index: lib/Target/Mips/MipsAsmPrinter.cpp
--- lib/Target/Mips/MipsAsmPrinter.cpp.orig
+++ lib/Target/Mips/MipsAsmPrinter.cpp
@@ -795,10 +795,12 @@ void MipsAsmPrinter::EmitStartOfAsmFile(Module &M) {
TS.emitDirectiveOptionPic0();
}
+ MCSection *CS = OutStreamer->getCurrentSectionOnly();
// Tell the assembler which ABI we are using
std::string SectionName = std::string(".mdebug.") + getCurrentABIString();
OutStreamer->SwitchSection(
OutContext.getELFSection(SectionName, ELF::SHT_PROGBITS, 0));
+ OutStreamer->SwitchSection(CS);
// NaN: At the moment we only support:
// 1. .nan legacy (default)

View File

@ -0,0 +1,50 @@
$OpenBSD: patch-lib_Target_Mips_MipsISelLowering_cpp,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
- Implement the 'h' register constraint on mips64. This lets clang build
pieces of software that use the constraint if the compiler claims
to be compatible with GCC 4.2.1.
Note that the constraint was removed in GCC 4.4. The reason was that
'h' could generate code whose result is unpredictable. The underlying
reason is that the HI and LO registers are special, and the optimizer
has to be careful when choosing the order of HI/LO accesses. It looks
like LLVM has the needed logic.
Index: lib/Target/Mips/MipsISelLowering.cpp
--- lib/Target/Mips/MipsISelLowering.cpp.orig
+++ lib/Target/Mips/MipsISelLowering.cpp
@@ -3685,6 +3685,7 @@ MipsTargetLowering::getConstraintType(StringRef Constr
// backwards compatibility.
// 'c' : A register suitable for use in an indirect
// jump. This will always be $25 for -mabicalls.
+ // 'h' : The hi register. 1 word storage.
// 'l' : The lo register. 1 word storage.
// 'x' : The hilo register pair. Double word storage.
if (Constraint.size() == 1) {
@@ -3694,6 +3695,7 @@ MipsTargetLowering::getConstraintType(StringRef Constr
case 'y':
case 'f':
case 'c':
+ case 'h':
case 'l':
case 'x':
return C_RegisterClass;
@@ -3739,6 +3741,7 @@ MipsTargetLowering::getSingleConstraintMatchWeight(
weight = CW_Register;
break;
case 'c': // $25 for indirect jumps
+ case 'h': // hi register
case 'l': // lo register
case 'x': // hilo register pair
if (type->isIntegerTy())
@@ -3913,6 +3916,11 @@ MipsTargetLowering::getRegForInlineAsmConstraint(const
return std::make_pair((unsigned)Mips::T9_64, &Mips::GPR64RegClass);
// This will generate an error message
return std::make_pair(0U, nullptr);
+ case 'h': // use the `hi` register to store values
+ // that are no bigger than a word
+ if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8)
+ return std::make_pair((unsigned)Mips::HI0, &Mips::HI32RegClass);
+ return std::make_pair((unsigned)Mips::HI0_64, &Mips::HI64RegClass);
case 'l': // use the `lo` register to store values
// that are no bigger than a word
if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8)
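
As a usage sketch (illustrative only, not code from libcrypto): with this
patch clang accepts GCC 4.2-era inline assembly that pins an output to the
HI register, e.g. the upper half of a 64x64-bit multiply when compiled
with a mips64 target:

  #include <cstdint>

  uint64_t mulhi(uint64_t a, uint64_t b) {
    uint64_t hi;
    // dmultu writes HI/LO; "=h" tells the compiler the result is in HI.
    __asm__("dmultu %1, %2" : "=h"(hi) : "r"(a), "r"(b));
    return hi;
  }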

View File

@ -0,0 +1,28 @@
$OpenBSD: patch-lib_Target_Mips_MipsInstrInfo_td,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
- Implement SGE pseudo-instructions. Needed when building libcrypto.
Index: lib/Target/Mips/MipsInstrInfo.td
--- lib/Target/Mips/MipsInstrInfo.td.orig
+++ lib/Target/Mips/MipsInstrInfo.td
@@ -3007,6 +3007,20 @@ def LDMacro : MipsAsmPseudoInst<(outs GPR32Opnd:$rt),
def SDMacro : MipsAsmPseudoInst<(outs GPR32Opnd:$rt),
(ins mem_simm16:$addr), "sd $rt, $addr">,
ISA_MIPS1_NOT_MIPS3;
+
+def SGE : MipsAsmPseudoInst<(outs GPR32Opnd:$rd),
+ (ins GPR32Opnd:$rs, GPR32Opnd:$rt),
+ "sge\t$rd, $rs, $rt">;
+def SGEU : MipsAsmPseudoInst<(outs GPR32Opnd:$rd),
+ (ins GPR32Opnd:$rs, GPR32Opnd:$rt),
+ "sgeu\t$rd, $rs, $rt">;
+def SGEImm : MipsAsmPseudoInst<(outs GPR32Opnd:$rd),
+ (ins GPR32Opnd:$rs, simm32_relaxed:$imm),
+ "sge\t$rd, $rs, $imm">, GPR_32;
+def SGEUImm : MipsAsmPseudoInst<(outs GPR32Opnd:$rd),
+ (ins GPR32Opnd:$rs, simm32_relaxed:$imm),
+ "sgeu\t$rd, $rs, $imm">, GPR_32;
+
//===----------------------------------------------------------------------===//
// Arbitrary patterns that map to one or more instructions
//===----------------------------------------------------------------------===//

View File

@ -0,0 +1,26 @@
$OpenBSD: patch-lib_Target_Mips_MipsTargetStreamer_h,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
- Implement .cplocal directive. Needed when building libcrypto.
Index: lib/Target/Mips/MipsTargetStreamer.h
--- lib/Target/Mips/MipsTargetStreamer.h.orig
+++ lib/Target/Mips/MipsTargetStreamer.h
@@ -185,6 +185,10 @@ class MipsTargetStreamer : public MCTargetStreamer { (
return *ABI;
}
+ void setGPReg(unsigned GPReg) {
+ this->GPReg = GPReg;
+ }
+
protected:
llvm::Optional<MipsABIInfo> ABI;
MipsABIFlagsSection ABIFlagsSection;
@@ -199,6 +203,7 @@ class MipsTargetStreamer : public MCTargetStreamer { (
bool FrameInfoSet;
int FrameOffset;
+ unsigned GPReg;
unsigned FrameReg;
unsigned ReturnReg;

View File

@ -0,0 +1,27 @@
$OpenBSD: patch-lib_Target_PowerPC_PPCISelLowering_cpp,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
When generating code for OpenBSD/powerpc, avoid unaligned floating-point
load and store instructions. The vast majority of PowerPC CPUs that
OpenBSD runs on don't implement those and will generate an alignment
exceptions. While we do emulate lfd and stfd (to work around GCC bugs),
we don't emulate lfs and stfs. It is way more efficient to have the
compiler generate code that only uses aligned load and store instructions.
Index: lib/Target/PowerPC/PPCISelLowering.cpp
--- lib/Target/PowerPC/PPCISelLowering.cpp.orig
+++ lib/Target/PowerPC/PPCISelLowering.cpp
@@ -14204,6 +14204,14 @@ bool PPCTargetLowering::allowsMisalignedMemoryAccesses
if (VT == MVT::ppcf128)
return false;
+ if (Subtarget.isTargetOpenBSD()) {
+ // Traditional PowerPC does not support unaligned memory access
+ // for floating-point and the OpenBSD kernel does not emulate
+ // all possible floating-point load and store instructions.
+ if (VT == MVT::f32 || VT == MVT::f64)
+ return false;
+ }
+
if (Fast)
*Fast = true;
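
A sketch of the kind of access this affects (hypothetical struct, and the
exact expansion is up to the generic legalizer): a packed field makes the
f64 load misaligned, and with the hook returning false the backend no
longer emits lfd on the unaligned address:

  struct __attribute__((packed)) Rec {
    char tag;
    double v; // byte-aligned, so a plain load would be misaligned
  };

  // Previously this could lower to lfd on an odd address and trap on
  // most PowerPC CPUs OpenBSD supports; now it is expanded safely.
  double get(const Rec *r) { return r->v; }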

View File

@ -0,0 +1,20 @@
$OpenBSD: patch-lib_Target_PowerPC_PPCSubtarget_h,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
When generating code for OpenBSD/powerpc, avoid unaligned floating-point
load and store instructions. The vast majority of PowerPC CPUs that
OpenBSD runs on don't implement those and will generate alignment
exceptions. While we do emulate lfd and stfd (to work around GCC bugs),
we don't emulate lfs and stfs. It is way more efficient to have the
compiler generate code that only uses aligned load and store instructions.
Index: lib/Target/PowerPC/PPCSubtarget.h
--- lib/Target/PowerPC/PPCSubtarget.h.orig
+++ lib/Target/PowerPC/PPCSubtarget.h
@@ -305,6 +305,7 @@ class PPCSubtarget : public PPCGenSubtargetInfo { (pub
bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
bool isTargetMachO() const { return TargetTriple.isOSBinFormatMachO(); }
bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
+ bool isTargetOpenBSD() const { return TargetTriple.isOSOpenBSD(); }
bool isDarwinABI() const { return isTargetMachO() || isDarwin(); }
bool isSVR4ABI() const { return !isDarwinABI(); }

View File

@ -0,0 +1,17 @@
$OpenBSD: patch-lib_Target_Sparc_SparcAsmPrinter_cpp,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
- Remove cast that truncates immediate operands to 32 bits. This fixes
genassym.sh on sparc64 when using clang as the compiler.
Index: lib/Target/Sparc/SparcAsmPrinter.cpp
--- lib/Target/Sparc/SparcAsmPrinter.cpp.orig
+++ lib/Target/Sparc/SparcAsmPrinter.cpp
@@ -354,7 +354,7 @@ void SparcAsmPrinter::printOperand(const MachineInstr
break;
case MachineOperand::MO_Immediate:
- O << (int)MO.getImm();
+ O << MO.getImm();
break;
case MachineOperand::MO_MachineBasicBlock:
MO.getMBB()->getSymbol()->print(O, MAI);
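
The removed (int) cast only matters for immediates wider than 32 bits; a
quick standalone illustration with a made-up constant:

  #include <cstdint>
  #include <iostream>

  int main() {
    int64_t imm = 0x123456789;     // 64-bit immediate operand
    std::cout << (int)imm << '\n'; // old behaviour: truncated to 0x23456789
    std::cout << imm << '\n';      // new behaviour: full value printed
    return 0;
  }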

View File

@ -0,0 +1,16 @@
$OpenBSD: patch-lib_Target_Sparc_SparcISelLowering_cpp,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
- Use a 64-bit register when required
Index: lib/Target/Sparc/SparcISelLowering.cpp
--- lib/Target/Sparc/SparcISelLowering.cpp.orig
+++ lib/Target/Sparc/SparcISelLowering.cpp
@@ -3258,6 +3258,8 @@ SparcTargetLowering::getRegForInlineAsmConstraint(cons
case 'r':
if (VT == MVT::v2i32)
return std::make_pair(0U, &SP::IntPairRegClass);
+ else if (VT == MVT::i64 && Subtarget->is64Bit())
+ return std::make_pair(0U, &SP::I64RegsRegClass);
else
return std::make_pair(0U, &SP::IntRegsRegClass);
case 'f':

View File

@ -0,0 +1,28 @@
$OpenBSD: patch-lib_Target_X86_CMakeLists_txt,v 1.1.1.1 2019/11/06 10:07:55 rsadowski Exp $
- Add a clang pass that identifies potential ROP gadgets and replaces ROP
friendly instructions with safe alternatives. This initial commit fixes
3 instruction forms that will lower to include a c3 (return) byte.
Additional problematic instructions can be fixed incrementally using
this framework.
- Refactor retguard to make adding additional arches easier.
Index: lib/Target/X86/CMakeLists.txt
--- lib/Target/X86/CMakeLists.txt.orig
+++ lib/Target/X86/CMakeLists.txt
@@ -34,6 +34,7 @@ set(sources
X86ExpandPseudo.cpp
X86FastISel.cpp
X86FixupBWInsts.cpp
+ X86FixupGadgets.cpp
X86FixupLEAs.cpp
X86AvoidStoreForwardingBlocks.cpp
X86FixupSetCC.cpp
@@ -59,6 +60,7 @@ set(sources
X86RegisterBankInfo.cpp
X86RegisterInfo.cpp
X86RetpolineThunks.cpp
+ X86ReturnProtectorLowering.cpp
X86SelectionDAGInfo.cpp
X86ShuffleDecodeConstantPool.cpp
X86SpeculativeLoadHardening.cpp

View File

@ -0,0 +1,80 @@
$OpenBSD: patch-lib_Target_X86_MCTargetDesc_X86AsmBackend_cpp,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
trapsleds
Index: lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
--- lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp.orig
+++ lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -319,57 +319,23 @@ void X86AsmBackend::relaxInstruction(const MCInst &Ins
/// bytes.
/// \return - true on success, false on failure
bool X86AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
- static const char Nops[10][11] = {
- // nop
- "\x90",
- // xchg %ax,%ax
- "\x66\x90",
- // nopl (%[re]ax)
- "\x0f\x1f\x00",
- // nopl 0(%[re]ax)
- "\x0f\x1f\x40\x00",
- // nopl 0(%[re]ax,%[re]ax,1)
- "\x0f\x1f\x44\x00\x00",
- // nopw 0(%[re]ax,%[re]ax,1)
- "\x66\x0f\x1f\x44\x00\x00",
- // nopl 0L(%[re]ax)
- "\x0f\x1f\x80\x00\x00\x00\x00",
- // nopl 0L(%[re]ax,%[re]ax,1)
- "\x0f\x1f\x84\x00\x00\x00\x00\x00",
- // nopw 0L(%[re]ax,%[re]ax,1)
- "\x66\x0f\x1f\x84\x00\x00\x00\x00\x00",
- // nopw %cs:0L(%[re]ax,%[re]ax,1)
- "\x66\x2e\x0f\x1f\x84\x00\x00\x00\x00\x00",
- };
- // This CPU doesn't support long nops. If needed add more.
- // FIXME: We could generated something better than plain 0x90.
- if (!STI.getFeatureBits()[X86::FeatureNOPL]) {
- for (uint64_t i = 0; i < Count; ++i)
- OS << '\x90';
- return true;
- }
-
- // 15-bytes is the longest single NOP instruction, but 10-bytes is
- // commonly the longest that can be efficiently decoded.
- uint64_t MaxNopLength = 10;
- if (STI.getFeatureBits()[X86::ProcIntelSLM])
- MaxNopLength = 7;
- else if (STI.getFeatureBits()[X86::FeatureFast15ByteNOP])
- MaxNopLength = 15;
- else if (STI.getFeatureBits()[X86::FeatureFast11ByteNOP])
- MaxNopLength = 11;
-
- // Emit as many MaxNopLength NOPs as needed, then emit a NOP of the remaining
- // length.
+ // Write 1 or 2 byte NOP sequences, or a longer trapsled, until
+ // we have written Count bytes
do {
- const uint8_t ThisNopLength = (uint8_t) std::min(Count, MaxNopLength);
- const uint8_t Prefixes = ThisNopLength <= 10 ? 0 : ThisNopLength - 10;
- for (uint8_t i = 0; i < Prefixes; i++)
- OS << '\x66';
- const uint8_t Rest = ThisNopLength - Prefixes;
- if (Rest != 0)
- OS.write(Nops[Rest - 1], Rest);
+ const uint8_t ThisNopLength = (uint8_t) std::min(Count, (uint64_t)127);
+ switch (ThisNopLength) {
+ case 0: break;
+ case 1: OS << '\x90';
+ break;
+ case 2: OS << '\x66';
+ OS << '\x90';
+ break;
+ default: OS << '\xEB';
+ OS << (uint8_t)(ThisNopLength - 2);
+ for(uint8_t i = 2; i < ThisNopLength; ++i)
+ OS << '\xCC';
+ }
Count -= ThisNopLength;
} while (Count != 0);
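
The new padding is easy to model outside the backend; a standalone sketch
mirroring the loop above (chunks of at most 127 bytes, a short jmp over a
run of int3 traps for anything longer than two bytes):

  #include <algorithm>
  #include <cstdint>
  #include <vector>

  std::vector<uint8_t> trapsled(uint64_t count) {
    std::vector<uint8_t> out;
    while (count != 0) {
      uint8_t n = (uint8_t)std::min<uint64_t>(count, 127);
      if (n == 1) {
        out.push_back(0x90);                // nop
      } else if (n == 2) {
        out.push_back(0x66);                // xchg %ax,%ax
        out.push_back(0x90);
      } else {
        out.push_back(0xeb);                // jmp over the sled
        out.push_back(n - 2);
        out.insert(out.end(), n - 2, 0xcc); // int3 fill
      }
      count -= n;
    }
    return out;
  }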

View File

@ -0,0 +1,16 @@
$OpenBSD: patch-lib_Target_X86_X86AsmPrinter_h,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
Use int3 trap padding between functions instead of trapsleds with a leading jump.
Index: lib/Target/X86/X86AsmPrinter.h
--- lib/Target/X86/X86AsmPrinter.h.orig
+++ lib/Target/X86/X86AsmPrinter.h
@@ -118,6 +118,8 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public A
void EmitInstruction(const MachineInstr *MI) override;
+ void EmitTrapToAlignment(unsigned NumBits) const override;
+
void EmitBasicBlockEnd(const MachineBasicBlock &MBB) override {
AsmPrinter::EmitBasicBlockEnd(MBB);
SMShadowTracker.emitShadowPadding(*OutStreamer, getSubtargetInfo());

View File

@ -0,0 +1,683 @@
$OpenBSD: patch-lib_Target_X86_X86FixupGadgets_cpp,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
- Add a clang pass that identifies potential ROP gadgets and replaces ROP
friendly instructions with safe alternatives. This initial commit fixes
3 instruction forms that will lower to include a c3 (return) byte.
Additional problematic instructions can be fixed incrementally using
this framework.
- Improve the X86FixupGadgets pass
Index: lib/Target/X86/X86FixupGadgets.cpp
--- lib/Target/X86/X86FixupGadgets.cpp.orig
+++ lib/Target/X86/X86FixupGadgets.cpp
@@ -0,0 +1,670 @@
+//===-- X86FixupGadgets.cpp - Fixup Instructions that make ROP Gadgets ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file defines a function pass that checks instructions for sequences
+/// that will lower to a potentially useful ROP gadget, and attempts to
+/// replace those sequences with alternatives that are not useful for ROP.
+///
+//===----------------------------------------------------------------------===//
+
+#include "X86.h"
+#include "X86InstrBuilder.h"
+#include "X86InstrInfo.h"
+#include "X86MachineFunctionInfo.h"
+#include "X86Subtarget.h"
+#include "X86TargetMachine.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define FIXUPGADGETS_DESC "X86 ROP Gadget Fixup"
+#define FIXUPGADGETS_NAME "x86-fixup-gadgets"
+
+#define DEBUG_TYPE FIXUPGADGETS_NAME
+
+// Toggle with cc1 option: -mllvm -x86-fixup-gadgets=<true|false>
+static cl::opt<bool> FixupGadgets(
+ "x86-fixup-gadgets", cl::Hidden,
+ cl::desc("Replace ROP friendly instructions with safe alternatives"),
+ cl::init(true));
+
+namespace {
+class FixupGadgetsPass : public MachineFunctionPass {
+
+public:
+ static char ID;
+
+ StringRef getPassName() const override { return FIXUPGADGETS_DESC; }
+
+ FixupGadgetsPass()
+ : MachineFunctionPass(ID), STI(nullptr), TII(nullptr), TRI(nullptr) {}
+
+ /// Loop over all the instructions and replace ROP friendly
+ /// sequences with less ROP friendly alternatives
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::NoVRegs);
+ }
+
+private:
+ const X86Subtarget *STI;
+ const X86InstrInfo *TII;
+ const X86RegisterInfo *TRI;
+ bool Is64Bit;
+
+ struct FixupInfo {
+ unsigned op1;
+ unsigned op2;
+ bool fixup;
+ bool align;
+ };
+
+ uint8_t getRegNum(const MachineOperand &MO) const;
+ uint8_t getRegNum(unsigned reg) const;
+ struct FixupInfo isROPFriendly(MachineInstr &MI) const;
+ bool isROPFriendlyImm(const MachineOperand &MO) const;
+ bool isROPFriendlyRegPair(const MachineOperand &Dst,
+ const MachineOperand &Src) const;
+ bool isROPFriendlyReg(const MachineOperand &Dst, uint8_t RegOpcode) const;
+ bool badModRM(uint8_t Mod, uint8_t RegOpcode, uint8_t RM) const;
+ void checkSIB(const MachineInstr &MI, unsigned CurOp,
+ struct FixupInfo &info) const;
+ bool needsFixup(struct FixupInfo &fi) const;
+ bool needsAlign(struct FixupInfo &fi) const;
+ unsigned getWidestRegForReg(unsigned reg) const;
+ unsigned getEquivalentRegForReg(unsigned oreg, unsigned nreg) const;
+ bool hasImplicitUseOrDef(const MachineInstr &MI, unsigned Reg1,
+ unsigned Reg2) const;
+
+ bool fixupInstruction(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineInstr &MI, struct FixupInfo Info);
+};
+char FixupGadgetsPass::ID = 0;
+} // namespace
+
+FunctionPass *llvm::createX86FixupGadgetsPass() {
+ return new FixupGadgetsPass();
+}
+
+uint8_t FixupGadgetsPass::getRegNum(const MachineOperand &MO) const {
+ return TRI->getEncodingValue(MO.getReg()) & 0x7;
+}
+
+uint8_t FixupGadgetsPass::getRegNum(unsigned reg) const {
+ return TRI->getEncodingValue(reg) & 0x7;
+}
+
+bool FixupGadgetsPass::isROPFriendlyImm(const MachineOperand &MO) const {
+ int64_t imm = MO.getImm();
+ for (int i = 0; i < 8; ++i) {
+ uint8_t byte = (imm & 0xff);
+ if (byte == 0xc2 || byte == 0xc3 || byte == 0xca || byte == 0xcb) {
+ return true;
+ }
+ imm = imm >> 8;
+ }
+ return false;
+}
+
+bool FixupGadgetsPass::isROPFriendlyRegPair(const MachineOperand &Dst,
+ const MachineOperand &Src) const {
+
+ if (!Dst.isReg() || !Src.isReg())
+ llvm_unreachable("Testing non registers for bad reg pair!");
+
+ uint8_t Mod = 3;
+ uint8_t RegOpcode = getRegNum(Src);
+ uint8_t RM = getRegNum(Dst);
+ return badModRM(Mod, RegOpcode, RM);
+}
+
+bool FixupGadgetsPass::isROPFriendlyReg(const MachineOperand &Dst, uint8_t RegOpcode) const {
+
+ if (!Dst.isReg())
+ llvm_unreachable("Testing non register for bad reg!");
+
+ uint8_t Mod = 3;
+ uint8_t RM = getRegNum(Dst);
+ return badModRM(Mod, RegOpcode, RM);
+}
+
+bool FixupGadgetsPass::badModRM(uint8_t Mod, uint8_t RegOpcode,
+ uint8_t RM) const {
+ uint8_t ModRM = ((Mod << 6) | (RegOpcode << 3) | RM);
+ if (ModRM == 0xc2 || ModRM == 0xc3 || ModRM == 0xca || ModRM == 0xcb)
+ return true;
+ return false;
+}
+
+void FixupGadgetsPass::checkSIB(const MachineInstr &MI, unsigned CurOp,
+ struct FixupInfo &info) const {
+
+ const MachineOperand &Base = MI.getOperand(CurOp + X86::AddrBaseReg);
+ const MachineOperand &Scale = MI.getOperand(CurOp + X86::AddrScaleAmt);
+ const MachineOperand &Index = MI.getOperand(CurOp + X86::AddrIndexReg);
+
+ if (!Scale.isImm() || !Base.isReg() || !Index.isReg())
+ llvm_unreachable("Wrong type operands");
+
+ if (Scale.getImm() != 8 || Base.getReg() == 0 || Index.getReg() == 0)
+ return;
+
+ if (badModRM(3, getRegNum(Index), getRegNum(Base))) {
+ info.op1 = CurOp + X86::AddrBaseReg;
+ info.op2 = CurOp + X86::AddrIndexReg;
+ info.fixup = true;
+ }
+}
+
+struct FixupGadgetsPass::FixupInfo
+FixupGadgetsPass::isROPFriendly(MachineInstr &MI) const {
+
+ const MCInstrDesc &Desc = MI.getDesc();
+ unsigned CurOp = X86II::getOperandBias(Desc);
+ uint64_t TSFlags = Desc.TSFlags;
+ uint64_t Form = TSFlags & X86II::FormMask;
+ bool HasVEX_4V = TSFlags & X86II::VEX_4V;
+ bool HasEVEX_K = TSFlags & X86II::EVEX_K;
+
+ struct FixupInfo info = {0, 0, false, false};
+
+ // Look for constants with c3 in them
+ for (const auto &MO : MI.operands()) {
+ if (MO.isImm() && isROPFriendlyImm(MO)) {
+ info.align = true;
+ break;
+ }
+ }
+
+ switch (Form) {
+ case X86II::Pseudo: {
+ // Pseudos that are replaced with real instructions later
+ switch (MI.getOpcode()) {
+ case X86::ADD64rr_DB:
+ case X86::ADD32rr_DB:
+ case X86::ADD16rr_DB:
+ goto Handle_MRMDestReg;
+ case X86::ADD16ri_DB:
+ case X86::ADD32ri_DB:
+ case X86::ADD64ri32_DB:
+ case X86::ADD16ri8_DB:
+ case X86::ADD32ri8_DB:
+ case X86::ADD64ri8_DB:
+ goto Handle_MRMXr;
+ default:
+ break;
+ }
+ break;
+ }
+ case X86II::AddRegFrm: {
+ uint8_t BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);
+ uint8_t Opcode = BaseOpcode + getRegNum(MI.getOperand(CurOp));
+ if (Opcode == 0xc2 || Opcode == 0xc3 || Opcode == 0xca || Opcode == 0xcb) {
+ info.op1 = CurOp;
+ info.fixup = true;
+ }
+ break;
+ }
+ case X86II::MRMDestMem: {
+ checkSIB(MI, CurOp, info);
+ unsigned opcode = MI.getOpcode();
+ if (opcode == X86::MOVNTImr || opcode == X86::MOVNTI_64mr)
+ info.align = true;
+ break;
+ }
+ case X86II::MRMSrcMem: {
+ CurOp += 1;
+ if (HasVEX_4V)
+ CurOp += 1;
+ if (HasEVEX_K)
+ CurOp += 1;
+ checkSIB(MI, CurOp, info);
+ break;
+ }
+ case X86II::MRMSrcMem4VOp3: {
+ CurOp += 1;
+ checkSIB(MI, CurOp, info);
+ break;
+ }
+ case X86II::MRMSrcMemOp4: {
+ CurOp += 3;
+ checkSIB(MI, CurOp, info);
+ break;
+ }
+ case X86II::MRMXm:
+ case X86II::MRM0m:
+ case X86II::MRM1m:
+ case X86II::MRM2m:
+ case X86II::MRM3m:
+ case X86II::MRM4m:
+ case X86II::MRM5m:
+ case X86II::MRM6m:
+ case X86II::MRM7m: {
+ if (HasVEX_4V)
+ CurOp += 1;
+ if (HasEVEX_K)
+ CurOp += 1;
+ checkSIB(MI, CurOp, info);
+ break;
+ }
+ case X86II::MRMDestReg: {
+ Handle_MRMDestReg:
+ const MachineOperand &DstReg = MI.getOperand(CurOp);
+ info.op1 = CurOp;
+ CurOp += 1;
+ if (HasVEX_4V)
+ CurOp += 1;
+ if (HasEVEX_K)
+ CurOp += 1;
+ const MachineOperand &SrcReg = MI.getOperand(CurOp);
+ info.op2 = CurOp;
+ if (isROPFriendlyRegPair(DstReg, SrcReg))
+ info.fixup = true;
+ break;
+ }
+ case X86II::MRMSrcReg: {
+ const MachineOperand &DstReg = MI.getOperand(CurOp);
+ info.op1 = CurOp;
+ CurOp += 1;
+ if (HasVEX_4V)
+ CurOp += 1;
+ if (HasEVEX_K)
+ CurOp += 1;
+ const MachineOperand &SrcReg = MI.getOperand(CurOp);
+ info.op2 = CurOp;
+ if (isROPFriendlyRegPair(SrcReg, DstReg))
+ info.fixup = true;
+ break;
+ }
+ case X86II::MRMSrcReg4VOp3: {
+ const MachineOperand &DstReg = MI.getOperand(CurOp);
+ info.op1 = CurOp;
+ CurOp += 1;
+ const MachineOperand &SrcReg = MI.getOperand(CurOp);
+ info.op2 = CurOp;
+ if (isROPFriendlyRegPair(SrcReg, DstReg))
+ info.fixup = true;
+ break;
+ }
+ case X86II::MRMSrcRegOp4: {
+ const MachineOperand &DstReg = MI.getOperand(CurOp);
+ info.op1 = CurOp;
+ CurOp += 3;
+ const MachineOperand &SrcReg = MI.getOperand(CurOp);
+ info.op2 = CurOp;
+ if (isROPFriendlyRegPair(SrcReg, DstReg))
+ info.fixup = true;
+ break;
+ }
+ case X86II::MRMXr:
+ case X86II::MRM0r:
+ case X86II::MRM1r: {
+Handle_MRMXr:
+ if (HasVEX_4V)
+ CurOp += 1;
+ if (HasEVEX_K)
+ CurOp += 1;
+ const MachineOperand &DstReg = MI.getOperand(CurOp);
+ info.op1 = CurOp;
+ if (isROPFriendlyReg(DstReg, Form == X86II::MRM1r ? 1 : 0))
+ info.fixup = true;
+ break;
+ }
+ case X86II::MRM_C2:
+ case X86II::MRM_C3:
+ case X86II::MRM_CA:
+ case X86II::MRM_CB: {
+ info.align = true;
+ break;
+ }
+ default:
+ break;
+ }
+ return info;
+}
+
+bool FixupGadgetsPass::needsFixup(struct FixupInfo &fi) const {
+ return (fi.fixup == true);
+}
+
+bool FixupGadgetsPass::needsAlign(struct FixupInfo &fi) const {
+ return (fi.align == true);
+}
+
+unsigned FixupGadgetsPass::getWidestRegForReg(unsigned reg) const {
+
+ switch (reg) {
+ case X86::AL:
+ case X86::AH:
+ case X86::AX:
+ case X86::EAX:
+ case X86::RAX:
+ return Is64Bit ? X86::RAX : X86::EAX;
+ case X86::BL:
+ case X86::BH:
+ case X86::BX:
+ case X86::EBX:
+ case X86::RBX:
+ return Is64Bit ? X86::RBX : X86::EBX;
+ case X86::CL:
+ case X86::CH:
+ case X86::CX:
+ case X86::ECX:
+ case X86::RCX:
+ return Is64Bit ? X86::RCX : X86::ECX;
+ case X86::DL:
+ case X86::DH:
+ case X86::DX:
+ case X86::EDX:
+ case X86::RDX:
+ return Is64Bit ? X86::RDX : X86::EDX;
+ case X86::R8B:
+ case X86::R8W:
+ case X86::R8D:
+ case X86::R8:
+ return X86::R8;
+ case X86::R9B:
+ case X86::R9W:
+ case X86::R9D:
+ case X86::R9:
+ return X86::R9;
+ case X86::R10B:
+ case X86::R10W:
+ case X86::R10D:
+ case X86::R10:
+ return X86::R10;
+ case X86::R11B:
+ case X86::R11W:
+ case X86::R11D:
+ case X86::R11:
+ return X86::R11;
+ default:
+ return X86::NoRegister; // Non-GP Reg
+ }
+ return 0;
+}
+
+// For given register oreg return the equivalent size register
+// from the nreg register set. Eg. For oreg ebx and nreg ax, return eax.
+unsigned FixupGadgetsPass::getEquivalentRegForReg(unsigned oreg,
+ unsigned nreg) const {
+ unsigned compreg = getWidestRegForReg(nreg);
+
+ switch (oreg) {
+ case X86::AL:
+ case X86::BL:
+ case X86::CL:
+ case X86::DL:
+ case X86::R8B:
+ case X86::R9B:
+ case X86::R10B:
+ case X86::R11B:
+ switch (compreg) {
+ case X86::EAX:
+ case X86::RAX:
+ return X86::AL;
+ case X86::EBX:
+ case X86::RBX:
+ return X86::BL;
+ case X86::ECX:
+ case X86::RCX:
+ return X86::CL;
+ case X86::EDX:
+ case X86::RDX:
+ return X86::DL;
+ case X86::R8:
+ return X86::R8B;
+ case X86::R9:
+ return X86::R9B;
+ case X86::R10:
+ return X86::R10B;
+ case X86::R11:
+ return X86::R11B;
+ default:
+ llvm_unreachable("Unknown 8 bit register");
+ }
+ break;
+ case X86::AH:
+ case X86::BH:
+ case X86::CH:
+ case X86::DH:
+ switch (compreg) {
+ case X86::EAX:
+ return X86::AH;
+ case X86::EBX:
+ return X86::BH;
+ case X86::ECX:
+ return X86::CH;
+ case X86::EDX:
+ return X86::DH;
+ default:
+ llvm_unreachable("Using H registers in REX mode");
+ }
+ break;
+ case X86::AX:
+ case X86::BX:
+ case X86::CX:
+ case X86::DX:
+ case X86::R8W:
+ case X86::R9W:
+ case X86::R10W:
+ case X86::R11W:
+ switch (compreg) {
+ case X86::EAX:
+ case X86::RAX:
+ return X86::AX;
+ case X86::EBX:
+ case X86::RBX:
+ return X86::BX;
+ case X86::ECX:
+ case X86::RCX:
+ return X86::CX;
+ case X86::EDX:
+ case X86::RDX:
+ return X86::DX;
+ case X86::R8:
+ return X86::R8W;
+ case X86::R9:
+ return X86::R9W;
+ case X86::R10:
+ return X86::R10W;
+ case X86::R11:
+ return X86::R11W;
+ default:
+ llvm_unreachable("Unknown 16 bit register");
+ }
+ break;
+ case X86::EAX:
+ case X86::EBX:
+ case X86::ECX:
+ case X86::EDX:
+ case X86::R8D:
+ case X86::R9D:
+ case X86::R10D:
+ case X86::R11D:
+ switch (compreg) {
+ case X86::EAX:
+ case X86::RAX:
+ return X86::EAX;
+ case X86::EBX:
+ case X86::RBX:
+ return X86::EBX;
+ case X86::ECX:
+ case X86::RCX:
+ return X86::ECX;
+ case X86::EDX:
+ case X86::RDX:
+ return X86::EDX;
+ case X86::R8:
+ return X86::R8D;
+ case X86::R9:
+ return X86::R9D;
+ case X86::R10:
+ return X86::R10D;
+ case X86::R11:
+ return X86::R11D;
+ default:
+ llvm_unreachable("Unknown 32 bit register");
+ }
+ break;
+ case X86::RAX:
+ case X86::RBX:
+ case X86::RCX:
+ case X86::RDX:
+ case X86::R8:
+ case X86::R9:
+ case X86::R10:
+ case X86::R11:
+ return compreg;
+ default:
+ llvm_unreachable("Unknown input register!");
+ }
+}
+
+bool FixupGadgetsPass::hasImplicitUseOrDef(const MachineInstr &MI,
+ unsigned Reg1, unsigned Reg2) const {
+
+ const MCInstrDesc &Desc = MI.getDesc();
+
+ const MCPhysReg *ImpDefs = Desc.getImplicitDefs();
+ if (ImpDefs) {
+ for (; *ImpDefs; ++ImpDefs) {
+ unsigned w = getWidestRegForReg(*ImpDefs);
+ if (w == Reg1 || w == Reg2) {
+ return true;
+ }
+ }
+ }
+
+ const MCPhysReg *ImpUses = Desc.getImplicitUses();
+ if (ImpUses) {
+ for (; *ImpUses; ++ImpUses) {
+ unsigned w = getWidestRegForReg(*ImpUses);
+ if (w == Reg1 || w == Reg2) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+bool FixupGadgetsPass::fixupInstruction(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineInstr &MI, FixupInfo Info) {
+
+ if (!needsAlign(Info) && !needsFixup(Info))
+ return false;
+
+ DebugLoc DL = MI.getDebugLoc();
+
+ // Check for only needs alignment
+ if (needsAlign(Info) && !needsFixup(Info)) {
+ BuildMI(MBB, MI, DL, TII->get(X86::JMP_TRAP));
+ return true;
+ }
+
+ unsigned XCHG = Is64Bit ? X86::XCHG64rr : X86::XCHG32rr;
+
+ unsigned OrigReg1 = MI.getOperand(Info.op1).getReg();
+ // Swap with RAX/EAX unless we have a second register to swap with
+ unsigned OrigReg2 = Is64Bit ? X86::RAX : X86::EAX;
+ if (Info.op2)
+ OrigReg2 = MI.getOperand(Info.op2).getReg();
+
+ unsigned SwapReg1 = getWidestRegForReg(OrigReg1);
+ unsigned SwapReg2 = getWidestRegForReg(OrigReg2);
+ unsigned CompReg1 = SwapReg1;
+ unsigned CompReg2 = SwapReg2;
+
+ // Just align if:
+ // - we have a non-GP reg to swap with
+ // - the instruction implicitly uses one of the registers we are swapping
+ // - we are fixing an instruction that would skip the xchg back
+ if (SwapReg1 == X86::NoRegister || SwapReg2 == X86::NoRegister ||
+ hasImplicitUseOrDef(MI, CompReg1, CompReg2) || MI.isCall() ||
+ MI.isReturn() || MI.isBranch() || MI.isIndirectBranch() ||
+ MI.isBarrier()) {
+ BuildMI(MBB, MI, DL, TII->get(X86::JMP_TRAP));
+ return true;
+ }
+
+ // Make sure our XCHG doesn't make a gadget
+ if (badModRM(3, getRegNum(SwapReg1), getRegNum(SwapReg2))) {
+ unsigned treg = SwapReg1;
+ SwapReg1 = SwapReg2;
+ SwapReg2 = treg;
+ }
+
+ // Swap the two registers to start
+ BuildMI(MBB, MI, DL, TII->get(XCHG))
+ .addReg(SwapReg1, RegState::Define)
+ .addReg(SwapReg2, RegState::Define)
+ .addReg(SwapReg1).addReg(SwapReg2);
+
+ // Check for needs alignment
+ if (needsAlign(Info))
+ BuildMI(MBB, MI, DL, TII->get(X86::JMP_TRAP));
+
+ // Swap the registers inside the instruction
+ for (MachineOperand &MO : MI.operands()) {
+ if (!MO.isReg())
+ continue;
+
+ unsigned reg = MO.getReg();
+ unsigned match = getWidestRegForReg(reg);
+ if (match == CompReg1)
+ MO.setReg(getEquivalentRegForReg(reg, OrigReg2));
+ else if (match == CompReg2)
+ MO.setReg(getEquivalentRegForReg(reg, OrigReg1));
+ }
+
+ // And swap the two registers back
+ BuildMI(MBB, ++MachineBasicBlock::instr_iterator(MI), DL, TII->get(XCHG))
+ .addReg(SwapReg1, RegState::Define)
+ .addReg(SwapReg2, RegState::Define)
+ .addReg(SwapReg1).addReg(SwapReg2);
+
+ return true;
+}
+
+bool FixupGadgetsPass::runOnMachineFunction(MachineFunction &MF) {
+ if (!FixupGadgets)
+ return false;
+
+ STI = &MF.getSubtarget<X86Subtarget>();
+ TII = STI->getInstrInfo();
+ TRI = STI->getRegisterInfo();
+ Is64Bit = STI->is64Bit();
+ std::vector<std::pair<MachineInstr *, FixupInfo>> fixups;
+ FixupInfo info;
+
+ bool modified = false;
+
+ for (auto &MBB : MF) {
+ fixups.clear();
+ for (auto &MI : MBB) {
+ info = isROPFriendly(MI);
+ if (needsAlign(info) || needsFixup(info))
+ fixups.push_back(std::make_pair(&MI, info));
+ }
+ for (auto &fixup : fixups)
+ modified |= fixupInstruction(MF, MBB, *fixup.first, fixup.second);
+ }
+
+ return modified;
+}
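
The badModRM check boils down to one byte computation; this standalone
sketch enumerates the register-register (Mod=3) encodings that collide
with the RET opcodes 0xc2/0xc3/0xca/0xcb:

  #include <cstdint>
  #include <cstdio>

  int main() {
    for (uint8_t reg = 0; reg < 8; ++reg) {
      for (uint8_t rm = 0; rm < 8; ++rm) {
        uint8_t modrm = (3 << 6) | (reg << 3) | rm; // Mod=3: reg-reg form
        if (modrm == 0xc2 || modrm == 0xc3 || modrm == 0xca || modrm == 0xcb)
          printf("reg=%u rm=%u -> modrm 0x%02x\n", reg, rm, modrm);
      }
    }
    return 0;
  }

Only four of the 64 reg-reg combinations are affected, which is why
swapping one register pair via XCHG is usually enough to break the gadget.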

View File

@ -0,0 +1,197 @@
$OpenBSD: patch-lib_Target_X86_X86FrameLowering_cpp,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
- Add RETGUARD to clang for amd64. This security mechanism uses per-function
random cookies to protect access to function return instructions, with the
effect that the integrity of the return address is protected, and function
return instructions are harder to use in ROP gadgets.
On function entry the return address is combined with a per-function random
cookie and stored in the stack frame. The integrity of this value is verified
before function return, and if this check fails, the program aborts. In this way
RETGUARD is an improved stack protector, since the cookies are per-function. The
verification routine is constructed such that the binary space immediately
before each ret instruction is padded with int03 instructions, which makes these
return instructions difficult to use in ROP gadgets. In the kernel, this has the
effect of removing approximately 50% of total ROP gadgets, and 15% of unique
ROP gadgets compared to the 6.3 release kernel. Function epilogues are
essentially gadget free, leaving only the polymorphic gadgets that result from
jumping into the instruction stream partway through other instructions. Work to
remove these gadgets will continue through other mechanisms.
- Refactor retguard to make adding additional arches easier.
- implement -msave-args in clang/llvm, like the sun did for gcc
Index: lib/Target/X86/X86FrameLowering.cpp
--- lib/Target/X86/X86FrameLowering.cpp.orig
+++ lib/Target/X86/X86FrameLowering.cpp
@@ -15,6 +15,7 @@
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
+#include "X86ReturnProtectorLowering.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/SmallSet.h"
@@ -39,7 +40,7 @@ X86FrameLowering::X86FrameLowering(const X86Subtarget
unsigned StackAlignOverride)
: TargetFrameLowering(StackGrowsDown, StackAlignOverride,
STI.is64Bit() ? -8 : -4),
- STI(STI), TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()) {
+ STI(STI), TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()), RPL() {
// Cache a bunch of frame-related predicates for this subtarget.
SlotSize = TRI->getSlotSize();
Is64Bit = STI.is64Bit();
@@ -47,6 +48,7 @@ X86FrameLowering::X86FrameLowering(const X86Subtarget
// standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
StackPtr = TRI->getStackRegister();
+ SaveArgs = Is64Bit ? STI.getSaveArgs() : 0;
}
bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
@@ -90,7 +92,8 @@ bool X86FrameLowering::hasFP(const MachineFunction &MF
MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
MF.callsUnwindInit() || MF.hasEHFunclets() || MF.callsEHReturn() ||
MFI.hasStackMap() || MFI.hasPatchPoint() ||
- MFI.hasCopyImplyingStackAdjustment());
+ MFI.hasCopyImplyingStackAdjustment() ||
+ SaveArgs);
}
static unsigned getSUBriOpcode(unsigned IsLP64, int64_t Imm) {
@@ -872,6 +875,24 @@ void X86FrameLowering::BuildStackAlignAND(MachineBasic
MI->getOperand(3).setIsDead();
}
+// FIXME: Get this from tablegen.
+static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
+ const X86Subtarget &Subtarget) {
+ assert(Subtarget.is64Bit());
+
+ if (Subtarget.isCallingConvWin64(CallConv)) {
+ static const MCPhysReg GPR64ArgRegsWin64[] = {
+ X86::RCX, X86::RDX, X86::R8, X86::R9
+ };
+ return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
+ }
+
+ static const MCPhysReg GPR64ArgRegs64Bit[] = {
+ X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
+ };
+ return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
+}
+
/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjust the stack pointer. Adjust the stack pointer to allocate
/// space for local variables. Also emit labels used by the exception handler to
@@ -1145,6 +1166,43 @@ void X86FrameLowering::emitPrologue(MachineFunction &M
nullptr, DwarfFramePtr));
}
+ if (SaveArgs && !Fn.arg_empty()) {
+ ArrayRef<MCPhysReg> GPRs =
+ get64BitArgumentGPRs(Fn.getCallingConv(), STI);
+ unsigned arg_size = Fn.arg_size();
+ unsigned RI = 0;
+ int64_t SaveSize = 0;
+
+ if (Fn.hasStructRetAttr()) {
+ GPRs = GPRs.drop_front(1);
+ arg_size--;
+ }
+
+ for (MCPhysReg Reg : GPRs) {
+ if (++RI > arg_size)
+ break;
+
+ SaveSize += SlotSize;
+
+ BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
+ .addReg(Reg)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+
+ // Realign the stack. PUSHes are the most space efficient.
+ while (SaveSize % getStackAlignment()) {
+ BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
+ .addReg(GPRs.front())
+ .setMIFlag(MachineInstr::FrameSetup);
+
+ SaveSize += SlotSize;
+ }
+
+ //dlg StackSize -= SaveSize;
+ //dlg MFI.setStackSize(StackSize);
+ X86FI->setSaveArgSize(SaveSize);
+ }
+
if (NeedsWinFPO) {
// .cv_fpo_setframe $FramePtr
HasWinCFI = true;
@@ -1626,20 +1684,6 @@ void X86FrameLowering::emitEpilogue(MachineFunction &M
}
uint64_t SEHStackAllocAmt = NumBytes;
- if (HasFP) {
- // Pop EBP.
- BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r),
- MachineFramePtr)
- .setMIFlag(MachineInstr::FrameDestroy);
- if (NeedsDwarfCFI) {
- unsigned DwarfStackPtr =
- TRI->getDwarfRegNum(Is64Bit ? X86::RSP : X86::ESP, true);
- BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createDefCfa(
- nullptr, DwarfStackPtr, -SlotSize));
- --MBBI;
- }
- }
-
MachineBasicBlock::iterator FirstCSPop = MBBI;
// Skip the callee-saved pop instructions.
while (MBBI != MBB.begin()) {
@@ -1709,6 +1753,28 @@ void X86FrameLowering::emitEpilogue(MachineFunction &M
--MBBI;
}
+ if (HasFP) {
+ MBBI = Terminator;
+
+ if (X86FI->getSaveArgSize()) {
+ // LEAVE is effectively mov rbp,rsp; pop rbp
+ BuildMI(MBB, MBBI, DL, TII.get(X86::LEAVE64))
+ .setMIFlag(MachineInstr::FrameDestroy);
+ } else {
+ // Pop EBP.
+ BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r),
+ MachineFramePtr)
+ .setMIFlag(MachineInstr::FrameDestroy);
+ }
+ if (NeedsDwarfCFI) {
+ unsigned DwarfStackPtr =
+ TRI->getDwarfRegNum(Is64Bit ? X86::RSP : X86::ESP, true);
+ BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createDefCfa(
+ nullptr, DwarfStackPtr, -SlotSize));
+ --MBBI;
+ }
+ }
+
// Windows unwinder will not invoke function's exception handler if IP is
// either in prologue or in epilogue. This behavior causes a problem when a
// call immediately precedes an epilogue, because the return address points
@@ -1797,6 +1863,8 @@ int X86FrameLowering::getFrameIndexReference(const Mac
"FPDelta isn't aligned per the Win64 ABI!");
}
+ if (FI >= 0)
+ Offset -= X86FI->getSaveArgSize();
if (TRI->hasBasePointer(MF)) {
assert(HasFP && "VLAs and dynamic stack realign, but no FP?!");
@@ -3168,4 +3236,8 @@ void X86FrameLowering::processFunctionBeforeFrameFinal
addFrameReference(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mi32)),
UnwindHelpFI)
.addImm(-2);
+}
+
+const ReturnProtectorLowering *X86FrameLowering::getReturnProtector() const {
+ return &RPL;
}
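A note on the SaveSize loop in the emitPrologue hunk above: the saved-argument
area is padded with additional pushes until it is a multiple of the stack
alignment, a push being the smallest instruction that keeps %rsp aligned. A
standalone sketch of the arithmetic, assuming the usual 8-byte slots and
16-byte alignment:

    #include <cstdio>

    int main() {
        const unsigned SlotSize = 8, StackAlign = 16;
        unsigned NumArgs = 3;                   // f(a, b, c): rdi, rsi, rdx
        unsigned SaveSize = NumArgs * SlotSize; // 24 bytes
        while (SaveSize % StackAlign)           // pad with one more push
            SaveSize += SlotSize;
        printf("%u\n", SaveSize);               // prints 32
        return 0;
    }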

View File

@ -0,0 +1,61 @@
$OpenBSD: patch-lib_Target_X86_X86FrameLowering_h,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
- Add RETGUARD to clang for amd64. This security mechanism uses per-function
random cookies to protect access to function return instructions, with the
effect that the integrity of the return address is protected, and function
return instructions are harder to use in ROP gadgets.
On function entry the return address is combined with a per-function random
cookie and stored in the stack frame. The integrity of this value is verified
before function return, and if this check fails, the program aborts. In this way
RETGUARD is an improved stack protector, since the cookies are per-function. The
verification routine is constructed such that the binary space immediately
before each ret instruction is padded with int3 instructions, which makes these
return instructions difficult to use in ROP gadgets. In the kernel, this has the
effect of removing approximately 50% of total ROP gadgets, and 15% of unique
ROP gadgets compared to the 6.3 release kernel. Function epilogues are
essentially gadget free, leaving only the polymorphic gadgets that result from
jumping into the instruction stream partway through other instructions. Work to
remove these gadgets will continue through other mechanisms.
- Refactor retguard to make adding additional arches easier.
- implement -msave-args in clang/llvm, like Sun did for gcc
Index: lib/Target/X86/X86FrameLowering.h
--- lib/Target/X86/X86FrameLowering.h.orig
+++ lib/Target/X86/X86FrameLowering.h
@@ -14,6 +14,7 @@
#ifndef LLVM_LIB_TARGET_X86_X86FRAMELOWERING_H
#define LLVM_LIB_TARGET_X86_X86FRAMELOWERING_H
+#include "X86ReturnProtectorLowering.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
namespace llvm {
@@ -23,6 +24,7 @@ class MCCFIInstruction;
class X86InstrInfo;
class X86Subtarget;
class X86RegisterInfo;
+class X86ReturnProtectorLowering;
class X86FrameLowering : public TargetFrameLowering {
public:
@@ -33,7 +35,10 @@ class X86FrameLowering : public TargetFrameLowering {
const X86Subtarget &STI;
const X86InstrInfo &TII;
const X86RegisterInfo *TRI;
+ const X86ReturnProtectorLowering RPL;
+ bool SaveArgs;
+
unsigned SlotSize;
/// Is64Bit implies that x86_64 instructions are available.
@@ -67,6 +72,8 @@ class X86FrameLowering : public TargetFrameLowering {
/// the function.
void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+
+ const ReturnProtectorLowering *getReturnProtector() const override;
void adjustForSegmentedStacks(MachineFunction &MF,
MachineBasicBlock &PrologueMBB) const override;

View File

@ -0,0 +1,50 @@
$OpenBSD: patch-lib_Target_X86_X86InstrCompiler_td,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
- Add RETGUARD to clang for amd64. This security mechanism uses per-function
random cookies to protect access to function return instructions, with the
effect that the integrity of the return address is protected, and function
return instructions are harder to use in ROP gadgets.
On function entry the return address is combined with a per-function random
cookie and stored in the stack frame. The integrity of this value is verified
before function return, and if this check fails, the program aborts. In this way
RETGUARD is an improved stack protector, since the cookies are per-function. The
verification routine is constructed such that the binary space immediately
before each ret instruction is padded with int3 instructions, which makes these
return instructions difficult to use in ROP gadgets. In the kernel, this has the
effect of removing approximately 50% of total ROP gadgets, and 15% of unique
ROP gadgets compared to the 6.3 release kernel. Function epilogues are
essentially gadget free, leaving only the polymorphic gadgets that result from
jumping into the instruction stream partway through other instructions. Work to
remove these gadgets will continue through other mechanisms.
- Improve the X86FixupGadgets pass
Index: lib/Target/X86/X86InstrCompiler.td
--- lib/Target/X86/X86InstrCompiler.td.orig
+++ lib/Target/X86/X86InstrCompiler.td
@@ -264,6 +264,25 @@ def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (
}
//===----------------------------------------------------------------------===//
+// Pseudo instruction used by retguard
+
+// This is lowered to a JE 2; INT3; INT3. Prior to this pseudo should be a
+// compare instruction to ensure the retguard cookie is correct.
+// We use a pseudo here in order to avoid splitting the BB just before the return.
+// Splitting the BB and inserting a JE_1 over a new INT3 BB occasionally
+// resulted in incorrect code when a value from a byte register (CL) was
+// used as a return value. When emitted as a split BB, the single byte
+// register would sometimes be widened to 4 bytes, which would corrupt
+// the return value (ie mov %ecx, %eax instead of mov %cl, %al).
+let isCodeGenOnly = 1, Uses = [EFLAGS] in {
+def RETGUARD_JMP_TRAP: I<0, Pseudo, (outs), (ins), "", []>;
+}
+
+let isCodeGenOnly = 1 in {
+def JMP_TRAP: I<0, Pseudo, (outs), (ins), "", []>;
+}
+
+//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

View File

@ -0,0 +1,87 @@
$OpenBSD: patch-lib_Target_X86_X86MCInstLower_cpp,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
- Add RETGUARD to clang for amd64. This security mechanism uses per-function
random cookies to protect access to function return instructions, with the
effect that the integrity of the return address is protected, and function
return instructions are harder to use in ROP gadgets.
On function entry the return address is combined with a per-function random
cookie and stored in the stack frame. The integrity of this value is verified
before function return, and if this check fails, the program aborts. In this way
RETGUARD is an improved stack protector, since the cookies are per-function. The
verification routine is constructed such that the binary space immediately
before each ret instruction is padded with int3 instructions, which makes these
return instructions difficult to use in ROP gadgets. In the kernel, this has the
effect of removing approximately 50% of total ROP gadgets, and 15% of unique
ROP gadgets compared to the 6.3 release kernel. Function epilogues are
essentially gadget free, leaving only the polymorphic gadgets that result from
jumping into the instruction stream partway through other instructions. Work to
remove these gadgets will continue through other mechanisms.
- Use int3 trap padding between functions instead of trapsleds with a leading jump.
- Emit variable length trap padding in retguard epilogue.
This adds more trap padding before the return while ensuring that the
return is still in the same cache line.
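The padding length in the patch below is 0xf - ((Dot - Base) & 0xf): fill with
int3 (0xCC) bytes until the next byte emitted -- the ret -- sits at offset 0xf
of its 16-byte window. A small sketch of that arithmetic (the offsets are
illustrative):

    #include <cstdint>
    #include <cstdio>

    // Number of 0xCC bytes to emit so the following ret lands on 0xf.
    static uint64_t trapFill(uint64_t dot, uint64_t base) {
        return 0xf - ((dot - base) & 0xf);
    }

    int main() {
        // 0x22 bytes into the function: pad with 13 traps, so the ret
        // byte sits at 0x2f and stays within the same 16-byte window.
        printf("%llu\n", (unsigned long long)trapFill(0x22, 0));
        return 0;
    }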
Index: lib/Target/X86/X86MCInstLower.cpp
--- lib/Target/X86/X86MCInstLower.cpp.orig
+++ lib/Target/X86/X86MCInstLower.cpp
@@ -1786,6 +1786,48 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr
MCInstBuilder(X86::MOV64rr).addReg(X86::R10).addReg(X86::RAX));
return;
+ case X86::RETGUARD_JMP_TRAP: {
+ // Make a symbol for the end of the trapsled and emit a jump to it
+ MCSymbol *RGSuccSym = OutContext.createTempSymbol();
+ const MCExpr *RGSuccExpr = MCSymbolRefExpr::create(RGSuccSym, OutContext);
+ EmitAndCountInstruction(MCInstBuilder(X86::JE_1).addExpr(RGSuccExpr));
+
+ // Emit at least two trap instructions
+ EmitAndCountInstruction(MCInstBuilder(X86::INT3));
+ EmitAndCountInstruction(MCInstBuilder(X86::INT3));
+
+ // Now .fill up to 0xe byte, so the ret happens on 0xf
+ MCSymbol *Dot = OutContext.createTempSymbol();
+ OutStreamer->EmitLabel(Dot);
+ const MCExpr *DotE = MCSymbolRefExpr::create(Dot, OutContext);
+ const MCExpr *BaseE = MCSymbolRefExpr::create(
+ TM.getSymbol(&MF->getFunction()), OutContext);
+ // .fill (0xf - ((DotE - BaseE) & 0xf)), 1, 0xcc
+ const MCExpr *FillE = MCBinaryExpr::createSub(
+ MCConstantExpr::create(0xf, OutContext),
+ MCBinaryExpr::createAnd(
+ MCBinaryExpr::createSub(DotE, BaseE, OutContext),
+ MCConstantExpr::create(0xf, OutContext),
+ OutContext),
+ OutContext);
+ OutStreamer->emitFill(*FillE, 0xCC);
+
+ // And finally emit the jump target symbol
+ OutStreamer->EmitLabel(RGSuccSym);
+ return;
+ }
+
+ case X86::JMP_TRAP: {
+ MCSymbol *RGSuccSym = OutContext.createTempSymbol();
+ const MCExpr *RGSuccExpr = MCSymbolRefExpr::create(RGSuccSym, OutContext);
+ EmitAndCountInstruction(MCInstBuilder(X86::JMP_1).addExpr(RGSuccExpr));
+ EmitAndCountInstruction(MCInstBuilder(X86::INT3));
+ EmitAndCountInstruction(MCInstBuilder(X86::INT3));
+ OutStreamer->EmitValueToAlignment(8, 0xCC, 1);
+ OutStreamer->EmitLabel(RGSuccSym);
+ return;
+ }
+
case X86::SEH_PushReg:
case X86::SEH_SaveReg:
case X86::SEH_SaveXMM:
@@ -2223,4 +2265,10 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr
}
EmitAndCountInstruction(TmpInst);
+}
+
+/// Emit Trap bytes to the specified power of two alignment
+void X86AsmPrinter::EmitTrapToAlignment(unsigned NumBits) const {
+ if (NumBits == 0) return;
+ OutStreamer->EmitValueToAlignment(1u << NumBits, 0xCC, 1);
}

View File

@ -0,0 +1,27 @@
$OpenBSD: patch-lib_Target_X86_X86MachineFunctionInfo_h,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
implement -msave-args in clang/llvm, like Sun did for gcc
Index: lib/Target/X86/X86MachineFunctionInfo.h
--- lib/Target/X86/X86MachineFunctionInfo.h.orig
+++ lib/Target/X86/X86MachineFunctionInfo.h
@@ -41,6 +41,9 @@ class X86MachineFunctionInfo : public MachineFunctionI
/// stack frame in bytes.
unsigned CalleeSavedFrameSize = 0;
+ // SaveArgSize - Number of register arguments saved on the stack
+ unsigned SaveArgSize = 0;
+
/// BytesToPopOnReturn - Number of bytes function pops on return (in addition
/// to the space used by the return address).
/// Used on windows platform for stdcall & fastcall name decoration
@@ -123,6 +126,9 @@ class X86MachineFunctionInfo : public MachineFunctionI
unsigned getCalleeSavedFrameSize() const { return CalleeSavedFrameSize; }
void setCalleeSavedFrameSize(unsigned bytes) { CalleeSavedFrameSize = bytes; }
+
+ unsigned getSaveArgSize() const { return SaveArgSize; }
+ void setSaveArgSize(unsigned bytes) { SaveArgSize = bytes; }
unsigned getBytesToPopOnReturn() const { return BytesToPopOnReturn; }
void setBytesToPopOnReturn (unsigned bytes) { BytesToPopOnReturn = bytes;}

View File

@ -0,0 +1,45 @@
$OpenBSD: patch-lib_Target_X86_X86RegisterInfo_td,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
- The compiler is generally free to allocate general purpose registers in
whatever order it chooses. Reasons for choosing one register before another
usually include compiled instruction size (avoidance of REX prefixes, etc.)
or usage conventions, but somehow haven't included security implications in
the compiled bytecode. Some bytecode is more useful in polymorphic ROP
sequences than others, so it seems prudent to try to avoid that bytecode
when possible.
This patch moves EBX/RBX towards the end of the allocation preference for 32
and 64 bit general purpose registers. Some instructions using RBX/EBX/BX/BL
as a destination register end up with a ModR/M byte of C3 or CB, which is often
useful in ROP gadgets. Because these gadgets often occur in the middle of
functions, they exhibit somewhat higher diversity than some other C3/CB
terminated gadgets. This change removes about 3% of total gadgets from the
kernel, but about 6% of unique gadgets.
There are other possible changes in this direction. BX/BL are obvious next
targets for avoidance, and MM3/XMM3 may also be useful to try to avoid if
possible.
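To see why RBX/EBX is singled out: in a register-to-register instruction the
ModR/M byte is mod(2 bits), reg(3 bits), rm(3 bits), and with mod=11, reg=EAX
(000) and rm=EBX (011) that byte is 0xC3 -- the one-byte ret opcode. So plain
"mov %eax,%ebx" encodes as 89 C3 and carries a ROP-usable return in its second
byte. A quick sketch of the encoding:

    #include <cstdio>

    static unsigned modrm(unsigned mod, unsigned reg, unsigned rm) {
        return (mod << 6) | (reg << 3) | rm;
    }

    int main() {
        // mov %eax,%ebx -> opcode 0x89, ModR/M 0xC3 (a 'ret' byte)
        printf("0x%02X\n", modrm(3, 0 /* EAX */, 3 /* EBX */));
        return 0;
    }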
Index: lib/Target/X86/X86RegisterInfo.td
--- lib/Target/X86/X86RegisterInfo.td.orig
+++ lib/Target/X86/X86RegisterInfo.td
@@ -405,8 +405,8 @@ def GRH16 : RegisterClass<"X86", [i16], 16,
R15WH)>;
def GR32 : RegisterClass<"X86", [i32], 32,
- (add EAX, ECX, EDX, ESI, EDI, EBX, EBP, ESP,
- R8D, R9D, R10D, R11D, R14D, R15D, R12D, R13D)>;
+ (add EAX, ECX, EDX, ESI, EDI,
+ R8D, R9D, R10D, R11D, R14D, R15D, R12D, R13D, EBX, EBP, ESP)>;
// GR64 - 64-bit GPRs. This oddly includes RIP, which isn't accurate, since
// RIP isn't really a register and it can't be used anywhere except in an
@@ -415,7 +415,7 @@ def GR32 : RegisterClass<"X86", [i32], 32,
// tests because of the inclusion of RIP in this register class.
def GR64 : RegisterClass<"X86", [i64], 64,
(add RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
- RBX, R14, R15, R12, R13, RBP, RSP, RIP)>;
+ R14, R15, R12, R13, RBX, RBP, RSP, RIP)>;
// Segment registers for use by MOV instructions (and others) that have a
// segment register as one operand. Always contain a 16-bit segment

View File

@ -0,0 +1,132 @@
$OpenBSD: patch-lib_Target_X86_X86ReturnProtectorLowering_cpp,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
- Refactor retguard to make adding additional arches easier.
- Do not store the retguard cookie in the frame in leaf functions if possible.
Makes things slightly faster and also improves security in these functions,
since the retguard cookie can't leak via the stack.
Index: lib/Target/X86/X86ReturnProtectorLowering.cpp
--- lib/Target/X86/X86ReturnProtectorLowering.cpp.orig
+++ lib/Target/X86/X86ReturnProtectorLowering.cpp
@@ -0,0 +1,121 @@
+//===-- X86ReturnProtectorLowering.cpp - ----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the X86 implementation of ReturnProtectorLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86ReturnProtectorLowering.h"
+#include "X86InstrBuilder.h"
+#include "X86InstrInfo.h"
+#include "X86MachineFunctionInfo.h"
+#include "X86Subtarget.h"
+#include "X86TargetMachine.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/Function.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetOptions.h"
+#include <cstdlib>
+
+using namespace llvm;
+
+void X86ReturnProtectorLowering::insertReturnProtectorPrologue(
+ MachineFunction &MF, MachineBasicBlock &MBB, GlobalVariable *cookie) const {
+
+ MachineBasicBlock::instr_iterator MI = MBB.instr_begin();
+ DebugLoc MBBDL = MBB.findDebugLoc(MI);
+ const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
+ unsigned REG = MF.getFrameInfo().getReturnProtectorRegister();
+
+ BuildMI(MBB, MI, MBBDL, TII->get(X86::MOV64rm), REG)
+ .addReg(X86::RIP)
+ .addImm(0)
+ .addReg(0)
+ .addGlobalAddress(cookie)
+ .addReg(0);
+ addDirectMem(BuildMI(MBB, MI, MBBDL, TII->get(X86::XOR64rm), REG).addReg(REG),
+ X86::RSP);
+}
+
+void X86ReturnProtectorLowering::insertReturnProtectorEpilogue(
+ MachineFunction &MF, MachineInstr &MI, GlobalVariable *cookie) const {
+
+ MachineBasicBlock &MBB = *MI.getParent();
+ DebugLoc MBBDL = MI.getDebugLoc();
+ const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
+ unsigned REG = MF.getFrameInfo().getReturnProtectorRegister();
+
+ addDirectMem(BuildMI(MBB, MI, MBBDL, TII->get(X86::XOR64rm), REG).addReg(REG),
+ X86::RSP);
+ BuildMI(MBB, MI, MBBDL, TII->get(X86::CMP64rm))
+ .addReg(REG)
+ .addReg(X86::RIP)
+ .addImm(0)
+ .addReg(0)
+ .addGlobalAddress(cookie)
+ .addReg(0);
+ BuildMI(MBB, MI, MBBDL, TII->get(X86::RETGUARD_JMP_TRAP));
+}
+
+bool X86ReturnProtectorLowering::opcodeIsReturn(unsigned opcode) const {
+ switch (opcode) {
+ case X86::RET:
+ case X86::RETL:
+ case X86::RETQ:
+ case X86::RETW:
+ case X86::RETIL:
+ case X86::RETIQ:
+ case X86::RETIW:
+ case X86::LRETL:
+ case X86::LRETQ:
+ case X86::LRETW:
+ case X86::LRETIL:
+ case X86::LRETIQ:
+ case X86::LRETIW:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void X86ReturnProtectorLowering::fillTempRegisters(
+ MachineFunction &MF, std::vector<unsigned> &TempRegs) const {
+
+ TempRegs.push_back(X86::R11);
+ TempRegs.push_back(X86::R10);
+ const Function &F = MF.getFunction();
+ if (!F.isVarArg()) {
+ // We can use any of the caller saved unused arg registers
+ switch (F.arg_size()) {
+ case 0:
+ TempRegs.push_back(X86::RDI);
+ LLVM_FALLTHROUGH;
+ case 1:
+ TempRegs.push_back(X86::RSI);
+ LLVM_FALLTHROUGH;
+ case 2: // RDX is the 2nd return register
+ case 3:
+ TempRegs.push_back(X86::RCX);
+ LLVM_FALLTHROUGH;
+ case 4:
+ TempRegs.push_back(X86::R8);
+ LLVM_FALLTHROUGH;
+ case 5:
+ TempRegs.push_back(X86::R9);
+ LLVM_FALLTHROUGH;
+ default:
+ break;
+ }
+ }
+}

View File

@ -0,0 +1,53 @@
$OpenBSD: patch-lib_Target_X86_X86ReturnProtectorLowering_h,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
Refactor retguard to make adding additional arches easier.
Index: lib/Target/X86/X86ReturnProtectorLowering.h
--- lib/Target/X86/X86ReturnProtectorLowering.h.orig
+++ lib/Target/X86/X86ReturnProtectorLowering.h
@@ -0,0 +1,45 @@
+//===-- X86ReturnProtectorLowering.h - ------------------------- -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the X86 implementation of ReturnProtectorLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_X86_X86RETURNPROTECTORLOWERING_H
+#define LLVM_LIB_TARGET_X86_X86RETURNPROTECTORLOWERING_H
+
+#include "llvm/CodeGen/ReturnProtectorLowering.h"
+
+namespace llvm {
+
+class X86ReturnProtectorLowering : public ReturnProtectorLowering {
+public:
+ /// insertReturnProtectorPrologue/Epilogue - insert return protector
+ /// instrumentation in prologue or epilogue.
+ virtual void
+ insertReturnProtectorPrologue(MachineFunction &MF, MachineBasicBlock &MBB,
+ GlobalVariable *cookie) const override;
+ virtual void
+ insertReturnProtectorEpilogue(MachineFunction &MF, MachineInstr &MI,
+ GlobalVariable *cookie) const override;
+
+ /// opcodeIsReturn - Return true if the given opcode is a return
+ /// instruction needing return protection, false otherwise.
+ virtual bool opcodeIsReturn(unsigned opcode) const override;
+
+ /// fillTempRegisters - Fill the list of available temp registers we can
+ /// use as a return protector register.
+ virtual void
+ fillTempRegisters(MachineFunction &MF,
+ std::vector<unsigned> &TempRegs) const override;
+};
+
+} // namespace llvm
+
+#endif

View File

@ -0,0 +1,35 @@
$OpenBSD: patch-lib_Target_X86_X86Subtarget_h,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
- implement -msave-args in clang/llvm, like Sun did for gcc
- Turn on -mretpoline by default in clang on amd64.
Index: lib/Target/X86/X86Subtarget.h
--- lib/Target/X86/X86Subtarget.h.orig
+++ lib/Target/X86/X86Subtarget.h
@@ -411,6 +411,9 @@ class X86Subtarget final : public X86GenSubtargetInfo
/// entry to the function and which must be maintained by every function.
unsigned stackAlignment = 4;
+ /// Whether function prologues should save register arguments on the stack.
+ bool SaveArgs = false;
+
/// Max. memset / memcpy size that is turned into rep/movs, rep/stos ops.
///
// FIXME: this is a known good value for Yonah. How about others?
@@ -492,6 +495,8 @@ class X86Subtarget final : public X86GenSubtargetInfo
return &getInstrInfo()->getRegisterInfo();
}
+ bool getSaveArgs() const { return SaveArgs; }
+
/// Returns the minimum alignment known to hold of the
/// stack frame on entry to the function and which must be maintained by every
/// function for this subtarget.
@@ -722,6 +727,7 @@ class X86Subtarget final : public X86GenSubtargetInfo
bool isTargetDarwin() const { return TargetTriple.isOSDarwin(); }
bool isTargetFreeBSD() const { return TargetTriple.isOSFreeBSD(); }
+ bool isTargetOpenBSD() const { return TargetTriple.isOSOpenBSD(); }
bool isTargetDragonFly() const { return TargetTriple.isOSDragonFly(); }
bool isTargetSolaris() const { return TargetTriple.isOSSolaris(); }
bool isTargetPS4() const { return TargetTriple.isPS4CPU(); }

View File

@ -0,0 +1,19 @@
$OpenBSD: patch-lib_Target_X86_X86TargetMachine_cpp,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
Add a clang pass that identifies potential ROP gadgets and replaces ROP
friendly instructions with safe alternatives. This initial commit fixes
3 instruction forms that will lower to include a c3 (return) byte.
Additional problematic instructions can be fixed incrementally using
this framework.
Index: lib/Target/X86/X86TargetMachine.cpp
--- lib/Target/X86/X86TargetMachine.cpp.orig
+++ lib/Target/X86/X86TargetMachine.cpp
@@ -505,6 +505,7 @@ void X86PassConfig::addPreEmitPass() {
}
addPass(createX86DiscriminateMemOpsPass());
addPass(createX86InsertPrefetchPass());
+ addPass(createX86FixupGadgetsPass());
}
void X86PassConfig::addPreEmitPass2() {

View File

@ -0,0 +1,22 @@
$OpenBSD: patch-lib_Target_X86_X86_h,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
Add a clang pass that identifies potential ROP gadgets and replaces ROP
friendly instructions with safe alternatives. This initial commit fixes
3 instruction forms that will lower to include a c3 (return) byte.
Additional problematic instructions can be fixed incrementally using
this framework.
Index: lib/Target/X86/X86.h
--- lib/Target/X86/X86.h.orig
+++ lib/Target/X86/X86.h
@@ -115,6 +115,10 @@ FunctionPass *createX86FixupBWInsts();
/// to another, when profitable.
FunctionPass *createX86DomainReassignmentPass();
+/// Return a Machine Function pass that attempts to replace
+/// ROP friendly instructions with alternatives.
+FunctionPass *createX86FixupGadgetsPass();
+
/// This pass replaces EVEX encoded of AVX-512 instructiosn by VEX
/// encoding when possible in order to reduce code size.
FunctionPass *createX86EvexToVexInsts();

View File

@ -0,0 +1,17 @@
$OpenBSD: patch-lib_Target_X86_X86_td,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
implement -msave-args in clang/llvm, like Sun did for gcc
Index: lib/Target/X86/X86.td
--- lib/Target/X86/X86.td.orig
+++ lib/Target/X86/X86.td
@@ -283,6 +283,9 @@ def FeatureLZCNTFalseDeps : SubtargetFeature<"false-de
"LZCNT/TZCNT have a false dependency on dest register">;
def FeaturePCONFIG : SubtargetFeature<"pconfig", "HasPCONFIG", "true",
"platform configuration instruction">;
+def FeatureSaveArgs
+ : SubtargetFeature<"save-args", "SaveArgs", "true",
+ "Save register arguments on the stack.">;
// On recent X86 (port bound) processors, its preferable to combine to a single shuffle
// using a variable mask over multiple fixed shuffles.
def FeatureFastVariableShuffle

View File

@ -0,0 +1,19 @@
$OpenBSD: patch-lib_Transforms_Scalar_LoopIdiomRecognize_cpp,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
Disable loop idiom recognition for _libc_memset and _libc_memcpy. These are
the internal names we use in libc for memset and memcpy; having the
compiler optimize them into calls to memset and memcpy would lead to infinite
recursion.
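A hypothetical sketch of the failure mode (the function body is illustrative,
not libc's actual source): the byte loop below matches the memset idiom, so
without this patch the optimizer would replace the loop with a call to
memset -- which _libc_memset is, giving infinite recursion:

    #include <stddef.h>

    void *_libc_memset(void *dst, int c, size_t n) {
        unsigned char *p = (unsigned char *)dst;
        for (size_t i = 0; i < n; i++)  // recognized as the memset idiom
            p[i] = (unsigned char)c;
        return dst;
    }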
Index: lib/Transforms/Scalar/LoopIdiomRecognize.cpp
--- lib/Transforms/Scalar/LoopIdiomRecognize.cpp.orig
+++ lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -282,6 +282,8 @@ bool LoopIdiomRecognize::runOnLoop(Loop *L) {
StringRef Name = L->getHeader()->getParent()->getName();
if (Name == "memset" || Name == "memcpy")
return false;
+ if (Name == "_libc_memset" || Name == "_libc_memcpy")
+ return false;
// Determine if code size heuristics need to be applied.
ApplyCodeSizeHeuristics =

View File

@ -0,0 +1,23 @@
$OpenBSD: patch-test_Assembler_debug-variant-discriminator_ll,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
https://github.com/llvm-mirror/llvm/commit/cc1f2a595ead516812a6c50398f0f3480ebe031f
Index: test/Assembler/debug-variant-discriminator.ll
--- test/Assembler/debug-variant-discriminator.ll.orig
+++ test/Assembler/debug-variant-discriminator.ll
@@ -12,3 +12,17 @@
!1 = !DICompositeType(tag: DW_TAG_variant_part, scope: !0, size: 64, discriminator: !2)
!2 = !DIDerivedType(tag: DW_TAG_member, scope: !1, baseType: !3, size: 64, align: 64, flags: DIFlagArtificial)
!3 = !DIBasicType(name: "u64", size: 64, encoding: DW_ATE_unsigned)
+; RUN: llvm-as < %s | llvm-dis | llvm-as | llvm-dis | FileCheck %s
+; RUN: verify-uselistorder %s
+
+; CHECK: !named = !{!0, !1, !2}
+!named = !{!0, !1, !2}
+
+; CHECK: !0 = !DICompositeType(tag: DW_TAG_structure_type, name: "Outer", size: 64, align: 64, identifier: "Outer")
+; CHECK-NEXT: !1 = !DICompositeType(tag: DW_TAG_variant_part, scope: !0, size: 64, discriminator: !2)
+; CHECK-NEXT: !2 = !DIDerivedType(tag: DW_TAG_member, scope: !1, baseType: !3, size: 64, align: 64, flags: DIFlagArtificial)
+; CHECK-NEXT: !3 = !DIBasicType(name: "u64", size: 64, encoding: DW_ATE_unsigned)
+!0 = !DICompositeType(tag: DW_TAG_structure_type, name: "Outer", size: 64, align: 64, identifier: "Outer")
+!1 = !DICompositeType(tag: DW_TAG_variant_part, scope: !0, size: 64, discriminator: !2)
+!2 = !DIDerivedType(tag: DW_TAG_member, scope: !1, baseType: !3, size: 64, align: 64, flags: DIFlagArtificial)
+!3 = !DIBasicType(name: "u64", size: 64, encoding: DW_ATE_unsigned)

View File

@ -0,0 +1,22 @@
$OpenBSD: patch-tools_clang_include_clang_AST_FormatString_h,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
- The %b printf extension in the kernel is not fixed to an int type. On sparc64
there are various %llb formats. Adjust the code to handle the length specifiers
and type-check them as in the regular case.
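For readers unfamiliar with the extension: %b consumes an integer plus a
descriptor string whose first byte gives the numeric base and whose remaining
bytes are <bit number><name> groups; the patch lets the integer be wider than
int when a length modifier is present, as in "%llb". A simplified model of the
decoding, runnable in a hosted program (a sketch of the semantics, not the
kernel's code; the base byte is hardcoded to hex here):

    #include <cstdio>

    // decode_b(0x5, "\20\1ENABLED\2ACTIVE\3DONE") prints 5<ENABLED,DONE>
    static void decode_b(unsigned long long v, const char *d) {
        printf("%llx", v);       // '\20' (base 16) assumed
        ++d;                     // skip the base byte
        int any = 0;
        while (*d) {
            int bit = *d++;      // 1-based bit number
            const char *name = d;
            while (*d > ' ')     // name runs until the next control byte
                ++d;
            if (v & (1ULL << (bit - 1)))
                printf("%c%.*s", any++ ? ',' : '<', (int)(d - name), name);
        }
        if (any)
            printf(">");
    }

    int main() { decode_b(0x5, "\20\1ENABLED\2ACTIVE\3DONE"); return 0; }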
Index: tools/clang/include/clang/AST/FormatString.h
--- tools/clang/include/clang/AST/FormatString.h.orig
+++ tools/clang/include/clang/AST/FormatString.h
@@ -227,8 +227,10 @@ class ConversionSpecifier { (public)
bool isIntArg() const { return (kind >= IntArgBeg && kind <= IntArgEnd) ||
kind == FreeBSDrArg || kind == FreeBSDyArg; }
- bool isUIntArg() const { return kind >= UIntArgBeg && kind <= UIntArgEnd; }
- bool isAnyIntArg() const { return kind >= IntArgBeg && kind <= UIntArgEnd; }
+ bool isUIntArg() const { return (kind >= UIntArgBeg && kind <= UIntArgEnd) ||
+ kind == FreeBSDbArg; }
+ bool isAnyIntArg() const { return (kind >= IntArgBeg && kind <= UIntArgEnd) ||
+ kind == FreeBSDbArg; }
bool isDoubleArg() const {
return kind >= DoubleArgBeg && kind <= DoubleArgEnd;
}

View File

@ -0,0 +1,33 @@
$OpenBSD: patch-tools_clang_include_clang_Basic_CodeGenOptions_def,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
Add RETGUARD to clang for amd64. This security mechanism uses per-function
random cookies to protect access to function return instructions, with the
effect that the integrity of the return address is protected, and function
return instructions are harder to use in ROP gadgets.
On function entry the return address is combined with a per-function random
cookie and stored in the stack frame. The integrity of this value is verified
before function return, and if this check fails, the program aborts. In this way
RETGUARD is an improved stack protector, since the cookies are per-function. The
verification routine is constructed such that the binary space immediately
before each ret instruction is padded with int3 instructions, which makes these
return instructions difficult to use in ROP gadgets. In the kernel, this has the
effect of removing approximately 50% of total ROP gadgets, and 15% of unique
ROP gadgets compared to the 6.3 release kernel. Function epilogues are
essentially gadget free, leaving only the polymorphic gadgets that result from
jumping into the instruction stream partway through other instructions. Work to
remove these gadgets will continue through other mechanisms.
Index: tools/clang/include/clang/Basic/CodeGenOptions.def
--- tools/clang/include/clang/Basic/CodeGenOptions.def.orig
+++ tools/clang/include/clang/Basic/CodeGenOptions.def
@@ -286,6 +286,9 @@ VALUE_CODEGENOPT(NumRegisterParameters, 32, 0)
/// The lower bound for a buffer to be considered for stack protection.
VALUE_CODEGENOPT(SSPBufferSize, 32, 0)
+/// Whether to use return protectors
+CODEGENOPT(ReturnProtector, 1, 0)
+
/// The kind of generated debug info.
ENUM_CODEGENOPT(DebugInfo, codegenoptions::DebugInfoKind, 3, codegenoptions::NoDebugInfo)

View File

@ -0,0 +1,32 @@
$OpenBSD: patch-tools_clang_include_clang_Basic_DiagnosticSemaKinds_td,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
- Disable -Waddress-of-packed-member by default.
While these warnings have the potential to be useful, there are too many
false positives right now.
- Disable -Wpointer-sign warnings by default;
base gcc does the same.
Index: tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
--- tools/clang/include/clang/Basic/DiagnosticSemaKinds.td.orig
+++ tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -5857,7 +5857,7 @@ def warn_pointer_indirection_from_incompatible_type :
InGroup<UndefinedReinterpretCast>, DefaultIgnore;
def warn_taking_address_of_packed_member : Warning<
"taking address of packed member %0 of class or structure %q1 may result in an unaligned pointer value">,
- InGroup<DiagGroup<"address-of-packed-member">>;
+ InGroup<DiagGroup<"address-of-packed-member">>, DefaultIgnore;
def err_objc_object_assignment : Error<
"cannot assign to class object (%0 invalid)">;
@@ -6841,7 +6841,7 @@ def ext_typecheck_convert_incompatible_pointer_sign :
"sending to parameter of different type}0,1"
"|%diff{casting $ to type $|casting between types}0,1}2"
" converts between pointers to integer types with different sign">,
- InGroup<DiagGroup<"pointer-sign">>;
+ InGroup<DiagGroup<"pointer-sign">>, DefaultIgnore;
def ext_typecheck_convert_incompatible_pointer : ExtWarn<
"incompatible pointer types "
"%select{%diff{assigning to $ from $|assigning to different types}0,1"

View File

@ -0,0 +1,32 @@
$OpenBSD: patch-tools_clang_include_clang_Driver_CC1Options_td,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
Add RETGUARD to clang for amd64. This security mechanism uses per-function
random cookies to protect access to function return instructions, with the
effect that the integrity of the return address is protected, and function
return instructions are harder to use in ROP gadgets.
On function entry the return address is combined with a per-function random
cookie and stored in the stack frame. The integrity of this value is verified
before function return, and if this check fails, the program aborts. In this way
RETGUARD is an improved stack protector, since the cookies are per-function. The
verification routine is constructed such that the binary space immediately
before each ret instruction is padded with int3 instructions, which makes these
return instructions difficult to use in ROP gadgets. In the kernel, this has the
effect of removing approximately 50% of total ROP gadgets, and 15% of unique
ROP gadgets compared to the 6.3 release kernel. Function epilogues are
essentially gadget free, leaving only the polymorphic gadgets that result from
jumping into the instruction stream partway through other instructions. Work to
remove these gadgets will continue through other mechanisms.
Index: tools/clang/include/clang/Driver/CC1Options.td
--- tools/clang/include/clang/Driver/CC1Options.td.orig
+++ tools/clang/include/clang/Driver/CC1Options.td
@@ -699,6 +699,8 @@ def stack_protector : Separate<["-"], "stack-protector
HelpText<"Enable stack protectors">;
def stack_protector_buffer_size : Separate<["-"], "stack-protector-buffer-size">,
HelpText<"Lower bound for a buffer to be considered for stack protection">;
+def ret_protector : Flag<["-"], "ret-protector">,
+ HelpText<"Enable Return Protectors">;
def fvisibility : Separate<["-"], "fvisibility">,
HelpText<"Default type and symbol visibility">;
def ftype_visibility : Separate<["-"], "ftype-visibility">,

View File

@ -0,0 +1,43 @@
$OpenBSD: patch-tools_clang_include_clang_Driver_Options_td,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
- Add ret protector options as no-ops.
- Improve the X86FixupGadgets pass
- Alias the command line parameter -p to -pg.
- implement -msave-args in clang/llvm, like Sun did for gcc
Index: tools/clang/include/clang/Driver/Options.td
--- tools/clang/include/clang/Driver/Options.td.orig
+++ tools/clang/include/clang/Driver/Options.td
@@ -1665,6 +1665,14 @@ def ftrivial_auto_var_init : Joined<["-"], "ftrivial-a
def enable_trivial_var_init_zero : Joined<["-"], "enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang">,
Flags<[CC1Option]>,
HelpText<"Trivial automatic variable initialization to zero is only here for benchmarks, it'll eventually be removed, and I'm OK with that because I'm only using it to benchmark">;
+def fno_ret_protector : Flag<["-"], "fno-ret-protector">, Group<f_Group>,
+ HelpText<"Disable return protector">;
+def fret_protector : Flag<["-"], "fret-protector">, Group<f_Group>,
+ HelpText<"Enable return protector">;
+def fno_fixup_gadgets : Flag<["-"], "fno-fixup-gadgets">, Group<f_Group>,
+ HelpText<"Disable FixupGadgets pass (x86 only)">;
+def ffixup_gadgets : Flag<["-"], "ffixup-gadgets">, Group<f_Group>,
+ HelpText<"Replace ROP friendly instructions with safe alternatives (x86 only)">;
def fstandalone_debug : Flag<["-"], "fstandalone-debug">, Group<f_Group>, Flags<[CoreOption]>,
HelpText<"Emit full debug info for all types used by the program">;
def fno_standalone_debug : Flag<["-"], "fno-standalone-debug">, Group<f_Group>, Flags<[CoreOption]>,
@@ -2500,7 +2508,7 @@ def pthreads : Flag<["-"], "pthreads">;
def pthread : Flag<["-"], "pthread">, Flags<[CC1Option]>,
HelpText<"Support POSIX threads in generated code">;
def no_pthread : Flag<["-"], "no-pthread">, Flags<[CC1Option]>;
-def p : Flag<["-"], "p">;
+def p : Flag<["-"], "p">, Alias<pg>;
def pie : Flag<["-"], "pie">;
def read__only__relocs : Separate<["-"], "read_only_relocs">;
def remap : Flag<["-"], "remap">;
@@ -2949,6 +2957,8 @@ def mshstk : Flag<["-"], "mshstk">, Group<m_x86_Featur
def mno_shstk : Flag<["-"], "mno-shstk">, Group<m_x86_Features_Group>;
def mretpoline_external_thunk : Flag<["-"], "mretpoline-external-thunk">, Group<m_x86_Features_Group>;
def mno_retpoline_external_thunk : Flag<["-"], "mno-retpoline-external-thunk">, Group<m_x86_Features_Group>;
+def msave_args : Flag<["-"], "msave-args">, Group<m_x86_Features_Group>;
+def mno_save_args : Flag<["-"], "mno-save-args">, Group<m_x86_Features_Group>;
// These are legacy user-facing driver-level option spellings. They are always
// aliases for options that are spelled using the more common Unix / GNU flag

View File

@ -0,0 +1,15 @@
$OpenBSD: patch-tools_clang_include_clang_Sema_Sema_h,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
Teach Clang about syslog format attribute
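With FST_Syslog wired up, a declaration can opt into printf-style checking
for syslog-like functions. A hedged usage sketch -- the attribute spelling
matches what OpenBSD headers use, but log_at itself is hypothetical:

    // clang will now warn on e.g. log_at(1, "%s") with a missing argument.
    void log_at(int pri, const char *fmt, ...)
        __attribute__((__format__(__syslog__, 2, 3)));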
Index: tools/clang/include/clang/Sema/Sema.h
--- tools/clang/include/clang/Sema/Sema.h.orig
+++ tools/clang/include/clang/Sema/Sema.h
@@ -10561,6 +10561,7 @@ class Sema { (public)
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
+ FST_Syslog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);

View File

@ -0,0 +1,39 @@
$OpenBSD: patch-tools_clang_lib_AST_FormatString_cpp,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
- The %b printf extension in the kernel is not fixed to an int type. On sparc64
there are various %llb formats. Adjust the code to handle the length specifiers
and type-check them as in the regular case.
Index: tools/clang/lib/AST/FormatString.cpp
--- tools/clang/lib/AST/FormatString.cpp.orig
+++ tools/clang/lib/AST/FormatString.cpp
@@ -746,6 +746,10 @@ bool FormatSpecifier::hasValidLengthModifier(const Tar
case ConversionSpecifier::XArg:
case ConversionSpecifier::nArg:
return true;
+ case ConversionSpecifier::FreeBSDbArg:
+ return Target.getTriple().isOSFreeBSD() ||
+ Target.getTriple().isPS4() ||
+ Target.getTriple().isOSOpenBSD();
case ConversionSpecifier::FreeBSDrArg:
case ConversionSpecifier::FreeBSDyArg:
return Target.getTriple().isOSFreeBSD() || Target.getTriple().isPS4();
@@ -779,6 +783,10 @@ bool FormatSpecifier::hasValidLengthModifier(const Tar
case ConversionSpecifier::ScanListArg:
case ConversionSpecifier::ZArg:
return true;
+ case ConversionSpecifier::FreeBSDbArg:
+ return Target.getTriple().isOSFreeBSD() ||
+ Target.getTriple().isPS4() ||
+ Target.getTriple().isOSOpenBSD();
case ConversionSpecifier::FreeBSDrArg:
case ConversionSpecifier::FreeBSDyArg:
return Target.getTriple().isOSFreeBSD() || Target.getTriple().isPS4();
@@ -937,6 +945,7 @@ bool FormatSpecifier::hasStandardLengthConversionCombi
case ConversionSpecifier::uArg:
case ConversionSpecifier::xArg:
case ConversionSpecifier::XArg:
+ case ConversionSpecifier::FreeBSDbArg:
return false;
default:
return true;

View File

@ -0,0 +1,22 @@
$OpenBSD: patch-tools_clang_lib_Basic_Targets_Mips_h,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
- Implement the 'h' register constraint on mips64. This lets clang build
pieces of software that use the constraint if the compiler claims
to be compatible with GCC 4.2.1.
Note that the constraint was removed in GCC 4.4. The reason was that
'h' could generate code whose result is unpredictable. The underlying
reason is that the HI and LO registers are special, and the optimizer
has to be careful when choosing the order of HI/LO accesses. It looks
like LLVM has the needed logic.
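A sketch of the kind of pre-GCC-4.4 inline asm this re-enables (illustrative
only; the helper is hypothetical): the 'h' constraint pins an output to the
HI register, here to read the upper half of a 64x64-bit multiply:

    static inline unsigned long umulhi(unsigned long a, unsigned long b) {
        unsigned long hi;
        __asm__("dmultu %1, %2" : "=h"(hi) : "r"(a), "r"(b));
        return hi;
    }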
Index: tools/clang/lib/Basic/Targets/Mips.h
--- tools/clang/lib/Basic/Targets/Mips.h.orig
+++ tools/clang/lib/Basic/Targets/Mips.h
@@ -238,6 +238,7 @@ class LLVM_LIBRARY_VISIBILITY MipsTargetInfo : public
case 'y': // Equivalent to "r", backward compatibility only.
case 'f': // floating-point registers.
case 'c': // $25 for indirect jumps
+ case 'h': // hi register
case 'l': // lo register
case 'x': // hilo register pair
Info.setAllowsRegister();

View File

@ -0,0 +1,24 @@
$OpenBSD: patch-tools_clang_lib_Basic_Targets_X86_cpp,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
implement -msave-args in clang/llvm, like Sun did for gcc
Index: tools/clang/lib/Basic/Targets/X86.cpp
--- tools/clang/lib/Basic/Targets/X86.cpp.orig
+++ tools/clang/lib/Basic/Targets/X86.cpp
@@ -817,6 +817,8 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<s
HasPTWRITE = true;
} else if (Feature == "+invpcid") {
HasINVPCID = true;
+ } else if (Feature == "+save-args") {
+ HasSaveArgs = true;
}
X86SSEEnum Level = llvm::StringSwitch<X86SSEEnum>(Feature)
@@ -1398,6 +1400,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) cons
.Case("movdiri", HasMOVDIRI)
.Case("movdir64b", HasMOVDIR64B)
.Case("mpx", HasMPX)
+ .Case("save-args", HasSaveArgs)
.Case("mwaitx", HasMWAITX)
.Case("pclmul", HasPCLMUL)
.Case("pconfig", HasPCONFIG)

View File

@ -0,0 +1,15 @@
$OpenBSD: patch-tools_clang_lib_Basic_Targets_X86_h,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
implement -msave-args in clang/llvm, like Sun did for gcc
Index: tools/clang/lib/Basic/Targets/X86.h
--- tools/clang/lib/Basic/Targets/X86.h.orig
+++ tools/clang/lib/Basic/Targets/X86.h
@@ -106,6 +106,7 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public T
bool HasMOVDIR64B = false;
bool HasPTWRITE = false;
bool HasINVPCID = false;
+ bool HasSaveArgs = false;
protected:
/// Enumeration of all of the X86 CPUs supported by Clang.

View File

@ -0,0 +1,33 @@
$OpenBSD: patch-tools_clang_lib_CodeGen_CGCall_cpp,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
Add RETGUARD to clang for amd64. This security mechanism uses per-function
random cookies to protect access to function return instructions, with the
effect that the integrity of the return address is protected, and function
return instructions are harder to use in ROP gadgets.
On function entry the return address is combined with a per-function random
cookie and stored in the stack frame. The integrity of this value is verified
before function return, and if this check fails, the program aborts. In this way
RETGUARD is an improved stack protector, since the cookies are per-function. The
verification routine is constructed such that the binary space immediately
before each ret instruction is padded with int3 instructions, which makes these
return instructions difficult to use in ROP gadgets. In the kernel, this has the
effect of removing approximately 50% of total ROP gadgets, and 15% of unique
ROP gadgets compared to the 6.3 release kernel. Function epilogues are
essentially gadget free, leaving only the polymorphic gadgets that result from
jumping into the instruction stream partway through other instructions. Work to
remove these gadgets will continue through other mechanisms.
Index: tools/clang/lib/CodeGen/CGCall.cpp
--- tools/clang/lib/CodeGen/CGCall.cpp.orig
+++ tools/clang/lib/CodeGen/CGCall.cpp
@@ -1958,6 +1958,9 @@ void CodeGenModule::ConstructAttributeList(
FuncAttrs.addAttribute("disable-tail-calls",
llvm::toStringRef(DisableTailCalls));
GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
+
+ if (CodeGenOpts.ReturnProtector)
+ FuncAttrs.addAttribute("ret-protector");
}
ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);

View File

@ -0,0 +1,20 @@
$OpenBSD: patch-tools_clang_lib_Driver_ToolChains_Arch_AArch64_cpp,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
Make LLVM create strict aligned code for OpenBSD/arm64.
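The effect of +strict-align, in a hedged sketch: without it the backend may
merge the four byte loads below into a single unaligned 32-bit ldr, which
faults on configurations where unaligned access is not permitted; with strict
alignment the byte-wise form is kept:

    #include <stdint.h>

    uint32_t load_le32(const uint8_t *p) {   // p may be unaligned
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }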
Index: tools/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
--- tools/clang/lib/Driver/ToolChains/Arch/AArch64.cpp.orig
+++ tools/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
@@ -311,9 +311,11 @@ fp16_fml_fallthrough:
}
if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
- options::OPT_munaligned_access))
+ options::OPT_munaligned_access)) {
if (A->getOption().matches(options::OPT_mno_unaligned_access))
Features.push_back("+strict-align");
+ } else if (Triple.isOSOpenBSD())
+ Features.push_back("+strict-align");
if (Args.hasArg(options::OPT_ffixed_x1))
Features.push_back("+reserve-x1");

View File

@ -0,0 +1,15 @@
$OpenBSD: patch-tools_clang_lib_Driver_ToolChains_Arch_Sparc_cpp,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
Select the proper SPARCv9 variant for the external assembler.
--- tools/clang/lib/Driver/ToolChains/Arch/Sparc.cpp.orig Sun Apr 2 02:16:38 2017
+++ tools/clang/lib/Driver/ToolChains/Arch/Sparc.cpp Sun Apr 2 02:16:48 2017
@@ -27,7 +27,7 @@ const char *sparc::getSparcAsmModeForCPU(StringRef Nam
.Case("niagara2", "-Av9b")
.Case("niagara3", "-Av9d")
.Case("niagara4", "-Av9d")
- .Default("-Av9");
+ .Default("-Av9a");
} else {
return llvm::StringSwitch<const char *>(Name)
.Case("v8", "-Av8")

View File

@ -0,0 +1,19 @@
$OpenBSD: patch-tools_clang_lib_Driver_ToolChains_Arch_X86_cpp,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
- Turn on -mretpoline by default in clang on amd64.
Index: tools/clang/lib/Driver/ToolChains/Arch/X86.cpp
--- tools/clang/lib/Driver/ToolChains/Arch/X86.cpp.orig
+++ tools/clang/lib/Driver/ToolChains/Arch/X86.cpp
@@ -146,6 +146,11 @@ void x86::getX86TargetFeatures(const Driver &D, const
// flags). This is a bit hacky but keeps existing usages working. We should
// consider deprecating this and instead warn if the user requests external
// retpoline thunks and *doesn't* request some form of retpolines.
+ if (Triple.isOSOpenBSD() && Triple.getArch() == llvm::Triple::x86_64 &&
+ Args.hasFlag(options::OPT_mretpoline, options::OPT_mno_retpoline, true)) {
+ Features.push_back("+retpoline-indirect-calls");
+ Features.push_back("+retpoline-indirect-branches");
+ } else
if (Args.hasArgNoClaim(options::OPT_mretpoline, options::OPT_mno_retpoline,
options::OPT_mspeculative_load_hardening,
options::OPT_mno_speculative_load_hardening)) {

View File

@ -0,0 +1,124 @@
$OpenBSD: patch-tools_clang_lib_Driver_ToolChains_Clang_cpp,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
- Make LLVM create strict aligned code for OpenBSD/arm64.
- Disable -fstrict-aliasing by default on OpenBSD.
- Enable -fwrapv by default
- Add ret protector options as no-ops.
- Add RETGUARD to clang for amd64. This security mechanism uses per-function
random cookies to protect access to function return instructions, with the
effect that the integrity of the return address is protected, and function
return instructions are harder to use in ROP gadgets.
On function entry the return address is combined with a per-function random
cookie and stored in the stack frame. The integrity of this value is verified
before function return, and if this check fails, the program aborts. In this way
RETGUARD is an improved stack protector, since the cookies are per-function. The
verification routine is constructed such that the binary space immediately
before each ret instruction is padded with int3 instructions, which makes these
return instructions difficult to use in ROP gadgets. In the kernel, this has the
effect of removing approximately 50% of total ROP gadgets, and 15% of unique
ROP gadgets compared to the 6.3 release kernel. Function epilogues are
essentially gadget free, leaving only the polymorphic gadgets that result from
jumping into the instruction stream partway through other instructions. Work to
remove these gadgets will continue through other mechanisms.
- Add retguard for arm64.
- Improve the X86FixupGadgets pass
- On OpenBSD disable the malloc/calloc/realloc/free/str*dup builtins, since
they can perform strange transforms and optimizations. Some of those could
gain a slight advantage, but would avoid the variety of important runtime
checks our malloc(3) code does. In essence, the transforms performed are
considered "anti-mitigation".
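One concrete effect of the -fwrapv default mentioned above, as a sketch:
signed overflow becomes defined two's-complement wrap, so post-hoc overflow
checks like the one below keep working instead of being deleted as dead code
under the usual undefined-behaviour assumptions:

    bool add_overflows(int a, int b) {
        int sum = a + b;              // wraps under -fwrapv instead of UB
        return (b > 0) ? sum < a : sum > a;
    }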
Index: tools/clang/lib/Driver/ToolChains/Clang.cpp
--- tools/clang/lib/Driver/ToolChains/Clang.cpp.orig
+++ tools/clang/lib/Driver/ToolChains/Clang.cpp
@@ -3899,9 +3899,12 @@ void Clang::ConstructJob(Compilation &C, const JobActi
OFastEnabled ? options::OPT_Ofast : options::OPT_fstrict_aliasing;
// We turn strict aliasing off by default if we're in CL mode, since MSVC
// doesn't do any TBAA.
- bool TBAAOnByDefault = !D.IsCLMode();
+ bool StrictAliasingDefault = !D.IsCLMode();
+ // We also turn off strict aliasing on OpenBSD.
+ if (getToolChain().getTriple().isOSOpenBSD())
+ StrictAliasingDefault = false;
if (!Args.hasFlag(options::OPT_fstrict_aliasing, StrictAliasingAliasOption,
- options::OPT_fno_strict_aliasing, TBAAOnByDefault))
+ options::OPT_fno_strict_aliasing, StrictAliasingDefault))
CmdArgs.push_back("-relaxed-aliasing");
if (!Args.hasFlag(options::OPT_fstruct_path_tbaa,
options::OPT_fno_struct_path_tbaa))
@@ -4527,7 +4530,8 @@ void Clang::ConstructJob(Compilation &C, const JobActi
options::OPT_fno_strict_overflow)) {
if (A->getOption().matches(options::OPT_fno_strict_overflow))
CmdArgs.push_back("-fwrapv");
- }
+ } else if (getToolChain().getTriple().isOSOpenBSD())
+ CmdArgs.push_back("-fwrapv");
if (Arg *A = Args.getLastArg(options::OPT_freroll_loops,
options::OPT_fno_reroll_loops))
@@ -4544,9 +4548,44 @@ void Clang::ConstructJob(Compilation &C, const JobActi
false))
CmdArgs.push_back(Args.MakeArgString("-mspeculative-load-hardening"));
- RenderSSPOptions(TC, Args, CmdArgs, KernelOrKext);
RenderTrivialAutoVarInitOptions(D, TC, Args, CmdArgs);
+ // -ret-protector
+ unsigned RetProtector = 1;
+ if (Arg *A = Args.getLastArg(options::OPT_fno_ret_protector,
+ options::OPT_fret_protector)) {
+ if (A->getOption().matches(options::OPT_fno_ret_protector))
+ RetProtector = 0;
+ else if (A->getOption().matches(options::OPT_fret_protector))
+ RetProtector = 1;
+ }
+ if (RetProtector &&
+ ((getToolChain().getArch() == llvm::Triple::x86_64) ||
+ (getToolChain().getArch() == llvm::Triple::aarch64)) &&
+ !Args.hasArg(options::OPT_fno_stack_protector) &&
+ !Args.hasArg(options::OPT_pg)) {
+ CmdArgs.push_back(Args.MakeArgString("-D_RET_PROTECTOR"));
+ CmdArgs.push_back(Args.MakeArgString("-ret-protector"));
+ // Consume the stack protector arguments to prevent warning
+ Args.getLastArg(options::OPT_fstack_protector_all,
+ options::OPT_fstack_protector_strong,
+ options::OPT_fstack_protector,
+ options::OPT__param); // ssp-buffer-size
+ } else {
+ // If we're not using retguard, then do the usual stack protector
+ RenderSSPOptions(getToolChain(), Args, CmdArgs, KernelOrKext);
+ }
+
+ // -fixup-gadgets
+ if (Arg *A = Args.getLastArg(options::OPT_fno_fixup_gadgets,
+ options::OPT_ffixup_gadgets)) {
+ CmdArgs.push_back(Args.MakeArgString(Twine("-mllvm")));
+ if (A->getOption().matches(options::OPT_fno_fixup_gadgets))
+ CmdArgs.push_back(Args.MakeArgString(Twine("-x86-fixup-gadgets=false")));
+ else if (A->getOption().matches(options::OPT_ffixup_gadgets))
+ CmdArgs.push_back(Args.MakeArgString(Twine("-x86-fixup-gadgets=true")));
+ }
+
// Translate -mstackrealign
if (Args.hasFlag(options::OPT_mstackrealign, options::OPT_mno_stackrealign,
false))
@@ -5029,6 +5068,18 @@ void Clang::ConstructJob(Compilation &C, const JobActi
options::OPT_fno_rewrite_imports, false);
if (RewriteImports)
CmdArgs.push_back("-frewrite-imports");
+
+ // Disable some builtins on OpenBSD because they are just not
+ // right...
+ if (getToolChain().getTriple().isOSOpenBSD()) {
+ CmdArgs.push_back("-fno-builtin-malloc");
+ CmdArgs.push_back("-fno-builtin-calloc");
+ CmdArgs.push_back("-fno-builtin-realloc");
+ CmdArgs.push_back("-fno-builtin-valloc");
+ CmdArgs.push_back("-fno-builtin-free");
+ CmdArgs.push_back("-fno-builtin-strdup");
+ CmdArgs.push_back("-fno-builtin-strndup");
+ }
// Enable rewrite includes if the user's asked for it or if we're generating
// diagnostics.

View File

@ -0,0 +1,16 @@
$OpenBSD: patch-tools_clang_lib_Driver_ToolChains_Gnu_cpp,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
- Disable IAS for OpenBSD SPARC.
Index: tools/clang/lib/Driver/ToolChains/Gnu.cpp
--- tools/clang/lib/Driver/ToolChains/Gnu.cpp.orig
+++ tools/clang/lib/Driver/ToolChains/Gnu.cpp
@@ -2512,7 +2512,7 @@ bool Generic_GCC::IsIntegratedAssemblerDefault() const
case llvm::Triple::sparc:
case llvm::Triple::sparcel:
case llvm::Triple::sparcv9:
- if (getTriple().isOSSolaris() || getTriple().isOSOpenBSD())
+ if (getTriple().isOSSolaris())
return true;
return false;
default:

View File

@ -0,0 +1,123 @@
$OpenBSD: patch-tools_clang_lib_Driver_ToolChains_OpenBSD_cpp,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
Add support for building against libestdc++ from ports-gcc.
Use more *_p.a libraries from base when profiling is requested.
Index: tools/clang/lib/Driver/ToolChains/OpenBSD.cpp
--- tools/clang/lib/Driver/ToolChains/OpenBSD.cpp.orig
+++ tools/clang/lib/Driver/ToolChains/OpenBSD.cpp
@@ -12,6 +12,8 @@
#include "Arch/Sparc.h"
#include "CommonArgs.h"
#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
#include "llvm/Option/ArgList.h"
@@ -198,7 +200,13 @@ void openbsd::Linker::ConstructJob(Compilation &C, con
}
// FIXME: For some reason GCC passes -lgcc before adding
// the default system libraries. Just mimic this for now.
- CmdArgs.push_back("-lcompiler_rt");
+ if (ToolChain.GetCXXStdlibType(Args) == ToolChain::CST_Libcxx)
+ CmdArgs.push_back("-lcompiler_rt");
+ else {
+ CmdArgs.push_back("-L${LOCALBASE}/lib/gcc/${GCC_CONFIG}/${GCC_VER}");
+ CmdArgs.push_back("-L${LOCALBASE}/lib"); // XXX nasty
+ CmdArgs.push_back("-lgcc");
+ }
if (Args.hasArg(options::OPT_pthread)) {
if (!Args.hasArg(options::OPT_shared) && Args.hasArg(options::OPT_pg))
@@ -214,7 +222,10 @@ void openbsd::Linker::ConstructJob(Compilation &C, con
CmdArgs.push_back("-lc");
}
- CmdArgs.push_back("-lcompiler_rt");
+ if (ToolChain.GetCXXStdlibType(Args) == ToolChain::CST_Libcxx)
+ CmdArgs.push_back("-lcompiler_rt");
+ else
+ CmdArgs.push_back("-lgcc");
}
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
@@ -255,16 +266,70 @@ OpenBSD::OpenBSD(const Driver &D, const llvm::Triple &
getFilePaths().push_back(getDriver().SysRoot + "/usr/lib");
}
-void OpenBSD::AddCXXStdlibLibArgs(const ArgList &Args,
- ArgStringList &CmdArgs) const {
- bool Profiling = Args.hasArg(options::OPT_pg);
-
- CmdArgs.push_back(Profiling ? "-lc++_p" : "-lc++");
- CmdArgs.push_back(Profiling ? "-lc++abi_p" : "-lc++abi");
-}
-
Tool *OpenBSD::buildAssembler() const {
return new tools::openbsd::Assembler(*this);
}
Tool *OpenBSD::buildLinker() const { return new tools::openbsd::Linker(*this); }
+
+ToolChain::CXXStdlibType OpenBSD::GetCXXStdlibType(const ArgList &Args) const {
+ if (Arg *A = Args.getLastArg(options::OPT_stdlib_EQ)) {
+ StringRef Value = A->getValue();
+ if (Value == "libstdc++")
+ return ToolChain::CST_Libstdcxx;
+ if (Value == "libc++")
+ return ToolChain::CST_Libcxx;
+
+ getDriver().Diag(clang::diag::err_drv_invalid_stdlib_name)
+ << A->getAsString(Args);
+ }
+ switch (getTriple().getArch()) {
+ case llvm::Triple::arm:
+ case llvm::Triple::aarch64:
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ return ToolChain::CST_Libcxx;
+ break;
+ default:
+ return ToolChain::CST_Libstdcxx;
+ break;
+ }
+}
+
+void OpenBSD::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
+ switch (GetCXXStdlibType(DriverArgs)) {
+ case ToolChain::CST_Libcxx:
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/include/c++/v1");
+ break;
+ case ToolChain::CST_Libstdcxx:
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "${LOCALBASE}/include/c++/${GCC_VER}");
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "${LOCALBASE}/include/c++/${GCC_VER}/${GCC_CONFIG}");
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "${LOCALBASE}/include/c++/${GCC_VER}/backward");
+ break;
+ }
+}
+
+void OpenBSD::AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ bool Profiling = Args.hasArg(options::OPT_pg);
+
+ switch (GetCXXStdlibType(Args)) {
+ case ToolChain::CST_Libcxx:
+ CmdArgs.push_back(Profiling ? "-lc++_p" : "-lc++");
+ CmdArgs.push_back(Profiling ? "-lc++abi_p" : "-lc++abi");
+ CmdArgs.push_back(Profiling ? "-lpthread_p" : "-lpthread");
+ break;
+ case ToolChain::CST_Libstdcxx:
+ CmdArgs.push_back("-lestdc++");
+ break;
+ }
+}

View File

@ -0,0 +1,16 @@
$OpenBSD: patch-tools_clang_lib_Driver_ToolChains_OpenBSD_h,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
Index: tools/clang/lib/Driver/ToolChains/OpenBSD.h
--- tools/clang/lib/Driver/ToolChains/OpenBSD.h.orig
+++ tools/clang/lib/Driver/ToolChains/OpenBSD.h
@@ -69,6 +69,11 @@ class LLVM_LIBRARY_VISIBILITY OpenBSD : public Generic
void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
+ CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
unsigned GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
return 2;
}

View File

@@ -0,0 +1,32 @@
$OpenBSD: patch-tools_clang_lib_Frontend_CompilerInvocation_cpp,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
Add RETGUARD to clang for amd64. This security mechanism uses per-function
random cookies to protect access to function return instructions, with the
effect that the integrity of the return address is protected, and function
return instructions are harder to use in ROP gadgets.
On function entry the return address is combined with a per-function random
cookie and stored in the stack frame. The integrity of this value is verified
before function return, and if this check fails, the program aborts. In this way
RETGUARD is an improved stack protector, since the cookies are per-function. The
verification routine is constructed such that the binary space immediately
before each ret instruction is padded with int3 instructions, which makes these
return instructions difficult to use in ROP gadgets. In the kernel, this has the
effect of removing approximately 50% of total ROP gadgets, and 15% of unique
ROP gadgets compared to the 6.3 release kernel. Function epilogues are
essentially gadget free, leaving only the polymorphic gadgets that result from
jumping into the instruction stream partway through other instructions. Work to
remove these gadgets will continue through other mechanisms. A conceptual C
sketch of the cookie check follows this patch.
Index: tools/clang/lib/Frontend/CompilerInvocation.cpp
--- tools/clang/lib/Frontend/CompilerInvocation.cpp.orig
+++ tools/clang/lib/Frontend/CompilerInvocation.cpp
@@ -1133,6 +1133,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, Arg
Opts.StackAlignment = StackAlignment;
}
+ Opts.ReturnProtector = Args.hasArg(OPT_ret_protector);
+
if (Arg *A = Args.getLastArg(OPT_mstack_probe_size)) {
StringRef Val = A->getValue();
unsigned StackProbeSize = Opts.StackProbeSize;
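
A conceptual sketch of the mechanism in C, not the actual code generation
(which happens in the emitted prologue and epilogue, below the level visible
to C); __retguard_00 is a made-up stand-in for the per-function cookie symbol:

    #include <stdint.h>

    extern uintptr_t __retguard_00;  /* hypothetical per-function random cookie */

    void foo(void) {
        /* prologue: combine the return address with the cookie and keep
           the result in the stack frame */
        uintptr_t guard = __retguard_00 ^ (uintptr_t)__builtin_return_address(0);

        /* ... function body ... */

        /* epilogue: re-derive and compare; a mismatch aborts before the
           return, and the bytes leading up to the real ret are int3 traps */
        if ((guard ^ __retguard_00) != (uintptr_t)__builtin_return_address(0))
            __builtin_trap();
    }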

View File

@@ -0,0 +1,92 @@
$OpenBSD: patch-tools_clang_lib_Sema_SemaChecking_cpp,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
- Teach Clang about the syslog format attribute
- Enable the kprintf format attribute
- The %b printf extension in the kernel is not fixed to an int type. On sparc64
there are various %llb formats. Adjust the code to handle the length
modifiers and to type check as in the regular case (see the usage sketch
after this patch).
Index: tools/clang/lib/Sema/SemaChecking.cpp
--- tools/clang/lib/Sema/SemaChecking.cpp.orig
+++ tools/clang/lib/Sema/SemaChecking.cpp
@@ -6608,7 +6608,7 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef
Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) {
return llvm::StringSwitch<FormatStringType>(Format->getType()->getName())
.Case("scanf", FST_Scanf)
- .Cases("printf", "printf0", FST_Printf)
+ .Cases("printf", "printf0", "syslog", FST_Printf)
.Cases("NSString", "CFString", FST_NSString)
.Case("strftime", FST_Strftime)
.Case("strfmon", FST_Strfmon)
@@ -6705,6 +6705,7 @@ bool Sema::CheckFormatArguments(ArrayRef<const Expr *>
case FST_Kprintf:
case FST_FreeBSDKPrintf:
case FST_Printf:
+ case FST_Syslog:
Diag(FormatLoc, diag::note_format_security_fixit)
<< FixItHint::CreateInsertion(FormatLoc, "\"%s\", ");
break;
@@ -7530,19 +7531,33 @@ CheckPrintfHandler::HandlePrintfSpecifier(const analyz
// Claim the second argument.
CoveredArgs.set(argIndex + 1);
- // Type check the first argument (int for %b, pointer for %D)
const Expr *Ex = getDataArg(argIndex);
- const analyze_printf::ArgType &AT =
- (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ?
- ArgType(S.Context.IntTy) : ArgType::CPointerTy;
- if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType()))
- EmitFormatDiagnostic(
+ if (CS.getKind() == ConversionSpecifier::FreeBSDDArg) {
+ // Type check the first argument (pointer for %D)
+ const analyze_printf::ArgType &AT = ArgType::CPointerTy;
+ if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType()))
+ EmitFormatDiagnostic(
S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
- << AT.getRepresentativeTypeName(S.Context) << Ex->getType()
- << false << Ex->getSourceRange(),
- Ex->getBeginLoc(), /*IsStringLocation*/ false,
+ << AT.getRepresentativeTypeName(S.Context) << Ex->getType()
+ << false << Ex->getSourceRange(),
+ Ex->getBeginLoc(), /*IsStringLocation*/false,
getSpecifierRange(startSpecifier, specifierLen));
+ } else {
+ // Check the length modifier for %b
+ if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo()))
+ HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
+ diag::warn_format_nonsensical_length);
+ else if (!FS.hasStandardLengthModifier())
+ HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
+ else if (!FS.hasStandardLengthConversionCombination())
+ HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
+ diag::warn_format_non_standard_conversion_spec);
+ // Type check the first argument of %b
+ if (!checkFormatExpr(FS, startSpecifier, specifierLen, Ex))
+ return false;
+ }
+
// Type check the second argument (char * for both %b and %D)
Ex = getDataArg(argIndex + 1);
const analyze_printf::ArgType &AT2 = ArgType::CStrTy;
@@ -8277,8 +8292,9 @@ static void CheckFormatString(Sema &S, const FormatStr
}
if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
- Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
- Type == Sema::FST_OSTrace) {
+ Type == Sema::FST_Kprintf || Type == Sema::FST_FreeBSDKPrintf ||
+ Type == Sema::FST_OSLog || Type == Sema::FST_OSTrace ||
+ Type == Sema::FST_Syslog) {
CheckPrintfHandler H(
S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
(Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str,
@@ -8288,7 +8304,7 @@ static void CheckFormatString(Sema &S, const FormatStr
if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
S.getLangOpts(),
S.Context.getTargetInfo(),
- Type == Sema::FST_FreeBSDKPrintf))
+ Type == Sema::FST_Kprintf || Type == Sema::FST_FreeBSDKPrintf))
H.DoneProcessing();
} else if (Type == Sema::FST_Scanf) {
CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
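
A usage sketch of what the hunk above starts checking; the flag values and
bit-description strings are made up, and %b itself is a kernel-printf
extension, so this only type-checks where that extension is accepted:

    /* hypothetical kernel-style snippet */
    int flags = 0x3;
    unsigned long long tstate = 0x5ULL;

    /* plain %b: the first argument must be int, the second a char *
       bit description */
    printf("flags=%b\n", flags, "\20\01ENABLE\02ACTIVE");

    /* sparc64-style %llb: the length modifier is now parsed, so the
       first argument is type-checked as long long instead of int */
    printf("tstate=%llb\n", tstate, "\20\01PRIV\02INTR");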

View File

@@ -0,0 +1,15 @@
$OpenBSD: patch-tools_clang_lib_Sema_SemaDeclAttr_cpp,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
Teach Clang about the syslog format attribute (a usage sketch follows this
patch)
Index: tools/clang/lib/Sema/SemaDeclAttr.cpp
--- tools/clang/lib/Sema/SemaDeclAttr.cpp.orig
+++ tools/clang/lib/Sema/SemaDeclAttr.cpp
@@ -3290,6 +3290,7 @@ static FormatAttrKind getFormatAttrKind(StringRef Form
.Case("freebsd_kprintf", SupportedFormat) // FreeBSD.
.Case("os_trace", SupportedFormat)
.Case("os_log", SupportedFormat)
+ .Case("syslog", SupportedFormat)
.Cases("gcc_diag", "gcc_cdiag", "gcc_cxxdiag", "gcc_tdiag", IgnoredFormat)
.Default(InvalidFormat);
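
A usage sketch; the wrapper function and its caller are made up. With
"syslog" accepted as a format kind, clang applies printf-style -Wformat
checking to such declarations:

    #include <syslog.h>

    /* argument 2 is the format string, argument 3 starts the variadic args */
    void log_event(int priority, const char *fmt, ...)
        __attribute__((__format__(__syslog__, 2, 3)));

    /* clang can now warn here, e.g. if uid were passed to a %s conversion */
    void announce(int uid) { log_event(LOG_INFO, "uid %d logged in", uid); }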

View File

@@ -0,0 +1,2 @@
clang-tools-extra contains helpful developer tools built on Clang's tooling
API.

View File

@@ -0,0 +1,50 @@
@comment $OpenBSD: PLIST,v 1.1.1.1 2019/11/06 10:07:56 rsadowski Exp $
@bin bin/clang-apply-replacements
@bin bin/clang-change-namespace
@bin bin/clang-include-fixer
@bin bin/clang-query
@bin bin/clang-reorder-fields
@bin bin/clang-tidy
@bin bin/clangd
@bin bin/find-all-symbols
@bin bin/modularize
lib/libclangApplyReplacements.a
lib/libclangChangeNamespace.a
lib/libclangDaemon.a
lib/libclangDoc.a
lib/libclangIncludeFixer.a
lib/libclangIncludeFixerPlugin.a
lib/libclangMove.a
lib/libclangQuery.a
lib/libclangReorderFields.a
lib/libclangTidy.a
lib/libclangTidyAbseilModule.a
lib/libclangTidyAndroidModule.a
lib/libclangTidyBoostModule.a
lib/libclangTidyBugproneModule.a
lib/libclangTidyCERTModule.a
lib/libclangTidyCppCoreGuidelinesModule.a
lib/libclangTidyFuchsiaModule.a
lib/libclangTidyGoogleModule.a
lib/libclangTidyHICPPModule.a
lib/libclangTidyLLVMModule.a
lib/libclangTidyMPIModule.a
lib/libclangTidyMiscModule.a
lib/libclangTidyModernizeModule.a
lib/libclangTidyObjCModule.a
lib/libclangTidyPerformanceModule.a
lib/libclangTidyPlugin.a
lib/libclangTidyPortabilityModule.a
lib/libclangTidyReadabilityModule.a
lib/libclangTidyUtils.a
lib/libclangTidyZirconModule.a
lib/libfindAllSymbols.a
share/clang/clang-include-fixer.el
share/clang/clang-include-fixer.py
share/clang/clang-include-fixer.pyc
share/clang/clang-tidy-diff.py
share/clang/clang-tidy-diff.pyc
share/clang/run-clang-tidy.py
share/clang/run-clang-tidy.pyc
share/clang/run-find-all-symbols.py
share/clang/run-find-all-symbols.pyc