Merge various improvements from base
o clang:
  - add back kernel printf %b length specifier support (%llb, etc, lost in
    the update to 8.0.0)
o lld:
  - Restore previous section after setting the MIPS ABI marker
  - Fix output section alignment when entry size isn't a power of two
o arm64, amd64:
  - Do not store the retguard cookie in frame in leaf functions if possible
  - Emit variable length trap padding in retguard epilogue
o amd64:
  - move code that selects retpoline by default to a different source file
o mips64:
  - Fix a bug in memory operand handling
  - Implement SGE pseudo-instructions
  - Implement .cplocal directive
  - Fix instruction guard
  - Implement the 'h' register constraint on mips64
o sparc64:
  - Remove cast that truncates immediate operands to 32 bits
This commit is contained in:
parent 52da7c5584
commit 3335a863b8
@ -1,4 +1,4 @@
|
||||
# $OpenBSD: Makefile,v 1.229 2019/07/08 07:56:52 jca Exp $
|
||||
# $OpenBSD: Makefile,v 1.230 2019/07/09 13:21:37 jca Exp $
|
||||
|
||||
# XXX If broken on an architecture, remove the arch from LLVM_ARCHS.
|
||||
ONLY_FOR_ARCHS = ${LLVM_ARCHS}
|
||||
@ -18,7 +18,7 @@ PKGSPEC-main = llvm-=${LLVM_V}
|
||||
PKGNAME-main = llvm-${LLVM_V}
|
||||
PKGNAME-python = py-llvm-${LLVM_V}
|
||||
PKGNAME-lldb = lldb-${LLVM_V}
|
||||
REVISION = 0
|
||||
REVISION = 1
|
||||
|
||||
CATEGORIES = devel
|
||||
|
||||
|
@ -1,11 +1,14 @@
|
||||
$OpenBSD: patch-include_llvm_CodeGen_ReturnProtectorLowering_h,v 1.1 2018/08/21 06:56:09 ajacoutot Exp $
|
||||
$OpenBSD: patch-include_llvm_CodeGen_ReturnProtectorLowering_h,v 1.2 2019/07/09 13:21:37 jca Exp $
|
||||
|
||||
Refactor retguard to make adding additional arches easier.
|
||||
- Refactor retguard to make adding additional arches easier.
|
||||
- Do not store the retguard cookie in frame in leaf functions if possible.
|
||||
Makes things slightly faster and also improves security in these functions,
|
||||
since the retguard cookie can't leak via the stack.
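The leaf-function change described above boils down to a placement decision. The following is a minimal standalone sketch with hypothetical names, not the patch's code (the real walk over basic blocks lives in determineReturnProtectorTempRegister in the diffs below), assuming only what the description states: in a leaf function where a temp register is free in every block, the cookie stays in that register and is never stored to the frame.

```cpp
#include <cstdio>

struct CookiePlacement {
  unsigned Reg = 0;        // register that will hold the retguard cookie
  bool NeedsStore = true;  // must the cookie be stored in the stack frame?
};

// Hypothetical model of the decision only; names and structure are illustrative.
static CookiePlacement placeCookie(bool hasCalls, bool tempFreeInEveryBlock,
                                   unsigned tempReg) {
  CookiePlacement p;
  p.Reg = tempReg;
  // Leaf function with a temp register free everywhere: keep the cookie in
  // that register for the whole function; it never touches the stack.
  p.NeedsStore = !(!hasCalls && tempFreeInEveryBlock);
  return p;
}

int main() {
  CookiePlacement leaf = placeCookie(false, true, 9);
  std::printf("leaf: reg=%u needs store=%d\n", leaf.Reg, (int)leaf.NeedsStore);
}
```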
|
||||
|
||||
Index: include/llvm/CodeGen/ReturnProtectorLowering.h
|
||||
--- include/llvm/CodeGen/ReturnProtectorLowering.h.orig
|
||||
+++ include/llvm/CodeGen/ReturnProtectorLowering.h
|
||||
@@ -0,0 +1,78 @@
|
||||
@@ -0,0 +1,79 @@
|
||||
+//===-- llvm/CodeGen/ReturnProtectorLowering.h ------------------*- C++ -*-===//
|
||||
+//
|
||||
+// The LLVM Compiler Infrastructure
|
||||
@ -36,6 +39,7 @@ Index: include/llvm/CodeGen/ReturnProtectorLowering.h
|
||||
+
|
||||
+class ReturnProtectorLowering {
|
||||
+public:
|
||||
+ virtual ~ReturnProtectorLowering() {}
|
||||
+ /// Subclass interface - subclasses need to implement these functions.
|
||||
+
|
||||
+ /// insertReturnProtectorPrologue/Epilogue - insert return protector
|
||||
@ -65,7 +69,7 @@ Index: include/llvm/CodeGen/ReturnProtectorLowering.h
|
||||
+ /// saveReturnProtectorRegister - Allows the target to save the
|
||||
+ /// CalculationRegister in the CalleeSavedInfo vector if needed.
|
||||
+ virtual void
|
||||
+ saveReturnProtectorRegister(const MachineFunction &MF,
|
||||
+ saveReturnProtectorRegister(MachineFunction &MF,
|
||||
+ std::vector<CalleeSavedInfo> &CSI) const;
|
||||
+
|
||||
+ /// determineReturnProtectorTempRegister - Find a register that can be used
|
||||
|
@ -1,4 +1,4 @@
|
||||
$OpenBSD: patch-include_llvm_CodeGen_TargetFrameLowering_h,v 1.3 2019/01/28 06:27:28 jca Exp $
|
||||
$OpenBSD: patch-include_llvm_CodeGen_TargetFrameLowering_h,v 1.4 2019/07/09 13:21:37 jca Exp $
|
||||
|
||||
- Add RETGUARD to clang for amd64. This security mechanism uses per-function
|
||||
random cookies to protect access to function return instructions, with the
|
||||
@ -30,23 +30,13 @@ Index: include/llvm/CodeGen/TargetFrameLowering.h
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
@@ -168,6 +169,20 @@ class TargetFrameLowering { (public)
|
||||
@@ -168,6 +169,10 @@ class TargetFrameLowering { (public)
|
||||
MachineBasicBlock &MBB) const = 0;
|
||||
virtual void emitEpilogue(MachineFunction &MF,
|
||||
MachineBasicBlock &MBB) const = 0;
|
||||
+
|
||||
+ virtual const ReturnProtectorLowering *getReturnProtector() const {
|
||||
+ return nullptr;
|
||||
+ }
|
||||
+
|
||||
+ /// insertReturnProtectorPrologue/Epilogue - Insert ret-protector code
|
||||
+ virtual void insertReturnProtectorPrologue(MachineFunction &MF,
|
||||
+ MachineBasicBlock &MBB) const
|
||||
+ {}
|
||||
+ virtual bool insertReturnProtectorEpilogue(MachineFunction &MF,
|
||||
+ MachineBasicBlock &MBB) const
|
||||
+ {
|
||||
+ return false;
|
||||
+ }
|
||||
|
||||
/// Replace a StackProbe stub (if any) with the actual probe code inline
|
||||
|
@ -1,11 +1,14 @@
|
||||
$OpenBSD: patch-lib_CodeGen_ReturnProtectorLowering_cpp,v 1.1 2018/08/21 06:56:09 ajacoutot Exp $
|
||||
$OpenBSD: patch-lib_CodeGen_ReturnProtectorLowering_cpp,v 1.2 2019/07/09 13:21:37 jca Exp $
|
||||
|
||||
Refactor retguard to make adding additional arches easier.
|
||||
- Refactor retguard to make adding additional arches easier.
|
||||
- Do not store the retguard cookie in frame in leaf functions if possible.
|
||||
Makes things slightly faster and also improves security in these functions,
|
||||
since the retguard cookie can't leak via the stack.
|
||||
|
||||
Index: lib/CodeGen/ReturnProtectorLowering.cpp
|
||||
--- lib/CodeGen/ReturnProtectorLowering.cpp.orig
|
||||
+++ lib/CodeGen/ReturnProtectorLowering.cpp
|
||||
@@ -0,0 +1,177 @@
|
||||
@@ -0,0 +1,229 @@
|
||||
+//===- ReturnProtectorLowering.cpp - ---------------------------------------==//
|
||||
+//
|
||||
+// The LLVM Compiler Infrastructure
|
||||
@ -72,7 +75,7 @@ Index: lib/CodeGen/ReturnProtectorLowering.cpp
|
||||
+/// saveReturnProtectorRegister - Allows the target to save the
|
||||
+/// ReturnProtectorRegister in the CalleeSavedInfo vector if needed.
|
||||
+void ReturnProtectorLowering::saveReturnProtectorRegister(
|
||||
+ const MachineFunction &MF, std::vector<CalleeSavedInfo> &CSI) const {
|
||||
+ MachineFunction &MF, std::vector<CalleeSavedInfo> &CSI) const {
|
||||
+ const MachineFrameInfo &MFI = MF.getFrameInfo();
|
||||
+ if (!MFI.getReturnProtectorNeeded())
|
||||
+ return;
|
||||
@ -80,7 +83,15 @@ Index: lib/CodeGen/ReturnProtectorLowering.cpp
|
||||
+ if (!MFI.hasReturnProtectorRegister())
|
||||
+ llvm_unreachable("Saving unset return protector register");
|
||||
+
|
||||
+ CSI.push_back(CalleeSavedInfo(MFI.getReturnProtectorRegister()));
|
||||
+ unsigned Reg = MFI.getReturnProtectorRegister();
|
||||
+ if (MFI.getReturnProtectorNeedsStore())
|
||||
+ CSI.push_back(CalleeSavedInfo(Reg));
|
||||
+ else {
|
||||
+ for (auto &MBB : MF) {
|
||||
+ if (!MBB.isLiveIn(Reg))
|
||||
+ MBB.addLiveIn(Reg);
|
||||
+ }
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+/// determineReturnProtectorTempRegister - Find a register that can be used
|
||||
@ -96,6 +107,45 @@ Index: lib/CodeGen/ReturnProtectorLowering.cpp
|
||||
+
|
||||
+ const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
|
||||
+
|
||||
+ std::vector<unsigned> TempRegs;
|
||||
+ fillTempRegisters(MF, TempRegs);
|
||||
+
|
||||
+ // For leaf functions, try to find a free register that is available
|
||||
+ // in every BB, so we do not need to store it in the frame at all.
|
||||
+ // We walk the entire function here because MFI.hasCalls() is unreliable.
|
||||
+ bool hasCalls = false;
|
||||
+ for (auto &MBB : MF) {
|
||||
+ for (auto &MI : MBB) {
|
||||
+ if (MI.isCall() && !MI.isReturn()) {
|
||||
+ hasCalls = true;
|
||||
+ break;
|
||||
+ }
|
||||
+ }
|
||||
+ if (hasCalls)
|
||||
+ break;
|
||||
+ }
|
||||
+
|
||||
+ if (!hasCalls) {
|
||||
+ SmallSet<unsigned, 16> LeafUsed;
|
||||
+ SmallSet<int, 24> LeafVisited;
|
||||
+ markUsedRegsInSuccessors(MF.front(), LeafUsed, LeafVisited);
|
||||
+ for (unsigned Reg : TempRegs) {
|
||||
+ bool canUse = true;
|
||||
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
|
||||
+ if (LeafUsed.count(*AI)) {
|
||||
+ canUse = false;
|
||||
+ break;
|
||||
+ }
|
||||
+ }
|
||||
+ if (canUse) {
|
||||
+ MFI.setReturnProtectorRegister(Reg);
|
||||
+ MFI.setReturnProtectorNeedsStore(false);
|
||||
+ return true;
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ // For non-leaf functions, we only need to search save / restore blocks
|
||||
+ SmallSet<unsigned, 16> Used;
|
||||
+ SmallSet<int, 24> Visited;
|
||||
+
|
||||
@ -123,9 +173,6 @@ Index: lib/CodeGen/ReturnProtectorLowering.cpp
|
||||
+
|
||||
+ // Now we have gathered all the regs used outside the frame save / restore,
|
||||
+ // so we can see if we have a free reg to use for the retguard cookie.
|
||||
+ std::vector<unsigned> TempRegs;
|
||||
+ fillTempRegisters(MF, TempRegs);
|
||||
+
|
||||
+ for (unsigned Reg : TempRegs) {
|
||||
+ bool canUse = true;
|
||||
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
|
||||
@ -165,12 +212,17 @@ Index: lib/CodeGen/ReturnProtectorLowering.cpp
|
||||
+ if (!cookie)
|
||||
+ llvm_unreachable("Function needs return protector but no cookie assigned");
|
||||
+
|
||||
+ unsigned Reg = MFI.getReturnProtectorRegister();
|
||||
+
|
||||
+ std::vector<MachineInstr *> returns;
|
||||
+ for (auto &MBB : MF) {
|
||||
+ if (MBB.isReturnBlock()) {
|
||||
+ for (auto &MI : MBB.terminators()) {
|
||||
+ if (opcodeIsReturn(MI.getOpcode()))
|
||||
+ if (opcodeIsReturn(MI.getOpcode())) {
|
||||
+ returns.push_back(&MI);
|
||||
+ if (!MBB.isLiveIn(Reg))
|
||||
+ MBB.addLiveIn(Reg);
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
@ -182,4 +234,7 @@ Index: lib/CodeGen/ReturnProtectorLowering.cpp
|
||||
+ insertReturnProtectorEpilogue(MF, *MI, cookie);
|
||||
+
|
||||
+ insertReturnProtectorPrologue(MF, MF.front(), cookie);
|
||||
+
|
||||
+ if (!MF.front().isLiveIn(Reg))
|
||||
+ MF.front().addLiveIn(Reg);
|
||||
+}
|
||||
|
@ -1,11 +1,14 @@
|
||||
$OpenBSD: patch-lib_Target_AArch64_AArch64ReturnProtectorLowering_cpp,v 1.1 2018/08/21 06:56:09 ajacoutot Exp $
|
||||
$OpenBSD: patch-lib_Target_AArch64_AArch64ReturnProtectorLowering_cpp,v 1.2 2019/07/09 13:21:37 jca Exp $
|
||||
|
||||
Add retguard for arm64.
|
||||
- Add retguard for arm64.
|
||||
- Do not store the retguard cookie in frame in leaf functions if possible.
|
||||
Makes things slightly faster and also improves security in these functions,
|
||||
since the retguard cookie can't leak via the stack.
|
||||
|
||||
Index: lib/Target/AArch64/AArch64ReturnProtectorLowering.cpp
|
||||
--- lib/Target/AArch64/AArch64ReturnProtectorLowering.cpp.orig
|
||||
+++ lib/Target/AArch64/AArch64ReturnProtectorLowering.cpp
|
||||
@@ -0,0 +1,123 @@
|
||||
@@ -0,0 +1,130 @@
|
||||
+//===-- AArch64ReturnProtectorLowering.cpp --------------------------------===//
|
||||
+//
|
||||
+// The LLVM Compiler Infrastructure
|
||||
@ -47,7 +50,6 @@ Index: lib/Target/AArch64/AArch64ReturnProtectorLowering.cpp
|
||||
+ const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
|
||||
+ unsigned REG = MF.getFrameInfo().getReturnProtectorRegister();
|
||||
+
|
||||
+ MBB.addLiveIn(REG);
|
||||
+ BuildMI(MBB, MI, MBBDL, TII->get(AArch64::ADRP), REG)
|
||||
+ .addGlobalAddress(cookie, 0, AArch64II::MO_PAGE);
|
||||
+ BuildMI(MBB, MI, MBBDL, TII->get(AArch64::LDRXui), REG)
|
||||
@ -66,7 +68,6 @@ Index: lib/Target/AArch64/AArch64ReturnProtectorLowering.cpp
|
||||
+ const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
|
||||
+ unsigned REG = MF.getFrameInfo().getReturnProtectorRegister();
|
||||
+
|
||||
+ MBB.addLiveIn(REG);
|
||||
+ MBB.addLiveIn(AArch64::X9);
|
||||
+ // REG holds the cookie we calculated in prologue. We use X9 as a
|
||||
+ // scratch reg to pull the random data. XOR REG with LR should yield
|
||||
@ -107,7 +108,7 @@ Index: lib/Target/AArch64/AArch64ReturnProtectorLowering.cpp
|
||||
+}
|
||||
+
|
||||
+void AArch64ReturnProtectorLowering::saveReturnProtectorRegister(
|
||||
+ const MachineFunction &MF, std::vector<CalleeSavedInfo> &CSI) const {
|
||||
+ MachineFunction &MF, std::vector<CalleeSavedInfo> &CSI) const {
|
||||
+
|
||||
+ const MachineFrameInfo &MFI = MF.getFrameInfo();
|
||||
+ if (!MFI.getReturnProtectorNeeded())
|
||||
@ -116,6 +117,15 @@ Index: lib/Target/AArch64/AArch64ReturnProtectorLowering.cpp
|
||||
+ if (!MFI.hasReturnProtectorRegister())
|
||||
+ llvm_unreachable("Saving unset return protector register");
|
||||
+
|
||||
+ unsigned Reg = MFI.getReturnProtectorRegister();
|
||||
+ if (!MFI.getReturnProtectorNeedsStore()) {
|
||||
+ for (auto &MBB : MF) {
|
||||
+ if (!MBB.isLiveIn(Reg))
|
||||
+ MBB.addLiveIn(Reg);
|
||||
+ }
|
||||
+ return;
|
||||
+ }
|
||||
+
|
||||
+ // Put the temp reg after FP and LR to avoid layout issues
|
||||
+ // with the D registers later.
|
||||
+ bool added = false;
|
||||
|
@ -1,6 +1,9 @@
|
||||
$OpenBSD: patch-lib_Target_AArch64_AArch64ReturnProtectorLowering_h,v 1.1 2018/08/21 06:56:09 ajacoutot Exp $
|
||||
$OpenBSD: patch-lib_Target_AArch64_AArch64ReturnProtectorLowering_h,v 1.2 2019/07/09 13:21:37 jca Exp $
|
||||
|
||||
Add retguard for arm64.
|
||||
- Add retguard for arm64.
|
||||
- Do not store the retguard cookie in frame in leaf functions if possible.
|
||||
Makes things slightly faster and also improves security in these functions,
|
||||
since the retguard cookie can't leak via the stack.
|
||||
|
||||
Index: lib/Target/AArch64/AArch64ReturnProtectorLowering.h
|
||||
--- lib/Target/AArch64/AArch64ReturnProtectorLowering.h.orig
|
||||
@ -51,7 +54,7 @@ Index: lib/Target/AArch64/AArch64ReturnProtectorLowering.h
|
||||
+ /// saveReturnProtectorRegister - Allows the target to save the
|
||||
+ /// CalculationRegister in the CalleeSavedInfo vector if needed.
|
||||
+ virtual void
|
||||
+ saveReturnProtectorRegister(const MachineFunction &MF,
|
||||
+ saveReturnProtectorRegister(MachineFunction &MF,
|
||||
+ std::vector<CalleeSavedInfo> &CSI) const override;
|
||||
+};
|
||||
+
|
||||
|
@ -0,0 +1,268 @@
|
||||
$OpenBSD: patch-lib_Target_Mips_AsmParser_MipsAsmParser_cpp,v 1.1 2019/07/09 13:21:37 jca Exp $
|
||||
|
||||
- Fix a bug in memory operand handling. If a load or store uses a symbol
|
||||
as a memory operand, the assembler generates incorrect relocations in
|
||||
PIC mode. As a simple fix, expand the instruction into an address load
|
||||
sequence that works, followed by the actual memory
|
||||
instruction.
|
||||
Note that the generated sequence is not always optimal. If the symbol
|
||||
has a small offset, the offset could be fused with the memory
|
||||
instruction. The fix does not achieve that, however. A symbol offset
|
||||
adds an extra instruction.
|
||||
- Implement SGE pseudo-instructions. Needed when building libcrypto.
|
||||
- Implement .cplocal directive. Needed when building libcrypto.
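As a rough illustration of the SGE item above (a hypothetical C++ mirror of the expansion, not the parser code itself): the assembler rewrites sge/sgeu as a set-on-less-than followed by an xori, using the identity that "$s >= $t" is the negation of "$s < $t".

```cpp
#include <cassert>
#include <cstdint>

// sge $d, $s, $t  ->  slt $d, $s, $t ; xori $d, $d, 1   ("sgeu" uses sltu)
static uint32_t sge(int32_t s, int32_t t) {
  uint32_t d = (s < t) ? 1u : 0u; // slt $d, $s, $t
  d ^= 1u;                        // xori $d, $d, 1
  return d;                       // 1 when $s >= $t, else 0
}

int main() {
  assert(sge(5, 3) == 1 && sge(3, 5) == 0 && sge(4, 4) == 1);
}
```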
|
||||
|
||||
Index: lib/Target/Mips/AsmParser/MipsAsmParser.cpp
|
||||
--- lib/Target/Mips/AsmParser/MipsAsmParser.cpp.orig
|
||||
+++ lib/Target/Mips/AsmParser/MipsAsmParser.cpp
|
||||
@@ -145,6 +145,7 @@ class MipsAsmParser : public MCTargetAsmParser {
|
||||
bool IsPicEnabled;
|
||||
bool IsCpRestoreSet;
|
||||
int CpRestoreOffset;
|
||||
+ unsigned GPRegister;
|
||||
unsigned CpSaveLocation;
|
||||
/// If true, then CpSaveLocation is a register, otherwise it's an offset.
|
||||
bool CpSaveLocationIsRegister;
|
||||
@@ -307,6 +308,11 @@ class MipsAsmParser : public MCTargetAsmParser {
|
||||
bool expandSeqI(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
|
||||
const MCSubtargetInfo *STI);
|
||||
|
||||
+ bool expandSGE(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
|
||||
+ const MCSubtargetInfo *STI);
|
||||
+ bool expandSGEImm(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
|
||||
+ const MCSubtargetInfo *STI);
|
||||
+
|
||||
bool expandMXTRAlias(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
|
||||
const MCSubtargetInfo *STI);
|
||||
|
||||
@@ -321,6 +327,7 @@ class MipsAsmParser : public MCTargetAsmParser {
|
||||
bool parseSetFeature(uint64_t Feature);
|
||||
bool isPicAndNotNxxAbi(); // Used by .cpload, .cprestore, and .cpsetup.
|
||||
bool parseDirectiveCpLoad(SMLoc Loc);
|
||||
+ bool parseDirectiveCpLocal(SMLoc Loc);
|
||||
bool parseDirectiveCpRestore(SMLoc Loc);
|
||||
bool parseDirectiveCPSetup();
|
||||
bool parseDirectiveCPReturn();
|
||||
@@ -514,6 +521,7 @@ class MipsAsmParser : public MCTargetAsmParser {
|
||||
|
||||
IsCpRestoreSet = false;
|
||||
CpRestoreOffset = -1;
|
||||
+ GPRegister = ABI.GetGlobalPtr();
|
||||
|
||||
const Triple &TheTriple = sti.getTargetTriple();
|
||||
IsLittleEndian = TheTriple.isLittleEndian();
|
||||
@@ -2054,7 +2062,7 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, S
|
||||
MipsMCExpr::create(MipsMCExpr::MEK_GOT_DISP, JalExpr, getContext());
|
||||
|
||||
TOut.emitRRX(ABI.ArePtrs64bit() ? Mips::LD : Mips::LW, Mips::T9,
|
||||
- Mips::GP, MCOperand::createExpr(GotDispRelocExpr), IDLoc,
|
||||
+ GPRegister, MCOperand::createExpr(GotDispRelocExpr), IDLoc,
|
||||
STI);
|
||||
}
|
||||
} else {
|
||||
@@ -2065,7 +2073,8 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, S
|
||||
const MCExpr *Call16RelocExpr =
|
||||
MipsMCExpr::create(MipsMCExpr::MEK_GOT_CALL, JalExpr, getContext());
|
||||
|
||||
- TOut.emitRRX(ABI.ArePtrs64bit() ? Mips::LD : Mips::LW, Mips::T9, Mips::GP,
|
||||
+ TOut.emitRRX(ABI.ArePtrs64bit() ? Mips::LD : Mips::LW, Mips::T9,
|
||||
+ GPRegister,
|
||||
MCOperand::createExpr(Call16RelocExpr), IDLoc, STI);
|
||||
}
|
||||
|
||||
@@ -2482,6 +2491,14 @@ MipsAsmParser::tryExpandInstruction(MCInst &Inst, SMLo
|
||||
case Mips::NORImm:
|
||||
case Mips::NORImm64:
|
||||
return expandAliasImmediate(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
|
||||
+ case Mips::SGE:
|
||||
+ case Mips::SGEU:
|
||||
+ return expandSGE(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
|
||||
+ case Mips::SGEImm:
|
||||
+ case Mips::SGEImm64:
|
||||
+ case Mips::SGEUImm:
|
||||
+ case Mips::SGEUImm64:
|
||||
+ return expandSGEImm(Inst, IDLoc, Out, STI) ? MER_Fail : MER_Success;
|
||||
case Mips::SLTImm64:
|
||||
if (isInt<16>(Inst.getOperand(2).getImm())) {
|
||||
Inst.setOpcode(Mips::SLTi64);
|
||||
@@ -2876,7 +2893,7 @@ bool MipsAsmParser::loadAndAddSymbolAddress(const MCEx
|
||||
ELF::STB_LOCAL))) {
|
||||
const MCExpr *CallExpr =
|
||||
MipsMCExpr::create(MipsMCExpr::MEK_GOT_CALL, SymExpr, getContext());
|
||||
- TOut.emitRRX(Mips::LW, DstReg, ABI.GetGlobalPtr(),
|
||||
+ TOut.emitRRX(Mips::LW, DstReg, GPRegister,
|
||||
MCOperand::createExpr(CallExpr), IDLoc, STI);
|
||||
return false;
|
||||
}
|
||||
@@ -2916,7 +2933,7 @@ bool MipsAsmParser::loadAndAddSymbolAddress(const MCEx
|
||||
TmpReg = ATReg;
|
||||
}
|
||||
|
||||
- TOut.emitRRX(Mips::LW, TmpReg, ABI.GetGlobalPtr(),
|
||||
+ TOut.emitRRX(Mips::LW, TmpReg, GPRegister,
|
||||
MCOperand::createExpr(GotExpr), IDLoc, STI);
|
||||
|
||||
if (LoExpr)
|
||||
@@ -2952,7 +2969,7 @@ bool MipsAsmParser::loadAndAddSymbolAddress(const MCEx
|
||||
ELF::STB_LOCAL))) {
|
||||
const MCExpr *CallExpr =
|
||||
MipsMCExpr::create(MipsMCExpr::MEK_GOT_CALL, SymExpr, getContext());
|
||||
- TOut.emitRRX(Mips::LD, DstReg, ABI.GetGlobalPtr(),
|
||||
+ TOut.emitRRX(Mips::LD, DstReg, GPRegister,
|
||||
MCOperand::createExpr(CallExpr), IDLoc, STI);
|
||||
return false;
|
||||
}
|
||||
@@ -2995,7 +3012,7 @@ bool MipsAsmParser::loadAndAddSymbolAddress(const MCEx
|
||||
TmpReg = ATReg;
|
||||
}
|
||||
|
||||
- TOut.emitRRX(Mips::LD, TmpReg, ABI.GetGlobalPtr(),
|
||||
+ TOut.emitRRX(Mips::LD, TmpReg, GPRegister,
|
||||
MCOperand::createExpr(GotExpr), IDLoc, STI);
|
||||
|
||||
if (LoExpr)
|
||||
@@ -3226,10 +3243,10 @@ bool MipsAsmParser::emitPartialAddress(MipsTargetStrea
|
||||
MipsMCExpr::create(MipsMCExpr::MEK_GOT, GotSym, getContext());
|
||||
|
||||
if(isABI_O32() || isABI_N32()) {
|
||||
- TOut.emitRRX(Mips::LW, ATReg, Mips::GP, MCOperand::createExpr(GotExpr),
|
||||
+ TOut.emitRRX(Mips::LW, ATReg, GPRegister, MCOperand::createExpr(GotExpr),
|
||||
IDLoc, STI);
|
||||
} else { //isABI_N64()
|
||||
- TOut.emitRRX(Mips::LD, ATReg, Mips::GP, MCOperand::createExpr(GotExpr),
|
||||
+ TOut.emitRRX(Mips::LD, ATReg, GPRegister, MCOperand::createExpr(GotExpr),
|
||||
IDLoc, STI);
|
||||
}
|
||||
} else { //!IsPicEnabled
|
||||
@@ -3605,6 +3622,10 @@ void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc
|
||||
TOut.emitRRR(isGP64bit() ? Mips::DADDu : Mips::ADDu, TmpReg, TmpReg,
|
||||
BaseReg, IDLoc, STI);
|
||||
TOut.emitRRI(Inst.getOpcode(), DstReg, TmpReg, LoOffset, IDLoc, STI);
|
||||
+ } else if (inPicMode()) {
|
||||
+ expandLoadAddress(TmpReg, Mips::NoRegister, OffsetOp, !ABI.ArePtrs64bit(),
|
||||
+ IDLoc, Out, STI);
|
||||
+ TOut.emitRRI(Inst.getOpcode(), DstReg, TmpReg, 0, IDLoc, STI);
|
||||
} else {
|
||||
assert(OffsetOp.isExpr() && "expected expression operand kind");
|
||||
const MCExpr *ExprOffset = OffsetOp.getExpr();
|
||||
@@ -4934,6 +4955,72 @@ bool MipsAsmParser::expandSeqI(MCInst &Inst, SMLoc IDL
|
||||
return false;
|
||||
}
|
||||
|
||||
+bool MipsAsmParser::expandSGE(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
|
||||
+ const MCSubtargetInfo *STI) {
|
||||
+ MipsTargetStreamer &TOut = getTargetStreamer();
|
||||
+ unsigned DReg = Inst.getOperand(0).getReg();
|
||||
+ unsigned SReg = Inst.getOperand(1).getReg();
|
||||
+ unsigned TReg = Inst.getOperand(2).getReg();
|
||||
+ unsigned OpCode;
|
||||
+
|
||||
+ warnIfNoMacro(IDLoc);
|
||||
+
|
||||
+ /* "$sr >= $tr" is equivalent to "not ($sr < $tr)". */
|
||||
+ switch (Inst.getOpcode()) {
|
||||
+ case Mips::SGE:
|
||||
+ OpCode = Mips::SLT;
|
||||
+ break;
|
||||
+ case Mips::SGEU:
|
||||
+ OpCode = Mips::SLTu;
|
||||
+ break;
|
||||
+ default:
|
||||
+ llvm_unreachable("unexpected 'sge' opcode");
|
||||
+ }
|
||||
+ TOut.emitRRR(OpCode, DReg, SReg, TReg, IDLoc, STI);
|
||||
+ TOut.emitRRI(Mips::XORi, DReg, DReg, 1, IDLoc, STI);
|
||||
+
|
||||
+ return false;
|
||||
+}
|
||||
+
|
||||
+bool MipsAsmParser::expandSGEImm(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
|
||||
+ const MCSubtargetInfo *STI) {
|
||||
+ MipsTargetStreamer &TOut = getTargetStreamer();
|
||||
+ unsigned DReg = Inst.getOperand(0).getReg();
|
||||
+ unsigned SReg = Inst.getOperand(1).getReg();
|
||||
+ int64_t ImmVal = Inst.getOperand(2).getImm();
|
||||
+ unsigned OpCode, OpiCode;
|
||||
+
|
||||
+ warnIfNoMacro(IDLoc);
|
||||
+
|
||||
+ /* "$sr >= $imm" is equivalent to "not ($sr < $imm)". */
|
||||
+ switch (Inst.getOpcode()) {
|
||||
+ case Mips::SGEImm:
|
||||
+ case Mips::SGEImm64:
|
||||
+ OpCode = Mips::SLT;
|
||||
+ OpiCode = Mips::SLTi;
|
||||
+ break;
|
||||
+ case Mips::SGEUImm:
|
||||
+ case Mips::SGEUImm64:
|
||||
+ OpCode = Mips::SLTu;
|
||||
+ OpiCode = Mips::SLTiu;
|
||||
+ break;
|
||||
+ default:
|
||||
+ llvm_unreachable("unexpected 'sge' opcode with immediate");
|
||||
+ }
|
||||
+
|
||||
+ if (isInt<16>(ImmVal)) {
|
||||
+ TOut.emitRRI(OpiCode, DReg, SReg, ImmVal, IDLoc, STI);
|
||||
+ } else {
|
||||
+ if (loadImmediate(ImmVal, DReg, Mips::NoRegister, isInt<32>(ImmVal), false,
|
||||
+ IDLoc, Out, STI))
|
||||
+ return true;
|
||||
+ TOut.emitRRR(OpCode, DReg, SReg, DReg, IDLoc, STI);
|
||||
+ }
|
||||
+ TOut.emitRRI(Mips::XORi, DReg, DReg, 1, IDLoc, STI);
|
||||
+
|
||||
+ return false;
|
||||
+}
|
||||
+
|
||||
// Map the DSP accumulator and control register to the corresponding gpr
|
||||
// operand. Unlike the other alias, the m(f|t)t(lo|hi|acx) instructions
|
||||
// do not map the DSP registers contigously to gpr registers.
|
||||
@@ -7044,6 +7131,36 @@ bool MipsAsmParser::parseDirectiveCpLoad(SMLoc Loc) {
|
||||
return false;
|
||||
}
|
||||
|
||||
+bool MipsAsmParser::parseDirectiveCpLocal(SMLoc Loc) {
|
||||
+ if (!isABI_N32() && !isABI_N64()) {
|
||||
+ reportParseError(".cplocal is allowed only in N32 or N64 mode");
|
||||
+ return false;
|
||||
+ }
|
||||
+
|
||||
+ SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Reg;
|
||||
+ OperandMatchResultTy ResTy = parseAnyRegister(Reg);
|
||||
+ if (ResTy == MatchOperand_NoMatch || ResTy == MatchOperand_ParseFail) {
|
||||
+ reportParseError("expected register containing function address");
|
||||
+ return false;
|
||||
+ }
|
||||
+
|
||||
+ MipsOperand &RegOpnd = static_cast<MipsOperand &>(*Reg[0]);
|
||||
+ if (!RegOpnd.isGPRAsmReg()) {
|
||||
+ reportParseError(RegOpnd.getStartLoc(), "invalid register");
|
||||
+ return false;
|
||||
+ }
|
||||
+
|
||||
+ // If this is not the end of the statement, report an error.
|
||||
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
|
||||
+ reportParseError("unexpected token, expected end of statement");
|
||||
+ return false;
|
||||
+ }
|
||||
+
|
||||
+ GPRegister = RegOpnd.getGPR32Reg();
|
||||
+ getTargetStreamer().setGPReg(GPRegister);
|
||||
+ return false;
|
||||
+}
|
||||
+
|
||||
bool MipsAsmParser::parseDirectiveCpRestore(SMLoc Loc) {
|
||||
MCAsmParser &Parser = getParser();
|
||||
|
||||
@@ -7888,6 +8005,10 @@ bool MipsAsmParser::ParseDirective(AsmToken DirectiveI
|
||||
|
||||
if (IDVal == ".cpload") {
|
||||
parseDirectiveCpLoad(DirectiveID.getLoc());
|
||||
+ return false;
|
||||
+ }
|
||||
+ if (IDVal == ".cplocal") {
|
||||
+ parseDirectiveCpLocal(DirectiveID.getLoc());
|
||||
return false;
|
||||
}
|
||||
if (IDVal == ".cprestore") {
|
@ -0,0 +1,124 @@
|
||||
$OpenBSD: patch-lib_Target_Mips_MCTargetDesc_MipsTargetStreamer_cpp,v 1.1 2019/07/09 13:21:37 jca Exp $
|
||||
|
||||
- Implement .cplocal directive. Needed when building libcrypto.
|
||||
|
||||
Index: lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
|
||||
--- lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp.orig
|
||||
+++ lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
|
||||
@@ -38,6 +38,7 @@ static cl::opt<bool> RoundSectionSizes(
|
||||
MipsTargetStreamer::MipsTargetStreamer(MCStreamer &S)
|
||||
: MCTargetStreamer(S), ModuleDirectiveAllowed(true) {
|
||||
GPRInfoSet = FPRInfoSet = FrameInfoSet = false;
|
||||
+ GPReg = Mips::GP;
|
||||
}
|
||||
void MipsTargetStreamer::emitDirectiveSetMicroMips() {}
|
||||
void MipsTargetStreamer::emitDirectiveSetNoMicroMips() {}
|
||||
@@ -258,8 +259,7 @@ void MipsTargetStreamer::emitNop(SMLoc IDLoc, const MC
|
||||
/// Emit the $gp restore operation for .cprestore.
|
||||
void MipsTargetStreamer::emitGPRestore(int Offset, SMLoc IDLoc,
|
||||
const MCSubtargetInfo *STI) {
|
||||
- emitLoadWithImmOffset(Mips::LW, Mips::GP, Mips::SP, Offset, Mips::GP, IDLoc,
|
||||
- STI);
|
||||
+ emitLoadWithImmOffset(Mips::LW, GPReg, Mips::SP, Offset, GPReg, IDLoc, STI);
|
||||
}
|
||||
|
||||
/// Emit a store instruction with an immediate offset.
|
||||
@@ -1133,7 +1133,7 @@ void MipsTargetELFStreamer::emitDirectiveCpLoad(unsign
|
||||
|
||||
MCInst TmpInst;
|
||||
TmpInst.setOpcode(Mips::LUi);
|
||||
- TmpInst.addOperand(MCOperand::createReg(Mips::GP));
|
||||
+ TmpInst.addOperand(MCOperand::createReg(GPReg));
|
||||
const MCExpr *HiSym = MipsMCExpr::create(
|
||||
MipsMCExpr::MEK_HI,
|
||||
MCSymbolRefExpr::create("_gp_disp", MCSymbolRefExpr::VK_None,
|
||||
@@ -1145,8 +1145,8 @@ void MipsTargetELFStreamer::emitDirectiveCpLoad(unsign
|
||||
TmpInst.clear();
|
||||
|
||||
TmpInst.setOpcode(Mips::ADDiu);
|
||||
- TmpInst.addOperand(MCOperand::createReg(Mips::GP));
|
||||
- TmpInst.addOperand(MCOperand::createReg(Mips::GP));
|
||||
+ TmpInst.addOperand(MCOperand::createReg(GPReg));
|
||||
+ TmpInst.addOperand(MCOperand::createReg(GPReg));
|
||||
const MCExpr *LoSym = MipsMCExpr::create(
|
||||
MipsMCExpr::MEK_LO,
|
||||
MCSymbolRefExpr::create("_gp_disp", MCSymbolRefExpr::VK_None,
|
||||
@@ -1158,8 +1158,8 @@ void MipsTargetELFStreamer::emitDirectiveCpLoad(unsign
|
||||
TmpInst.clear();
|
||||
|
||||
TmpInst.setOpcode(Mips::ADDu);
|
||||
- TmpInst.addOperand(MCOperand::createReg(Mips::GP));
|
||||
- TmpInst.addOperand(MCOperand::createReg(Mips::GP));
|
||||
+ TmpInst.addOperand(MCOperand::createReg(GPReg));
|
||||
+ TmpInst.addOperand(MCOperand::createReg(GPReg));
|
||||
TmpInst.addOperand(MCOperand::createReg(RegNo));
|
||||
getStreamer().EmitInstruction(TmpInst, STI);
|
||||
|
||||
@@ -1182,7 +1182,7 @@ bool MipsTargetELFStreamer::emitDirectiveCpRestore(
|
||||
return true;
|
||||
|
||||
// Store the $gp on the stack.
|
||||
- emitStoreWithImmOffset(Mips::SW, Mips::GP, Mips::SP, Offset, GetATReg, IDLoc,
|
||||
+ emitStoreWithImmOffset(Mips::SW, GPReg, Mips::SP, Offset, GetATReg, IDLoc,
|
||||
STI);
|
||||
return true;
|
||||
}
|
||||
@@ -1203,10 +1203,10 @@ void MipsTargetELFStreamer::emitDirectiveCpsetup(unsig
|
||||
// Either store the old $gp in a register or on the stack
|
||||
if (IsReg) {
|
||||
// move $save, $gpreg
|
||||
- emitRRR(Mips::OR64, RegOrOffset, Mips::GP, Mips::ZERO, SMLoc(), &STI);
|
||||
+ emitRRR(Mips::OR64, RegOrOffset, GPReg, Mips::ZERO, SMLoc(), &STI);
|
||||
} else {
|
||||
// sd $gpreg, offset($sp)
|
||||
- emitRRI(Mips::SD, Mips::GP, Mips::SP, RegOrOffset, SMLoc(), &STI);
|
||||
+ emitRRI(Mips::SD, GPReg, Mips::SP, RegOrOffset, SMLoc(), &STI);
|
||||
}
|
||||
|
||||
if (getABI().IsN32()) {
|
||||
@@ -1219,10 +1219,10 @@ void MipsTargetELFStreamer::emitDirectiveCpsetup(unsig
|
||||
MCA.getContext());
|
||||
|
||||
// lui $gp, %hi(__gnu_local_gp)
|
||||
- emitRX(Mips::LUi, Mips::GP, MCOperand::createExpr(HiExpr), SMLoc(), &STI);
|
||||
+ emitRX(Mips::LUi, GPReg, MCOperand::createExpr(HiExpr), SMLoc(), &STI);
|
||||
|
||||
// addiu $gp, $gp, %lo(__gnu_local_gp)
|
||||
- emitRRX(Mips::ADDiu, Mips::GP, Mips::GP, MCOperand::createExpr(LoExpr),
|
||||
+ emitRRX(Mips::ADDiu, GPReg, GPReg, MCOperand::createExpr(LoExpr),
|
||||
SMLoc(), &STI);
|
||||
|
||||
return;
|
||||
@@ -1236,14 +1236,14 @@ void MipsTargetELFStreamer::emitDirectiveCpsetup(unsig
|
||||
MCA.getContext());
|
||||
|
||||
// lui $gp, %hi(%neg(%gp_rel(funcSym)))
|
||||
- emitRX(Mips::LUi, Mips::GP, MCOperand::createExpr(HiExpr), SMLoc(), &STI);
|
||||
+ emitRX(Mips::LUi, GPReg, MCOperand::createExpr(HiExpr), SMLoc(), &STI);
|
||||
|
||||
// addiu $gp, $gp, %lo(%neg(%gp_rel(funcSym)))
|
||||
- emitRRX(Mips::ADDiu, Mips::GP, Mips::GP, MCOperand::createExpr(LoExpr),
|
||||
+ emitRRX(Mips::ADDiu, GPReg, GPReg, MCOperand::createExpr(LoExpr),
|
||||
SMLoc(), &STI);
|
||||
|
||||
// daddu $gp, $gp, $funcreg
|
||||
- emitRRR(Mips::DADDu, Mips::GP, Mips::GP, RegNo, SMLoc(), &STI);
|
||||
+ emitRRR(Mips::DADDu, GPReg, GPReg, RegNo, SMLoc(), &STI);
|
||||
}
|
||||
|
||||
void MipsTargetELFStreamer::emitDirectiveCpreturn(unsigned SaveLocation,
|
||||
@@ -1256,12 +1256,12 @@ void MipsTargetELFStreamer::emitDirectiveCpreturn(unsi
|
||||
// Either restore the old $gp from a register or on the stack
|
||||
if (SaveLocationIsRegister) {
|
||||
Inst.setOpcode(Mips::OR);
|
||||
- Inst.addOperand(MCOperand::createReg(Mips::GP));
|
||||
+ Inst.addOperand(MCOperand::createReg(GPReg));
|
||||
Inst.addOperand(MCOperand::createReg(SaveLocation));
|
||||
Inst.addOperand(MCOperand::createReg(Mips::ZERO));
|
||||
} else {
|
||||
Inst.setOpcode(Mips::LD);
|
||||
- Inst.addOperand(MCOperand::createReg(Mips::GP));
|
||||
+ Inst.addOperand(MCOperand::createReg(GPReg));
|
||||
Inst.addOperand(MCOperand::createReg(Mips::SP));
|
||||
Inst.addOperand(MCOperand::createImm(SaveLocation));
|
||||
}
|
devel/llvm/patches/patch-lib_Target_Mips_Mips64InstrInfo_td: 31 lines (normal file)
@ -0,0 +1,31 @@
|
||||
$OpenBSD: patch-lib_Target_Mips_Mips64InstrInfo_td,v 1.1 2019/07/09 13:21:37 jca Exp $
|
||||
|
||||
- Implement SGE pseudo-instructions. Needed when building libcrypto
|
||||
- Fix instruction guard. This prevents the compiler from using
|
||||
the MIPS32 mul instruction on pre-MIPS32 subtargets.
|
||||
|
||||
Index: lib/Target/Mips/Mips64InstrInfo.td
|
||||
--- lib/Target/Mips/Mips64InstrInfo.td.orig
|
||||
+++ lib/Target/Mips/Mips64InstrInfo.td
|
||||
@@ -845,7 +845,7 @@ def : MipsPat<(i64 (sext (i32 (sub GPR32:$src, GPR32:$
|
||||
(SUBu GPR32:$src, GPR32:$src2), sub_32)>;
|
||||
def : MipsPat<(i64 (sext (i32 (mul GPR32:$src, GPR32:$src2)))),
|
||||
(INSERT_SUBREG (i64 (IMPLICIT_DEF)),
|
||||
- (MUL GPR32:$src, GPR32:$src2), sub_32)>, ISA_MIPS3_NOT_32R6_64R6;
|
||||
+ (MUL GPR32:$src, GPR32:$src2), sub_32)>, ISA_MIPS32_NOT_32R6_64R6;
|
||||
def : MipsPat<(i64 (sext (i32 (MipsMFHI ACC64:$src)))),
|
||||
(INSERT_SUBREG (i64 (IMPLICIT_DEF)),
|
||||
(PseudoMFHI ACC64:$src), sub_32)>;
|
||||
@@ -1136,6 +1136,12 @@ let AdditionalPredicates = [NotInMicroMips] in {
|
||||
def NORImm64 : NORIMM_DESC_BASE<GPR64Opnd, imm64>, GPR_64;
|
||||
def : MipsInstAlias<"nor\t$rs, $imm", (NORImm64 GPR64Opnd:$rs, GPR64Opnd:$rs,
|
||||
imm64:$imm)>, GPR_64;
|
||||
+def SGEImm64 : MipsAsmPseudoInst<(outs GPR64Opnd:$rd),
|
||||
+ (ins GPR64Opnd:$rs, imm64:$imm),
|
||||
+ "sge\t$rd, $rs, $imm">, GPR_64;
|
||||
+def SGEUImm64 : MipsAsmPseudoInst<(outs GPR64Opnd:$rd),
|
||||
+ (ins GPR64Opnd:$rs, imm64:$imm),
|
||||
+ "sgeu\t$rd, $rs, $imm">, GPR_64;
|
||||
def SLTImm64 : MipsAsmPseudoInst<(outs GPR64Opnd:$rs),
|
||||
(ins GPR64Opnd:$rt, imm64:$imm),
|
||||
"slt\t$rs, $rt, $imm">, GPR_64;
|
devel/llvm/patches/patch-lib_Target_Mips_MipsAsmPrinter_cpp: 23 lines (normal file)
@ -0,0 +1,23 @@
|
||||
$OpenBSD: patch-lib_Target_Mips_MipsAsmPrinter_cpp,v 1.1 2019/07/09 13:21:37 jca Exp $
|
||||
|
||||
- Restore previous section after setting the MIPS ABI marker. This keeps
|
||||
the .text section in use after the file header, improving compatibility
|
||||
with gcc. Without this change, module-level inline assembly blocks could
|
||||
end up in the wrong section.
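A hedged example of the module-level inline assembly the description refers to (illustrative MIPS assembly, only meaningful when compiling for a MIPS target): text emitted by such a block goes into whatever section the assembler is currently in, which is why the previous section has to be restored after the ABI marker is emitted.

```cpp
// Module-level (file-scope) inline assembly, a GNU extension; the MIPS
// directives here are purely illustrative.
__asm__("\t.globl example_stub\n"
        "\t.type example_stub,@function\n"
        "example_stub:\n"
        "\tjr $ra\n"
        "\tnop\n");  // nop fills the branch delay slot after jr

extern "C" void example_stub();

int main() {
  example_stub(); // simply returns
}
```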
|
||||
|
||||
Index: lib/Target/Mips/MipsAsmPrinter.cpp
|
||||
--- lib/Target/Mips/MipsAsmPrinter.cpp.orig
|
||||
+++ lib/Target/Mips/MipsAsmPrinter.cpp
|
||||
@@ -795,10 +795,12 @@ void MipsAsmPrinter::EmitStartOfAsmFile(Module &M) {
|
||||
TS.emitDirectiveOptionPic0();
|
||||
}
|
||||
|
||||
+ MCSection *CS = OutStreamer->getCurrentSectionOnly();
|
||||
// Tell the assembler which ABI we are using
|
||||
std::string SectionName = std::string(".mdebug.") + getCurrentABIString();
|
||||
OutStreamer->SwitchSection(
|
||||
OutContext.getELFSection(SectionName, ELF::SHT_PROGBITS, 0));
|
||||
+ OutStreamer->SwitchSection(CS);
|
||||
|
||||
// NaN: At the moment we only support:
|
||||
// 1. .nan legacy (default)
|
@ -0,0 +1,50 @@
|
||||
$OpenBSD: patch-lib_Target_Mips_MipsISelLowering_cpp,v 1.1 2019/07/09 13:21:37 jca Exp $
|
||||
|
||||
- Implement the 'h' register constraint on mips64. This lets clang build
|
||||
pieces of software that use the constraint if the compiler claims
|
||||
to be compatible with GCC 4.2.1.
|
||||
Note that the constraint was removed in GCC 4.4. The reason was that
|
||||
'h' could generate code whose result is unpredictable. The underlying
|
||||
reason is that the HI and LO registers are special, and the optimizer
|
||||
has to be careful when choosing the order of HI/LO accesses. It looks
|
||||
like LLVM has the needed logic.
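A hedged example of the kind of code this constraint enables (compiles only when targeting mips64; the snippet is illustrative rather than taken from libcrypto): the 'h' and 'l' constraints bind the HI and LO halves of a full 64x64 multiply to C++ variables.

```cpp
#include <cstdint>
#include <cstdio>

// Illustrative only: on mips64, "dmultu" leaves the 128-bit product in HI/LO,
// and the "h"/"l" constraints bind those registers to the asm outputs.
static inline uint64_t mulhu64(uint64_t a, uint64_t b) {
  uint64_t hi, lo;
  __asm__("dmultu %2, %3" : "=h"(hi), "=l"(lo) : "r"(a), "r"(b));
  (void)lo;   // lower 64 bits, unused here
  return hi;  // upper 64 bits of a * b
}

int main() {
  std::printf("%llu\n", (unsigned long long)mulhu64(~0ULL, 2)); // prints 1
}
```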
|
||||
|
||||
Index: lib/Target/Mips/MipsISelLowering.cpp
|
||||
--- lib/Target/Mips/MipsISelLowering.cpp.orig
|
||||
+++ lib/Target/Mips/MipsISelLowering.cpp
|
||||
@@ -3685,6 +3685,7 @@ MipsTargetLowering::getConstraintType(StringRef Constr
|
||||
// backwards compatibility.
|
||||
// 'c' : A register suitable for use in an indirect
|
||||
// jump. This will always be $25 for -mabicalls.
|
||||
+ // 'h' : The hi register. 1 word storage.
|
||||
// 'l' : The lo register. 1 word storage.
|
||||
// 'x' : The hilo register pair. Double word storage.
|
||||
if (Constraint.size() == 1) {
|
||||
@@ -3694,6 +3695,7 @@ MipsTargetLowering::getConstraintType(StringRef Constr
|
||||
case 'y':
|
||||
case 'f':
|
||||
case 'c':
|
||||
+ case 'h':
|
||||
case 'l':
|
||||
case 'x':
|
||||
return C_RegisterClass;
|
||||
@@ -3739,6 +3741,7 @@ MipsTargetLowering::getSingleConstraintMatchWeight(
|
||||
weight = CW_Register;
|
||||
break;
|
||||
case 'c': // $25 for indirect jumps
|
||||
+ case 'h': // hi register
|
||||
case 'l': // lo register
|
||||
case 'x': // hilo register pair
|
||||
if (type->isIntegerTy())
|
||||
@@ -3913,6 +3916,11 @@ MipsTargetLowering::getRegForInlineAsmConstraint(const
|
||||
return std::make_pair((unsigned)Mips::T9_64, &Mips::GPR64RegClass);
|
||||
// This will generate an error message
|
||||
return std::make_pair(0U, nullptr);
|
||||
+ case 'h': // use the `hi` register to store values
|
||||
+ // that are no bigger than a word
|
||||
+ if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8)
|
||||
+ return std::make_pair((unsigned)Mips::HI0, &Mips::HI32RegClass);
|
||||
+ return std::make_pair((unsigned)Mips::HI0_64, &Mips::HI64RegClass);
|
||||
case 'l': // use the `lo` register to store values
|
||||
// that are no bigger than a word
|
||||
if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8)
|
devel/llvm/patches/patch-lib_Target_Mips_MipsInstrInfo_td: 28 lines (normal file)
@ -0,0 +1,28 @@
|
||||
$OpenBSD: patch-lib_Target_Mips_MipsInstrInfo_td,v 1.1 2019/07/09 13:21:37 jca Exp $
|
||||
|
||||
- Implement SGE pseudo-instructions. Needed when building libcrypto.
|
||||
|
||||
Index: lib/Target/Mips/MipsInstrInfo.td
|
||||
--- lib/Target/Mips/MipsInstrInfo.td.orig
|
||||
+++ lib/Target/Mips/MipsInstrInfo.td
|
||||
@@ -3007,6 +3007,20 @@ def LDMacro : MipsAsmPseudoInst<(outs GPR32Opnd:$rt),
|
||||
def SDMacro : MipsAsmPseudoInst<(outs GPR32Opnd:$rt),
|
||||
(ins mem_simm16:$addr), "sd $rt, $addr">,
|
||||
ISA_MIPS1_NOT_MIPS3;
|
||||
+
|
||||
+def SGE : MipsAsmPseudoInst<(outs GPR32Opnd:$rd),
|
||||
+ (ins GPR32Opnd:$rs, GPR32Opnd:$rt),
|
||||
+ "sge\t$rd, $rs, $rt">;
|
||||
+def SGEU : MipsAsmPseudoInst<(outs GPR32Opnd:$rd),
|
||||
+ (ins GPR32Opnd:$rs, GPR32Opnd:$rt),
|
||||
+ "sgeu\t$rd, $rs, $rt">;
|
||||
+def SGEImm : MipsAsmPseudoInst<(outs GPR32Opnd:$rd),
|
||||
+ (ins GPR32Opnd:$rs, simm32_relaxed:$imm),
|
||||
+ "sge\t$rd, $rs, $imm">, GPR_32;
|
||||
+def SGEUImm : MipsAsmPseudoInst<(outs GPR32Opnd:$rd),
|
||||
+ (ins GPR32Opnd:$rs, simm32_relaxed:$imm),
|
||||
+ "sgeu\t$rd, $rs, $imm">, GPR_32;
|
||||
+
|
||||
//===----------------------------------------------------------------------===//
|
||||
// Arbitrary patterns that map to one or more instructions
|
||||
//===----------------------------------------------------------------------===//
|
@ -0,0 +1,26 @@
|
||||
$OpenBSD: patch-lib_Target_Mips_MipsTargetStreamer_h,v 1.1 2019/07/09 13:21:37 jca Exp $
|
||||
|
||||
- Implement .cplocal directive. Needed when building libcrypto.
|
||||
|
||||
Index: lib/Target/Mips/MipsTargetStreamer.h
|
||||
--- lib/Target/Mips/MipsTargetStreamer.h.orig
|
||||
+++ lib/Target/Mips/MipsTargetStreamer.h
|
||||
@@ -185,6 +185,10 @@ class MipsTargetStreamer : public MCTargetStreamer { (
|
||||
return *ABI;
|
||||
}
|
||||
|
||||
+ void setGPReg(unsigned GPReg) {
|
||||
+ this->GPReg = GPReg;
|
||||
+ }
|
||||
+
|
||||
protected:
|
||||
llvm::Optional<MipsABIInfo> ABI;
|
||||
MipsABIFlagsSection ABIFlagsSection;
|
||||
@@ -199,6 +203,7 @@ class MipsTargetStreamer : public MCTargetStreamer { (
|
||||
|
||||
bool FrameInfoSet;
|
||||
int FrameOffset;
|
||||
+ unsigned GPReg;
|
||||
unsigned FrameReg;
|
||||
unsigned ReturnReg;
|
||||
|
@ -0,0 +1,17 @@
|
||||
$OpenBSD: patch-lib_Target_Sparc_SparcAsmPrinter_cpp,v 1.1 2019/07/09 13:21:37 jca Exp $
|
||||
|
||||
- Remove cast that truncates immediate operands to 32 bits. This fixes
|
||||
genassym.sh on sparc64 when using clang as the compiler.
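A small sketch of the underlying problem in plain C++ (not the SPARC backend itself): casting a 64-bit immediate to int drops the upper 32 bits, which is what the removed (int) cast did to large immediate operands.

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  int64_t imm = INT64_C(0x123456789);                 // needs more than 32 bits
  std::printf("as int:     %d\n", (int)imm);          // truncated on typical targets
  std::printf("full value: %lld\n", (long long)imm);  // what the fixed code prints
}
```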
|
||||
|
||||
Index: lib/Target/Sparc/SparcAsmPrinter.cpp
|
||||
--- lib/Target/Sparc/SparcAsmPrinter.cpp.orig
|
||||
+++ lib/Target/Sparc/SparcAsmPrinter.cpp
|
||||
@@ -354,7 +354,7 @@ void SparcAsmPrinter::printOperand(const MachineInstr
|
||||
break;
|
||||
|
||||
case MachineOperand::MO_Immediate:
|
||||
- O << (int)MO.getImm();
|
||||
+ O << MO.getImm();
|
||||
break;
|
||||
case MachineOperand::MO_MachineBasicBlock:
|
||||
MO.getMBB()->getSymbol()->print(O, MAI);
|
@ -1,4 +1,4 @@
|
||||
$OpenBSD: patch-lib_Target_X86_X86MCInstLower_cpp,v 1.7 2019/07/06 15:06:36 jca Exp $
|
||||
$OpenBSD: patch-lib_Target_X86_X86MCInstLower_cpp,v 1.8 2019/07/09 13:21:37 jca Exp $
|
||||
|
||||
- Add RETGUARD to clang for amd64. This security mechanism uses per-function
|
||||
random cookies to protect access to function return instructions, with the
|
||||
@ -18,20 +18,44 @@ $OpenBSD: patch-lib_Target_X86_X86MCInstLower_cpp,v 1.7 2019/07/06 15:06:36 jca
|
||||
jumping into the instruction stream partway through other instructions. Work to
|
||||
remove these gadgets will continue through other mechanisms.
|
||||
- Use int3 trap padding between functions instead of trapsleds with a leading jump.
|
||||
- Emit variable length trap padding in retguard epilogue.
|
||||
This adds more trap padding before the return while ensuring that the
|
||||
return is still in the same cache line.
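A small arithmetic sketch in plain C++ of the .fill expression the epilogue emits (see the RETGUARD_JMP_TRAP hunk below): given the current offset from the function start, enough 0xCC (int3) bytes are added that the return instruction lands at offset 0xf of its 16-byte block, keeping it in the same cache line as the traps.

```cpp
#include <cstdio>

int main() {
  // Mirrors ".fill (0xf - ((Dot - Base) & 0xf)), 1, 0xcc" from the patch.
  for (unsigned dotMinusBase = 0; dotMinusBase < 4; ++dotMinusBase) {
    unsigned fill = 0xf - (dotMinusBase & 0xf);
    std::printf("offset 0x%x -> %2u trap bytes, ret lands at offset 0x%x\n",
                dotMinusBase, fill, (dotMinusBase + fill) & 0xf);
  }
}
```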
|
||||
|
||||
Index: lib/Target/X86/X86MCInstLower.cpp
|
||||
--- lib/Target/X86/X86MCInstLower.cpp.orig
|
||||
+++ lib/Target/X86/X86MCInstLower.cpp
|
||||
@@ -1786,6 +1786,27 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr
|
||||
@@ -1786,6 +1786,48 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr
|
||||
MCInstBuilder(X86::MOV64rr).addReg(X86::R10).addReg(X86::RAX));
|
||||
return;
|
||||
|
||||
+ case X86::RETGUARD_JMP_TRAP: {
|
||||
+ // Make a symbol for the end of the trapsled and emit a jump to it
|
||||
+ MCSymbol *RGSuccSym = OutContext.createTempSymbol();
|
||||
+ const MCExpr *RGSuccExpr = MCSymbolRefExpr::create(RGSuccSym, OutContext);
|
||||
+ EmitAndCountInstruction(MCInstBuilder(X86::JE_1).addExpr(RGSuccExpr));
|
||||
+
|
||||
+ // Emit at least two trap instructions
|
||||
+ EmitAndCountInstruction(MCInstBuilder(X86::INT3));
|
||||
+ EmitAndCountInstruction(MCInstBuilder(X86::INT3));
|
||||
+
|
||||
+ // Now .fill up to 0xe byte, so the ret happens on 0xf
|
||||
+ MCSymbol *Dot = OutContext.createTempSymbol();
|
||||
+ OutStreamer->EmitLabel(Dot);
|
||||
+ const MCExpr *DotE = MCSymbolRefExpr::create(Dot, OutContext);
|
||||
+ const MCExpr *BaseE = MCSymbolRefExpr::create(
|
||||
+ TM.getSymbol(&MF->getFunction()), OutContext);
|
||||
+ // .fill (0xf - ((DotE - BaseE) & 0xf)), 1, 0xcc
|
||||
+ const MCExpr *FillE = MCBinaryExpr::createSub(
|
||||
+ MCConstantExpr::create(0xf, OutContext),
|
||||
+ MCBinaryExpr::createAnd(
|
||||
+ MCBinaryExpr::createSub(DotE, BaseE, OutContext),
|
||||
+ MCConstantExpr::create(0xf, OutContext),
|
||||
+ OutContext),
|
||||
+ OutContext);
|
||||
+ OutStreamer->emitFill(*FillE, 0xCC);
|
||||
+
|
||||
+ // And finally emit the jump target symbol
|
||||
+ OutStreamer->EmitLabel(RGSuccSym);
|
||||
+ return;
|
||||
+ }
|
||||
@ -50,7 +74,7 @@ Index: lib/Target/X86/X86MCInstLower.cpp
|
||||
case X86::SEH_PushReg:
|
||||
case X86::SEH_SaveReg:
|
||||
case X86::SEH_SaveXMM:
|
||||
@@ -2223,4 +2244,10 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr
|
||||
@@ -2223,4 +2265,10 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr
|
||||
}
|
||||
|
||||
EmitAndCountInstruction(TmpInst);
|
||||
|
@ -1,11 +1,14 @@
|
||||
$OpenBSD: patch-lib_Target_X86_X86ReturnProtectorLowering_cpp,v 1.1 2018/08/21 06:56:09 ajacoutot Exp $
|
||||
$OpenBSD: patch-lib_Target_X86_X86ReturnProtectorLowering_cpp,v 1.2 2019/07/09 13:21:37 jca Exp $
|
||||
|
||||
Refactor retguard to make adding additional arches easier.
|
||||
- Refactor retguard to make adding additional arches easier.
|
||||
- Do not store the retguard cookie in frame in leaf functions if possible.
|
||||
Makes things slightly faster and also improves security in these functions,
|
||||
since the retguard cookie can't leak via the stack.
|
||||
|
||||
Index: lib/Target/X86/X86ReturnProtectorLowering.cpp
|
||||
--- lib/Target/X86/X86ReturnProtectorLowering.cpp.orig
|
||||
+++ lib/Target/X86/X86ReturnProtectorLowering.cpp
|
||||
@@ -0,0 +1,118 @@
|
||||
@@ -0,0 +1,121 @@
|
||||
+//===-- X86ReturnProtectorLowering.cpp - ----------------------------------===//
|
||||
+//
|
||||
+// The LLVM Compiler Infrastructure
|
||||
@ -46,7 +49,6 @@ Index: lib/Target/X86/X86ReturnProtectorLowering.cpp
|
||||
+ const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
|
||||
+ unsigned REG = MF.getFrameInfo().getReturnProtectorRegister();
|
||||
+
|
||||
+ MBB.addLiveIn(REG);
|
||||
+ BuildMI(MBB, MI, MBBDL, TII->get(X86::MOV64rm), REG)
|
||||
+ .addReg(X86::RIP)
|
||||
+ .addImm(0)
|
||||
@ -65,7 +67,6 @@ Index: lib/Target/X86/X86ReturnProtectorLowering.cpp
|
||||
+ const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
|
||||
+ unsigned REG = MF.getFrameInfo().getReturnProtectorRegister();
|
||||
+
|
||||
+ MBB.addLiveIn(REG);
|
||||
+ addDirectMem(BuildMI(MBB, MI, MBBDL, TII->get(X86::XOR64rm), REG).addReg(REG),
|
||||
+ X86::RSP);
|
||||
+ BuildMI(MBB, MI, MBBDL, TII->get(X86::CMP64rm))
|
||||
@ -110,15 +111,20 @@ Index: lib/Target/X86/X86ReturnProtectorLowering.cpp
|
||||
+ switch (F.arg_size()) {
|
||||
+ case 0:
|
||||
+ TempRegs.push_back(X86::RDI);
|
||||
+ LLVM_FALLTHROUGH;
|
||||
+ case 1:
|
||||
+ TempRegs.push_back(X86::RSI);
|
||||
+ LLVM_FALLTHROUGH;
|
||||
+ case 2: // RDX is the 2nd return register
|
||||
+ case 3:
|
||||
+ TempRegs.push_back(X86::RCX);
|
||||
+ LLVM_FALLTHROUGH;
|
||||
+ case 4:
|
||||
+ TempRegs.push_back(X86::R8);
|
||||
+ LLVM_FALLTHROUGH;
|
||||
+ case 5:
|
||||
+ TempRegs.push_back(X86::R9);
|
||||
+ LLVM_FALLTHROUGH;
|
||||
+ default:
|
||||
+ break;
|
||||
+ }
|
||||
|
@ -1,22 +0,0 @@
|
||||
$OpenBSD: patch-lib_Target_X86_X86Subtarget_cpp,v 1.3 2019/07/06 15:06:36 jca Exp $
|
||||
|
||||
Turn on -mretpoline by default in clang on amd64.
|
||||
|
||||
Index: lib/Target/X86/X86Subtarget.cpp
|
||||
--- lib/Target/X86/X86Subtarget.cpp.orig
|
||||
+++ lib/Target/X86/X86Subtarget.cpp
|
||||
@@ -251,6 +251,14 @@ void X86Subtarget::initSubtargetFeatures(StringRef CPU
|
||||
FullFS = "+sahf";
|
||||
}
|
||||
|
||||
+ // OpenBSD/amd64 defaults to -mretpoline.
|
||||
+ if (isTargetOpenBSD() && In64BitMode) {
|
||||
+ if (!FullFS.empty())
|
||||
+ FullFS = "+retpoline," + FullFS;
|
||||
+ else
|
||||
+ FullFS = "+retpoline";
|
||||
+ }
|
||||
+
|
||||
// Parse features string and set the CPU.
|
||||
ParseSubtargetFeatures(CPUName, FullFS);
|
||||
|
@ -0,0 +1,22 @@
|
||||
$OpenBSD: patch-tools_clang_include_clang_AST_FormatString_h,v 1.1 2019/07/09 13:21:37 jca Exp $
|
||||
|
||||
- The %b printf extension in the kernel is not fixed to an int type. On sparc64
|
||||
there are various %llb formats. Adjust the code to handle the length specifiers
|
||||
and type checking the same way as in the regular case.
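As a rough userland approximation, assuming the decode-string layout used by the OpenBSD kernel's %b and simplifying the details (bit indices above 31 and exact spacing are not handled), this is roughly what a 64-bit %llb conversion prints, which is why the checker now has to accept length modifiers on %b.

```cpp
#include <cstdint>
#include <cstdio>

// Approximate "%b" semantics: print the value, then the names of the set bits
// taken from a decode string of <bit-number><name> pairs whose first byte
// selects the numeric base (8, 10 or 16).
static void print_b(unsigned long long v, const char *d) {
  int base = *d++;
  std::printf(base == 8 ? "%llo" : base == 10 ? "%llu" : "%llx", v);
  char sep = '<';
  const char *p = d;
  while (*p) {
    int bit = *p++;                 // 1-based bit index (control byte)
    const char *name = p;
    while (*p > ' ') p++;           // flag name runs until the next control byte
    if (v & (1ULL << (bit - 1))) {
      std::printf("%c%.*s", sep, (int)(p - name), name);
      sep = ',';
    }
  }
  if (sep == ',')
    std::printf(">");
  std::printf("\n");
}

int main() {
  print_b(0x5, "\020\01READY\03BUSY");  // roughly: 5<READY,BUSY>
}
```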
|
||||
|
||||
Index: tools/clang/include/clang/AST/FormatString.h
|
||||
--- tools/clang/include/clang/AST/FormatString.h.orig
|
||||
+++ tools/clang/include/clang/AST/FormatString.h
|
||||
@@ -227,8 +227,10 @@ class ConversionSpecifier { (public)
|
||||
|
||||
bool isIntArg() const { return (kind >= IntArgBeg && kind <= IntArgEnd) ||
|
||||
kind == FreeBSDrArg || kind == FreeBSDyArg; }
|
||||
- bool isUIntArg() const { return kind >= UIntArgBeg && kind <= UIntArgEnd; }
|
||||
- bool isAnyIntArg() const { return kind >= IntArgBeg && kind <= UIntArgEnd; }
|
||||
+ bool isUIntArg() const { return (kind >= UIntArgBeg && kind <= UIntArgEnd) ||
|
||||
+ kind == FreeBSDbArg; }
|
||||
+ bool isAnyIntArg() const { return (kind >= IntArgBeg && kind <= UIntArgEnd) ||
|
||||
+ kind == FreeBSDbArg; }
|
||||
bool isDoubleArg() const {
|
||||
return kind >= DoubleArgBeg && kind <= DoubleArgEnd;
|
||||
}
|
@ -1,4 +1,4 @@
|
||||
$OpenBSD: patch-tools_clang_include_clang_Basic_CodeGenOptions_def,v 1.1 2019/07/06 15:06:36 jca Exp $
|
||||
$OpenBSD: patch-tools_clang_include_clang_Basic_CodeGenOptions_def,v 1.2 2019/07/09 13:21:37 jca Exp $
|
||||
|
||||
Add RETGUARD to clang for amd64. This security mechanism uses per-function
|
||||
random cookies to protect access to function return instructions, with the
|
||||
|
@ -0,0 +1,39 @@
|
||||
$OpenBSD: patch-tools_clang_lib_AST_FormatString_cpp,v 1.1 2019/07/09 13:21:37 jca Exp $
|
||||
|
||||
- The %b printf extension in the kernel is not fixed to an int type. On sparc64
|
||||
there are various %llb formats. Adjust the code to handle the length specifiers
|
||||
and type checking the same way as in the regular case.
|
||||
|
||||
Index: tools/clang/lib/AST/FormatString.cpp
|
||||
--- tools/clang/lib/AST/FormatString.cpp.orig
|
||||
+++ tools/clang/lib/AST/FormatString.cpp
|
||||
@@ -746,6 +746,10 @@ bool FormatSpecifier::hasValidLengthModifier(const Tar
|
||||
case ConversionSpecifier::XArg:
|
||||
case ConversionSpecifier::nArg:
|
||||
return true;
|
||||
+ case ConversionSpecifier::FreeBSDbArg:
|
||||
+ return Target.getTriple().isOSFreeBSD() ||
|
||||
+ Target.getTriple().isPS4() ||
|
||||
+ Target.getTriple().isOSOpenBSD();
|
||||
case ConversionSpecifier::FreeBSDrArg:
|
||||
case ConversionSpecifier::FreeBSDyArg:
|
||||
return Target.getTriple().isOSFreeBSD() || Target.getTriple().isPS4();
|
||||
@@ -779,6 +783,10 @@ bool FormatSpecifier::hasValidLengthModifier(const Tar
|
||||
case ConversionSpecifier::ScanListArg:
|
||||
case ConversionSpecifier::ZArg:
|
||||
return true;
|
||||
+ case ConversionSpecifier::FreeBSDbArg:
|
||||
+ return Target.getTriple().isOSFreeBSD() ||
|
||||
+ Target.getTriple().isPS4() ||
|
||||
+ Target.getTriple().isOSOpenBSD();
|
||||
case ConversionSpecifier::FreeBSDrArg:
|
||||
case ConversionSpecifier::FreeBSDyArg:
|
||||
return Target.getTriple().isOSFreeBSD() || Target.getTriple().isPS4();
|
||||
@@ -937,6 +945,7 @@ bool FormatSpecifier::hasStandardLengthConversionCombi
|
||||
case ConversionSpecifier::uArg:
|
||||
case ConversionSpecifier::xArg:
|
||||
case ConversionSpecifier::XArg:
|
||||
+ case ConversionSpecifier::FreeBSDbArg:
|
||||
return false;
|
||||
default:
|
||||
return true;
|
@ -0,0 +1,22 @@
|
||||
$OpenBSD: patch-tools_clang_lib_Basic_Targets_Mips_h,v 1.1 2019/07/09 13:21:37 jca Exp $
|
||||
|
||||
- Implement the 'h' register constraint on mips64. This lets clang build
|
||||
pieces of software that use the constraint if the compiler claims
|
||||
to be compatible with GCC 4.2.1.
|
||||
Note that the constraint was removed in GCC 4.4. The reason was that
|
||||
'h' could generate code whose result is unpredictable. The underlying
|
||||
reason is that the HI and LO registers are special, and the optimizer
|
||||
has to be careful when choosing the order of HI/LO accesses. It looks
|
||||
like LLVM has the needed logic.
|
||||
|
||||
Index: tools/clang/lib/Basic/Targets/Mips.h
|
||||
--- tools/clang/lib/Basic/Targets/Mips.h.orig
|
||||
+++ tools/clang/lib/Basic/Targets/Mips.h
|
||||
@@ -238,6 +238,7 @@ class LLVM_LIBRARY_VISIBILITY MipsTargetInfo : public
|
||||
case 'y': // Equivalent to "r", backward compatibility only.
|
||||
case 'f': // floating-point registers.
|
||||
case 'c': // $25 for indirect jumps
|
||||
+ case 'h': // hi register
|
||||
case 'l': // lo register
|
||||
case 'x': // hilo register pair
|
||||
Info.setAllowsRegister();
|
@ -0,0 +1,19 @@
|
||||
$OpenBSD: patch-tools_clang_lib_Driver_ToolChains_Arch_X86_cpp,v 1.1 2019/07/09 13:21:37 jca Exp $
|
||||
|
||||
- Turn on -mretpoline by default in clang on amd64.
|
||||
|
||||
Index: tools/clang/lib/Driver/ToolChains/Arch/X86.cpp
|
||||
--- tools/clang/lib/Driver/ToolChains/Arch/X86.cpp.orig
|
||||
+++ tools/clang/lib/Driver/ToolChains/Arch/X86.cpp
|
||||
@@ -146,6 +146,11 @@ void x86::getX86TargetFeatures(const Driver &D, const
|
||||
// flags). This is a bit hacky but keeps existing usages working. We should
|
||||
// consider deprecating this and instead warn if the user requests external
|
||||
// retpoline thunks and *doesn't* request some form of retpolines.
|
||||
+ if (Triple.isOSOpenBSD() && Triple.getArch() == llvm::Triple::x86_64 &&
|
||||
+ Args.hasFlag(options::OPT_mretpoline, options::OPT_mno_retpoline, true)) {
|
||||
+ Features.push_back("+retpoline-indirect-calls");
|
||||
+ Features.push_back("+retpoline-indirect-branches");
|
||||
+ } else
|
||||
if (Args.hasArgNoClaim(options::OPT_mretpoline, options::OPT_mno_retpoline,
|
||||
options::OPT_mspeculative_load_hardening,
|
||||
options::OPT_mno_speculative_load_hardening)) {
|
@ -1,6 +1,7 @@
|
||||
$OpenBSD: patch-tools_lld_ELF_Options_td,v 1.4 2019/07/06 15:06:36 jca Exp $
|
||||
$OpenBSD: patch-tools_lld_ELF_Options_td,v 1.5 2019/07/09 13:21:37 jca Exp $
|
||||
|
||||
[ELF] Add -nopie alias for -no-pie
|
||||
- [ELF] Add -nopie alias for -no-pie
|
||||
- defm ignore_function_address_equality: TODO (document it)
|
||||
|
||||
Index: tools/lld/ELF/Options.td
|
||||
--- tools/lld/ELF/Options.td.orig
|
||||
@ -17,11 +18,11 @@ Index: tools/lld/ELF/Options.td
|
||||
|
||||
def ignore_data_address_equality: F<"ignore-data-address-equality">,
|
||||
HelpText<"lld can break the address equality of data">;
|
||||
@@ -407,6 +408,7 @@ def: Separate<["-"], "b">, Alias<format>, HelpText<"Al
|
||||
@@ -406,6 +407,7 @@ def: Separate<["-"], "F">, Alias<filter>, HelpText<"Al
|
||||
def: Separate<["-"], "b">, Alias<format>, HelpText<"Alias for --format">;
|
||||
def: JoinedOrSeparate<["-"], "l">, Alias<library>, HelpText<"Alias for --library">;
|
||||
def: JoinedOrSeparate<["-"], "L">, Alias<library_path>, HelpText<"Alias for --library-path">;
|
||||
def: F<"no-pic-executable">, Alias<no_pie>, HelpText<"Alias for --no-pie">;
|
||||
+def: F<"nopie">, Alias<no_pie>, HelpText<"Alias for --no-pie">;
|
||||
def: F<"no-pic-executable">, Alias<no_pie>, HelpText<"Alias for --no-pie">;
|
||||
def: Flag<["-"], "N">, Alias<omagic>, HelpText<"Alias for --omagic">;
|
||||
def: Joined<["--"], "output=">, Alias<o>, HelpText<"Alias for -o">;
|
||||
def: Separate<["--"], "output">, Alias<o>, HelpText<"Alias for -o">;
|
||||
|
devel/llvm/patches/patch-tools_lld_ELF_SyntheticSections_cpp: 23 lines (normal file)
@ -0,0 +1,23 @@
|
||||
$OpenBSD: patch-tools_lld_ELF_SyntheticSections_cpp,v 1.12 2019/07/09 13:21:37 jca Exp $
|
||||
|
||||
- When merging sections into the output, lld tries to adjust the alignment of
|
||||
the section to be at least as large as the entry size of the section.
|
||||
This causes a later check that validates the alignment to fail if the
|
||||
entry size isn't a power of two. This happens when building some of the
|
||||
java support code in ports gcc. Fix this by sticking to the original
|
||||
alignment if the entry size isn't a power of two.
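A minimal standalone sketch of the adjusted rule (not lld's actual code): round the output alignment up to the entry size only when that size is a power of two.

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

// Mirrors the patched logic in elf::mergeSections().
static uint32_t outputAlignment(uint32_t alignment, uint32_t entSize) {
  bool isPow2 = entSize != 0 && (entSize & (entSize - 1)) == 0;
  return isPow2 ? std::max(alignment, entSize) : alignment;
}

int main() {
  std::printf("%u\n", outputAlignment(4, 8));   // 8: entry size is a power of two
  std::printf("%u\n", outputAlignment(4, 12));  // 4: keep the original alignment
}
```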
|
||||
|
||||
Index: tools/lld/ELF/SyntheticSections.cpp
|
||||
--- tools/lld/ELF/SyntheticSections.cpp.orig
|
||||
+++ tools/lld/ELF/SyntheticSections.cpp
|
||||
@@ -2996,7 +2996,9 @@ void elf::mergeSections() {
|
||||
}
|
||||
|
||||
StringRef OutsecName = getOutputSectionName(MS);
|
||||
- uint32_t Alignment = std::max<uint32_t>(MS->Alignment, MS->Entsize);
|
||||
+ uint32_t Alignment = MS->Alignment;
|
||||
+ if (isPowerOf2_32(MS->Entsize))
|
||||
+ Alignment = std::max<uint32_t>(Alignment, MS->Entsize);
|
||||
|
||||
auto I = llvm::find_if(MergeSections, [=](MergeSyntheticSection *Sec) {
|
||||
// While we could create a single synthetic section for two different
|