MipsNaClELFStreamer.cpp
//===-- MipsNaClELFStreamer.cpp - ELF Object Output for Mips NaCl ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements MCELFStreamer for Mips NaCl. It emits .o object files
// as required by NaCl's SFI sandbox. It inserts address-masking instructions
// before dangerous control-flow and memory access instructions. It inserts
// address-masking instructions after instructions that change the stack
// pointer. It ensures that the mask and the dangerous instruction are always
// emitted in the same bundle. It aligns call + branch delay to the bundle end,
// so that the return address is always aligned to the start of the next
// bundle.
//
//===----------------------------------------------------------------------===//
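//
// As an illustration (a sketch, not code emitted verbatim by this file; the
// register roles follow the constants defined below), an indirect jump
// through $t9 comes out as a bundle-locked pair:
//
//   and $t9, $t9, $t6   # clear forbidden address bits (IndirectBranchMaskReg)
//   jr  $t9             # target is now guaranteed to stay inside the sandbox
//
// so a bundle boundary can never separate the mask from the jump.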

#include "Mips.h"
#include "MipsELFStreamer.h"
#include "MipsMCNaCl.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "mips-mc-nacl"

namespace {

const unsigned IndirectBranchMaskReg = Mips::T6;
const unsigned LoadStoreStackMaskReg = Mips::T7;
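
// Register roles assumed throughout this file, following the NaCl MIPS
// sandbox conventions: $t6 holds the mask for code addresses, $t7 the mask
// for data addresses, and $t8 (exempted from masking in
// baseRegNeedsLoadStoreMask below) is reserved as the thread pointer.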

/// Extend the generic MCELFStreamer class so that it can mask dangerous
/// instructions.
class MipsNaClELFStreamer : public MipsELFStreamer {
public:
  MipsNaClELFStreamer(MCContext &Context, std::unique_ptr<MCAsmBackend> TAB,
                      std::unique_ptr<MCObjectWriter> OW,
                      std::unique_ptr<MCCodeEmitter> Emitter)
      : MipsELFStreamer(Context, std::move(TAB), std::move(OW),
                        std::move(Emitter)) {}

  ~MipsNaClELFStreamer() override = default;

private:
  // Whether we started the sandboxing sequence for calls. Calls are bundled
  // with branch delays and aligned to the bundle end.
  bool PendingCall = false;
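
  // Illustration of the call sandboxing this flag controls (a sketch; the
  // bundle size comes from MIPS_NACL_BUNDLE_ALIGN, 16 bytes here): the call
  // and its delay-slot instruction are pushed to the end of a bundle,
  //
  //   nop                 # padding emitted by the align-to-end bundle lock
  //   jal foo             # bytes 8..11 of the bundle
  //   nop                 # branch delay slot, bytes 12..15
  //
  // so the return address (call + 8) is the start of the next bundle.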

  bool isIndirectJump(const MCInst &MI) {
    if (MI.getOpcode() == Mips::JALR) {
      // MIPS32r6/MIPS64r6 doesn't have a JR instruction and uses JALR instead.
      // JALR is an indirect branch if the link register is $0.
      assert(MI.getOperand(0).isReg());
      return MI.getOperand(0).getReg() == Mips::ZERO;
    }
    return MI.getOpcode() == Mips::JR;
  }

  bool isStackPointerFirstOperand(const MCInst &MI) {
    return (MI.getNumOperands() > 0 && MI.getOperand(0).isReg()
            && MI.getOperand(0).getReg() == Mips::SP);
  }

  bool isCall(const MCInst &MI, bool *IsIndirectCall) {
    unsigned Opcode = MI.getOpcode();

    *IsIndirectCall = false;

    switch (Opcode) {
    default:
      return false;

    case Mips::JAL:
    case Mips::BAL:
    case Mips::BAL_BR:
    case Mips::BLTZAL:
    case Mips::BGEZAL:
      return true;

    case Mips::JALR:
      // JALR is only a call if the link register is not $0. Otherwise it's an
      // indirect branch.
      assert(MI.getOperand(0).isReg());
      if (MI.getOperand(0).getReg() == Mips::ZERO)
        return false;

      *IsIndirectCall = true;
      return true;
    }
  }

  void emitMask(unsigned AddrReg, unsigned MaskReg,
                const MCSubtargetInfo &STI) {
    // Emit "and AddrReg, AddrReg, MaskReg" to clamp the address in place.
    MCInst MaskInst;
    MaskInst.setOpcode(Mips::AND);
    MaskInst.addOperand(MCOperand::createReg(AddrReg));
    MaskInst.addOperand(MCOperand::createReg(AddrReg));
    MaskInst.addOperand(MCOperand::createReg(MaskReg));
    MipsELFStreamer::emitInstruction(MaskInst, STI);
  }

  // Sandbox indirect branch or return instruction by inserting mask operation
  // before it.
  void sandboxIndirectJump(const MCInst &MI, const MCSubtargetInfo &STI) {
    unsigned AddrReg = MI.getOperand(0).getReg();

    emitBundleLock(false);
    emitMask(AddrReg, IndirectBranchMaskReg, STI);
    MipsELFStreamer::emitInstruction(MI, STI);
    emitBundleUnlock();
  }

  // Sandbox memory access or SP change. Insert mask operation before and/or
  // after the instruction.
  void sandboxLoadStoreStackChange(const MCInst &MI, unsigned AddrIdx,
                                   const MCSubtargetInfo &STI, bool MaskBefore,
                                   bool MaskAfter) {
    emitBundleLock(false);
    if (MaskBefore) {
      // Sandbox memory access.
      unsigned BaseReg = MI.getOperand(AddrIdx).getReg();
      emitMask(BaseReg, LoadStoreStackMaskReg, STI);
    }
    MipsELFStreamer::emitInstruction(MI, STI);
    if (MaskAfter) {
      // Sandbox SP change.
      unsigned SPReg = MI.getOperand(0).getReg();
      assert((Mips::SP == SPReg) && "Unexpected stack-pointer register.");
      emitMask(SPReg, LoadStoreStackMaskReg, STI);
    }
    emitBundleUnlock();
  }
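
  // Sketch of the emitted sequence (hypothetical operands): a load through
  // $a1 becomes
  //
  //   and $a1, $a1, $t7   # MaskBefore: clamp the base address
  //   lw  $v0, 0($a1)
  //
  // while an instruction that writes $sp, e.g. "lw $sp, 0($sp)", is instead
  // followed by "and $sp, $sp, $t7" (MaskAfter) within the same bundle.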

public:
  /// This function is the one used to emit instruction data into the ELF
  /// streamer. We override it to mask dangerous instructions.
  void emitInstruction(const MCInst &Inst,
                       const MCSubtargetInfo &STI) override {
    // Sandbox indirect jumps.
    if (isIndirectJump(Inst)) {
      if (PendingCall)
        report_fatal_error("Dangerous instruction in branch delay slot!");
      sandboxIndirectJump(Inst, STI);
      return;
    }

    // Sandbox loads, stores and SP changes.
    unsigned AddrIdx = 0;
    bool IsStore = false;
    bool IsMemAccess = isBasePlusOffsetMemoryAccess(Inst.getOpcode(), &AddrIdx,
                                                    &IsStore);
    bool IsSPFirstOperand = isStackPointerFirstOperand(Inst);
    if (IsMemAccess || IsSPFirstOperand) {
      bool MaskBefore = (IsMemAccess
                         && baseRegNeedsLoadStoreMask(Inst.getOperand(AddrIdx)
                                                          .getReg()));
      bool MaskAfter = IsSPFirstOperand && !IsStore;
      if (MaskBefore || MaskAfter) {
        if (PendingCall)
          report_fatal_error("Dangerous instruction in branch delay slot!");
        sandboxLoadStoreStackChange(Inst, AddrIdx, STI, MaskBefore, MaskAfter);
        return;
      }
      // fallthrough
    }

    // Sandbox calls by aligning call and branch delay to the bundle end.
    // For indirect calls, emit the mask before the call.
    bool IsIndirectCall;
    if (isCall(Inst, &IsIndirectCall)) {
      if (PendingCall)
        report_fatal_error("Dangerous instruction in branch delay slot!");

      // Start the sandboxing sequence by emitting call.
      emitBundleLock(true);
      if (IsIndirectCall) {
        unsigned TargetReg = Inst.getOperand(1).getReg();
        emitMask(TargetReg, IndirectBranchMaskReg, STI);
      }
      MipsELFStreamer::emitInstruction(Inst, STI);
      PendingCall = true;
      return;
    }
    if (PendingCall) {
      // Finish the sandboxing sequence by emitting branch delay.
      MipsELFStreamer::emitInstruction(Inst, STI);
      emitBundleUnlock();
      PendingCall = false;
      return;
    }

    // None of the sandboxing applies, just emit the instruction.
    MipsELFStreamer::emitInstruction(Inst, STI);
  }
};

} // end anonymous namespace

namespace llvm {

bool isBasePlusOffsetMemoryAccess(unsigned Opcode, unsigned *AddrIdx,
                                  bool *IsStore) {
  if (IsStore)
    *IsStore = false;

  switch (Opcode) {
  default:
    return false;

  // Load instructions with base address register in position 1.
  case Mips::LB:
  case Mips::LBu:
  case Mips::LH:
  case Mips::LHu:
  case Mips::LW:
  case Mips::LWC1:
  case Mips::LDC1:
  case Mips::LL:
  case Mips::LL_R6:
  case Mips::LWL:
  case Mips::LWR:
    *AddrIdx = 1;
    return true;

  // Store instructions with base address register in position 1.
  case Mips::SB:
  case Mips::SH:
  case Mips::SW:
  case Mips::SWC1:
  case Mips::SDC1:
  case Mips::SWL:
  case Mips::SWR:
    *AddrIdx = 1;
    if (IsStore)
      *IsStore = true;
    return true;

  // Store instructions with base address register in position 2.
  case Mips::SC:
  case Mips::SC_R6:
    *AddrIdx = 2;
    if (IsStore)
      *IsStore = true;
    return true;
  }
}

bool baseRegNeedsLoadStoreMask(unsigned Reg) {
  // The contents of SP and thread pointer register do not require masking.
  return Reg != Mips::SP && Reg != Mips::T8;
}

MCELFStreamer *createMipsNaClELFStreamer(MCContext &Context,
                                         std::unique_ptr<MCAsmBackend> TAB,
                                         std::unique_ptr<MCObjectWriter> OW,
                                         std::unique_ptr<MCCodeEmitter> Emitter,
                                         bool RelaxAll) {
  MipsNaClELFStreamer *S = new MipsNaClELFStreamer(
      Context, std::move(TAB), std::move(OW), std::move(Emitter));
  if (RelaxAll)
    S->getAssembler().setRelaxAll(true);

  // Set bundle-alignment as required by the NaCl ABI for the target.
  S->emitBundleAlignMode(MIPS_NACL_BUNDLE_ALIGN);

  return S;
}
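
// Typical use (a sketch of the caller's side, assuming the usual
// MipsMCTargetDesc streamer hook): the NaCl variant is selected from the
// target triple, e.g.
//
//   if (TheTriple.isOSNaCl())
//     S = createMipsNaClELFStreamer(Context, std::move(MAB), std::move(OW),
//                                   std::move(Emitter), RelaxAll);
//   else
//     S = createMipsELFStreamer(Context, std::move(MAB), std::move(OW),
//                               std::move(Emitter), RelaxAll);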

} // end namespace llvm