//===-- MipsNaClELFStreamer.cpp - ELF Object Output for Mips NaCl ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements MCELFStreamer for Mips NaCl. It emits .o object files
// as required by NaCl's SFI sandbox. It inserts address-masking instructions
// before dangerous control-flow and memory access instructions. It inserts
// address-masking instructions after instructions that change the stack
// pointer. It ensures that the mask and the dangerous instruction are always
// emitted in the same bundle. It aligns call + branch delay to the bundle end,
// so that the return address is always aligned to the start of the next
// bundle.
//
//===----------------------------------------------------------------------===//

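// For orientation, a sketch of the sandboxed sequences this streamer
// produces (GAS-style register names; $t6 and $t7 hold the sandbox masks,
// matching the mask-register constants below):
//
//   indirect branch:            stack-pointer update:
//     .bundle_lock                .bundle_lock
//     and $t9, $t9, $t6           addiu $sp, $sp, -32
//     jr  $t9                     and   $sp, $sp, $t7
//     .bundle_unlock              .bundle_unlock
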
#include "MipsELFStreamer.h"
#include "MipsMCNaCl.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "mips-mc-nacl"

namespace {

const unsigned IndirectBranchMaskReg = Mips::T6;
const unsigned LoadStoreStackMaskReg = Mips::T7;

/// Extend the generic MCELFStreamer class so that it can mask dangerous
/// instructions.
class MipsNaClELFStreamer : public MipsELFStreamer {
public:
  MipsNaClELFStreamer(MCContext &Context, std::unique_ptr<MCAsmBackend> TAB,
                      std::unique_ptr<MCObjectWriter> OW,
                      std::unique_ptr<MCCodeEmitter> Emitter)
      : MipsELFStreamer(Context, std::move(TAB), std::move(OW),
                        std::move(Emitter)) {}

  ~MipsNaClELFStreamer() override = default;

private:
  // Whether we started the sandboxing sequence for calls. Calls are bundled
  // with branch delays and aligned to the bundle end.
  bool PendingCall = false;

  bool isIndirectJump(const MCInst &MI) {
    if (MI.getOpcode() == Mips::JALR) {
      // MIPS32r6/MIPS64r6 doesn't have a JR instruction and uses JALR instead.
      // JALR is an indirect branch if the link register is $0.
      assert(MI.getOperand(0).isReg());
      return MI.getOperand(0).getReg() == Mips::ZERO;
    }
    return MI.getOpcode() == Mips::JR;
  }
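
  // Examples: `jr $ra` (a function return) is an indirect jump, and so is the
  // r6 form `jalr $zero, $t9`, which links to $zero and is therefore a plain
  // indirect branch rather than a call.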

  bool isStackPointerFirstOperand(const MCInst &MI) {
    return (MI.getNumOperands() > 0 && MI.getOperand(0).isReg()
            && MI.getOperand(0).getReg() == Mips::SP);
  }

  bool isCall(const MCInst &MI, bool *IsIndirectCall) {
    unsigned Opcode = MI.getOpcode();

    *IsIndirectCall = false;

    switch (Opcode) {
    default:
      return false;

    case Mips::JAL:
    case Mips::BAL:
    case Mips::BAL_BR:
    case Mips::BLTZAL:
    case Mips::BGEZAL:
      return true;

    case Mips::JALR:
      // JALR is only a call if the link register is not $0. Otherwise it's an
      // indirect branch.
      assert(MI.getOperand(0).isReg());
      if (MI.getOperand(0).getReg() == Mips::ZERO)
        return false;

      *IsIndirectCall = true;
      return true;
    }
  }
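
  // In practice: `jalr $ra, $t9` is an indirect call (link register $ra),
  // while `jalr $zero, $t9` was already classified above as an indirect jump.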

  void emitMask(unsigned AddrReg, unsigned MaskReg,
                const MCSubtargetInfo &STI) {
    MCInst MaskInst;
    MaskInst.setOpcode(Mips::AND);
    MaskInst.addOperand(MCOperand::createReg(AddrReg));
    MaskInst.addOperand(MCOperand::createReg(AddrReg));
    MaskInst.addOperand(MCOperand::createReg(MaskReg));
    MipsELFStreamer::emitInstruction(MaskInst, STI);
  }
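
  // For example, emitMask(Mips::SP, LoadStoreStackMaskReg, STI) emits
  //   and $sp, $sp, $t7
  // masking the address register in place, so no scratch register is needed.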

  // Sandbox indirect branch or return instruction by inserting mask operation
  // before it.
  void sandboxIndirectJump(const MCInst &MI, const MCSubtargetInfo &STI) {
    unsigned AddrReg = MI.getOperand(0).getReg();

    emitBundleLock(false);
    emitMask(AddrReg, IndirectBranchMaskReg, STI);
    MipsELFStreamer::emitInstruction(MI, STI);
    emitBundleUnlock();
  }
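
  // E.g. sandboxing `jr $t9` yields, within a single bundle:
  //   and $t9, $t9, $t6
  //   jr  $t9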

  // Sandbox memory access or SP change. Insert mask operation before and/or
  // after the instruction.
  void sandboxLoadStoreStackChange(const MCInst &MI, unsigned AddrIdx,
                                   const MCSubtargetInfo &STI, bool MaskBefore,
                                   bool MaskAfter) {
    emitBundleLock(false);
    if (MaskBefore) {
      // Sandbox memory access.
      unsigned BaseReg = MI.getOperand(AddrIdx).getReg();
      emitMask(BaseReg, LoadStoreStackMaskReg, STI);
    }
    MipsELFStreamer::emitInstruction(MI, STI);
    if (MaskAfter) {
      // Sandbox SP change.
      unsigned SPReg = MI.getOperand(0).getReg();
      assert((Mips::SP == SPReg) && "Unexpected stack-pointer register.");
      emitMask(SPReg, LoadStoreStackMaskReg, STI);
    }
    emitBundleUnlock();
  }
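
  // Two typical results: a load `lw $a0, 0($a1)` with MaskBefore set becomes
  //   and $a1, $a1, $t7
  //   lw  $a0, 0($a1)
  // and an SP update `addiu $sp, $sp, -32` with MaskAfter set becomes
  //   addiu $sp, $sp, -32
  //   and   $sp, $sp, $t7
  // with each pair locked into a single bundle.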

public:
  /// This function is the one used to emit instruction data into the ELF
  /// streamer. We override it to mask dangerous instructions.
  void emitInstruction(const MCInst &Inst,
                       const MCSubtargetInfo &STI) override {
    // Sandbox indirect jumps.
    if (isIndirectJump(Inst)) {
      if (PendingCall)
        report_fatal_error("Dangerous instruction in branch delay slot!");
      sandboxIndirectJump(Inst, STI);
      return;
    }

    // Sandbox loads, stores and SP changes.
    unsigned AddrIdx = 0;
    bool IsStore = false;
    bool IsMemAccess = isBasePlusOffsetMemoryAccess(Inst.getOpcode(), &AddrIdx,
                                                    &IsStore);
    bool IsSPFirstOperand = isStackPointerFirstOperand(Inst);
    if (IsMemAccess || IsSPFirstOperand) {
      bool MaskBefore = (IsMemAccess
                         && baseRegNeedsLoadStoreMask(Inst.getOperand(AddrIdx)
                                                          .getReg()));
      bool MaskAfter = IsSPFirstOperand && !IsStore;
      if (MaskBefore || MaskAfter) {
        if (PendingCall)
          report_fatal_error("Dangerous instruction in branch delay slot!");
        sandboxLoadStoreStackChange(Inst, AddrIdx, STI, MaskBefore, MaskAfter);
        return;
      }
      // fallthrough
    }

    // Sandbox calls by aligning call and branch delay to the bundle end.
    // For indirect calls, emit the mask before the call.
    bool IsIndirectCall;
    if (isCall(Inst, &IsIndirectCall)) {
      if (PendingCall)
        report_fatal_error("Dangerous instruction in branch delay slot!");

      // Start the sandboxing sequence by emitting call.
      emitBundleLock(true);
      if (IsIndirectCall) {
        unsigned TargetReg = Inst.getOperand(1).getReg();
        emitMask(TargetReg, IndirectBranchMaskReg, STI);
      }
      MipsELFStreamer::emitInstruction(Inst, STI);
      PendingCall = true;
      return;
    }
    if (PendingCall) {
      // Finish the sandboxing sequence by emitting branch delay.
      MipsELFStreamer::emitInstruction(Inst, STI);
      emitBundleUnlock();
      PendingCall = false;
      return;
    }
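
    // The net effect for a call, e.g. `jalr $ra, $t9` plus its delay-slot
    // instruction, is a group aligned to the end of a bundle:
    //   .bundle_lock align_to_end
    //   and  $t9, $t9, $t6
    //   jalr $ra, $t9
    //   <delay-slot instruction>
    //   .bundle_unlock
    // so the return address in $ra is the start of the next bundle.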

    // None of the sandboxing applies, just emit the instruction.
    MipsELFStreamer::emitInstruction(Inst, STI);
  }
};

} // end anonymous namespace

namespace llvm {

bool isBasePlusOffsetMemoryAccess(unsigned Opcode, unsigned *AddrIdx,
                                  bool *IsStore) {
  if (IsStore)
    *IsStore = false;

  switch (Opcode) {
  default:
    return false;

  // Load instructions with base address register in position 1.
  case Mips::LB:
  case Mips::LBu:
  case Mips::LH:
  case Mips::LHu:
  case Mips::LW:
  case Mips::LWC1:
  case Mips::LDC1:
  case Mips::LL:
  case Mips::LL_R6:
  case Mips::LWL:
  case Mips::LWR:
    *AddrIdx = 1;
    return true;

  // Store instructions with base address register in position 1.
  case Mips::SB:
  case Mips::SH:
  case Mips::SW:
  case Mips::SWC1:
  case Mips::SDC1:
  case Mips::SWL:
  case Mips::SWR:
    *AddrIdx = 1;
    if (IsStore)
      *IsStore = true;
    return true;

  // Store instructions with base address register in position 2.
  case Mips::SC:
  case Mips::SC_R6:
    *AddrIdx = 2;
    if (IsStore)
      *IsStore = true;
    return true;
  }
}
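
// For example, `lw $a0, 8($a1)` reports AddrIdx == 1 (the base register
// $a1), while for `sc $t0, 8($a1)` the base sits at operand 2 because SC
// carries its success result as a separate first operand.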

bool baseRegNeedsLoadStoreMask(unsigned Reg) {
  // The contents of SP and the thread pointer register do not require
  // masking.
  return Reg != Mips::SP && Reg != Mips::T8;
}
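
// Consequently, a stack-relative access such as `lw $a0, 16($sp)` is emitted
// without a preceding mask: $sp is instead kept valid by masking it after
// every update (the MaskAfter path above).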

MCELFStreamer *
createMipsNaClELFStreamer(MCContext &Context, std::unique_ptr<MCAsmBackend> TAB,
                          std::unique_ptr<MCObjectWriter> OW,
                          std::unique_ptr<MCCodeEmitter> Emitter) {
  MipsNaClELFStreamer *S = new MipsNaClELFStreamer(
      Context, std::move(TAB), std::move(OW), std::move(Emitter));

  // Set bundle-alignment as required by the NaCl ABI for the target.
  S->emitBundleAlignMode(MIPS_NACL_BUNDLE_ALIGN);

  return S;
}
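
// A caller is expected to hand over freshly created MC components, e.g.
// (sketch; Ctx, MAB, OW and CE are assumed to be suitably constructed):
//   MCELFStreamer *S = createMipsNaClELFStreamer(
//       Ctx, std::move(MAB), std::move(OW), std::move(CE));
// after which every emitInstruction() call on S applies the sandboxing above.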

} // end namespace llvm