LLVM  3.7.0
X86AsmInstrumentation.cpp
Go to the documentation of this file.
1 //===-- X86AsmInstrumentation.cpp - Instrument X86 inline assembly C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 
11 #include "X86AsmInstrumentation.h"
12 #include "X86Operand.h"
13 #include "X86RegisterInfo.h"
14 #include "llvm/ADT/StringExtras.h"
15 #include "llvm/ADT/Triple.h"
17 #include "llvm/MC/MCAsmInfo.h"
18 #include "llvm/MC/MCContext.h"
19 #include "llvm/MC/MCInst.h"
20 #include "llvm/MC/MCInstBuilder.h"
21 #include "llvm/MC/MCInstrInfo.h"
23 #include "llvm/MC/MCStreamer.h"
28 #include <algorithm>
29 #include <cassert>
30 #include <vector>
31 
32 // Following comment describes how assembly instrumentation works.
33 // Currently we have only AddressSanitizer instrumentation, but we're
34 // planning to implement MemorySanitizer for inline assembly too. If
35 // you're not familiar with AddressSanitizer algorithm, please, read
36 // https://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm.
37 //
38 // When inline assembly is parsed by an instance of X86AsmParser, all
39 // instructions are emitted via EmitInstruction method. That's the
40 // place where X86AsmInstrumentation analyzes an instruction and
41 // decides, whether the instruction should be emitted as is or
42 // instrumentation is required. The latter case happens when an
43 // instruction reads from or writes to memory. Now instruction opcode
44 // is explicitly checked, and if an instruction has a memory operand
45 // (for instance, movq (%rsi, %rcx, 8), %rax) - it should be
46 // instrumented. There also exist instructions that modify
47 // memory but don't have explicit memory operands, for instance,
48 // movs.
49 //
50 // Let's consider at first 8-byte memory accesses when an instruction
51 // has an explicit memory operand. In this case we need two registers -
52 // AddressReg to compute address of a memory cells which are accessed
53 // and ShadowReg to compute corresponding shadow address. So, we need
54 // to spill both registers before instrumentation code and restore them
55 // after instrumentation. Thus, in general, instrumentation code will
56 // look like this:
57 // PUSHF # Store flags, otherwise they will be overwritten
58 // PUSH AddressReg # spill AddressReg
59 // PUSH ShadowReg # spill ShadowReg
60 // LEA MemOp, AddressReg # compute address of the memory operand
61 // MOV AddressReg, ShadowReg
62 // SHR ShadowReg, 3
63 // # ShadowOffset(AddressReg >> 3) contains address of a shadow
64 // # corresponding to MemOp.
65 // CMP ShadowOffset(ShadowReg), 0 # test shadow value
66 // JZ .Done # when shadow equals to zero, everything is fine
67 // MOV AddressReg, RDI
68 // # Call __asan_report function with AddressReg as an argument
69 // CALL __asan_report
70 // .Done:
71 // POP ShadowReg # Restore ShadowReg
72 // POP AddressReg # Restore AddressReg
73 // POPF # Restore flags
74 //
75 // Memory accesses with different size (1-, 2-, 4- and 16-byte) are
76 // handled in a similar manner, but small memory accesses (less than 8
77 // byte) require an additional ScratchReg, which is used for shadow value.
78 //
79 // If, suppose, we're instrumenting an instruction like movs, only
80 // contents of RDI, RDI + AccessSize * RCX, RSI, RSI + AccessSize *
81 // RCX are checked. In this case there's no need to spill and restore
82 // AddressReg, ShadowReg or flags four times; they're saved on stack
83 // just once, before instrumentation of these four addresses, and restored
84 // at the end of the instrumentation.
85 //
86 // There exist several things which complicate this simple algorithm.
87 // * Instrumented memory operand can have RSP as a base or an index
88 // register. So we need to add a constant offset before computation
89 // of memory address, since flags, AddressReg, ShadowReg, etc. were
90 // already stored on stack and RSP was modified.
91 // * Debug info (usually, DWARF) should be adjusted, because sometimes
92 // RSP is used as a frame register. So, we need to select some
93 // register as a frame register and temporarily override the current CFA
94 // register.
95 
96 namespace llvm {
97 namespace {
98 
// Command-line flag gating the whole pass: inline assembly is only
// instrumented with ASan checks when -asan-instrument-assembly is given
// (hidden, off by default).
static cl::opt<bool> ClAsanInstrumentAssembly(
    "asan-instrument-assembly",
    cl::desc("instrument assembly with AddressSanitizer checks"), cl::Hidden,
    cl::init(false));
103 
// Memory-operand displacements must fit into a signed 32-bit immediate.
const int64_t MinAllowedDisplacement = std::numeric_limits<int32_t>::min();
const int64_t MaxAllowedDisplacement = std::numeric_limits<int32_t>::max();

// Clamps Displacement into [MinAllowedDisplacement, MaxAllowedDisplacement].
int64_t ApplyDisplacementBounds(int64_t Displacement) {
  if (Displacement > MaxAllowedDisplacement)
    return MaxAllowedDisplacement;
  if (Displacement < MinAllowedDisplacement)
    return MinAllowedDisplacement;
  return Displacement;
}
111 
112 void CheckDisplacementBounds(int64_t Displacement) {
113  assert(Displacement >= MinAllowedDisplacement &&
114  Displacement <= MaxAllowedDisplacement);
115 }
116 
117 bool IsStackReg(unsigned Reg) { return Reg == X86::RSP || Reg == X86::ESP; }
118 
119 bool IsSmallMemAccess(unsigned AccessSize) { return AccessSize < 8; }
120 
121 std::string FuncName(unsigned AccessSize, bool IsWrite) {
122  return std::string("__asan_report_") + (IsWrite ? "store" : "load") +
123  utostr(AccessSize);
124 }
125 
126 class X86AddressSanitizer : public X86AsmInstrumentation {
127 public:
128  struct RegisterContext {
129  private:
130  enum RegOffset {
131  REG_OFFSET_ADDRESS = 0,
132  REG_OFFSET_SHADOW,
133  REG_OFFSET_SCRATCH
134  };
135 
136  public:
137  RegisterContext(unsigned AddressReg, unsigned ShadowReg,
138  unsigned ScratchReg) {
139  BusyRegs.push_back(convReg(AddressReg, MVT::i64));
140  BusyRegs.push_back(convReg(ShadowReg, MVT::i64));
141  BusyRegs.push_back(convReg(ScratchReg, MVT::i64));
142  }
143 
144  unsigned AddressReg(MVT::SimpleValueType VT) const {
145  return convReg(BusyRegs[REG_OFFSET_ADDRESS], VT);
146  }
147 
148  unsigned ShadowReg(MVT::SimpleValueType VT) const {
149  return convReg(BusyRegs[REG_OFFSET_SHADOW], VT);
150  }
151 
152  unsigned ScratchReg(MVT::SimpleValueType VT) const {
153  return convReg(BusyRegs[REG_OFFSET_SCRATCH], VT);
154  }
155 
156  void AddBusyReg(unsigned Reg) {
157  if (Reg != X86::NoRegister)
158  BusyRegs.push_back(convReg(Reg, MVT::i64));
159  }
160 
161  void AddBusyRegs(const X86Operand &Op) {
162  AddBusyReg(Op.getMemBaseReg());
163  AddBusyReg(Op.getMemIndexReg());
164  }
165 
166  unsigned ChooseFrameReg(MVT::SimpleValueType VT) const {
167  static const MCPhysReg Candidates[] = { X86::RBP, X86::RAX, X86::RBX,
168  X86::RCX, X86::RDX, X86::RDI,
169  X86::RSI };
170  for (unsigned Reg : Candidates) {
171  if (!std::count(BusyRegs.begin(), BusyRegs.end(), Reg))
172  return convReg(Reg, VT);
173  }
174  return X86::NoRegister;
175  }
176 
177  private:
178  unsigned convReg(unsigned Reg, MVT::SimpleValueType VT) const {
179  return Reg == X86::NoRegister ? Reg : getX86SubSuperRegister(Reg, VT);
180  }
181 
182  std::vector<unsigned> BusyRegs;
183  };
184 
185  X86AddressSanitizer(const MCSubtargetInfo &STI)
186  : X86AsmInstrumentation(STI), RepPrefix(false), OrigSPOffset(0) {}
187 
188  virtual ~X86AddressSanitizer() {}
189 
190  // X86AsmInstrumentation implementation:
191  virtual void InstrumentAndEmitInstruction(const MCInst &Inst,
193  MCContext &Ctx,
194  const MCInstrInfo &MII,
195  MCStreamer &Out) override {
196  InstrumentMOVS(Inst, Operands, Ctx, MII, Out);
197  if (RepPrefix)
198  EmitInstruction(Out, MCInstBuilder(X86::REP_PREFIX));
199 
200  InstrumentMOV(Inst, Operands, Ctx, MII, Out);
201 
202  RepPrefix = (Inst.getOpcode() == X86::REP_PREFIX);
203  if (!RepPrefix)
204  EmitInstruction(Out, Inst);
205  }
206 
207  // Adjusts up stack and saves all registers used in instrumentation.
208  virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
209  MCContext &Ctx,
210  MCStreamer &Out) = 0;
211 
212  // Restores all registers used in instrumentation and adjusts stack.
213  virtual void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
214  MCContext &Ctx,
215  MCStreamer &Out) = 0;
216 
217  virtual void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
218  bool IsWrite,
219  const RegisterContext &RegCtx,
220  MCContext &Ctx, MCStreamer &Out) = 0;
221  virtual void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
222  bool IsWrite,
223  const RegisterContext &RegCtx,
224  MCContext &Ctx, MCStreamer &Out) = 0;
225 
226  virtual void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
227  MCStreamer &Out) = 0;
228 
229  void InstrumentMemOperand(X86Operand &Op, unsigned AccessSize, bool IsWrite,
230  const RegisterContext &RegCtx, MCContext &Ctx,
231  MCStreamer &Out);
232  void InstrumentMOVSBase(unsigned DstReg, unsigned SrcReg, unsigned CntReg,
233  unsigned AccessSize, MCContext &Ctx, MCStreamer &Out);
234 
235  void InstrumentMOVS(const MCInst &Inst, OperandVector &Operands,
236  MCContext &Ctx, const MCInstrInfo &MII, MCStreamer &Out);
237  void InstrumentMOV(const MCInst &Inst, OperandVector &Operands,
238  MCContext &Ctx, const MCInstrInfo &MII, MCStreamer &Out);
239 
240 protected:
241  void EmitLabel(MCStreamer &Out, MCSymbol *Label) { Out.EmitLabel(Label); }
242 
243  void EmitLEA(X86Operand &Op, MVT::SimpleValueType VT, unsigned Reg,
244  MCStreamer &Out) {
245  assert(VT == MVT::i32 || VT == MVT::i64);
246  MCInst Inst;
247  Inst.setOpcode(VT == MVT::i32 ? X86::LEA32r : X86::LEA64r);
248  Inst.addOperand(MCOperand::createReg(getX86SubSuperRegister(Reg, VT)));
249  Op.addMemOperands(Inst, 5);
250  EmitInstruction(Out, Inst);
251  }
252 
253  void ComputeMemOperandAddress(X86Operand &Op, MVT::SimpleValueType VT,
254  unsigned Reg, MCContext &Ctx, MCStreamer &Out);
255 
256  // Creates new memory operand with Displacement added to an original
257  // displacement. Residue will contain a residue which could happen when the
258  // total displacement exceeds 32-bit limitation.
259  std::unique_ptr<X86Operand> AddDisplacement(X86Operand &Op,
260  int64_t Displacement,
261  MCContext &Ctx, int64_t *Residue);
262 
263  bool is64BitMode() const {
264  return STI.getFeatureBits()[X86::Mode64Bit];
265  }
266  bool is32BitMode() const {
267  return STI.getFeatureBits()[X86::Mode32Bit];
268  }
269  bool is16BitMode() const {
270  return STI.getFeatureBits()[X86::Mode16Bit];
271  }
272 
273  unsigned getPointerWidth() {
274  if (is16BitMode()) return 16;
275  if (is32BitMode()) return 32;
276  if (is64BitMode()) return 64;
277  llvm_unreachable("invalid mode");
278  }
279 
280  // True when previous instruction was actually REP prefix.
281  bool RepPrefix;
282 
283  // Offset from the original SP register.
284  int64_t OrigSPOffset;
285 };
286 
287 void X86AddressSanitizer::InstrumentMemOperand(
288  X86Operand &Op, unsigned AccessSize, bool IsWrite,
289  const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
290  assert(Op.isMem() && "Op should be a memory operand.");
291  assert((AccessSize & (AccessSize - 1)) == 0 && AccessSize <= 16 &&
292  "AccessSize should be a power of two, less or equal than 16.");
293  // FIXME: take into account load/store alignment.
294  if (IsSmallMemAccess(AccessSize))
295  InstrumentMemOperandSmall(Op, AccessSize, IsWrite, RegCtx, Ctx, Out);
296  else
297  InstrumentMemOperandLarge(Op, AccessSize, IsWrite, RegCtx, Ctx, Out);
298 }
299 
300 void X86AddressSanitizer::InstrumentMOVSBase(unsigned DstReg, unsigned SrcReg,
301  unsigned CntReg,
302  unsigned AccessSize,
303  MCContext &Ctx, MCStreamer &Out) {
304  // FIXME: check whole ranges [DstReg .. DstReg + AccessSize * (CntReg - 1)]
305  // and [SrcReg .. SrcReg + AccessSize * (CntReg - 1)].
306  RegisterContext RegCtx(X86::RDX /* AddressReg */, X86::RAX /* ShadowReg */,
307  IsSmallMemAccess(AccessSize)
308  ? X86::RBX
309  : X86::NoRegister /* ScratchReg */);
310  RegCtx.AddBusyReg(DstReg);
311  RegCtx.AddBusyReg(SrcReg);
312  RegCtx.AddBusyReg(CntReg);
313 
314  InstrumentMemOperandPrologue(RegCtx, Ctx, Out);
315 
316  // Test (%SrcReg)
317  {
318  const MCExpr *Disp = MCConstantExpr::create(0, Ctx);
319  std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
320  getPointerWidth(), 0, Disp, SrcReg, 0, AccessSize, SMLoc(), SMLoc()));
321  InstrumentMemOperand(*Op, AccessSize, false /* IsWrite */, RegCtx, Ctx,
322  Out);
323  }
324 
325  // Test -1(%SrcReg, %CntReg, AccessSize)
326  {
327  const MCExpr *Disp = MCConstantExpr::create(-1, Ctx);
328  std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
329  getPointerWidth(), 0, Disp, SrcReg, CntReg, AccessSize, SMLoc(),
330  SMLoc()));
331  InstrumentMemOperand(*Op, AccessSize, false /* IsWrite */, RegCtx, Ctx,
332  Out);
333  }
334 
335  // Test (%DstReg)
336  {
337  const MCExpr *Disp = MCConstantExpr::create(0, Ctx);
338  std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
339  getPointerWidth(), 0, Disp, DstReg, 0, AccessSize, SMLoc(), SMLoc()));
340  InstrumentMemOperand(*Op, AccessSize, true /* IsWrite */, RegCtx, Ctx, Out);
341  }
342 
343  // Test -1(%DstReg, %CntReg, AccessSize)
344  {
345  const MCExpr *Disp = MCConstantExpr::create(-1, Ctx);
346  std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
347  getPointerWidth(), 0, Disp, DstReg, CntReg, AccessSize, SMLoc(),
348  SMLoc()));
349  InstrumentMemOperand(*Op, AccessSize, true /* IsWrite */, RegCtx, Ctx, Out);
350  }
351 
352  InstrumentMemOperandEpilogue(RegCtx, Ctx, Out);
353 }
354 
355 void X86AddressSanitizer::InstrumentMOVS(const MCInst &Inst,
356  OperandVector &Operands,
357  MCContext &Ctx, const MCInstrInfo &MII,
358  MCStreamer &Out) {
359  // Access size in bytes.
360  unsigned AccessSize = 0;
361 
362  switch (Inst.getOpcode()) {
363  case X86::MOVSB:
364  AccessSize = 1;
365  break;
366  case X86::MOVSW:
367  AccessSize = 2;
368  break;
369  case X86::MOVSL:
370  AccessSize = 4;
371  break;
372  case X86::MOVSQ:
373  AccessSize = 8;
374  break;
375  default:
376  return;
377  }
378 
379  InstrumentMOVSImpl(AccessSize, Ctx, Out);
380 }
381 
382 void X86AddressSanitizer::InstrumentMOV(const MCInst &Inst,
383  OperandVector &Operands, MCContext &Ctx,
384  const MCInstrInfo &MII,
385  MCStreamer &Out) {
386  // Access size in bytes.
387  unsigned AccessSize = 0;
388 
389  switch (Inst.getOpcode()) {
390  case X86::MOV8mi:
391  case X86::MOV8mr:
392  case X86::MOV8rm:
393  AccessSize = 1;
394  break;
395  case X86::MOV16mi:
396  case X86::MOV16mr:
397  case X86::MOV16rm:
398  AccessSize = 2;
399  break;
400  case X86::MOV32mi:
401  case X86::MOV32mr:
402  case X86::MOV32rm:
403  AccessSize = 4;
404  break;
405  case X86::MOV64mi32:
406  case X86::MOV64mr:
407  case X86::MOV64rm:
408  AccessSize = 8;
409  break;
410  case X86::MOVAPDmr:
411  case X86::MOVAPSmr:
412  case X86::MOVAPDrm:
413  case X86::MOVAPSrm:
414  AccessSize = 16;
415  break;
416  default:
417  return;
418  }
419 
420  const bool IsWrite = MII.get(Inst.getOpcode()).mayStore();
421 
422  for (unsigned Ix = 0; Ix < Operands.size(); ++Ix) {
423  assert(Operands[Ix]);
424  MCParsedAsmOperand &Op = *Operands[Ix];
425  if (Op.isMem()) {
426  X86Operand &MemOp = static_cast<X86Operand &>(Op);
427  RegisterContext RegCtx(
428  X86::RDI /* AddressReg */, X86::RAX /* ShadowReg */,
429  IsSmallMemAccess(AccessSize) ? X86::RCX
430  : X86::NoRegister /* ScratchReg */);
431  RegCtx.AddBusyRegs(MemOp);
432  InstrumentMemOperandPrologue(RegCtx, Ctx, Out);
433  InstrumentMemOperand(MemOp, AccessSize, IsWrite, RegCtx, Ctx, Out);
434  InstrumentMemOperandEpilogue(RegCtx, Ctx, Out);
435  }
436  }
437 }
438 
439 void X86AddressSanitizer::ComputeMemOperandAddress(X86Operand &Op,
441  unsigned Reg, MCContext &Ctx,
442  MCStreamer &Out) {
443  int64_t Displacement = 0;
444  if (IsStackReg(Op.getMemBaseReg()))
445  Displacement -= OrigSPOffset;
446  if (IsStackReg(Op.getMemIndexReg()))
447  Displacement -= OrigSPOffset * Op.getMemScale();
448 
449  assert(Displacement >= 0);
450 
451  // Emit Op as is.
452  if (Displacement == 0) {
453  EmitLEA(Op, VT, Reg, Out);
454  return;
455  }
456 
457  int64_t Residue;
458  std::unique_ptr<X86Operand> NewOp =
459  AddDisplacement(Op, Displacement, Ctx, &Residue);
460  EmitLEA(*NewOp, VT, Reg, Out);
461 
462  while (Residue != 0) {
463  const MCConstantExpr *Disp =
464  MCConstantExpr::create(ApplyDisplacementBounds(Residue), Ctx);
465  std::unique_ptr<X86Operand> DispOp =
466  X86Operand::CreateMem(getPointerWidth(), 0, Disp, Reg, 0, 1, SMLoc(),
467  SMLoc());
468  EmitLEA(*DispOp, VT, Reg, Out);
469  Residue -= Disp->getValue();
470  }
471 }
472 
473 std::unique_ptr<X86Operand>
474 X86AddressSanitizer::AddDisplacement(X86Operand &Op, int64_t Displacement,
475  MCContext &Ctx, int64_t *Residue) {
476  assert(Displacement >= 0);
477 
478  if (Displacement == 0 ||
479  (Op.getMemDisp() && Op.getMemDisp()->getKind() != MCExpr::Constant)) {
480  *Residue = Displacement;
481  return X86Operand::CreateMem(Op.getMemModeSize(), Op.getMemSegReg(),
482  Op.getMemDisp(), Op.getMemBaseReg(),
483  Op.getMemIndexReg(), Op.getMemScale(),
484  SMLoc(), SMLoc());
485  }
486 
487  int64_t OrigDisplacement =
488  static_cast<const MCConstantExpr *>(Op.getMemDisp())->getValue();
489  CheckDisplacementBounds(OrigDisplacement);
490  Displacement += OrigDisplacement;
491 
492  int64_t NewDisplacement = ApplyDisplacementBounds(Displacement);
493  CheckDisplacementBounds(NewDisplacement);
494 
495  *Residue = Displacement - NewDisplacement;
496  const MCExpr *Disp = MCConstantExpr::create(NewDisplacement, Ctx);
497  return X86Operand::CreateMem(Op.getMemModeSize(), Op.getMemSegReg(), Disp,
498  Op.getMemBaseReg(), Op.getMemIndexReg(),
499  Op.getMemScale(), SMLoc(), SMLoc());
500 }
501 
502 class X86AddressSanitizer32 : public X86AddressSanitizer {
503 public:
504  static const long kShadowOffset = 0x20000000;
505 
506  X86AddressSanitizer32(const MCSubtargetInfo &STI)
507  : X86AddressSanitizer(STI) {}
508 
509  virtual ~X86AddressSanitizer32() {}
510 
511  unsigned GetFrameReg(const MCContext &Ctx, MCStreamer &Out) {
512  unsigned FrameReg = GetFrameRegGeneric(Ctx, Out);
513  if (FrameReg == X86::NoRegister)
514  return FrameReg;
515  return getX86SubSuperRegister(FrameReg, MVT::i32);
516  }
517 
518  void SpillReg(MCStreamer &Out, unsigned Reg) {
519  EmitInstruction(Out, MCInstBuilder(X86::PUSH32r).addReg(Reg));
520  OrigSPOffset -= 4;
521  }
522 
523  void RestoreReg(MCStreamer &Out, unsigned Reg) {
524  EmitInstruction(Out, MCInstBuilder(X86::POP32r).addReg(Reg));
525  OrigSPOffset += 4;
526  }
527 
528  void StoreFlags(MCStreamer &Out) {
529  EmitInstruction(Out, MCInstBuilder(X86::PUSHF32));
530  OrigSPOffset -= 4;
531  }
532 
533  void RestoreFlags(MCStreamer &Out) {
534  EmitInstruction(Out, MCInstBuilder(X86::POPF32));
535  OrigSPOffset += 4;
536  }
537 
538  virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
539  MCContext &Ctx,
540  MCStreamer &Out) override {
541  unsigned LocalFrameReg = RegCtx.ChooseFrameReg(MVT::i32);
542  assert(LocalFrameReg != X86::NoRegister);
543 
544  const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
545  unsigned FrameReg = GetFrameReg(Ctx, Out);
546  if (MRI && FrameReg != X86::NoRegister) {
547  SpillReg(Out, LocalFrameReg);
548  if (FrameReg == X86::ESP) {
549  Out.EmitCFIAdjustCfaOffset(4 /* byte size of the LocalFrameReg */);
550  Out.EmitCFIRelOffset(
551  MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */), 0);
552  }
553  EmitInstruction(
554  Out,
555  MCInstBuilder(X86::MOV32rr).addReg(LocalFrameReg).addReg(FrameReg));
556  Out.EmitCFIRememberState();
557  Out.EmitCFIDefCfaRegister(
558  MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */));
559  }
560 
561  SpillReg(Out, RegCtx.AddressReg(MVT::i32));
562  SpillReg(Out, RegCtx.ShadowReg(MVT::i32));
563  if (RegCtx.ScratchReg(MVT::i32) != X86::NoRegister)
564  SpillReg(Out, RegCtx.ScratchReg(MVT::i32));
565  StoreFlags(Out);
566  }
567 
568  virtual void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
569  MCContext &Ctx,
570  MCStreamer &Out) override {
571  unsigned LocalFrameReg = RegCtx.ChooseFrameReg(MVT::i32);
572  assert(LocalFrameReg != X86::NoRegister);
573 
574  RestoreFlags(Out);
575  if (RegCtx.ScratchReg(MVT::i32) != X86::NoRegister)
576  RestoreReg(Out, RegCtx.ScratchReg(MVT::i32));
577  RestoreReg(Out, RegCtx.ShadowReg(MVT::i32));
578  RestoreReg(Out, RegCtx.AddressReg(MVT::i32));
579 
580  unsigned FrameReg = GetFrameReg(Ctx, Out);
581  if (Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) {
582  RestoreReg(Out, LocalFrameReg);
583  Out.EmitCFIRestoreState();
584  if (FrameReg == X86::ESP)
585  Out.EmitCFIAdjustCfaOffset(-4 /* byte size of the LocalFrameReg */);
586  }
587  }
588 
589  virtual void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
590  bool IsWrite,
591  const RegisterContext &RegCtx,
592  MCContext &Ctx,
593  MCStreamer &Out) override;
594  virtual void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
595  bool IsWrite,
596  const RegisterContext &RegCtx,
597  MCContext &Ctx,
598  MCStreamer &Out) override;
599  virtual void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
600  MCStreamer &Out) override;
601 
602 private:
603  void EmitCallAsanReport(unsigned AccessSize, bool IsWrite, MCContext &Ctx,
604  MCStreamer &Out, const RegisterContext &RegCtx) {
605  EmitInstruction(Out, MCInstBuilder(X86::CLD));
606  EmitInstruction(Out, MCInstBuilder(X86::MMX_EMMS));
607 
608  EmitInstruction(Out, MCInstBuilder(X86::AND64ri8)
609  .addReg(X86::ESP)
610  .addReg(X86::ESP)
611  .addImm(-16));
612  EmitInstruction(
613  Out, MCInstBuilder(X86::PUSH32r).addReg(RegCtx.AddressReg(MVT::i32)));
614 
615  const std::string &Fn = FuncName(AccessSize, IsWrite);
616  MCSymbol *FnSym = Ctx.getOrCreateSymbol(StringRef(Fn));
617  const MCSymbolRefExpr *FnExpr =
619  EmitInstruction(Out, MCInstBuilder(X86::CALLpcrel32).addExpr(FnExpr));
620  }
621 };
622 
// Emits the ASan check for a 1-, 2- or 4-byte access at Op.
//
// The shadow byte at (Addr >> 3) + kShadowOffset is loaded; zero means the
// whole 8-byte granule is addressable and the check is done. Per the ASan
// algorithm referenced in the file header, a non-zero shadow value K means
// only the first K bytes of the granule are addressable, so the offset of
// the access' last byte within the granule is compared against K and the
// report callback is invoked unless it is strictly smaller.
void X86AddressSanitizer32::InstrumentMemOperandSmall(
    X86Operand &Op, unsigned AccessSize, bool IsWrite,
    const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
  unsigned AddressRegI32 = RegCtx.AddressReg(MVT::i32);
  unsigned ShadowRegI32 = RegCtx.ShadowReg(MVT::i32);
  unsigned ShadowRegI8 = RegCtx.ShadowReg(MVT::i8);

  // Small accesses always carry a scratch register for the offset math.
  assert(RegCtx.ScratchReg(MVT::i32) != X86::NoRegister);
  unsigned ScratchRegI32 = RegCtx.ScratchReg(MVT::i32);

  // AddressRegI32 = address of the memory operand.
  ComputeMemOperandAddress(Op, MVT::i32, AddressRegI32, Ctx, Out);

  // ShadowRegI32 = AddressRegI32 >> 3.
  EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ShadowRegI32).addReg(
                           AddressRegI32));
  EmitInstruction(Out, MCInstBuilder(X86::SHR32ri)
                           .addReg(ShadowRegI32)
                           .addReg(ShadowRegI32)
                           .addImm(3));

  // ShadowRegI8 = shadow byte at kShadowOffset(ShadowRegI32).
  {
    MCInst Inst;
    Inst.setOpcode(X86::MOV8rm);
    Inst.addOperand(MCOperand::createReg(ShadowRegI8));
    const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
    std::unique_ptr<X86Operand> Op(
        X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI32, 0, 1,
                              SMLoc(), SMLoc()));
    Op->addMemOperands(Inst, 5);
    EmitInstruction(Out, Inst);
  }

  // Zero shadow: whole granule addressable, skip to DoneSym.
  EmitInstruction(
      Out, MCInstBuilder(X86::TEST8rr).addReg(ShadowRegI8).addReg(ShadowRegI8));
  MCSymbol *DoneSym = Ctx.createTempSymbol();
  const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));

  // ScratchRegI32 = AddressRegI32 & 7, the access' offset in its granule.
  EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ScratchRegI32).addReg(
                           AddressRegI32));
  EmitInstruction(Out, MCInstBuilder(X86::AND32ri)
                           .addReg(ScratchRegI32)
                           .addReg(ScratchRegI32)
                           .addImm(7));

  // Add AccessSize - 1, producing the offset of the last byte accessed.
  switch (AccessSize) {
  default: llvm_unreachable("Incorrect access size");
  case 1:
    break;
  case 2: {
    // LEA 1(%scratch) avoids clobbering EFLAGS, unlike ADD.
    const MCExpr *Disp = MCConstantExpr::create(1, Ctx);
    std::unique_ptr<X86Operand> Op(
        X86Operand::CreateMem(getPointerWidth(), 0, Disp, ScratchRegI32, 0, 1,
                              SMLoc(), SMLoc()));
    EmitLEA(*Op, MVT::i32, ScratchRegI32, Out);
    break;
  }
  case 4:
    EmitInstruction(Out, MCInstBuilder(X86::ADD32ri8)
                             .addReg(ScratchRegI32)
                             .addReg(ScratchRegI32)
                             .addImm(3));
    break;
  }

  // Signed compare of last-byte offset against the sign-extended shadow
  // value; the access is fine when offset < shadow (JL to DoneSym).
  EmitInstruction(
      Out,
      MCInstBuilder(X86::MOVSX32rr8).addReg(ShadowRegI32).addReg(ShadowRegI8));
  EmitInstruction(Out, MCInstBuilder(X86::CMP32rr).addReg(ScratchRegI32).addReg(
                           ShadowRegI32));
  EmitInstruction(Out, MCInstBuilder(X86::JL_1).addExpr(DoneExpr));

  EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
  EmitLabel(Out, DoneSym);
}
697 
// Emits the ASan check for an 8- or 16-byte access at Op. Whole-granule
// accesses are valid iff the corresponding shadow byte(s) are zero, so a
// single memory compare against zero suffices — no scratch register.
void X86AddressSanitizer32::InstrumentMemOperandLarge(
    X86Operand &Op, unsigned AccessSize, bool IsWrite,
    const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
  unsigned AddressRegI32 = RegCtx.AddressReg(MVT::i32);
  unsigned ShadowRegI32 = RegCtx.ShadowReg(MVT::i32);

  // AddressRegI32 = address of the memory operand.
  ComputeMemOperandAddress(Op, MVT::i32, AddressRegI32, Ctx, Out);

  // ShadowRegI32 = AddressRegI32 >> 3.
  EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ShadowRegI32).addReg(
                           AddressRegI32));
  EmitInstruction(Out, MCInstBuilder(X86::SHR32ri)
                           .addReg(ShadowRegI32)
                           .addReg(ShadowRegI32)
                           .addImm(3));
  {
    MCInst Inst;
    // An 8-byte access maps to one shadow byte, a 16-byte access to two.
    switch (AccessSize) {
    default: llvm_unreachable("Incorrect access size");
    case 8:
      Inst.setOpcode(X86::CMP8mi);
      break;
    case 16:
      Inst.setOpcode(X86::CMP16mi);
      break;
    }
    const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
    std::unique_ptr<X86Operand> Op(
        X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI32, 0, 1,
                              SMLoc(), SMLoc()));
    Op->addMemOperands(Inst, 5);
    Inst.addOperand(MCOperand::createImm(0));
    EmitInstruction(Out, Inst);
  }
  // Zero shadow: the access is valid; otherwise fall through to the
  // report call.
  MCSymbol *DoneSym = Ctx.createTempSymbol();
  const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));

  EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
  EmitLabel(Out, DoneSym);
}
738 
739 void X86AddressSanitizer32::InstrumentMOVSImpl(unsigned AccessSize,
740  MCContext &Ctx,
741  MCStreamer &Out) {
742  StoreFlags(Out);
743 
744  // No need to test when ECX is equals to zero.
745  MCSymbol *DoneSym = Ctx.createTempSymbol();
746  const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
747  EmitInstruction(
748  Out, MCInstBuilder(X86::TEST32rr).addReg(X86::ECX).addReg(X86::ECX));
749  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
750 
751  // Instrument first and last elements in src and dst range.
752  InstrumentMOVSBase(X86::EDI /* DstReg */, X86::ESI /* SrcReg */,
753  X86::ECX /* CntReg */, AccessSize, Ctx, Out);
754 
755  EmitLabel(Out, DoneSym);
756  RestoreFlags(Out);
757 }
758 
759 class X86AddressSanitizer64 : public X86AddressSanitizer {
760 public:
761  static const long kShadowOffset = 0x7fff8000;
762 
763  X86AddressSanitizer64(const MCSubtargetInfo &STI)
764  : X86AddressSanitizer(STI) {}
765 
766  virtual ~X86AddressSanitizer64() {}
767 
768  unsigned GetFrameReg(const MCContext &Ctx, MCStreamer &Out) {
769  unsigned FrameReg = GetFrameRegGeneric(Ctx, Out);
770  if (FrameReg == X86::NoRegister)
771  return FrameReg;
772  return getX86SubSuperRegister(FrameReg, MVT::i64);
773  }
774 
775  void SpillReg(MCStreamer &Out, unsigned Reg) {
776  EmitInstruction(Out, MCInstBuilder(X86::PUSH64r).addReg(Reg));
777  OrigSPOffset -= 8;
778  }
779 
780  void RestoreReg(MCStreamer &Out, unsigned Reg) {
781  EmitInstruction(Out, MCInstBuilder(X86::POP64r).addReg(Reg));
782  OrigSPOffset += 8;
783  }
784 
785  void StoreFlags(MCStreamer &Out) {
786  EmitInstruction(Out, MCInstBuilder(X86::PUSHF64));
787  OrigSPOffset -= 8;
788  }
789 
790  void RestoreFlags(MCStreamer &Out) {
791  EmitInstruction(Out, MCInstBuilder(X86::POPF64));
792  OrigSPOffset += 8;
793  }
794 
795  virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
796  MCContext &Ctx,
797  MCStreamer &Out) override {
798  unsigned LocalFrameReg = RegCtx.ChooseFrameReg(MVT::i64);
799  assert(LocalFrameReg != X86::NoRegister);
800 
801  const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
802  unsigned FrameReg = GetFrameReg(Ctx, Out);
803  if (MRI && FrameReg != X86::NoRegister) {
804  SpillReg(Out, X86::RBP);
805  if (FrameReg == X86::RSP) {
806  Out.EmitCFIAdjustCfaOffset(8 /* byte size of the LocalFrameReg */);
807  Out.EmitCFIRelOffset(
808  MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */), 0);
809  }
810  EmitInstruction(
811  Out,
812  MCInstBuilder(X86::MOV64rr).addReg(LocalFrameReg).addReg(FrameReg));
813  Out.EmitCFIRememberState();
814  Out.EmitCFIDefCfaRegister(
815  MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */));
816  }
817 
818  EmitAdjustRSP(Ctx, Out, -128);
819  SpillReg(Out, RegCtx.ShadowReg(MVT::i64));
820  SpillReg(Out, RegCtx.AddressReg(MVT::i64));
821  if (RegCtx.ScratchReg(MVT::i64) != X86::NoRegister)
822  SpillReg(Out, RegCtx.ScratchReg(MVT::i64));
823  StoreFlags(Out);
824  }
825 
826  virtual void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
827  MCContext &Ctx,
828  MCStreamer &Out) override {
829  unsigned LocalFrameReg = RegCtx.ChooseFrameReg(MVT::i64);
830  assert(LocalFrameReg != X86::NoRegister);
831 
832  RestoreFlags(Out);
833  if (RegCtx.ScratchReg(MVT::i64) != X86::NoRegister)
834  RestoreReg(Out, RegCtx.ScratchReg(MVT::i64));
835  RestoreReg(Out, RegCtx.AddressReg(MVT::i64));
836  RestoreReg(Out, RegCtx.ShadowReg(MVT::i64));
837  EmitAdjustRSP(Ctx, Out, 128);
838 
839  unsigned FrameReg = GetFrameReg(Ctx, Out);
840  if (Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) {
841  RestoreReg(Out, LocalFrameReg);
842  Out.EmitCFIRestoreState();
843  if (FrameReg == X86::RSP)
844  Out.EmitCFIAdjustCfaOffset(-8 /* byte size of the LocalFrameReg */);
845  }
846  }
847 
848  virtual void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
849  bool IsWrite,
850  const RegisterContext &RegCtx,
851  MCContext &Ctx,
852  MCStreamer &Out) override;
853  virtual void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
854  bool IsWrite,
855  const RegisterContext &RegCtx,
856  MCContext &Ctx,
857  MCStreamer &Out) override;
858  virtual void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
859  MCStreamer &Out) override;
860 
861 private:
862  void EmitAdjustRSP(MCContext &Ctx, MCStreamer &Out, long Offset) {
863  const MCExpr *Disp = MCConstantExpr::create(Offset, Ctx);
864  std::unique_ptr<X86Operand> Op(
865  X86Operand::CreateMem(getPointerWidth(), 0, Disp, X86::RSP, 0, 1,
866  SMLoc(), SMLoc()));
867  EmitLEA(*Op, MVT::i64, X86::RSP, Out);
868  OrigSPOffset += Offset;
869  }
870 
871  void EmitCallAsanReport(unsigned AccessSize, bool IsWrite, MCContext &Ctx,
872  MCStreamer &Out, const RegisterContext &RegCtx) {
873  EmitInstruction(Out, MCInstBuilder(X86::CLD));
874  EmitInstruction(Out, MCInstBuilder(X86::MMX_EMMS));
875 
876  EmitInstruction(Out, MCInstBuilder(X86::AND64ri8)
877  .addReg(X86::RSP)
878  .addReg(X86::RSP)
879  .addImm(-16));
880 
881  if (RegCtx.AddressReg(MVT::i64) != X86::RDI) {
882  EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(X86::RDI).addReg(
883  RegCtx.AddressReg(MVT::i64)));
884  }
885  const std::string &Fn = FuncName(AccessSize, IsWrite);
886  MCSymbol *FnSym = Ctx.getOrCreateSymbol(StringRef(Fn));
887  const MCSymbolRefExpr *FnExpr =
889  EmitInstruction(Out, MCInstBuilder(X86::CALL64pcrel32).addExpr(FnExpr));
890  }
891 };
892 
893 void X86AddressSanitizer64::InstrumentMemOperandSmall(
894  X86Operand &Op, unsigned AccessSize, bool IsWrite,
895  const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
896  unsigned AddressRegI64 = RegCtx.AddressReg(MVT::i64);
897  unsigned AddressRegI32 = RegCtx.AddressReg(MVT::i32);
898  unsigned ShadowRegI64 = RegCtx.ShadowReg(MVT::i64);
899  unsigned ShadowRegI32 = RegCtx.ShadowReg(MVT::i32);
900  unsigned ShadowRegI8 = RegCtx.ShadowReg(MVT::i8);
901 
902  assert(RegCtx.ScratchReg(MVT::i32) != X86::NoRegister);
903  unsigned ScratchRegI32 = RegCtx.ScratchReg(MVT::i32);
904 
905  ComputeMemOperandAddress(Op, MVT::i64, AddressRegI64, Ctx, Out);
906 
907  EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(ShadowRegI64).addReg(
908  AddressRegI64));
909  EmitInstruction(Out, MCInstBuilder(X86::SHR64ri)
910  .addReg(ShadowRegI64)
911  .addReg(ShadowRegI64)
912  .addImm(3));
913  {
914  MCInst Inst;
915  Inst.setOpcode(X86::MOV8rm);
916  Inst.addOperand(MCOperand::createReg(ShadowRegI8));
917  const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
918  std::unique_ptr<X86Operand> Op(
919  X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI64, 0, 1,
920  SMLoc(), SMLoc()));
921  Op->addMemOperands(Inst, 5);
922  EmitInstruction(Out, Inst);
923  }
924 
925  EmitInstruction(
926  Out, MCInstBuilder(X86::TEST8rr).addReg(ShadowRegI8).addReg(ShadowRegI8));
927  MCSymbol *DoneSym = Ctx.createTempSymbol();
928  const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
929  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
930 
931  EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ScratchRegI32).addReg(
932  AddressRegI32));
933  EmitInstruction(Out, MCInstBuilder(X86::AND32ri)
934  .addReg(ScratchRegI32)
935  .addReg(ScratchRegI32)
936  .addImm(7));
937 
938  switch (AccessSize) {
939  default: llvm_unreachable("Incorrect access size");
940  case 1:
941  break;
942  case 2: {
943  const MCExpr *Disp = MCConstantExpr::create(1, Ctx);
944  std::unique_ptr<X86Operand> Op(
945  X86Operand::CreateMem(getPointerWidth(), 0, Disp, ScratchRegI32, 0, 1,
946  SMLoc(), SMLoc()));
947  EmitLEA(*Op, MVT::i32, ScratchRegI32, Out);
948  break;
949  }
950  case 4:
951  EmitInstruction(Out, MCInstBuilder(X86::ADD32ri8)
952  .addReg(ScratchRegI32)
953  .addReg(ScratchRegI32)
954  .addImm(3));
955  break;
956  }
957 
958  EmitInstruction(
959  Out,
960  MCInstBuilder(X86::MOVSX32rr8).addReg(ShadowRegI32).addReg(ShadowRegI8));
961  EmitInstruction(Out, MCInstBuilder(X86::CMP32rr).addReg(ScratchRegI32).addReg(
962  ShadowRegI32));
963  EmitInstruction(Out, MCInstBuilder(X86::JL_1).addExpr(DoneExpr));
964 
965  EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
966  EmitLabel(Out, DoneSym);
967 }
968 
969 void X86AddressSanitizer64::InstrumentMemOperandLarge(
970  X86Operand &Op, unsigned AccessSize, bool IsWrite,
971  const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
972  unsigned AddressRegI64 = RegCtx.AddressReg(MVT::i64);
973  unsigned ShadowRegI64 = RegCtx.ShadowReg(MVT::i64);
974 
975  ComputeMemOperandAddress(Op, MVT::i64, AddressRegI64, Ctx, Out);
976 
977  EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(ShadowRegI64).addReg(
978  AddressRegI64));
979  EmitInstruction(Out, MCInstBuilder(X86::SHR64ri)
980  .addReg(ShadowRegI64)
981  .addReg(ShadowRegI64)
982  .addImm(3));
983  {
984  MCInst Inst;
985  switch (AccessSize) {
986  default: llvm_unreachable("Incorrect access size");
987  case 8:
988  Inst.setOpcode(X86::CMP8mi);
989  break;
990  case 16:
991  Inst.setOpcode(X86::CMP16mi);
992  break;
993  }
994  const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
995  std::unique_ptr<X86Operand> Op(
996  X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI64, 0, 1,
997  SMLoc(), SMLoc()));
998  Op->addMemOperands(Inst, 5);
999  Inst.addOperand(MCOperand::createImm(0));
1000  EmitInstruction(Out, Inst);
1001  }
1002 
1003  MCSymbol *DoneSym = Ctx.createTempSymbol();
1004  const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
1005  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
1006 
1007  EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
1008  EmitLabel(Out, DoneSym);
1009 }
1010 
1011 void X86AddressSanitizer64::InstrumentMOVSImpl(unsigned AccessSize,
1012  MCContext &Ctx,
1013  MCStreamer &Out) {
1014  StoreFlags(Out);
1015 
1016  // No need to test when RCX is equals to zero.
1017  MCSymbol *DoneSym = Ctx.createTempSymbol();
1018  const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
1019  EmitInstruction(
1020  Out, MCInstBuilder(X86::TEST64rr).addReg(X86::RCX).addReg(X86::RCX));
1021  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));
1022 
1023  // Instrument first and last elements in src and dst range.
1024  InstrumentMOVSBase(X86::RDI /* DstReg */, X86::RSI /* SrcReg */,
1025  X86::RCX /* CntReg */, AccessSize, Ctx, Out);
1026 
1027  EmitLabel(Out, DoneSym);
1028  RestoreFlags(Out);
1029 }
1030 
1031 } // End anonymous namespace
1032 
// NOTE(review): the doxygen extraction dropped several signature lines in
// this region (original source lines 1033, 1036, 1038 and 1044). Per the
// cross-reference index, the definitions below are, in order:
//   X86AsmInstrumentation::X86AsmInstrumentation(const MCSubtargetInfo &STI)
//   X86AsmInstrumentation::~X86AsmInstrumentation()
//   X86AsmInstrumentation::InstrumentAndEmitInstruction(...)
//   X86AsmInstrumentation::EmitInstruction(MCStreamer &Out, const MCInst &)
// Confirm against upstream LLVM 3.7 before relying on this listing.

// Constructor: stores the subtarget and starts with no explicit frame reg.
1034  : STI(STI), InitialFrameReg(0) {}
1035 
// Destructor (defaulted/empty body; signature line lost in extraction).
1037 
// Base-class implementation: no instrumentation, just emit the instruction.
1039  const MCInst &Inst, OperandVector &Operands, MCContext &Ctx,
1040  const MCInstrInfo &MII, MCStreamer &Out) {
1041  EmitInstruction(Out, Inst);
1042 }
1043 
// Thin wrapper that forwards to the streamer with the stored subtarget.
1045  const MCInst &Inst) {
1046  Out.EmitInstruction(Inst, STI);
1047 }
1048 
// GetFrameRegGeneric — returns the LLVM register holding the current CFA
// for the active DWARF frame, or X86::NoRegister when there is no open
// frame or no register info. An explicitly-set InitialFrameReg (used when
// instrumenting a MachineFunction) takes precedence.
// NOTE(review): the extraction dropped original line 1049 (the signature;
// per the index: unsigned X86AsmInstrumentation::GetFrameRegGeneric(
// const MCContext &Ctx, MCStreamer &Out)) — confirm against upstream.
1050  MCStreamer &Out) {
1051  if (!Out.getNumFrameInfos()) // No active dwarf frame
1052  return X86::NoRegister;
1053  const MCDwarfFrameInfo &Frame = Out.getDwarfFrameInfos().back();
1054  if (Frame.End) // Active dwarf frame is closed
1055  return X86::NoRegister;
1056  const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
1057  if (!MRI) // No register info
1058  return X86::NoRegister;
1059 
1060  if (InitialFrameReg) {
1061  // FrameReg is set explicitly, we're instrumenting a MachineFunction.
1062  return InitialFrameReg;
1063  }
1064 
// Map the DWARF CFA register number back to an LLVM register (EH numbering).
1065  return MRI->getLLVMRegNum(Frame.CurrentCfaRegister, true /* IsEH */);
1066 }
1067 
// Factory: returns an ASan-instrumenting subclass when (a) the
// -asan-instrument-assembly flag is on, (b) the target OS is Linux (the
// only one with compiler-rt support here), and (c) SanitizeAddress is
// requested; otherwise returns the pass-through base class.
// NOTE(review): the extraction dropped original lines 1068-1069 (the
// signature; per the index: X86AsmInstrumentation *
// CreateX86AsmInstrumentation(const MCTargetOptions &MCOptions,
// const MCContext &Ctx, const MCSubtargetInfo &STI)) — confirm upstream.
1070  const MCContext &Ctx, const MCSubtargetInfo &STI) {
1071  Triple T(STI.getTargetTriple());
1072  const bool hasCompilerRTSupport = T.isOSLinux();
1073  if (ClAsanInstrumentAssembly && hasCompilerRTSupport &&
1074  MCOptions.SanitizeAddress) {
// Pick the 32- or 64-bit instrumenter from the subtarget mode bits.
1075  if (STI.getFeatureBits()[X86::Mode32Bit] != 0)
1076  return new X86AddressSanitizer32(STI);
1077  if (STI.getFeatureBits()[X86::Mode64Bit] != 0)
1078  return new X86AddressSanitizer64(STI);
1079  }
// Default: no instrumentation; caller takes ownership of the object.
1080  return new X86AsmInstrumentation(STI);
1081 }
1082 
1083 } // End llvm namespace
X86AsmInstrumentation * CreateX86AsmInstrumentation(const MCTargetOptions &MCOptions, const MCContext &Ctx, const MCSubtargetInfo &STI)
X86AsmInstrumentation(const MCSubtargetInfo &STI)
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
Definition: MCExpr.h:315
bool SanitizeAddress
Enables AddressSanitizer instrumentation at machine level.
static std::unique_ptr< X86Operand > CreateMem(unsigned ModeSize, const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc, unsigned Size=0, StringRef SymName=StringRef(), void *OpDecl=nullptr)
Create an absolute memory operand.
Definition: X86Operand.h:496
unsigned CurrentCfaRegister
Definition: MCDwarf.h:481
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily all virtual registers.
const MCSubtargetInfo & STI
ArrayRef< MCDwarfFrameInfo > getDwarfFrameInfos() const
Definition: MCStreamer.h:217
virtual void EmitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
Definition: MCStreamer.cpp:639
unsigned GetFrameRegGeneric(const MCContext &Ctx, MCStreamer &Out)
static MCOperand createReg(unsigned Reg)
Definition: MCInst.h:111
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:98
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
Definition: APInt.h:33
Reg
All possible values of the reg field in the ModR/M byte.
Number of individual test Apply this number of consecutive mutations to each input exit after the first new interesting input is found the minimized corpus is saved into the first input directory Number of jobs to run If min(jobs, NumberOfCpuCores()/2)\" is used.") FUZZER_FLAG_INT(reload
#define false
Definition: ConvertUTF.c:65
Context object for machine code objects.
Definition: MCContext.h:48
#define T
static cl::opt< std::string > FuncName("cppfname", cl::desc("Specify the name of the generated function"), cl::value_desc("function name"))
static std::string utostr(uint64_t X, bool isNeg=false)
Definition: StringExtras.h:93
unsigned getX86SubSuperRegister(unsigned Reg, MVT::SimpleValueType VT, bool High)
Returns the sub or super register of a specific X86 register.
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:150
virtual void InstrumentAndEmitInstruction(const MCInst &Inst, SmallVectorImpl< std::unique_ptr< MCParsedAsmOperand > > &Operands, MCContext &Ctx, const MCInstrInfo &MII, MCStreamer &Out)
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
void EmitInstruction(MCStreamer &Out, const MCInst &Inst)
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:325
Streaming machine code generation interface.
Definition: MCStreamer.h:157
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:24
SI Fold Operands
int getLLVMRegNum(unsigned RegNum, bool isEH) const
Map a dwarf register back to a target register.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
const FeatureBitset & getFeatureBits() const
getFeatureBits - Return the feature bits.
const MCRegisterInfo * getRegisterInfo() const
Definition: MCContext.h:227
unsigned getNumFrameInfos()
Definition: MCStreamer.h:216
MCSubtargetInfo - Generic base class for all target subtargets.
const Triple & getTargetTriple() const
getTargetTriple - Return the target triple string.
SmallVectorImpl< std::unique_ptr< MCParsedAsmOperand > > OperandVector
Constant expressions.
Definition: MCExpr.h:37
static MCOperand createImm(int64_t Val)
Definition: MCInst.h:117
static const MCConstantExpr * create(int64_t Value, MCContext &Ctx)
Definition: MCExpr.cpp:150