LLVM 20.0.0git
BPFPreserveStaticOffset.cpp
Go to the documentation of this file.
1//===------ BPFPreserveStaticOffset.cpp -----------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// TLDR: replaces llvm.preserve.static.offset + GEP + load / store
10// with llvm.bpf.getelementptr.and.load / store
11//
12// This file implements BPFPreserveStaticOffsetPass transformation.
13// This transformation addresses two BPF verifier specific issues:
14//
15// (a) Access to the fields of some structural types is allowed only
16// using load and store instructions with static immediate offsets.
17//
18// Examples of such types are `struct __sk_buff` and `struct
19// bpf_sock_ops`. This is so because offsets of the fields of
20// these structures do not match real offsets in the running
21// kernel. During BPF program load LDX and STX instructions
22// referring to the fields of these types are rewritten so that
23// offsets match real offsets. For this rewrite to happen field
24// offsets have to be encoded as immediate operands of the
25// instructions.
26//
27// See kernel/bpf/verifier.c:convert_ctx_access function in the
28// Linux kernel source tree for details.
29//
30// (b) Pointers to context parameters of BPF programs must not be
31// modified before access.
32//
33// During BPF program verification a tag PTR_TO_CTX is tracked for
34// register values. In case if register with such tag is modified
35// BPF program is not allowed to read or write memory using this
36// register. See kernel/bpf/verifier.c:check_mem_access function
37// in the Linux kernel source tree for details.
38//
39// The following sequence of the IR instructions:
40//
41// %x = getelementptr %ptr, %constant_offset
42// %y = load %x
43//
44// Is translated as a single machine instruction:
45//
46// LDW %ptr, %constant_offset
47//
48// In order for cases (a) and (b) to work the sequence %x-%y above has
49// to be preserved by the IR passes.
50//
51// However, several optimization passes might sink `load` instruction
52// or hoist `getelementptr` instruction so that the instructions are
53// no longer in sequence. Examples of such passes are:
54// SimplifyCFGPass, InstCombinePass, GVNPass.
55// After such modification the verifier would reject the BPF program.
56//
57// To avoid this issue the patterns like (load/store (getelementptr ...))
58// are replaced by calls to BPF specific intrinsic functions:
59// - llvm.bpf.getelementptr.and.load
60// - llvm.bpf.getelementptr.and.store
61//
62// These calls are lowered back to (load/store (getelementptr ...))
63// by BPFCheckAndAdjustIR pass right before the translation from IR to
64// machine instructions.
65//
66// The transformation is split into the following steps:
67// - When IR is generated from AST the calls to intrinsic function
68// llvm.preserve.static.offset are inserted.
69// - BPFPreserveStaticOffsetPass is executed as early as possible
70// with AllowPartial set to true, this handles marked GEP chains
71// with constant offsets.
72// - BPFPreserveStaticOffsetPass is executed at ScalarOptimizerLateEPCallback
73// with AllowPartial set to false, this handles marked GEP chains
74// with offsets that became constant after loop unrolling, e.g.
75// to handle the following code:
76//
77// struct context { int x[4]; } __attribute__((preserve_static_offset));
78//
79// struct context *ctx = ...;
80// #pragma clang loop unroll(full)
81// for (int i = 0; i < 4; ++i)
82// foo(ctx->x[i]);
83//
84// The early BPFPreserveStaticOffsetPass run is necessary to allow
85// additional GVN / CSE opportunities after functions inlining.
86// The relative order of optimization applied to function:
87// - early stage (1)
88// - ...
89// - function inlining (2)
90// - ...
91// - loop unrolling
92// - ...
93// - ScalarOptimizerLateEPCallback (3)
94//
95// When function A is inlined into function B all optimizations for A
96// are already done, while some passes remain for B. In case if
97// BPFPreserveStaticOffsetPass is done at (3) but not done at (1)
98// the code after (2) would contain a mix of
99// (load (gep %p)) and (get.and.load %p) usages:
100// - the (load (gep %p)) would come from the calling function;
101// - the (get.and.load %p) would come from the callee function.
102// Thus clobbering CSE / GVN passes done after inlining.
103
#include "BPF.h"
#include "BPFCORE.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsBPF.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

123#define DEBUG_TYPE "bpf-preserve-static-offset"
124
125using namespace llvm;
126
127static const unsigned GepAndLoadFirstIdxArg = 6;
128static const unsigned GepAndStoreFirstIdxArg = 7;
129
131 if (auto *Call = dyn_cast<CallInst>(I))
132 if (Function *Func = Call->getCalledFunction())
133 return Func->getIntrinsicID() == Id;
134 return false;
135}
136
138 return isIntrinsicCall(I, Intrinsic::preserve_static_offset);
139}
140
142 if (isIntrinsicCall(I, Intrinsic::bpf_getelementptr_and_load))
143 return cast<CallInst>(I);
144 return nullptr;
145}
146
148 if (isIntrinsicCall(I, Intrinsic::bpf_getelementptr_and_store))
149 return cast<CallInst>(I);
150 return nullptr;
151}
152
153template <class T = Instruction>
155 DILocation *Merged = (*Insns.begin())->getDebugLoc();
156 for (T *I : Insns)
157 Merged = DILocation::getMergedLocation(Merged, I->getDebugLoc());
158 return Merged;
159}
160
162 Intrinsic::BPFIntrinsics Intrinsic,
163 ArrayRef<Type *> Types,
164 ArrayRef<Value *> Args) {
165
166 Function *Fn = Intrinsic::getDeclaration(M, Intrinsic, Types);
167 return CallInst::Create(Fn, Args);
168}
169
170static void setParamElementType(CallInst *Call, unsigned ArgNo, Type *Type) {
171 LLVMContext &C = Call->getContext();
172 Call->addParamAttr(ArgNo, Attribute::get(C, Attribute::ElementType, Type));
173}
174
175static void setParamReadNone(CallInst *Call, unsigned ArgNo) {
176 LLVMContext &C = Call->getContext();
177 Call->addParamAttr(ArgNo, Attribute::get(C, Attribute::ReadNone));
178}
179
180static void setParamReadOnly(CallInst *Call, unsigned ArgNo) {
181 LLVMContext &C = Call->getContext();
182 Call->addParamAttr(ArgNo, Attribute::get(C, Attribute::ReadOnly));
183}
184
185static void setParamWriteOnly(CallInst *Call, unsigned ArgNo) {
186 LLVMContext &C = Call->getContext();
187 Call->addParamAttr(ArgNo, Attribute::get(C, Attribute::WriteOnly));
188}
189
190namespace {
191struct GEPChainInfo {
192 bool InBounds;
193 Type *SourceElementType;
194 SmallVector<Value *> Indices;
196
197 GEPChainInfo() { reset(); }
198
199 void reset() {
200 InBounds = true;
201 SourceElementType = nullptr;
202 Indices.clear();
203 Members.clear();
204 }
205};
206} // Anonymous namespace
207
208template <class T = std::disjunction<LoadInst, StoreInst>>
210 GEPChainInfo &GEP, T *Insn) {
211 Type *Int8Ty = Type::getInt8Ty(C);
212 Type *Int1Ty = Type::getInt1Ty(C);
213 // Implementation of Align guarantees that ShiftValue < 64
214 unsigned AlignShiftValue = Log2_64(Insn->getAlign().value());
215 Args.push_back(GEP.Members[0]->getPointerOperand());
216 Args.push_back(ConstantInt::get(Int1Ty, Insn->isVolatile()));
217 Args.push_back(ConstantInt::get(Int8Ty, (unsigned)Insn->getOrdering()));
218 Args.push_back(ConstantInt::get(Int8Ty, (unsigned)Insn->getSyncScopeID()));
219 Args.push_back(ConstantInt::get(Int8Ty, AlignShiftValue));
220 Args.push_back(ConstantInt::get(Int1Ty, GEP.InBounds));
221 Args.append(GEP.Indices.begin(), GEP.Indices.end());
222}
223
224static Instruction *makeGEPAndLoad(Module *M, GEPChainInfo &GEP,
225 LoadInst *Load) {
227 fillCommonArgs(M->getContext(), Args, GEP, Load);
228 CallInst *Call = makeIntrinsicCall(M, Intrinsic::bpf_getelementptr_and_load,
229 {Load->getType()}, Args);
230 setParamElementType(Call, 0, GEP.SourceElementType);
231 Call->applyMergedLocation(mergeDILocations(GEP.Members), Load->getDebugLoc());
232 Call->setName((*GEP.Members.rbegin())->getName());
233 if (Load->isUnordered()) {
234 Call->setOnlyReadsMemory();
235 Call->setOnlyAccessesArgMemory();
236 setParamReadOnly(Call, 0);
237 }
238 for (unsigned I = GepAndLoadFirstIdxArg; I < Args.size(); ++I)
239 Call->addParamAttr(I, Attribute::ImmArg);
240 Call->setAAMetadata(Load->getAAMetadata());
241 return Call;
242}
243
244static Instruction *makeGEPAndStore(Module *M, GEPChainInfo &GEP,
245 StoreInst *Store) {
247 Args.push_back(Store->getValueOperand());
248 fillCommonArgs(M->getContext(), Args, GEP, Store);
249 CallInst *Call =
250 makeIntrinsicCall(M, Intrinsic::bpf_getelementptr_and_store,
251 {Store->getValueOperand()->getType()}, Args);
252 setParamElementType(Call, 1, GEP.SourceElementType);
253 if (Store->getValueOperand()->getType()->isPointerTy())
254 setParamReadNone(Call, 0);
255 Call->applyMergedLocation(mergeDILocations(GEP.Members),
256 Store->getDebugLoc());
257 if (Store->isUnordered()) {
258 Call->setOnlyWritesMemory();
259 Call->setOnlyAccessesArgMemory();
260 setParamWriteOnly(Call, 1);
261 }
262 for (unsigned I = GepAndStoreFirstIdxArg; I < Args.size(); ++I)
263 Call->addParamAttr(I, Attribute::ImmArg);
264 Call->setAAMetadata(Store->getAAMetadata());
265 return Call;
266}
267
268static unsigned getOperandAsUnsigned(CallInst *Call, unsigned ArgNo) {
269 if (auto *Int = dyn_cast<ConstantInt>(Call->getOperand(ArgNo)))
270 return Int->getValue().getZExtValue();
271 std::string Report;
272 raw_string_ostream ReportS(Report);
273 ReportS << "Expecting ConstantInt as argument #" << ArgNo << " of " << *Call
274 << "\n";
276}
277
278static GetElementPtrInst *reconstructGEP(CallInst *Call, int Delta) {
279 SmallVector<Value *> Indices;
280 Indices.append(Call->data_operands_begin() + 6 + Delta,
281 Call->data_operands_end());
282 Type *GEPPointeeType = Call->getParamElementType(Delta);
283 auto *GEP =
284 GetElementPtrInst::Create(GEPPointeeType, Call->getOperand(Delta),
285 ArrayRef<Value *>(Indices), Call->getName());
286 GEP->setIsInBounds(getOperandAsUnsigned(Call, 5 + Delta));
287 return GEP;
288}
289
290template <class T = std::disjunction<LoadInst, StoreInst>>
292 int Delta) {
293 Insn->setVolatile(getOperandAsUnsigned(Call, 1 + Delta));
294 Insn->setOrdering((AtomicOrdering)getOperandAsUnsigned(Call, 2 + Delta));
295 Insn->setSyncScopeID(getOperandAsUnsigned(Call, 3 + Delta));
296 unsigned AlignShiftValue = getOperandAsUnsigned(Call, 4 + Delta);
297 Insn->setAlignment(Align(1ULL << AlignShiftValue));
298 GEP->setDebugLoc(Call->getDebugLoc());
299 Insn->setDebugLoc(Call->getDebugLoc());
300 Insn->setAAMetadata(Call->getAAMetadata());
301}
302
303std::pair<GetElementPtrInst *, LoadInst *>
306 Type *ReturnType = Call->getFunctionType()->getReturnType();
307 auto *Load = new LoadInst(ReturnType, GEP, "",
308 /* These would be set in reconstructCommon */
309 false, Align(1));
310 reconstructCommon(Call, GEP, Load, 0);
311 return std::pair{GEP, Load};
312}
313
314std::pair<GetElementPtrInst *, StoreInst *>
317 auto *Store = new StoreInst(Call->getOperand(0), GEP,
318 /* These would be set in reconstructCommon */
319 false, Align(1));
320 reconstructCommon(Call, GEP, Store, 1);
321 return std::pair{GEP, Store};
322}
323
324static bool isZero(Value *V) {
325 auto *CI = dyn_cast<ConstantInt>(V);
326 return CI && CI->isZero();
327}
328
329// Given a chain of GEP instructions collect information necessary to
330// merge this chain as a single GEP instruction of form:
331// getelementptr %<type>, ptr %p, i32 0, <field_idx1>, <field_idx2>, ...
333 GEPChainInfo &Info) {
334 if (GEPs.empty())
335 return false;
336
337 if (!all_of(GEPs, [=](GetElementPtrInst *GEP) {
338 return GEP->hasAllConstantIndices();
339 }))
340 return false;
341
342 GetElementPtrInst *First = GEPs[0];
343 Info.InBounds = First->isInBounds();
344 Info.SourceElementType = First->getSourceElementType();
345 Type *ResultElementType = First->getResultElementType();
346 Info.Indices.append(First->idx_begin(), First->idx_end());
347 Info.Members.push_back(First);
348
349 for (auto *Iter = GEPs.begin() + 1; Iter != GEPs.end(); ++Iter) {
350 GetElementPtrInst *GEP = *Iter;
351 if (!isZero(*GEP->idx_begin())) {
352 Info.reset();
353 return false;
354 }
355 if (!GEP->getSourceElementType() ||
356 GEP->getSourceElementType() != ResultElementType) {
357 Info.reset();
358 return false;
359 }
360 Info.InBounds &= GEP->isInBounds();
361 Info.Indices.append(GEP->idx_begin() + 1, GEP->idx_end());
362 Info.Members.push_back(GEP);
363 ResultElementType = GEP->getResultElementType();
364 }
365
366 return true;
367}
368
369// Given a chain of GEP instructions collect information necessary to
370// merge this chain as a single GEP instruction of form:
371// getelementptr i8, ptr %p, i64 %offset
373 GEPChainInfo &Info) {
374 if (GEPs.empty())
375 return false;
376
377 GetElementPtrInst *First = GEPs[0];
378 const DataLayout &DL = First->getDataLayout();
379 LLVMContext &C = First->getContext();
380 Type *PtrTy = First->getType()->getScalarType();
381 APInt Offset(DL.getIndexTypeSizeInBits(PtrTy), 0);
382 for (GetElementPtrInst *GEP : GEPs) {
383 if (!GEP->accumulateConstantOffset(DL, Offset)) {
384 Info.reset();
385 return false;
386 }
387 Info.InBounds &= GEP->isInBounds();
388 Info.Members.push_back(GEP);
389 }
390 Info.SourceElementType = Type::getInt8Ty(C);
391 Info.Indices.push_back(ConstantInt::get(C, Offset));
392
393 return true;
394}
395
397 auto Msg = DiagnosticInfoUnsupported(
398 *Insn->getFunction(),
399 Twine("Non-constant offset in access to a field of a type marked "
400 "with preserve_static_offset might be rejected by BPF verifier")
401 .concat(Insn->getDebugLoc()
402 ? ""
403 : " (pass -g option to get exact location)"),
404 Insn->getDebugLoc(), DS_Warning);
405 Insn->getContext().diagnose(Msg);
406}
407
409 return GEPs.empty() || all_of(GEPs, [=](GetElementPtrInst *GEP) {
410 return GEP->hasAllZeroIndices();
411 });
412}
413
414static bool tryToReplaceWithGEPBuiltin(Instruction *LoadOrStoreTemplate,
416 Instruction *InsnToReplace) {
417 GEPChainInfo GEPChain;
418 if (!foldGEPChainAsStructAccess(GEPs, GEPChain) &&
419 !foldGEPChainAsU8Access(GEPs, GEPChain)) {
420 return false;
421 }
422 Module *M = InsnToReplace->getModule();
423 if (auto *Load = dyn_cast<LoadInst>(LoadOrStoreTemplate)) {
424 Instruction *Replacement = makeGEPAndLoad(M, GEPChain, Load);
425 Replacement->insertBefore(InsnToReplace);
426 InsnToReplace->replaceAllUsesWith(Replacement);
427 }
428 if (auto *Store = dyn_cast<StoreInst>(LoadOrStoreTemplate)) {
429 Instruction *Replacement = makeGEPAndStore(M, GEPChain, Store);
430 Replacement->insertBefore(InsnToReplace);
431 }
432 return true;
433}
434
435// Check if U->getPointerOperand() == I
436static bool isPointerOperand(Value *I, User *U) {
437 if (auto *L = dyn_cast<LoadInst>(U))
438 return L->getPointerOperand() == I;
439 if (auto *S = dyn_cast<StoreInst>(U))
440 return S->getPointerOperand() == I;
441 if (auto *GEP = dyn_cast<GetElementPtrInst>(U))
442 return GEP->getPointerOperand() == I;
443 if (auto *Call = isGEPAndLoad(U))
444 return Call->getArgOperand(0) == I;
445 if (auto *Call = isGEPAndStore(U))
446 return Call->getArgOperand(1) == I;
447 return false;
448}
449
450static bool isInlineableCall(User *U) {
451 if (auto *Call = dyn_cast<CallInst>(U))
452 return Call->hasFnAttr(Attribute::InlineHint);
453 return false;
454}
455
459 bool AllowPatial, bool &StillUsed);
460
463 SmallVector<Instruction *> &Visited, bool AllowPatial,
464 bool &StillUsed) {
465 for (User *U : Insn->users()) {
466 auto *UI = dyn_cast<Instruction>(U);
467 if (UI && (isPointerOperand(Insn, UI) || isPreserveStaticOffsetCall(UI) ||
468 isInlineableCall(UI)))
469 rewriteAccessChain(UI, GEPs, Visited, AllowPatial, StillUsed);
470 else
471 LLVM_DEBUG({
472 llvm::dbgs() << "unsupported usage in BPFPreserveStaticOffsetPass:\n";
473 llvm::dbgs() << " Insn: " << *Insn << "\n";
474 llvm::dbgs() << " User: " << *U << "\n";
475 });
476 }
477}
478
479// A DFS traversal of GEP chain trees starting from Root.
480//
481// Recursion descends through GEP instructions and
482// llvm.preserve.static.offset calls. Recursion stops at any other
483// instruction. If load or store instruction is reached it is replaced
484// by a call to `llvm.bpf.getelementptr.and.load` or
485// `llvm.bpf.getelementptr.and.store` intrinsic.
486// If `llvm.bpf.getelementptr.and.load/store` is reached the accumulated
487// GEPs are merged into the intrinsic call.
488// If nested calls to `llvm.preserve.static.offset` are encountered these
489// calls are marked for deletion.
490//
491// Parameters description:
492// - Insn - current position in the tree
493// - GEPs - GEP instructions for the current branch
494// - Visited - a list of visited instructions in DFS order,
495// order is important for unused instruction deletion.
496// - AllowPartial - when true GEP chains that can't be folded are
497// not reported, otherwise diagnostic message is show for such chains.
498// - StillUsed - set to true if one of the GEP chains could not be
499// folded, makes sense when AllowPartial is false, means that root
500// preserve.static.offset call is still in use and should remain
501// until the next run of this pass.
505 bool AllowPatial, bool &StillUsed) {
506 auto MarkAndTraverseUses = [&]() {
507 Visited.push_back(Insn);
508 rewriteUses(Insn, GEPs, Visited, AllowPatial, StillUsed);
509 };
510 auto TryToReplace = [&](Instruction *LoadOrStore) {
511 // Do nothing for (preserve.static.offset (load/store ..)) or for
512 // GEPs with zero indices. Such constructs lead to zero offset and
513 // are simplified by other passes.
514 if (allZeroIndices(GEPs))
515 return;
516 if (tryToReplaceWithGEPBuiltin(LoadOrStore, GEPs, Insn)) {
517 Visited.push_back(Insn);
518 return;
519 }
520 if (!AllowPatial)
522 StillUsed = true;
523 };
524 if (isa<LoadInst>(Insn) || isa<StoreInst>(Insn)) {
525 TryToReplace(Insn);
526 } else if (isGEPAndLoad(Insn)) {
527 auto [GEP, Load] =
529 GEPs.push_back(GEP);
530 TryToReplace(Load);
531 GEPs.pop_back();
532 delete Load;
533 delete GEP;
534 } else if (isGEPAndStore(Insn)) {
535 // This case can't be merged with the above because
536 // `delete Load` / `delete Store` wants a concrete type,
537 // destructor of Instruction is protected.
538 auto [GEP, Store] =
540 GEPs.push_back(GEP);
541 TryToReplace(Store);
542 GEPs.pop_back();
543 delete Store;
544 delete GEP;
545 } else if (auto *GEP = dyn_cast<GetElementPtrInst>(Insn)) {
546 GEPs.push_back(GEP);
547 MarkAndTraverseUses();
548 GEPs.pop_back();
549 } else if (isPreserveStaticOffsetCall(Insn)) {
550 MarkAndTraverseUses();
551 } else if (isInlineableCall(Insn)) {
552 // Preserve preserve.static.offset call for parameters of
553 // functions that might be inlined. These would be removed on a
554 // second pass after inlining.
555 // Might happen when a pointer to a preserve_static_offset
556 // structure is passed as parameter of a function that would be
557 // inlined inside a loop that would be unrolled.
558 if (AllowPatial)
559 StillUsed = true;
560 } else {
562 raw_svector_ostream BufStream(Buf);
563 BufStream << *Insn;
565 Twine("Unexpected rewriteAccessChain Insn = ").concat(Buf));
566 }
567}
568
569static void removeMarkerCall(Instruction *Marker) {
570 Marker->replaceAllUsesWith(Marker->getOperand(0));
571 Marker->eraseFromParent();
572}
573
574static bool rewriteAccessChain(Instruction *Marker, bool AllowPatial,
575 SmallPtrSetImpl<Instruction *> &RemovedMarkers) {
578 bool StillUsed = false;
579 rewriteUses(Marker, GEPs, Visited, AllowPatial, StillUsed);
580 // Check if Visited instructions could be removed, iterate in
581 // reverse to unblock instructions higher in the chain.
582 for (auto V = Visited.rbegin(); V != Visited.rend(); ++V) {
585 RemovedMarkers.insert(*V);
586 } else if ((*V)->use_empty()) {
587 (*V)->eraseFromParent();
588 }
589 }
590 return StillUsed;
591}
592
593static std::vector<Instruction *>
595 std::vector<Instruction *> Calls;
598 Calls.push_back(&Insn);
599 return Calls;
600}
601
603 return isIntrinsicCall(V, Intrinsic::preserve_array_access_index);
604}
605
607 return isIntrinsicCall(V, Intrinsic::preserve_struct_access_index);
608}
609
611 return isIntrinsicCall(V, Intrinsic::preserve_union_access_index);
612}
613
614static void removePAICalls(Instruction *Marker) {
615 auto IsPointerOperand = [](Value *Op, User *U) {
616 if (auto *GEP = dyn_cast<GetElementPtrInst>(U))
617 return GEP->getPointerOperand() == Op;
620 return cast<CallInst>(U)->getArgOperand(0) == Op;
621 return false;
622 };
623
625 WorkList.push_back(Marker);
626 do {
627 Value *V = WorkList.pop_back_val();
628 for (User *U : V->users())
629 if (IsPointerOperand(V, U))
630 WorkList.push_back(U);
631 auto *Call = dyn_cast<CallInst>(V);
632 if (!Call)
633 continue;
636 else if (isPreserveStructIndex(V))
638 else if (isPreserveUnionIndex(V))
640 } while (!WorkList.empty());
641}
642
643// Look for sequences:
644// - llvm.preserve.static.offset -> getelementptr... -> load
645// - llvm.preserve.static.offset -> getelementptr... -> store
646// And replace those with calls to intrinsics:
647// - llvm.bpf.getelementptr.and.load
648// - llvm.bpf.getelementptr.and.store
649static bool rewriteFunction(Function &F, bool AllowPartial) {
650 LLVM_DEBUG(dbgs() << "********** BPFPreserveStaticOffsetPass (AllowPartial="
651 << AllowPartial << ") ************\n");
652
653 auto MarkerCalls = collectPreserveStaticOffsetCalls(F);
654 SmallPtrSet<Instruction *, 16> RemovedMarkers;
655
656 LLVM_DEBUG(dbgs() << "There are " << MarkerCalls.size()
657 << " preserve.static.offset calls\n");
658
659 if (MarkerCalls.empty())
660 return false;
661
662 for (auto *Call : MarkerCalls)
663 removePAICalls(Call);
664
665 for (auto *Call : MarkerCalls) {
666 if (RemovedMarkers.contains(Call))
667 continue;
668 bool StillUsed = rewriteAccessChain(Call, AllowPartial, RemovedMarkers);
669 if (!StillUsed || !AllowPartial)
670 removeMarkerCall(Call);
671 }
672
673 return true;
674}
675
679 return rewriteFunction(F, AllowPartial) ? PreservedAnalyses::none()
681}
SmallVector< AArch64_IMM::ImmInsnModel, 4 > Insn
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
This file contains the simple types necessary to represent the attributes associated with functions a...
static CallInst * isGEPAndLoad(Value *I)
bool isPreserveUnionIndex(Value *V)
bool isPreserveArrayIndex(Value *V)
static bool isPreserveStaticOffsetCall(Value *I)
static void setParamElementType(CallInst *Call, unsigned ArgNo, Type *Type)
static bool foldGEPChainAsU8Access(SmallVector< GetElementPtrInst * > &GEPs, GEPChainInfo &Info)
static void fillCommonArgs(LLVMContext &C, SmallVector< Value * > &Args, GEPChainInfo &GEP, T *Insn)
static void removePAICalls(Instruction *Marker)
static void reportNonStaticGEPChain(Instruction *Insn)
static bool foldGEPChainAsStructAccess(SmallVector< GetElementPtrInst * > &GEPs, GEPChainInfo &Info)
static const unsigned GepAndStoreFirstIdxArg
static void removeMarkerCall(Instruction *Marker)
static Instruction * makeGEPAndStore(Module *M, GEPChainInfo &GEP, StoreInst *Store)
static void rewriteUses(Instruction *Insn, SmallVector< GetElementPtrInst * > &GEPs, SmallVector< Instruction * > &Visited, bool AllowPatial, bool &StillUsed)
static void setParamReadNone(CallInst *Call, unsigned ArgNo)
static Instruction * makeGEPAndLoad(Module *M, GEPChainInfo &GEP, LoadInst *Load)
static unsigned getOperandAsUnsigned(CallInst *Call, unsigned ArgNo)
bool isPreserveStructIndex(Value *V)
static void setParamReadOnly(CallInst *Call, unsigned ArgNo)
static void rewriteAccessChain(Instruction *Insn, SmallVector< GetElementPtrInst * > &GEPs, SmallVector< Instruction * > &Visited, bool AllowPatial, bool &StillUsed)
static bool isInlineableCall(User *U)
static DILocation * mergeDILocations(SmallVector< T * > &Insns)
static const unsigned GepAndLoadFirstIdxArg
static GetElementPtrInst * reconstructGEP(CallInst *Call, int Delta)
static CallInst * makeIntrinsicCall(Module *M, Intrinsic::BPFIntrinsics Intrinsic, ArrayRef< Type * > Types, ArrayRef< Value * > Args)
static bool allZeroIndices(SmallVector< GetElementPtrInst * > &GEPs)
static std::vector< Instruction * > collectPreserveStaticOffsetCalls(Function &F)
static bool rewriteFunction(Function &F, bool AllowPartial)
static bool tryToReplaceWithGEPBuiltin(Instruction *LoadOrStoreTemplate, SmallVector< GetElementPtrInst * > &GEPs, Instruction *InsnToReplace)
static void reconstructCommon(CallInst *Call, GetElementPtrInst *GEP, T *Insn, int Delta)
static CallInst * isGEPAndStore(Value *I)
static void setParamWriteOnly(CallInst *Call, unsigned ArgNo)
static bool isIntrinsicCall(Value *I, Intrinsic::ID Id)
static bool isPointerOperand(Value *I, User *U)
static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID)
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
This file contains the declarations for the subclasses of Constant, which represent the different fla...
#define LLVM_DEBUG(X)
Definition: Debug.h:101
Hexagon Common GEP
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
Definition: Lint.cpp:512
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first found DebugLoc that has a DILocation, given a range of instructions.
Module.h This file contains the declarations for the Module class.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
Class for arbitrary precision integers.
Definition: APInt.h:78
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:253
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
static Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val=0)
Return a uniquified Attribute object.
Definition: Attributes.cpp:94
static void removeArrayAccessCall(CallInst *Call)
static void removeStructAccessCall(CallInst *Call)
static void removeUnionAccessCall(CallInst *Call)
static std::pair< GetElementPtrInst *, StoreInst * > reconstructStore(CallInst *Call)
static std::pair< GetElementPtrInst *, LoadInst * > reconstructLoad(CallInst *Call)
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Debug location.
static DILocation * getMergedLocation(DILocation *LocA, DILocation *LocB)
When two instructions are combined into a single instruction we also need to combine the original loc...
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
Diagnostic information for unsupported feature in backend.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:915
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Definition: Instructions.h:938
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
Definition: Instruction.cpp:97
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
Definition: Instruction.cpp:66
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:92
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
An instruction for reading from memory.
Definition: Instructions.h:174
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:111
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition: Analysis.h:114
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition: Analysis.h:117
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:347
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:368
bool contains(ConstPtrType Ptr) const
Definition: SmallPtrSet.h:442
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:503
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
bool empty() const
Definition: SmallVector.h:95
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:697
void push_back(const T &Elt)
Definition: SmallVector.h:427
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1210
An instruction for storing to memory.
Definition: Instructions.h:290
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static IntegerType * getInt1Ty(LLVMContext &C)
static IntegerType * getInt8Ty(LLVMContext &C)
Value * getOperand(unsigned i) const
Definition: User.h:169
LLVM Value Representation.
Definition: Value.h:74
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
A raw_ostream that writes to an std::string.
Definition: raw_ostream.h:661
A raw_ostream that writes to an SmallVector or SmallString.
Definition: raw_ostream.h:691
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1539
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:480
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:346
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&... Ranges)
Concatenated range across two or more ranges.
Definition: STLExtras.h:1176
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
DWARFExpression::Operation Op
@ DS_Warning
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39