Bug Summary

File: build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
Warning: line 2598, column 15
Called C++ object pointer is null
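
For orientation, "Called C++ object pointer is null" is the analyzer's diagnostic for a member call made through a pointer that can be null on at least one feasible path (typically raised by the core.CallAndMessage checker). The sketch below is illustrative only: the names Callee, lookupCallee, and countArgs are hypothetical, and it does not reproduce the code flagged at line 2598, which lies outside this excerpt.

// Illustrative sketch only -- hypothetical names, not the flagged code.
struct Callee {
  int arity() const { return 2; }
};

// May return nullptr, e.g. for an indirect call site.
static Callee *lookupCallee(bool IsDirect) {
  static Callee TheCallee;
  return IsDirect ? &TheCallee : nullptr;
}

static int countArgs(bool IsDirect) {
  Callee *C = lookupCallee(IsDirect);
  // On the path where lookupCallee returned nullptr, the member call below
  // dereferences C, so clang --analyze reports
  // "Called C++ object pointer is null" here.
  return C->arity();
}

int main() {
  // Only the non-null path runs; the analyzer still explores IsDirect == false.
  return countArgs(/*IsDirect=*/true) == 2 ? 0 : 1;
}

Adding a null check before the member call (for example, returning early when lookupCallee yields nullptr) removes the reported path.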

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name SimplifyLibCalls.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm -resource-dir /usr/lib/llvm-16/lib/clang/16.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Transforms/Utils -I /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Transforms/Utils -I include -I /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-16/lib/clang/16.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-09-04-125545-48738-1 -x c++ /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
1//===------ SimplifyLibCalls.cpp - Library calls simplifier ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the library calls simplifier. It does not implement
10// any pass, but can't be used by other passes to do simplifications.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
15#include "llvm/ADT/APSInt.h"
16#include "llvm/ADT/SmallString.h"
17#include "llvm/ADT/Triple.h"
18#include "llvm/Analysis/ConstantFolding.h"
19#include "llvm/Analysis/Loads.h"
20#include "llvm/Analysis/OptimizationRemarkEmitter.h"
21#include "llvm/Analysis/ValueTracking.h"
22#include "llvm/IR/DataLayout.h"
23#include "llvm/IR/Function.h"
24#include "llvm/IR/IRBuilder.h"
25#include "llvm/IR/IntrinsicInst.h"
26#include "llvm/IR/Intrinsics.h"
27#include "llvm/IR/Module.h"
28#include "llvm/IR/PatternMatch.h"
29#include "llvm/Support/CommandLine.h"
30#include "llvm/Support/KnownBits.h"
31#include "llvm/Support/MathExtras.h"
32#include "llvm/Transforms/Utils/BuildLibCalls.h"
33#include "llvm/Transforms/Utils/Local.h"
34#include "llvm/Transforms/Utils/SizeOpts.h"
35
36using namespace llvm;
37using namespace PatternMatch;
38
39static cl::opt<bool>
40 EnableUnsafeFPShrink("enable-double-float-shrink", cl::Hidden,
41 cl::init(false),
42 cl::desc("Enable unsafe double to float "
43 "shrinking for math lib calls"));
44
45//===----------------------------------------------------------------------===//
46// Helper Functions
47//===----------------------------------------------------------------------===//
48
49static bool ignoreCallingConv(LibFunc Func) {
50 return Func == LibFunc_abs || Func == LibFunc_labs ||
51 Func == LibFunc_llabs || Func == LibFunc_strlen;
52}
53
54/// Return true if it is only used in equality comparisons with With.
55static bool isOnlyUsedInEqualityComparison(Value *V, Value *With) {
56 for (User *U : V->users()) {
57 if (ICmpInst *IC = dyn_cast<ICmpInst>(U))
58 if (IC->isEquality() && IC->getOperand(1) == With)
59 continue;
60 // Unknown instruction.
61 return false;
62 }
63 return true;
64}
65
66static bool callHasFloatingPointArgument(const CallInst *CI) {
67 return any_of(CI->operands(), [](const Use &OI) {
68 return OI->getType()->isFloatingPointTy();
69 });
70}
71
72static bool callHasFP128Argument(const CallInst *CI) {
73 return any_of(CI->operands(), [](const Use &OI) {
74 return OI->getType()->isFP128Ty();
75 });
76}
77
78// Convert the entire string Str representing an integer in Base, up to
79// the terminating nul if present, to a constant according to the rules
80// of strtoul[l] or, when AsSigned is set, of strtol[l]. On success
81// return the result, otherwise null.
82// The function assumes the string is encoded in ASCII and carefully
83// avoids converting sequences (including "") that the corresponding
84// library call might fail and set errno for.
85static Value *convertStrToInt(CallInst *CI, StringRef &Str, Value *EndPtr,
86 uint64_t Base, bool AsSigned, IRBuilderBase &B) {
87 if (Base < 2 || Base > 36)
88 if (Base != 0)
89 // Fail for an invalid base (required by POSIX).
90 return nullptr;
91
92 // Current offset into the original string to reflect in EndPtr.
93 size_t Offset = 0;
94 // Strip leading whitespace.
95 for ( ; Offset != Str.size(); ++Offset)
96 if (!isSpace((unsigned char)Str[Offset])) {
97 Str = Str.substr(Offset);
98 break;
99 }
100
101 if (Str.empty())
102 // Fail for empty subject sequences (POSIX allows but doesn't require
103 // strtol[l]/strtoul[l] to fail with EINVAL).
104 return nullptr;
105
106 // Strip but remember the sign.
107 bool Negate = Str[0] == '-';
108 if (Str[0] == '-' || Str[0] == '+') {
109 Str = Str.drop_front();
110 if (Str.empty())
111 // Fail for a sign with nothing after it.
112 return nullptr;
113 ++Offset;
114 }
115
116 // Set Max to the absolute value of the minimum (for signed), or
117 // to the maximum (for unsigned) value representable in the type.
118 Type *RetTy = CI->getType();
119 unsigned NBits = RetTy->getPrimitiveSizeInBits();
120 uint64_t Max = AsSigned && Negate ? 1 : 0;
121 Max += AsSigned ? maxIntN(NBits) : maxUIntN(NBits);
122
123 // Autodetect Base if it's zero and consume the "0x" prefix.
124 if (Str.size() > 1) {
125 if (Str[0] == '0') {
126 if (toUpper((unsigned char)Str[1]) == 'X') {
127 if (Str.size() == 2 || (Base && Base != 16))
128 // Fail if Base doesn't allow the "0x" prefix or for the prefix
129 // alone that implementations like BSD set errno to EINVAL for.
130 return nullptr;
131
132 Str = Str.drop_front(2);
133 Offset += 2;
134 Base = 16;
135 }
136 else if (Base == 0)
137 Base = 8;
138 } else if (Base == 0)
139 Base = 10;
140 }
141 else if (Base == 0)
142 Base = 10;
143
144 // Convert the rest of the subject sequence, not including the sign,
145 // to its uint64_t representation (this assumes the source character
146 // set is ASCII).
147 uint64_t Result = 0;
148 for (unsigned i = 0; i != Str.size(); ++i) {
149 unsigned char DigVal = Str[i];
150 if (isDigit(DigVal))
151 DigVal = DigVal - '0';
152 else {
153 DigVal = toUpper(DigVal);
154 if (isAlpha(DigVal))
155 DigVal = DigVal - 'A' + 10;
156 else
157 return nullptr;
158 }
159
160 if (DigVal >= Base)
161 // Fail if the digit is not valid in the Base.
162 return nullptr;
163
164 // Add the digit and fail if the result is not representable in
165 // the (unsigned form of the) destination type.
166 bool VFlow;
167 Result = SaturatingMultiplyAdd(Result, Base, (uint64_t)DigVal, &VFlow);
168 if (VFlow || Result > Max)
169 return nullptr;
170 }
171
172 if (EndPtr) {
173 // Store the pointer to the end.
174 Value *Off = B.getInt64(Offset + Str.size());
175 Value *StrBeg = CI->getArgOperand(0);
176 Value *StrEnd = B.CreateInBoundsGEP(B.getInt8Ty(), StrBeg, Off, "endptr");
177 B.CreateStore(StrEnd, EndPtr);
178 }
179
180 if (Negate)
181 // Unsigned negation doesn't overflow.
182 Result = -Result;
183
184 return ConstantInt::get(RetTy, Result);
185}
186
187static bool isOnlyUsedInComparisonWithZero(Value *V) {
188 for (User *U : V->users()) {
189 if (ICmpInst *IC = dyn_cast<ICmpInst>(U))
190 if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
191 if (C->isNullValue())
192 continue;
193 // Unknown instruction.
194 return false;
195 }
196 return true;
197}
198
199static bool canTransformToMemCmp(CallInst *CI, Value *Str, uint64_t Len,
200 const DataLayout &DL) {
201 if (!isOnlyUsedInComparisonWithZero(CI))
202 return false;
203
204 if (!isDereferenceableAndAlignedPointer(Str, Align(1), APInt(64, Len), DL))
205 return false;
206
207 if (CI->getFunction()->hasFnAttribute(Attribute::SanitizeMemory))
208 return false;
209
210 return true;
211}
212
213static void annotateDereferenceableBytes(CallInst *CI,
214 ArrayRef<unsigned> ArgNos,
215 uint64_t DereferenceableBytes) {
216 const Function *F = CI->getCaller();
217 if (!F)
218 return;
219 for (unsigned ArgNo : ArgNos) {
220 uint64_t DerefBytes = DereferenceableBytes;
221 unsigned AS = CI->getArgOperand(ArgNo)->getType()->getPointerAddressSpace();
222 if (!llvm::NullPointerIsDefined(F, AS) ||
223 CI->paramHasAttr(ArgNo, Attribute::NonNull))
224 DerefBytes = std::max(CI->getParamDereferenceableOrNullBytes(ArgNo),
225 DereferenceableBytes);
226
227 if (CI->getParamDereferenceableBytes(ArgNo) < DerefBytes) {
228 CI->removeParamAttr(ArgNo, Attribute::Dereferenceable);
229 if (!llvm::NullPointerIsDefined(F, AS) ||
230 CI->paramHasAttr(ArgNo, Attribute::NonNull))
231 CI->removeParamAttr(ArgNo, Attribute::DereferenceableOrNull);
232 CI->addParamAttr(ArgNo, Attribute::getWithDereferenceableBytes(
233 CI->getContext(), DerefBytes));
234 }
235 }
236}
237
238static void annotateNonNullNoUndefBasedOnAccess(CallInst *CI,
239 ArrayRef<unsigned> ArgNos) {
240 Function *F = CI->getCaller();
241 if (!F)
242 return;
243
244 for (unsigned ArgNo : ArgNos) {
245 if (!CI->paramHasAttr(ArgNo, Attribute::NoUndef))
246 CI->addParamAttr(ArgNo, Attribute::NoUndef);
247
248 if (CI->paramHasAttr(ArgNo, Attribute::NonNull))
249 continue;
250 unsigned AS = CI->getArgOperand(ArgNo)->getType()->getPointerAddressSpace();
251 if (llvm::NullPointerIsDefined(F, AS))
252 continue;
253
254 CI->addParamAttr(ArgNo, Attribute::NonNull);
255 annotateDereferenceableBytes(CI, ArgNo, 1);
256 }
257}
258
259static void annotateNonNullAndDereferenceable(CallInst *CI, ArrayRef<unsigned> ArgNos,
260 Value *Size, const DataLayout &DL) {
261 if (ConstantInt *LenC = dyn_cast<ConstantInt>(Size)) {
262 annotateNonNullNoUndefBasedOnAccess(CI, ArgNos);
263 annotateDereferenceableBytes(CI, ArgNos, LenC->getZExtValue());
264 } else if (isKnownNonZero(Size, DL)) {
265 annotateNonNullNoUndefBasedOnAccess(CI, ArgNos);
266 const APInt *X, *Y;
267 uint64_t DerefMin = 1;
268 if (match(Size, m_Select(m_Value(), m_APInt(X), m_APInt(Y)))) {
269 DerefMin = std::min(X->getZExtValue(), Y->getZExtValue());
270 annotateDereferenceableBytes(CI, ArgNos, DerefMin);
271 }
272 }
273}
274
275// Copy CallInst "flags" like musttail, notail, and tail. Return New param for
276// easier chaining. Calls to emit* and B.createCall should probably be wrapped
277// in this function when New is created to replace Old. Callers should take
278// care to check Old.isMustTailCall() if they aren't replacing Old directly
279// with New.
280static Value *copyFlags(const CallInst &Old, Value *New) {
281 assert(!Old.isMustTailCall() && "do not copy musttail call flags");
282 assert(!Old.isNoTailCall() && "do not copy notail call flags");
283 if (auto *NewCI = dyn_cast_or_null<CallInst>(New))
284 NewCI->setTailCallKind(Old.getTailCallKind());
285 return New;
286}
287
288// Helper to avoid truncating the length if size_t is 32-bits.
289static StringRef substr(StringRef Str, uint64_t Len) {
290 return Len >= Str.size() ? Str : Str.substr(0, Len);
291}
292
293//===----------------------------------------------------------------------===//
294// String and Memory Library Call Optimizations
295//===----------------------------------------------------------------------===//
296
297Value *LibCallSimplifier::optimizeStrCat(CallInst *CI, IRBuilderBase &B) {
298 // Extract some information from the instruction
299 Value *Dst = CI->getArgOperand(0);
300 Value *Src = CI->getArgOperand(1);
301 annotateNonNullNoUndefBasedOnAccess(CI, {0, 1});
302
303 // See if we can get the length of the input string.
304 uint64_t Len = GetStringLength(Src);
305 if (Len)
306 annotateDereferenceableBytes(CI, 1, Len);
307 else
308 return nullptr;
309 --Len; // Unbias length.
310
311 // Handle the simple, do-nothing case: strcat(x, "") -> x
312 if (Len == 0)
313 return Dst;
314
315 return copyFlags(*CI, emitStrLenMemCpy(Src, Dst, Len, B));
316}
317
318Value *LibCallSimplifier::emitStrLenMemCpy(Value *Src, Value *Dst, uint64_t Len,
319 IRBuilderBase &B) {
320 // We need to find the end of the destination string. That's where the
321 // memory is to be moved to. We just generate a call to strlen.
322 Value *DstLen = emitStrLen(Dst, B, DL, TLI);
323 if (!DstLen)
324 return nullptr;
325
326 // Now that we have the destination's length, we must index into the
327 // destination's pointer to get the actual memcpy destination (end of
328 // the string .. we're concatenating).
329 Value *CpyDst = B.CreateInBoundsGEP(B.getInt8Ty(), Dst, DstLen, "endptr");
330
331 // We have enough information to now generate the memcpy call to do the
332 // concatenation for us. Make a memcpy to copy the nul byte with align = 1.
333 B.CreateMemCpy(
334 CpyDst, Align(1), Src, Align(1),
335 ConstantInt::get(DL.getIntPtrType(Src->getContext()), Len + 1));
336 return Dst;
337}
338
339Value *LibCallSimplifier::optimizeStrNCat(CallInst *CI, IRBuilderBase &B) {
340 // Extract some information from the instruction.
341 Value *Dst = CI->getArgOperand(0);
342 Value *Src = CI->getArgOperand(1);
343 Value *Size = CI->getArgOperand(2);
344 uint64_t Len;
345 annotateNonNullNoUndefBasedOnAccess(CI, 0);
346 if (isKnownNonZero(Size, DL))
347 annotateNonNullNoUndefBasedOnAccess(CI, 1);
348
349 // We don't do anything if length is not constant.
350 ConstantInt *LengthArg = dyn_cast<ConstantInt>(Size);
351 if (LengthArg) {
352 Len = LengthArg->getZExtValue();
353 // strncat(x, c, 0) -> x
354 if (!Len)
355 return Dst;
356 } else {
357 return nullptr;
358 }
359
360 // See if we can get the length of the input string.
361 uint64_t SrcLen = GetStringLength(Src);
362 if (SrcLen) {
363 annotateDereferenceableBytes(CI, 1, SrcLen);
364 --SrcLen; // Unbias length.
365 } else {
366 return nullptr;
367 }
368
369 // strncat(x, "", c) -> x
370 if (SrcLen == 0)
371 return Dst;
372
373 // We don't optimize this case.
374 if (Len < SrcLen)
375 return nullptr;
376
377 // strncat(x, s, c) -> strcat(x, s)
378 // s is constant so the strcat can be optimized further.
379 return copyFlags(*CI, emitStrLenMemCpy(Src, Dst, SrcLen, B));
380}
381
382// Helper to transform memchr(S, C, N) == S to N && *S == C and, when
383// NBytes is null, strchr(S, C) to *S == C. A precondition of the function
384// is that either S is dereferenceable or the value of N is nonzero.
385static Value* memChrToCharCompare(CallInst *CI, Value *NBytes,
386 IRBuilderBase &B, const DataLayout &DL)
387{
388 Value *Src = CI->getArgOperand(0);
389 Value *CharVal = CI->getArgOperand(1);
390
391 // Fold memchr(A, C, N) == A to N && *A == C.
392 Type *CharTy = B.getInt8Ty();
393 Value *Char0 = B.CreateLoad(CharTy, Src);
394 CharVal = B.CreateTrunc(CharVal, CharTy);
395 Value *Cmp = B.CreateICmpEQ(Char0, CharVal, "char0cmp");
396
397 if (NBytes) {
398 Value *Zero = ConstantInt::get(NBytes->getType(), 0);
399 Value *And = B.CreateICmpNE(NBytes, Zero);
400 Cmp = B.CreateLogicalAnd(And, Cmp);
401 }
402
403 Value *NullPtr = Constant::getNullValue(CI->getType());
404 return B.CreateSelect(Cmp, Src, NullPtr);
405}
406
407Value *LibCallSimplifier::optimizeStrChr(CallInst *CI, IRBuilderBase &B) {
408 Value *SrcStr = CI->getArgOperand(0);
409 Value *CharVal = CI->getArgOperand(1);
410 annotateNonNullNoUndefBasedOnAccess(CI, 0);
411
412 if (isOnlyUsedInEqualityComparison(CI, SrcStr))
413 return memChrToCharCompare(CI, nullptr, B, DL);
414
415 // If the second operand is non-constant, see if we can compute the length
416 // of the input string and turn this into memchr.
417 ConstantInt *CharC = dyn_cast<ConstantInt>(CharVal);
418 if (!CharC) {
419 uint64_t Len = GetStringLength(SrcStr);
420 if (Len)
421 annotateDereferenceableBytes(CI, 0, Len);
422 else
423 return nullptr;
424
425 Function *Callee = CI->getCalledFunction();
426 FunctionType *FT = Callee->getFunctionType();
427 if (!FT->getParamType(1)->isIntegerTy(32)) // memchr needs i32.
428 return nullptr;
429
430 return copyFlags(
431 *CI,
432 emitMemChr(SrcStr, CharVal, // include nul.
433 ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len), B,
434 DL, TLI));
435 }
436
437 if (CharC->isZero()) {
438 Value *NullPtr = Constant::getNullValue(CI->getType());
439 if (isOnlyUsedInEqualityComparison(CI, NullPtr))
440 // Pre-empt the transformation to strlen below and fold
441 // strchr(A, '\0') == null to false.
442 return B.CreateIntToPtr(B.getTrue(), CI->getType());
443 }
444
445 // Otherwise, the character is a constant, see if the first argument is
446 // a string literal. If so, we can constant fold.
447 StringRef Str;
448 if (!getConstantStringInfo(SrcStr, Str)) {
449 if (CharC->isZero()) // strchr(p, 0) -> p + strlen(p)
450 if (Value *StrLen = emitStrLen(SrcStr, B, DL, TLI))
451 return B.CreateInBoundsGEP(B.getInt8Ty(), SrcStr, StrLen, "strchr");
452 return nullptr;
453 }
454
455 // Compute the offset, make sure to handle the case when we're searching for
456 // zero (a weird way to spell strlen).
457 size_t I = (0xFF & CharC->getSExtValue()) == 0
458 ? Str.size()
459 : Str.find(CharC->getSExtValue());
460 if (I == StringRef::npos) // Didn't find the char. strchr returns null.
461 return Constant::getNullValue(CI->getType());
462
463 // strchr(s+n,c) -> gep(s+n+i,c)
464 return B.CreateInBoundsGEP(B.getInt8Ty(), SrcStr, B.getInt64(I), "strchr");
465}
466
467Value *LibCallSimplifier::optimizeStrRChr(CallInst *CI, IRBuilderBase &B) {
468 Value *SrcStr = CI->getArgOperand(0);
469 Value *CharVal = CI->getArgOperand(1);
470 ConstantInt *CharC = dyn_cast<ConstantInt>(CharVal);
471 annotateNonNullNoUndefBasedOnAccess(CI, 0);
472
473 StringRef Str;
474 if (!getConstantStringInfo(SrcStr, Str)) {
475 // strrchr(s, 0) -> strchr(s, 0)
476 if (CharC && CharC->isZero())
477 return copyFlags(*CI, emitStrChr(SrcStr, '\0', B, TLI));
478 return nullptr;
479 }
480
481 // Try to expand strrchr to the memrchr nonstandard extension if it's
482 // available, or simply fail otherwise.
483 uint64_t NBytes = Str.size() + 1; // Include the terminating nul.
484 Type *IntPtrType = DL.getIntPtrType(CI->getContext());
485 Value *Size = ConstantInt::get(IntPtrType, NBytes);
486 return copyFlags(*CI, emitMemRChr(SrcStr, CharVal, Size, B, DL, TLI));
487}
488
489Value *LibCallSimplifier::optimizeStrCmp(CallInst *CI, IRBuilderBase &B) {
490 Value *Str1P = CI->getArgOperand(0), *Str2P = CI->getArgOperand(1);
491 if (Str1P == Str2P) // strcmp(x,x) -> 0
492 return ConstantInt::get(CI->getType(), 0);
493
494 StringRef Str1, Str2;
495 bool HasStr1 = getConstantStringInfo(Str1P, Str1);
496 bool HasStr2 = getConstantStringInfo(Str2P, Str2);
497
498 // strcmp(x, y) -> cnst (if both x and y are constant strings)
499 if (HasStr1 && HasStr2)
500 return ConstantInt::get(CI->getType(), Str1.compare(Str2));
501
502 if (HasStr1 && Str1.empty()) // strcmp("", x) -> -*x
503 return B.CreateNeg(B.CreateZExt(
504 B.CreateLoad(B.getInt8Ty(), Str2P, "strcmpload"), CI->getType()));
505
506 if (HasStr2 && Str2.empty()) // strcmp(x,"") -> *x
507 return B.CreateZExt(B.CreateLoad(B.getInt8Ty(), Str1P, "strcmpload"),
508 CI->getType());
509
510 // strcmp(P, "x") -> memcmp(P, "x", 2)
511 uint64_t Len1 = GetStringLength(Str1P);
512 if (Len1)
513 annotateDereferenceableBytes(CI, 0, Len1);
514 uint64_t Len2 = GetStringLength(Str2P);
515 if (Len2)
516 annotateDereferenceableBytes(CI, 1, Len2);
517
518 if (Len1 && Len2) {
519 return copyFlags(
520 *CI, emitMemCmp(Str1P, Str2P,
521 ConstantInt::get(DL.getIntPtrType(CI->getContext()),
522 std::min(Len1, Len2)),
523 B, DL, TLI));
524 }
525
526 // strcmp to memcmp
527 if (!HasStr1 && HasStr2) {
528 if (canTransformToMemCmp(CI, Str1P, Len2, DL))
529 return copyFlags(
530 *CI,
531 emitMemCmp(Str1P, Str2P,
532 ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len2),
533 B, DL, TLI));
534 } else if (HasStr1 && !HasStr2) {
535 if (canTransformToMemCmp(CI, Str2P, Len1, DL))
536 return copyFlags(
537 *CI,
538 emitMemCmp(Str1P, Str2P,
539 ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len1),
540 B, DL, TLI));
541 }
542
543 annotateNonNullNoUndefBasedOnAccess(CI, {0, 1});
544 return nullptr;
545}
546
547// Optimize a memcmp or, when StrNCmp is true, strncmp call CI with constant
548// arrays LHS and RHS and nonconstant Size.
549static Value *optimizeMemCmpVarSize(CallInst *CI, Value *LHS, Value *RHS,
550 Value *Size, bool StrNCmp,
551 IRBuilderBase &B, const DataLayout &DL);
552
553Value *LibCallSimplifier::optimizeStrNCmp(CallInst *CI, IRBuilderBase &B) {
554 Value *Str1P = CI->getArgOperand(0);
555 Value *Str2P = CI->getArgOperand(1);
556 Value *Size = CI->getArgOperand(2);
557 if (Str1P == Str2P) // strncmp(x,x,n) -> 0
558 return ConstantInt::get(CI->getType(), 0);
559
560 if (isKnownNonZero(Size, DL))
561 annotateNonNullNoUndefBasedOnAccess(CI, {0, 1});
562 // Get the length argument if it is constant.
563 uint64_t Length;
564 if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(Size))
565 Length = LengthArg->getZExtValue();
566 else
567 return optimizeMemCmpVarSize(CI, Str1P, Str2P, Size, true, B, DL);
568
569 if (Length == 0) // strncmp(x,y,0) -> 0
570 return ConstantInt::get(CI->getType(), 0);
571
572 if (Length == 1) // strncmp(x,y,1) -> memcmp(x,y,1)
573 return copyFlags(*CI, emitMemCmp(Str1P, Str2P, Size, B, DL, TLI));
574
575 StringRef Str1, Str2;
576 bool HasStr1 = getConstantStringInfo(Str1P, Str1);
577 bool HasStr2 = getConstantStringInfo(Str2P, Str2);
578
579 // strncmp(x, y) -> cnst (if both x and y are constant strings)
580 if (HasStr1 && HasStr2) {
581 // Avoid truncating the 64-bit Length to 32 bits in ILP32.
582 StringRef SubStr1 = substr(Str1, Length);
583 StringRef SubStr2 = substr(Str2, Length);
584 return ConstantInt::get(CI->getType(), SubStr1.compare(SubStr2));
585 }
586
587 if (HasStr1 && Str1.empty()) // strncmp("", x, n) -> -*x
588 return B.CreateNeg(B.CreateZExt(
589 B.CreateLoad(B.getInt8Ty(), Str2P, "strcmpload"), CI->getType()));
590
591 if (HasStr2 && Str2.empty()) // strncmp(x, "", n) -> *x
592 return B.CreateZExt(B.CreateLoad(B.getInt8Ty(), Str1P, "strcmpload"),
593 CI->getType());
594
595 uint64_t Len1 = GetStringLength(Str1P);
596 if (Len1)
597 annotateDereferenceableBytes(CI, 0, Len1);
598 uint64_t Len2 = GetStringLength(Str2P);
599 if (Len2)
600 annotateDereferenceableBytes(CI, 1, Len2);
601
602 // strncmp to memcmp
603 if (!HasStr1 && HasStr2) {
604 Len2 = std::min(Len2, Length);
605 if (canTransformToMemCmp(CI, Str1P, Len2, DL))
606 return copyFlags(
607 *CI,
608 emitMemCmp(Str1P, Str2P,
609 ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len2),
610 B, DL, TLI));
611 } else if (HasStr1 && !HasStr2) {
612 Len1 = std::min(Len1, Length);
613 if (canTransformToMemCmp(CI, Str2P, Len1, DL))
614 return copyFlags(
615 *CI,
616 emitMemCmp(Str1P, Str2P,
617 ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len1),
618 B, DL, TLI));
619 }
620
621 return nullptr;
622}
623
624Value *LibCallSimplifier::optimizeStrNDup(CallInst *CI, IRBuilderBase &B) {
625 Value *Src = CI->getArgOperand(0);
626 ConstantInt *Size = dyn_cast<ConstantInt>(CI->getArgOperand(1));
627 uint64_t SrcLen = GetStringLength(Src);
628 if (SrcLen && Size) {
629 annotateDereferenceableBytes(CI, 0, SrcLen);
630 if (SrcLen <= Size->getZExtValue() + 1)
631 return copyFlags(*CI, emitStrDup(Src, B, TLI));
632 }
633
634 return nullptr;
635}
636
637Value *LibCallSimplifier::optimizeStrCpy(CallInst *CI, IRBuilderBase &B) {
638 Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
639 if (Dst == Src) // strcpy(x,x) -> x
640 return Src;
641
642 annotateNonNullNoUndefBasedOnAccess(CI, {0, 1});
643 // See if we can get the length of the input string.
644 uint64_t Len = GetStringLength(Src);
645 if (Len)
646 annotateDereferenceableBytes(CI, 1, Len);
647 else
648 return nullptr;
649
650 // We have enough information to now generate the memcpy call to do the
651 // copy for us. Make a memcpy to copy the nul byte with align = 1.
652 CallInst *NewCI =
653 B.CreateMemCpy(Dst, Align(1), Src, Align(1),
654 ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len));
655 NewCI->setAttributes(CI->getAttributes());
656 NewCI->removeRetAttrs(AttributeFuncs::typeIncompatible(NewCI->getType()));
657 copyFlags(*CI, NewCI);
658 return Dst;
659}
660
661Value *LibCallSimplifier::optimizeStpCpy(CallInst *CI, IRBuilderBase &B) {
662 Function *Callee = CI->getCalledFunction();
663 Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
664
665 // stpcpy(d,s) -> strcpy(d,s) if the result is not used.
666 if (CI->use_empty())
667 return copyFlags(*CI, emitStrCpy(Dst, Src, B, TLI));
668
669 if (Dst == Src) { // stpcpy(x,x) -> x+strlen(x)
670 Value *StrLen = emitStrLen(Src, B, DL, TLI);
671 return StrLen ? B.CreateInBoundsGEP(B.getInt8Ty(), Dst, StrLen) : nullptr;
672 }
673
674 // See if we can get the length of the input string.
675 uint64_t Len = GetStringLength(Src);
676 if (Len)
677 annotateDereferenceableBytes(CI, 1, Len);
678 else
679 return nullptr;
680
681 Type *PT = Callee->getFunctionType()->getParamType(0);
682 Value *LenV = ConstantInt::get(DL.getIntPtrType(PT), Len);
683 Value *DstEnd = B.CreateInBoundsGEP(
684 B.getInt8Ty(), Dst, ConstantInt::get(DL.getIntPtrType(PT), Len - 1));
685
686 // We have enough information to now generate the memcpy call to do the
687 // copy for us. Make a memcpy to copy the nul byte with align = 1.
688 CallInst *NewCI = B.CreateMemCpy(Dst, Align(1), Src, Align(1), LenV);
689 NewCI->setAttributes(CI->getAttributes());
690 NewCI->removeRetAttrs(AttributeFuncs::typeIncompatible(NewCI->getType()));
691 copyFlags(*CI, NewCI);
692 return DstEnd;
693}
694
695// Optimize a call to size_t strlcpy(char*, const char*, size_t).
696
697Value *LibCallSimplifier::optimizeStrLCpy(CallInst *CI, IRBuilderBase &B) {
698 Value *Size = CI->getArgOperand(2);
699 if (isKnownNonZero(Size, DL))
700 // Like snprintf, the function stores into the destination only when
701 // the size argument is nonzero.
702 annotateNonNullNoUndefBasedOnAccess(CI, 0);
703 // The function reads the source argument regardless of Size (it returns
704 // its length).
705 annotateNonNullNoUndefBasedOnAccess(CI, 1);
706
707 uint64_t NBytes;
708 if (ConstantInt *SizeC = dyn_cast<ConstantInt>(Size))
709 NBytes = SizeC->getZExtValue();
710 else
711 return nullptr;
712
713 Value *Dst = CI->getArgOperand(0);
714 Value *Src = CI->getArgOperand(1);
715 if (NBytes <= 1) {
716 if (NBytes == 1)
717 // For a call to strlcpy(D, S, 1) first store a nul in *D.
718 B.CreateStore(B.getInt8(0), Dst);
719
720 // Transform strlcpy(D, S, 0) to a call to strlen(S).
721 return copyFlags(*CI, emitStrLen(Src, B, DL, TLI));
722 }
723
724 // Try to determine the length of the source, substituting its size
725 // when it's not nul-terminated (as it's required to be) to avoid
726 // reading past its end.
727 StringRef Str;
728 if (!getConstantStringInfo(Src, Str, 0, /*TrimAtNul=*/false))
729 return nullptr;
730
731 uint64_t SrcLen = Str.find('\0');
732 // Set if the terminating nul should be copied by the call to memcpy
733 // below.
734 bool NulTerm = SrcLen < NBytes;
735
736 if (NulTerm)
737 // Overwrite NBytes with the number of bytes to copy, including
738 // the terminating nul.
739 NBytes = SrcLen + 1;
740 else {
741 // Set the length of the source for the function to return to its
742 // size, and cap NBytes at the same.
743 SrcLen = std::min(SrcLen, uint64_t(Str.size()));
744 NBytes = std::min(NBytes - 1, SrcLen);
745 }
746
747 if (SrcLen == 0) {
748 // Transform strlcpy(D, "", N) to (*D = '\0', 0).
749 B.CreateStore(B.getInt8(0), Dst);
750 return ConstantInt::get(CI->getType(), 0);
751 }
752
753 Function *Callee = CI->getCalledFunction();
754 Type *PT = Callee->getFunctionType()->getParamType(0);
755 // Transform strlcpy(D, S, N) to memcpy(D, S, N') where N' is the lower
756 // bound on strlen(S) + 1 and N, optionally followed by a nul store to
757 // D[N' - 1] if necessary.
758 CallInst *NewCI = B.CreateMemCpy(Dst, Align(1), Src, Align(1),
759 ConstantInt::get(DL.getIntPtrType(PT), NBytes));
760 NewCI->setAttributes(CI->getAttributes());
761 NewCI->removeRetAttrs(AttributeFuncs::typeIncompatible(NewCI->getType()));
762 copyFlags(*CI, NewCI);
763
764 if (!NulTerm) {
765 Value *EndOff = ConstantInt::get(CI->getType(), NBytes);
766 Value *EndPtr = B.CreateInBoundsGEP(B.getInt8Ty(), Dst, EndOff);
767 B.CreateStore(B.getInt8(0), EndPtr);
768 }
769
770 // Like snprintf, strlcpy returns the number of nonzero bytes that would
771 // have been copied if the bound had been sufficiently big (which in this
772 // case is strlen(Src)).
773 return ConstantInt::get(CI->getType(), SrcLen);
774}
775
776// Optimize a call to strncpy.
777
778Value *LibCallSimplifier::optimizeStrNCpy(CallInst *CI, IRBuilderBase &B) {
779 Function *Callee = CI->getCalledFunction();
780 Value *Dst = CI->getArgOperand(0);
781 Value *Src = CI->getArgOperand(1);
782 Value *Size = CI->getArgOperand(2);
783 annotateNonNullNoUndefBasedOnAccess(CI, 0);
784 if (isKnownNonZero(Size, DL))
785 annotateNonNullNoUndefBasedOnAccess(CI, 1);
786
787 uint64_t Len;
788 if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(Size))
789 Len = LengthArg->getZExtValue();
790 else
791 return nullptr;
792
793 // strncpy(x, y, 0) -> x
794 if (Len == 0)
795 return Dst;
796
797 // See if we can get the length of the input string.
798 uint64_t SrcLen = GetStringLength(Src);
799 if (SrcLen) {
800 annotateDereferenceableBytes(CI, 1, SrcLen);
801 --SrcLen; // Unbias length.
802 } else {
803 return nullptr;
804 }
805
806 if (SrcLen == 0) {
807 // strncpy(x, "", y) -> memset(x, '\0', y)
808 Align MemSetAlign =
809 CI->getAttributes().getParamAttrs(0).getAlignment().valueOrOne();
810 CallInst *NewCI = B.CreateMemSet(Dst, B.getInt8('\0'), Size, MemSetAlign);
811 AttrBuilder ArgAttrs(CI->getContext(), CI->getAttributes().getParamAttrs(0));
812 NewCI->setAttributes(NewCI->getAttributes().addParamAttributes(
813 CI->getContext(), 0, ArgAttrs));
814 copyFlags(*CI, NewCI);
815 return Dst;
816 }
817
818 // strncpy(a, "a", 4) -> memcpy(a, "a\0\0\0", 4)
819 if (Len > SrcLen + 1) {
820 if (Len <= 128) {
821 StringRef Str;
822 if (!getConstantStringInfo(Src, Str))
823 return nullptr;
824 std::string SrcStr = Str.str();
825 SrcStr.resize(Len, '\0');
826 Src = B.CreateGlobalString(SrcStr, "str");
827 } else {
828 return nullptr;
829 }
830 }
831
832 Type *PT = Callee->getFunctionType()->getParamType(0);
833 // strncpy(x, s, c) -> memcpy(align 1 x, align 1 s, c) [s and c are constant]
834 CallInst *NewCI = B.CreateMemCpy(Dst, Align(1), Src, Align(1),
835 ConstantInt::get(DL.getIntPtrType(PT), Len));
836 NewCI->setAttributes(CI->getAttributes());
837 NewCI->removeRetAttrs(AttributeFuncs::typeIncompatible(NewCI->getType()));
838 copyFlags(*CI, NewCI);
839 return Dst;
840}
841
842Value *LibCallSimplifier::optimizeStringLength(CallInst *CI, IRBuilderBase &B,
843 unsigned CharSize,
844 Value *Bound) {
845 Value *Src = CI->getArgOperand(0);
846 Type *CharTy = B.getIntNTy(CharSize);
847
848 if (isOnlyUsedInZeroEqualityComparison(CI) &&
849 (!Bound || isKnownNonZero(Bound, DL))) {
850 // Fold strlen:
851 // strlen(x) != 0 --> *x != 0
852 // strlen(x) == 0 --> *x == 0
853 // and likewise strnlen with constant N > 0:
854 // strnlen(x, N) != 0 --> *x != 0
855 // strnlen(x, N) == 0 --> *x == 0
856 return B.CreateZExt(B.CreateLoad(CharTy, Src, "char0"),
857 CI->getType());
858 }
859
860 if (Bound) {
861 if (ConstantInt *BoundCst = dyn_cast<ConstantInt>(Bound)) {
862 if (BoundCst->isZero())
863 // Fold strnlen(s, 0) -> 0 for any s, constant or otherwise.
864 return ConstantInt::get(CI->getType(), 0);
865
866 if (BoundCst->isOne()) {
867 // Fold strnlen(s, 1) -> *s ? 1 : 0 for any s.
868 Value *CharVal = B.CreateLoad(CharTy, Src, "strnlen.char0");
869 Value *ZeroChar = ConstantInt::get(CharTy, 0);
870 Value *Cmp = B.CreateICmpNE(CharVal, ZeroChar, "strnlen.char0cmp");
871 return B.CreateZExt(Cmp, CI->getType());
872 }
873 }
874 }
875
876 if (uint64_t Len = GetStringLength(Src, CharSize)) {
877 Value *LenC = ConstantInt::get(CI->getType(), Len - 1);
878 // Fold strlen("xyz") -> 3 and strnlen("xyz", 2) -> 2
879 // and strnlen("xyz", Bound) -> min(3, Bound) for nonconstant Bound.
880 if (Bound)
881 return B.CreateBinaryIntrinsic(Intrinsic::umin, LenC, Bound);
882 return LenC;
883 }
884
885 if (Bound)
886 // Punt for strnlen for now.
887 return nullptr;
888
889 // If s is a constant pointer pointing to a string literal, we can fold
890 // strlen(s + x) to strlen(s) - x, when x is known to be in the range
891 // [0, strlen(s)] or the string has a single null terminator '\0' at the end.
892 // We only try to simplify strlen when the pointer s points to an array
893 // of i8. Otherwise, we would need to scale the offset x before doing the
894 // subtraction. This will make the optimization more complex, and it's not
895 // very useful because calling strlen for a pointer of other types is
896 // very uncommon.
897 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Src)) {
898 // TODO: Handle subobjects.
899 if (!isGEPBasedOnPointerToString(GEP, CharSize))
900 return nullptr;
901
902 ConstantDataArraySlice Slice;
903 if (getConstantDataArrayInfo(GEP->getOperand(0), Slice, CharSize)) {
904 uint64_t NullTermIdx;
905 if (Slice.Array == nullptr) {
906 NullTermIdx = 0;
907 } else {
908 NullTermIdx = ~((uint64_t)0);
909 for (uint64_t I = 0, E = Slice.Length; I < E; ++I) {
910 if (Slice.Array->getElementAsInteger(I + Slice.Offset) == 0) {
911 NullTermIdx = I;
912 break;
913 }
914 }
915 // If the string does not have '\0', leave it to strlen to compute
916 // its length.
917 if (NullTermIdx == ~((uint64_t)0))
918 return nullptr;
919 }
920
921 Value *Offset = GEP->getOperand(2);
922 KnownBits Known = computeKnownBits(Offset, DL, 0, nullptr, CI, nullptr);
923 uint64_t ArrSize =
924 cast<ArrayType>(GEP->getSourceElementType())->getNumElements();
925
926 // If Offset is not provably in the range [0, NullTermIdx], we can still
927 // optimize if we can prove that the program has undefined behavior when
928 // Offset is outside that range. That is the case when GEP->getOperand(0)
929 // is a pointer to an object whose memory extent is NullTermIdx+1.
930 if ((Known.isNonNegative() && Known.getMaxValue().ule(NullTermIdx)) ||
931 (isa<GlobalVariable>(GEP->getOperand(0)) &&
932 NullTermIdx == ArrSize - 1)) {
933 Offset = B.CreateSExtOrTrunc(Offset, CI->getType());
934 return B.CreateSub(ConstantInt::get(CI->getType(), NullTermIdx),
935 Offset);
936 }
937 }
938 }
939
940 // strlen(x?"foo":"bars") --> x ? 3 : 4
941 if (SelectInst *SI = dyn_cast<SelectInst>(Src)) {
942 uint64_t LenTrue = GetStringLength(SI->getTrueValue(), CharSize);
943 uint64_t LenFalse = GetStringLength(SI->getFalseValue(), CharSize);
944 if (LenTrue && LenFalse) {
945 ORE.emit([&]() {
946 return OptimizationRemark("instcombine", "simplify-libcalls", CI)
947 << "folded strlen(select) to select of constants";
948 });
949 return B.CreateSelect(SI->getCondition(),
950 ConstantInt::get(CI->getType(), LenTrue - 1),
951 ConstantInt::get(CI->getType(), LenFalse - 1));
952 }
953 }
954
955 return nullptr;
956}
957
958Value *LibCallSimplifier::optimizeStrLen(CallInst *CI, IRBuilderBase &B) {
959 if (Value *V = optimizeStringLength(CI, B, 8))
960 return V;
961 annotateNonNullNoUndefBasedOnAccess(CI, 0);
962 return nullptr;
963}
964
965Value *LibCallSimplifier::optimizeStrNLen(CallInst *CI, IRBuilderBase &B) {
966 Value *Bound = CI->getArgOperand(1);
967 if (Value *V = optimizeStringLength(CI, B, 8, Bound))
968 return V;
969
970 if (isKnownNonZero(Bound, DL))
971 annotateNonNullNoUndefBasedOnAccess(CI, 0);
972 return nullptr;
973}
974
975Value *LibCallSimplifier::optimizeWcslen(CallInst *CI, IRBuilderBase &B) {
976 Module &M = *CI->getModule();
977 unsigned WCharSize = TLI->getWCharSize(M) * 8;
978 // We cannot perform this optimization without wchar_size metadata.
979 if (WCharSize == 0)
980 return nullptr;
981
982 return optimizeStringLength(CI, B, WCharSize);
983}
984
985Value *LibCallSimplifier::optimizeStrPBrk(CallInst *CI, IRBuilderBase &B) {
986 StringRef S1, S2;
987 bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
988 bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2);
989
990 // strpbrk(s, "") -> nullptr
991 // strpbrk("", s) -> nullptr
992 if ((HasS1 && S1.empty()) || (HasS2 && S2.empty()))
993 return Constant::getNullValue(CI->getType());
994
995 // Constant folding.
996 if (HasS1 && HasS2) {
997 size_t I = S1.find_first_of(S2);
998 if (I == StringRef::npos) // No match.
999 return Constant::getNullValue(CI->getType());
1000
1001 return B.CreateInBoundsGEP(B.getInt8Ty(), CI->getArgOperand(0),
1002 B.getInt64(I), "strpbrk");
1003 }
1004
1005 // strpbrk(s, "a") -> strchr(s, 'a')
1006 if (HasS2 && S2.size() == 1)
1007 return copyFlags(*CI, emitStrChr(CI->getArgOperand(0), S2[0], B, TLI));
1008
1009 return nullptr;
1010}
1011
1012Value *LibCallSimplifier::optimizeStrTo(CallInst *CI, IRBuilderBase &B) {
1013 Value *EndPtr = CI->getArgOperand(1);
1014 if (isa<ConstantPointerNull>(EndPtr)) {
1015 // With a null EndPtr, this function won't capture the main argument.
1016 // It would be readonly too, except that it still may write to errno.
1017 CI->addParamAttr(0, Attribute::NoCapture);
1018 }
1019
1020 return nullptr;
1021}
1022
1023Value *LibCallSimplifier::optimizeStrSpn(CallInst *CI, IRBuilderBase &B) {
1024 StringRef S1, S2;
1025 bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
1026 bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2);
1027
1028 // strspn(s, "") -> 0
1029 // strspn("", s) -> 0
1030 if ((HasS1 && S1.empty()) || (HasS2 && S2.empty()))
1031 return Constant::getNullValue(CI->getType());
1032
1033 // Constant folding.
1034 if (HasS1 && HasS2) {
1035 size_t Pos = S1.find_first_not_of(S2);
1036 if (Pos == StringRef::npos)
1037 Pos = S1.size();
1038 return ConstantInt::get(CI->getType(), Pos);
1039 }
1040
1041 return nullptr;
1042}
1043
1044Value *LibCallSimplifier::optimizeStrCSpn(CallInst *CI, IRBuilderBase &B) {
1045 StringRef S1, S2;
1046 bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
1047 bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2);
1048
1049 // strcspn("", s) -> 0
1050 if (HasS1 && S1.empty())
1051 return Constant::getNullValue(CI->getType());
1052
1053 // Constant folding.
1054 if (HasS1 && HasS2) {
1055 size_t Pos = S1.find_first_of(S2);
1056 if (Pos == StringRef::npos)
1057 Pos = S1.size();
1058 return ConstantInt::get(CI->getType(), Pos);
1059 }
1060
1061 // strcspn(s, "") -> strlen(s)
1062 if (HasS2 && S2.empty())
1063 return copyFlags(*CI, emitStrLen(CI->getArgOperand(0), B, DL, TLI));
1064
1065 return nullptr;
1066}
1067
1068Value *LibCallSimplifier::optimizeStrStr(CallInst *CI, IRBuilderBase &B) {
1069 // fold strstr(x, x) -> x.
1070 if (CI->getArgOperand(0) == CI->getArgOperand(1))
1071 return B.CreateBitCast(CI->getArgOperand(0), CI->getType());
1072
1073 // fold strstr(a, b) == a -> strncmp(a, b, strlen(b)) == 0
1074 if (isOnlyUsedInEqualityComparison(CI, CI->getArgOperand(0))) {
1075 Value *StrLen = emitStrLen(CI->getArgOperand(1), B, DL, TLI);
1076 if (!StrLen)
1077 return nullptr;
1078 Value *StrNCmp = emitStrNCmp(CI->getArgOperand(0), CI->getArgOperand(1),
1079 StrLen, B, DL, TLI);
1080 if (!StrNCmp)
1081 return nullptr;
1082 for (User *U : llvm::make_early_inc_range(CI->users())) {
1083 ICmpInst *Old = cast<ICmpInst>(U);
1084 Value *Cmp =
1085 B.CreateICmp(Old->getPredicate(), StrNCmp,
1086 ConstantInt::getNullValue(StrNCmp->getType()), "cmp");
1087 replaceAllUsesWith(Old, Cmp);
1088 }
1089 return CI;
1090 }
1091
1092 // See if either input string is a constant string.
1093 StringRef SearchStr, ToFindStr;
1094 bool HasStr1 = getConstantStringInfo(CI->getArgOperand(0), SearchStr);
1095 bool HasStr2 = getConstantStringInfo(CI->getArgOperand(1), ToFindStr);
1096
1097 // fold strstr(x, "") -> x.
1098 if (HasStr2 && ToFindStr.empty())
1099 return B.CreateBitCast(CI->getArgOperand(0), CI->getType());
1100
1101 // If both strings are known, constant fold it.
1102 if (HasStr1 && HasStr2) {
1103 size_t Offset = SearchStr.find(ToFindStr);
1104
1105 if (Offset == StringRef::npos) // strstr("foo", "bar") -> null
1106 return Constant::getNullValue(CI->getType());
1107
1108 // strstr("abcd", "bc") -> gep((char*)"abcd", 1)
1109 Value *Result = castToCStr(CI->getArgOperand(0), B);
1110 Result =
1111 B.CreateConstInBoundsGEP1_64(B.getInt8Ty(), Result, Offset, "strstr");
1112 return B.CreateBitCast(Result, CI->getType());
1113 }
1114
1115 // fold strstr(x, "y") -> strchr(x, 'y').
1116 if (HasStr2 && ToFindStr.size() == 1) {
1117 Value *StrChr = emitStrChr(CI->getArgOperand(0), ToFindStr[0], B, TLI);
1118 return StrChr ? B.CreateBitCast(StrChr, CI->getType()) : nullptr;
1119 }
1120
1121 annotateNonNullNoUndefBasedOnAccess(CI, {0, 1});
1122 return nullptr;
1123}
1124
1125Value *LibCallSimplifier::optimizeMemRChr(CallInst *CI, IRBuilderBase &B) {
1126 Value *SrcStr = CI->getArgOperand(0);
1127 Value *Size = CI->getArgOperand(2);
1128 annotateNonNullAndDereferenceable(CI, 0, Size, DL);
1129 Value *CharVal = CI->getArgOperand(1);
1130 ConstantInt *LenC = dyn_cast<ConstantInt>(Size);
1131 Value *NullPtr = Constant::getNullValue(CI->getType());
1132
1133 if (LenC) {
1134 if (LenC->isZero())
1135 // Fold memrchr(x, y, 0) --> null.
1136 return NullPtr;
1137
1138 if (LenC->isOne()) {
1139 // Fold memrchr(x, y, 1) --> *x == y ? x : null for any x and y,
1140 // constant or otherwise.
1141 Value *Val = B.CreateLoad(B.getInt8Ty(), SrcStr, "memrchr.char0");
1142 // Slice off the character's high end bits.
1143 CharVal = B.CreateTrunc(CharVal, B.getInt8Ty());
1144 Value *Cmp = B.CreateICmpEQ(Val, CharVal, "memrchr.char0cmp");
1145 return B.CreateSelect(Cmp, SrcStr, NullPtr, "memrchr.sel");
1146 }
1147 }
1148
1149 StringRef Str;
1150 if (!getConstantStringInfo(SrcStr, Str, 0, /*TrimAtNul=*/false))
1151 return nullptr;
1152
1153 if (Str.size() == 0)
1154 // If the array is empty fold memrchr(A, C, N) to null for any value
1155 // of C and N on the basis that the only valid value of N is zero
1156 // (otherwise the call is undefined).
1157 return NullPtr;
1158
1159 uint64_t EndOff = UINT64_MAX;
1160 if (LenC) {
1161 EndOff = LenC->getZExtValue();
1162 if (Str.size() < EndOff)
1163 // Punt out-of-bounds accesses to sanitizers and/or libc.
1164 return nullptr;
1165 }
1166
1167 if (ConstantInt *CharC = dyn_cast<ConstantInt>(CharVal)) {
1168 // Fold memrchr(S, C, N) for a constant C.
1169 size_t Pos = Str.rfind(CharC->getZExtValue(), EndOff);
1170 if (Pos == StringRef::npos)
1171 // When the character is not in the source array fold the result
1172 // to null regardless of Size.
1173 return NullPtr;
1174
1175 if (LenC)
1176 // Fold memrchr(s, c, N) --> s + Pos for constant N > Pos.
1177 return B.CreateInBoundsGEP(B.getInt8Ty(), SrcStr, B.getInt64(Pos));
1178
1179 if (Str.find(Str[Pos]) == Pos) {
1180 // When there is just a single occurrence of C in S, i.e., the one
1181 // in Str[Pos], fold
1182 // memrchr(s, c, N) --> N <= Pos ? null : s + Pos
1183 // for nonconstant N.
1184 Value *Cmp = B.CreateICmpULE(Size, ConstantInt::get(Size->getType(), Pos),
1185 "memrchr.cmp");
1186 Value *SrcPlus = B.CreateInBoundsGEP(B.getInt8Ty(), SrcStr,
1187 B.getInt64(Pos), "memrchr.ptr_plus");
1188 return B.CreateSelect(Cmp, NullPtr, SrcPlus, "memrchr.sel");
1189 }
1190 }
1191
1192 // Truncate the string to search at most EndOff characters.
1193 Str = Str.substr(0, EndOff);
1194 if (Str.find_first_not_of(Str[0]) != StringRef::npos)
1195 return nullptr;
1196
1197 // If the source array consists of all equal characters, then for any
1198 // C and N (whether in bounds or not), fold memrchr(S, C, N) to
1199 // N != 0 && *S == C ? S + N - 1 : null
1200 Type *SizeTy = Size->getType();
1201 Type *Int8Ty = B.getInt8Ty();
1202 Value *NNeZ = B.CreateICmpNE(Size, ConstantInt::get(SizeTy, 0));
1203 // Slice off the sought character's high end bits.
1204 CharVal = B.CreateTrunc(CharVal, Int8Ty);
1205 Value *CEqS0 = B.CreateICmpEQ(ConstantInt::get(Int8Ty, Str[0]), CharVal);
1206 Value *And = B.CreateLogicalAnd(NNeZ, CEqS0);
1207 Value *SizeM1 = B.CreateSub(Size, ConstantInt::get(SizeTy, 1));
1208 Value *SrcPlus =
1209 B.CreateInBoundsGEP(Int8Ty, SrcStr, SizeM1, "memrchr.ptr_plus");
1210 return B.CreateSelect(And, SrcPlus, NullPtr, "memrchr.sel");
1211}
1212
1213Value *LibCallSimplifier::optimizeMemChr(CallInst *CI, IRBuilderBase &B) {
1214 Value *SrcStr = CI->getArgOperand(0);
1215 Value *Size = CI->getArgOperand(2);
1216
1217 if (isKnownNonZero(Size, DL)) {
1218 annotateNonNullNoUndefBasedOnAccess(CI, 0);
1219 if (isOnlyUsedInEqualityComparison(CI, SrcStr))
1220 return memChrToCharCompare(CI, Size, B, DL);
1221 }
1222
1223 Value *CharVal = CI->getArgOperand(1);
1224 ConstantInt *CharC = dyn_cast<ConstantInt>(CharVal);
1225 ConstantInt *LenC = dyn_cast<ConstantInt>(Size);
1226 Value *NullPtr = Constant::getNullValue(CI->getType());
1227
1228 // memchr(x, y, 0) -> null
1229 if (LenC) {
1230 if (LenC->isZero())
1231 return NullPtr;
1232
1233 if (LenC->isOne()) {
1234 // Fold memchr(x, y, 1) --> *x == y ? x : null for any x and y,
1235 // constant or otherwise.
1236 Value *Val = B.CreateLoad(B.getInt8Ty(), SrcStr, "memchr.char0");
1237 // Slice off the character's high end bits.
1238 CharVal = B.CreateTrunc(CharVal, B.getInt8Ty());
1239 Value *Cmp = B.CreateICmpEQ(Val, CharVal, "memchr.char0cmp");
1240 return B.CreateSelect(Cmp, SrcStr, NullPtr, "memchr.sel");
1241 }
1242 }
1243
1244 StringRef Str;
1245 if (!getConstantStringInfo(SrcStr, Str, 0, /*TrimAtNul=*/false))
1246 return nullptr;
1247
1248 if (CharC) {
1249 size_t Pos = Str.find(CharC->getZExtValue());
1250 if (Pos == StringRef::npos)
1251 // When the character is not in the source array fold the result
1252 // to null regardless of Size.
1253 return NullPtr;
1254
1255 // Fold memchr(s, c, n) -> n <= Pos ? null : s + Pos
1256 // When the constant Size is less than or equal to the character
1257 // position also fold the result to null.
1258 Value *Cmp = B.CreateICmpULE(Size, ConstantInt::get(Size->getType(), Pos),
1259 "memchr.cmp");
1260 Value *SrcPlus = B.CreateInBoundsGEP(B.getInt8Ty(), SrcStr, B.getInt64(Pos),
1261 "memchr.ptr");
1262 return B.CreateSelect(Cmp, NullPtr, SrcPlus);
1263 }
1264
1265 if (Str.size() == 0)
1266 // If the array is empty fold memchr(A, C, N) to null for any value
1267 // of C and N on the basis that the only valid value of N is zero
1268 // (otherwise the call is undefined).
1269 return NullPtr;
1270
1271 if (LenC)
1272 Str = substr(Str, LenC->getZExtValue());
1273
1274 size_t Pos = Str.find_first_not_of(Str[0]);
1275 if (Pos == StringRef::npos
1276 || Str.find_first_not_of(Str[Pos], Pos) == StringRef::npos) {
1277 // If the source array consists of at most two consecutive sequences
1278 // of the same characters, then for any C and N (whether in bounds or
1279 // not), fold memchr(S, C, N) to
1280 // N != 0 && *S == C ? S : null
1281 // or for the two sequences to:
1282 // N != 0 && *S == C ? S : (N > Pos && S[Pos] == C ? S + Pos : null)
1283 // ^Sel2 ^Sel1 are denoted above.
1284 // The latter makes it also possible to fold strchr() calls with strings
1285 // of the same characters.
1286 Type *SizeTy = Size->getType();
1287 Type *Int8Ty = B.getInt8Ty();
1288
1289 // Slice off the sought character's high end bits.
1290 CharVal = B.CreateTrunc(CharVal, Int8Ty);
1291
1292 Value *Sel1 = NullPtr;
1293 if (Pos != StringRef::npos) {
1294 // Handle two consecutive sequences of the same characters.
1295 Value *PosVal = ConstantInt::get(SizeTy, Pos);
1296 Value *StrPos = ConstantInt::get(Int8Ty, Str[Pos]);
1297 Value *CEqSPos = B.CreateICmpEQ(CharVal, StrPos);
1298 Value *NGtPos = B.CreateICmp(ICmpInst::ICMP_UGT, Size, PosVal);
1299 Value *And = B.CreateAnd(CEqSPos, NGtPos);
1300 Value *SrcPlus = B.CreateInBoundsGEP(B.getInt8Ty(), SrcStr, PosVal);
1301 Sel1 = B.CreateSelect(And, SrcPlus, NullPtr, "memchr.sel1");
1302 }
1303
1304 Value *Str0 = ConstantInt::get(Int8Ty, Str[0]);
1305 Value *CEqS0 = B.CreateICmpEQ(Str0, CharVal);
1306 Value *NNeZ = B.CreateICmpNE(Size, ConstantInt::get(SizeTy, 0));
1307 Value *And = B.CreateAnd(NNeZ, CEqS0);
1308 return B.CreateSelect(And, SrcStr, Sel1, "memchr.sel2");
1309 }
1310
1311 if (!LenC) {
1312 if (isOnlyUsedInEqualityComparison(CI, SrcStr))
1313 // S is dereferenceable so it's safe to load from it and fold
1314 // memchr(S, C, N) == S to N && *S == C for any C and N.
1315 // TODO: This is safe even for nonconstant S.
1316 return memChrToCharCompare(CI, Size, B, DL);
1317
1318 // From now on we need a constant length and constant array.
1319 return nullptr;
1320 }
1321
1322 // If the char is variable but the input str and length are not we can turn
1323 // this memchr call into a simple bit field test. Of course this only works
1324 // when the return value is only checked against null.
1325 //
1326 // It would be really nice to reuse switch lowering here but we can't change
1327 // the CFG at this point.
1328 //
1329 // memchr("\r\n", C, 2) != nullptr -> (1 << C & ((1 << '\r') | (1 << '\n')))
1330 // != 0
1331 // after bounds check.
1332 if (Str.empty() || !isOnlyUsedInZeroEqualityComparison(CI))
1333 return nullptr;
1334
1335 unsigned char Max =
1336 *std::max_element(reinterpret_cast<const unsigned char *>(Str.begin()),
1337 reinterpret_cast<const unsigned char *>(Str.end()));
1338
1339 // Make sure the bit field we're about to create fits in a register on the
1340 // target.
1341 // FIXME: On a 64 bit architecture this prevents us from using the
1342 // interesting range of alpha ascii chars. We could do better by emitting
1343 // two bitfields or shifting the range by 64 if no lower chars are used.
1344 if (!DL.fitsInLegalInteger(Max + 1))
1345 return nullptr;
1346
1347 // For the bit field use a power-of-2 type with at least 8 bits to avoid
1348 // creating unnecessary illegal types.
1349 unsigned char Width = NextPowerOf2(std::max((unsigned char)7, Max));
1350
1351 // Now build the bit field.
1352 APInt Bitfield(Width, 0);
1353 for (char C : Str)
1354 Bitfield.setBit((unsigned char)C);
1355 Value *BitfieldC = B.getInt(Bitfield);
1356
1357 // Adjust width of "C" to the bitfield width, then mask off the high bits.
1358 Value *C = B.CreateZExtOrTrunc(CharVal, BitfieldC->getType());
1359 C = B.CreateAnd(C, B.getIntN(Width, 0xFF));
1360
1361 // First check that the bit field access is within bounds.
1362 Value *Bounds = B.CreateICmp(ICmpInst::ICMP_ULT, C, B.getIntN(Width, Width),
1363 "memchr.bounds");
1364
1365 // Create code that checks if the given bit is set in the field.
1366 Value *Shl = B.CreateShl(B.getIntN(Width, 1ULL), C);
1367 Value *Bits = B.CreateIsNotNull(B.CreateAnd(Shl, BitfieldC), "memchr.bits");
1368
1369 // Finally merge both checks and cast to pointer type. The inttoptr
1370 // implicitly zexts the i1 to intptr type.
1371 return B.CreateIntToPtr(B.CreateLogicalAnd(Bounds, Bits, "memchr"),
1372 CI->getType());
1373}
1374
1375// Optimize a memcmp or, when StrNCmp is true, strncmp call CI with constant
1376// arrays LHS and RHS and nonconstant Size.
1377static Value *optimizeMemCmpVarSize(CallInst *CI, Value *LHS, Value *RHS,
1378 Value *Size, bool StrNCmp,
1379 IRBuilderBase &B, const DataLayout &DL) {
1380 if (LHS == RHS) // memcmp(s,s,x) -> 0
1381 return Constant::getNullValue(CI->getType());
1382
1383 StringRef LStr, RStr;
1384 if (!getConstantStringInfo(LHS, LStr, 0, /*TrimAtNul=*/false) ||
1385 !getConstantStringInfo(RHS, RStr, 0, /*TrimAtNul=*/false))
1386 return nullptr;
1387
1388 // If the contents of both constant arrays are known, fold a call to
1389 // memcmp(A, B, N) to
1390 // N <= Pos ? 0 : (A < B ? -1 : B < A ? +1 : 0)
1391 // where Pos is the first mismatch between A and B, determined below.
1392
1393 uint64_t Pos = 0;
1394 Value *Zero = ConstantInt::get(CI->getType(), 0);
1395 for (uint64_t MinSize = std::min(LStr.size(), RStr.size()); ; ++Pos) {
1396 if (Pos == MinSize ||
1397 (StrNCmp && (LStr[Pos] == '\0' && RStr[Pos] == '\0'))) {
1398 // One array is a leading part of the other of equal or greater
1399 // size, or for strncmp, the arrays are equal strings.
1400 // Fold the result to zero. Size is assumed to be in bounds, since
1401 // otherwise the call would be undefined.
1402 return Zero;
1403 }
1404
1405 if (LStr[Pos] != RStr[Pos])
1406 break;
1407 }
1408
1409 // Normalize the result.
1410 typedef unsigned char UChar;
1411 int IRes = UChar(LStr[Pos]) < UChar(RStr[Pos]) ? -1 : 1;
1412 Value *MaxSize = ConstantInt::get(Size->getType(), Pos);
1413 Value *Cmp = B.CreateICmp(ICmpInst::ICMP_ULE, Size, MaxSize);
1414 Value *Res = ConstantInt::get(CI->getType(), IRes);
1415 return B.CreateSelect(Cmp, Zero, Res);
1416}
1417
1418// Optimize a memcmp call CI with constant size Len.
1419static Value *optimizeMemCmpConstantSize(CallInst *CI, Value *LHS, Value *RHS,
1420 uint64_t Len, IRBuilderBase &B,
1421 const DataLayout &DL) {
1422 if (Len == 0) // memcmp(s1,s2,0) -> 0
1423 return Constant::getNullValue(CI->getType());
1424
1425 // memcmp(S1,S2,1) -> *(unsigned char*)LHS - *(unsigned char*)RHS
1426 if (Len == 1) {
1427 Value *LHSV =
1428 B.CreateZExt(B.CreateLoad(B.getInt8Ty(), castToCStr(LHS, B), "lhsc"),
1429 CI->getType(), "lhsv");
1430 Value *RHSV =
1431 B.CreateZExt(B.CreateLoad(B.getInt8Ty(), castToCStr(RHS, B), "rhsc"),
1432 CI->getType(), "rhsv");
1433 return B.CreateSub(LHSV, RHSV, "chardiff");
1434 }
1435
1436 // memcmp(S1,S2,N/8)==0 -> (*(intN_t*)S1 != *(intN_t*)S2)==0
1437 // TODO: The case where both inputs are constants does not need to be limited
1438 // to legal integers or equality comparison. See block below this.
1439 if (DL.isLegalInteger(Len * 8) && isOnlyUsedInZeroEqualityComparison(CI)) {
1440 IntegerType *IntType = IntegerType::get(CI->getContext(), Len * 8);
1441 unsigned PrefAlignment = DL.getPrefTypeAlignment(IntType);
1442
1443 // First, see if we can fold either argument to a constant.
1444 Value *LHSV = nullptr;
1445 if (auto *LHSC = dyn_cast<Constant>(LHS)) {
1446 LHSC = ConstantExpr::getBitCast(LHSC, IntType->getPointerTo());
1447 LHSV = ConstantFoldLoadFromConstPtr(LHSC, IntType, DL);
1448 }
1449 Value *RHSV = nullptr;
1450 if (auto *RHSC = dyn_cast<Constant>(RHS)) {
1451 RHSC = ConstantExpr::getBitCast(RHSC, IntType->getPointerTo());
1452 RHSV = ConstantFoldLoadFromConstPtr(RHSC, IntType, DL);
1453 }
1454
1455 // Don't generate unaligned loads. If either source is constant data,
1456 // alignment doesn't matter for that source because there is no load.
1457 if ((LHSV || getKnownAlignment(LHS, DL, CI) >= PrefAlignment) &&
1458 (RHSV || getKnownAlignment(RHS, DL, CI) >= PrefAlignment)) {
1459 if (!LHSV) {
1460 Type *LHSPtrTy =
1461 IntType->getPointerTo(LHS->getType()->getPointerAddressSpace());
1462 LHSV = B.CreateLoad(IntType, B.CreateBitCast(LHS, LHSPtrTy), "lhsv");
1463 }
1464 if (!RHSV) {
1465 Type *RHSPtrTy =
1466 IntType->getPointerTo(RHS->getType()->getPointerAddressSpace());
1467 RHSV = B.CreateLoad(IntType, B.CreateBitCast(RHS, RHSPtrTy), "rhsv");
1468 }
1469 return B.CreateZExt(B.CreateICmpNE(LHSV, RHSV), CI->getType(), "memcmp");
1470 }
1471 }
1472
1473 return nullptr;
1474}
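
For the equality-only case above, the whole comparison collapses to a single wide integer compare. A hedged source-level sketch (the helper name is made up; memcpy models the aligned load the pass emits, assuming an 8-byte legal integer type):

#include <cassert>
#include <cstdint>
#include <cstring>

// memcmp(a, b, 8) == 0 folded to one 8-byte integer equality; memcpy models
// the load without running into strict-aliasing trouble in source code.
static bool equal8(const void *A, const void *B) {
  uint64_t X, Y;
  std::memcpy(&X, A, 8);
  std::memcpy(&Y, B, 8);
  return X == Y;
}

int main() {
  alignas(8) char A[8] = "1234567";
  alignas(8) char B[8] = "1234567";
  assert(equal8(A, B) == (std::memcmp(A, B, 8) == 0));
  B[5] = 'X';
  assert(equal8(A, B) == (std::memcmp(A, B, 8) == 0));
}
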
1475
1476// Most simplifications for memcmp also apply to bcmp.
1477Value *LibCallSimplifier::optimizeMemCmpBCmpCommon(CallInst *CI,
1478 IRBuilderBase &B) {
1479 Value *LHS = CI->getArgOperand(0), *RHS = CI->getArgOperand(1);
1480 Value *Size = CI->getArgOperand(2);
1481
1482 annotateNonNullAndDereferenceable(CI, {0, 1}, Size, DL);
1483
1484 if (Value *Res = optimizeMemCmpVarSize(CI, LHS, RHS, Size, false, B, DL))
1485 return Res;
1486
1487 // Handle constant Size.
1488 ConstantInt *LenC = dyn_cast<ConstantInt>(Size);
1489 if (!LenC)
1490 return nullptr;
1491
1492 return optimizeMemCmpConstantSize(CI, LHS, RHS, LenC->getZExtValue(), B, DL);
1493}
1494
1495Value *LibCallSimplifier::optimizeMemCmp(CallInst *CI, IRBuilderBase &B) {
1496 Module *M = CI->getModule();
1497 if (Value *V = optimizeMemCmpBCmpCommon(CI, B))
1498 return V;
1499
1500 // memcmp(x, y, Len) == 0 -> bcmp(x, y, Len) == 0
1501 // bcmp can be more efficient than memcmp because it only has to know that
1502  // there is a difference, not how different one is from the other.
1503 if (isLibFuncEmittable(M, TLI, LibFunc_bcmp) &&
1504 isOnlyUsedInZeroEqualityComparison(CI)) {
1505 Value *LHS = CI->getArgOperand(0);
1506 Value *RHS = CI->getArgOperand(1);
1507 Value *Size = CI->getArgOperand(2);
1508 return copyFlags(*CI, emitBCmp(LHS, RHS, Size, B, DL, TLI));
1509 }
1510
1511 return nullptr;
1512}
1513
1514Value *LibCallSimplifier::optimizeBCmp(CallInst *CI, IRBuilderBase &B) {
1515 return optimizeMemCmpBCmpCommon(CI, B);
1516}
1517
1518Value *LibCallSimplifier::optimizeMemCpy(CallInst *CI, IRBuilderBase &B) {
1519 Value *Size = CI->getArgOperand(2);
1520 annotateNonNullAndDereferenceable(CI, {0, 1}, Size, DL);
1521 if (isa<IntrinsicInst>(CI))
1522 return nullptr;
1523
1524 // memcpy(x, y, n) -> llvm.memcpy(align 1 x, align 1 y, n)
1525 CallInst *NewCI = B.CreateMemCpy(CI->getArgOperand(0), Align(1),
1526 CI->getArgOperand(1), Align(1), Size);
1527 NewCI->setAttributes(CI->getAttributes());
1528 NewCI->removeRetAttrs(AttributeFuncs::typeIncompatible(NewCI->getType()));
1529 copyFlags(*CI, NewCI);
1530 return CI->getArgOperand(0);
1531}
1532
1533Value *LibCallSimplifier::optimizeMemCCpy(CallInst *CI, IRBuilderBase &B) {
1534 Value *Dst = CI->getArgOperand(0);
1535 Value *Src = CI->getArgOperand(1);
1536 ConstantInt *StopChar = dyn_cast<ConstantInt>(CI->getArgOperand(2));
1537 ConstantInt *N = dyn_cast<ConstantInt>(CI->getArgOperand(3));
1538 StringRef SrcStr;
1539 if (CI->use_empty() && Dst == Src)
1540 return Dst;
1541 // memccpy(d, s, c, 0) -> nullptr
1542 if (N) {
1543 if (N->isNullValue())
1544 return Constant::getNullValue(CI->getType());
1545 if (!getConstantStringInfo(Src, SrcStr, /*Offset=*/0,
1546 /*TrimAtNul=*/false) ||
1547 // TODO: Handle zeroinitializer.
1548 !StopChar)
1549 return nullptr;
1550 } else {
1551 return nullptr;
1552 }
1553
1554  // Truncate arg 'c' of type int to its low byte (char).
1555 size_t Pos = SrcStr.find(StopChar->getSExtValue() & 0xFF);
1556 if (Pos == StringRef::npos) {
1557 if (N->getZExtValue() <= SrcStr.size()) {
1558 copyFlags(*CI, B.CreateMemCpy(Dst, Align(1), Src, Align(1),
1559 CI->getArgOperand(3)));
1560 return Constant::getNullValue(CI->getType());
1561 }
1562 return nullptr;
1563 }
1564
1565 Value *NewN =
1566 ConstantInt::get(N->getType(), std::min(uint64_t(Pos + 1), N->getZExtValue()));
1567 // memccpy -> llvm.memcpy
1568 copyFlags(*CI, B.CreateMemCpy(Dst, Align(1), Src, Align(1), NewN));
1569 return Pos + 1 <= N->getZExtValue()
1570 ? B.CreateInBoundsGEP(B.getInt8Ty(), Dst, NewN)
1571 : Constant::getNullValue(CI->getType());
1572}
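
The std::min(Pos + 1, N) length above mirrors memccpy's contract. A hedged sketch of that contract, assuming a POSIX libc that provides memccpy (buffer names are mine):

#include <cassert>
#include <string.h>

int main() {
  const char Src[] = "hello:world";
  char Dst[16] = {};
  // The stop character ':' sits at Pos = 5, so min(Pos + 1, n) = 6 bytes are
  // copied and the call returns a pointer just past it in the destination.
  void *End = memccpy(Dst, Src, ':', sizeof Dst);
  assert(End == Dst + 6);
  assert(memcmp(Dst, "hello:", 6) == 0);

  // If the stop character is absent within n bytes, all n bytes are copied
  // and the result is a null pointer, matching the Pos == npos path above.
  char Dst2[4] = {};
  assert(memccpy(Dst2, Src, 'z', 4) == nullptr);
}
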
1573
1574Value *LibCallSimplifier::optimizeMemPCpy(CallInst *CI, IRBuilderBase &B) {
1575 Value *Dst = CI->getArgOperand(0);
1576 Value *N = CI->getArgOperand(2);
1577 // mempcpy(x, y, n) -> llvm.memcpy(align 1 x, align 1 y, n), x + n
1578 CallInst *NewCI =
1579 B.CreateMemCpy(Dst, Align(1), CI->getArgOperand(1), Align(1), N);
1580 // Propagate attributes, but memcpy has no return value, so make sure that
1581  // any return attributes are compatible with the new return type.
1582 // TODO: Attach return value attributes to the 1st operand to preserve them?
1583 NewCI->setAttributes(CI->getAttributes());
1584 NewCI->removeRetAttrs(AttributeFuncs::typeIncompatible(NewCI->getType()));
1585 copyFlags(*CI, NewCI);
1586 return B.CreateInBoundsGEP(B.getInt8Ty(), Dst, N);
1587}
1588
1589Value *LibCallSimplifier::optimizeMemMove(CallInst *CI, IRBuilderBase &B) {
1590 Value *Size = CI->getArgOperand(2);
1591 annotateNonNullAndDereferenceable(CI, {0, 1}, Size, DL);
1592 if (isa<IntrinsicInst>(CI))
1593 return nullptr;
1594
1595 // memmove(x, y, n) -> llvm.memmove(align 1 x, align 1 y, n)
1596 CallInst *NewCI = B.CreateMemMove(CI->getArgOperand(0), Align(1),
1597 CI->getArgOperand(1), Align(1), Size);
1598 NewCI->setAttributes(CI->getAttributes());
1599 NewCI->removeRetAttrs(AttributeFuncs::typeIncompatible(NewCI->getType()));
1600 copyFlags(*CI, NewCI);
1601 return CI->getArgOperand(0);
1602}
1603
1604Value *LibCallSimplifier::optimizeMemSet(CallInst *CI, IRBuilderBase &B) {
1605 Value *Size = CI->getArgOperand(2);
1606 annotateNonNullAndDereferenceable(CI, 0, Size, DL);
1607 if (isa<IntrinsicInst>(CI))
1608 return nullptr;
1609
1610 // memset(p, v, n) -> llvm.memset(align 1 p, v, n)
1611 Value *Val = B.CreateIntCast(CI->getArgOperand(1), B.getInt8Ty(), false);
1612 CallInst *NewCI = B.CreateMemSet(CI->getArgOperand(0), Val, Size, Align(1));
1613 NewCI->setAttributes(CI->getAttributes());
1614 NewCI->removeRetAttrs(AttributeFuncs::typeIncompatible(NewCI->getType()));
1615 copyFlags(*CI, NewCI);
1616 return CI->getArgOperand(0);
1617}
1618
1619Value *LibCallSimplifier::optimizeRealloc(CallInst *CI, IRBuilderBase &B) {
1620 if (isa<ConstantPointerNull>(CI->getArgOperand(0)))
1621 return copyFlags(*CI, emitMalloc(CI->getArgOperand(1), B, DL, TLI));
1622
1623 return nullptr;
1624}
1625
1626//===----------------------------------------------------------------------===//
1627// Math Library Optimizations
1628//===----------------------------------------------------------------------===//
1629
1630// Replace a libcall \p CI with a call to intrinsic \p IID
1631static Value *replaceUnaryCall(CallInst *CI, IRBuilderBase &B,
1632 Intrinsic::ID IID) {
1633 // Propagate fast-math flags from the existing call to the new call.
1634 IRBuilderBase::FastMathFlagGuard Guard(B);
1635 B.setFastMathFlags(CI->getFastMathFlags());
1636
1637 Module *M = CI->getModule();
1638 Value *V = CI->getArgOperand(0);
1639 Function *F = Intrinsic::getDeclaration(M, IID, CI->getType());
1640 CallInst *NewCall = B.CreateCall(F, V);
1641 NewCall->takeName(CI);
1642 return copyFlags(*CI, NewCall);
1643}
1644
1645/// Return a variant of Val with float type.
1646/// Currently this works in two cases: If Val is an FPExtension of a float
1647/// value to something bigger, simply return the operand.
1648/// If Val is a ConstantFP but can be converted to a float ConstantFP without
1649 /// loss of precision, do so.
1650static Value *valueHasFloatPrecision(Value *Val) {
1651 if (FPExtInst *Cast = dyn_cast<FPExtInst>(Val)) {
1652 Value *Op = Cast->getOperand(0);
1653 if (Op->getType()->isFloatTy())
1654 return Op;
1655 }
1656 if (ConstantFP *Const = dyn_cast<ConstantFP>(Val)) {
1657 APFloat F = Const->getValueAPF();
1658 bool losesInfo;
1659 (void)F.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
1660 &losesInfo);
1661 if (!losesInfo)
1662 return ConstantFP::get(Const->getContext(), F);
1663 }
1664 return nullptr;
1665}
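
The ConstantFP case above only fires when narrowing is exact. One hedged way to picture that test at the source level (this is not the APFloat API, just the equivalent round-trip check):

#include <cassert>

// A double can be narrowed to float without losing information exactly when
// converting back to double reproduces the original value.
static bool fitsInFloat(double D) {
  return static_cast<double>(static_cast<float>(D)) == D;
}

int main() {
  assert(fitsInFloat(0.5));   // exactly representable in float
  assert(fitsInFloat(1.0));
  assert(!fitsInFloat(0.1));  // 0.1 rounds differently in float and double
}
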
1666
1667/// Shrink double -> float functions.
1668static Value *optimizeDoubleFP(CallInst *CI, IRBuilderBase &B,
1669 bool isBinary, const TargetLibraryInfo *TLI,
1670 bool isPrecise = false) {
1671 Function *CalleeFn = CI->getCalledFunction();
1672 if (!CI->getType()->isDoubleTy() || !CalleeFn)
1673 return nullptr;
1674
1675 // If not all the uses of the function are converted to float, then bail out.
1676 // This matters if the precision of the result is more important than the
1677 // precision of the arguments.
1678 if (isPrecise)
1679 for (User *U : CI->users()) {
1680 FPTruncInst *Cast = dyn_cast<FPTruncInst>(U);
1681 if (!Cast || !Cast->getType()->isFloatTy())
1682 return nullptr;
1683 }
1684
1685 // If this is something like 'g((double) float)', convert to 'gf(float)'.
1686 Value *V[2];
1687 V[0] = valueHasFloatPrecision(CI->getArgOperand(0));
1688 V[1] = isBinary ? valueHasFloatPrecision(CI->getArgOperand(1)) : nullptr;
1689 if (!V[0] || (isBinary && !V[1]))
1690 return nullptr;
1691
1692 // If call isn't an intrinsic, check that it isn't within a function with the
1693 // same name as the float version of this call, otherwise the result is an
1694 // infinite loop. For example, from MinGW-w64:
1695 //
1696 // float expf(float val) { return (float) exp((double) val); }
1697 StringRef CalleeName = CalleeFn->getName();
1698 bool IsIntrinsic = CalleeFn->isIntrinsic();
1699 if (!IsIntrinsic) {
1700 StringRef CallerName = CI->getFunction()->getName();
1701 if (!CallerName.empty() && CallerName.back() == 'f' &&
1702 CallerName.size() == (CalleeName.size() + 1) &&
1703 CallerName.startswith(CalleeName))
1704 return nullptr;
1705 }
1706
1707 // Propagate the math semantics from the current function to the new function.
1708 IRBuilderBase::FastMathFlagGuard Guard(B);
1709 B.setFastMathFlags(CI->getFastMathFlags());
1710
1711 // g((double) float) -> (double) gf(float)
1712 Value *R;
1713 if (IsIntrinsic) {
1714 Module *M = CI->getModule();
1715 Intrinsic::ID IID = CalleeFn->getIntrinsicID();
1716 Function *Fn = Intrinsic::getDeclaration(M, IID, B.getFloatTy());
1717 R = isBinary ? B.CreateCall(Fn, V) : B.CreateCall(Fn, V[0]);
1718 } else {
1719 AttributeList CalleeAttrs = CalleeFn->getAttributes();
1720 R = isBinary ? emitBinaryFloatFnCall(V[0], V[1], TLI, CalleeName, B,
1721 CalleeAttrs)
1722 : emitUnaryFloatFnCall(V[0], TLI, CalleeName, B, CalleeAttrs);
1723 }
1724 return B.CreateFPExt(R, B.getDoubleTy());
1725}
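
In source terms the shrink performed by this helper looks like the following hedged sketch (the pass rewrites IR, not C++; isPrecise exists because the two forms can differ in the last bit):

#include <cmath>
#include <cstdio>

// Before: the float argument is widened, the double routine is called, and
// only the narrowed result is ever used.
static float before(float X) {
  return static_cast<float>(std::cos(static_cast<double>(X)));
}

// After: g((double) float) -> (double) gf(float); once the fpext/fptrunc pair
// cancels, only the float routine remains.
static float after(float X) {
  return std::cos(X); // overload resolution picks the float version here
}

int main() {
  // The two forms agree up to (at most) one unit in the last place.
  std::printf("%.9g %.9g\n", before(0.5f), after(0.5f));
}
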
1726
1727/// Shrink double -> float for unary functions.
1728static Value *optimizeUnaryDoubleFP(CallInst *CI, IRBuilderBase &B,
1729 const TargetLibraryInfo *TLI,
1730 bool isPrecise = false) {
1731 return optimizeDoubleFP(CI, B, false, TLI, isPrecise);
1732}
1733
1734/// Shrink double -> float for binary functions.
1735static Value *optimizeBinaryDoubleFP(CallInst *CI, IRBuilderBase &B,
1736 const TargetLibraryInfo *TLI,
1737 bool isPrecise = false) {
1738 return optimizeDoubleFP(CI, B, true, TLI, isPrecise);
1739}
1740
1741// cabs(z) -> sqrt((creal(z)*creal(z)) + (cimag(z)*cimag(z)))
1742Value *LibCallSimplifier::optimizeCAbs(CallInst *CI, IRBuilderBase &B) {
1743 if (!CI->isFast())
1744 return nullptr;
1745
1746 // Propagate fast-math flags from the existing call to new instructions.
1747 IRBuilderBase::FastMathFlagGuard Guard(B);
1748 B.setFastMathFlags(CI->getFastMathFlags());
1749
1750 Value *Real, *Imag;
1751 if (CI->arg_size() == 1) {
1752 Value *Op = CI->getArgOperand(0);
1753    assert(Op->getType()->isArrayTy() && "Unexpected signature for cabs!");
1754 Real = B.CreateExtractValue(Op, 0, "real");
1755 Imag = B.CreateExtractValue(Op, 1, "imag");
1756 } else {
1757    assert(CI->arg_size() == 2 && "Unexpected signature for cabs!");
1758 Real = CI->getArgOperand(0);
1759 Imag = CI->getArgOperand(1);
1760 }
1761
1762 Value *RealReal = B.CreateFMul(Real, Real);
1763 Value *ImagImag = B.CreateFMul(Imag, Imag);
1764
1765 Function *FSqrt = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::sqrt,
1766 CI->getType());
1767 return copyFlags(
1768 *CI, B.CreateCall(FSqrt, B.CreateFAdd(RealReal, ImagImag), "cabs"));
1769}
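
A hedged numeric sketch of this expansion (the helper name is mine). Note that a real cabs/hypot also avoids overflow in Re*Re + Im*Im, which is part of why the fold requires fully fast math:

#include <cassert>
#include <cmath>
#include <complex>

// cabs(z) -> sqrt(creal(z)*creal(z) + cimag(z)*cimag(z))
static double fastCAbs(double Re, double Im) {
  return std::sqrt(Re * Re + Im * Im);
}

int main() {
  std::complex<double> Z(3.0, 4.0);
  assert(fastCAbs(Z.real(), Z.imag()) == 5.0); // sqrt(25.0) is exact
  assert(std::abs(std::abs(Z) - 5.0) < 1e-12); // matches the library cabs
}
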
1770
1771static Value *optimizeTrigReflections(CallInst *Call, LibFunc Func,
1772 IRBuilderBase &B) {
1773 if (!isa<FPMathOperator>(Call))
1774 return nullptr;
1775
1776 IRBuilderBase::FastMathFlagGuard Guard(B);
1777 B.setFastMathFlags(Call->getFastMathFlags());
1778
1779 // TODO: Can this be shared to also handle LLVM intrinsics?
1780 Value *X;
1781 switch (Func) {
1782 case LibFunc_sin:
1783 case LibFunc_sinf:
1784 case LibFunc_sinl:
1785 case LibFunc_tan:
1786 case LibFunc_tanf:
1787 case LibFunc_tanl:
1788 // sin(-X) --> -sin(X)
1789 // tan(-X) --> -tan(X)
1790 if (match(Call->getArgOperand(0), m_OneUse(m_FNeg(m_Value(X)))))
1791 return B.CreateFNeg(
1792 copyFlags(*Call, B.CreateCall(Call->getCalledFunction(), X)));
1793 break;
1794 case LibFunc_cos:
1795 case LibFunc_cosf:
1796 case LibFunc_cosl:
1797 // cos(-X) --> cos(X)
1798 if (match(Call->getArgOperand(0), m_FNeg(m_Value(X))))
1799 return copyFlags(*Call,
1800 B.CreateCall(Call->getCalledFunction(), X, "cos"));
1801 break;
1802 default:
1803 break;
1804 }
1805 return nullptr;
1806}
1807
1808// Return a properly extended integer (DstWidth bits wide) if the operation is
1809// an itofp.
1810static Value *getIntToFPVal(Value *I2F, IRBuilderBase &B, unsigned DstWidth) {
1811 if (isa<SIToFPInst>(I2F) || isa<UIToFPInst>(I2F)) {
1812 Value *Op = cast<Instruction>(I2F)->getOperand(0);
1813 // Make sure that the exponent fits inside an "int" of size DstWidth,
1814    // thus avoiding range issues that would not arise for the FP value.
1815 unsigned BitWidth = Op->getType()->getPrimitiveSizeInBits();
1816 if (BitWidth < DstWidth ||
1817 (BitWidth == DstWidth && isa<SIToFPInst>(I2F)))
1818 return isa<SIToFPInst>(I2F) ? B.CreateSExt(Op, B.getIntNTy(DstWidth))
1819 : B.CreateZExt(Op, B.getIntNTy(DstWidth));
1820 }
1821
1822 return nullptr;
1823}
1824
1825/// Use exp{,2}(x * y) for pow(exp{,2}(x), y);
1826/// ldexp(1.0, x) for pow(2.0, itofp(x)); exp2(n * x) for pow(2.0 ** n, x);
1827/// exp10(x) for pow(10.0, x); exp2(log2(n) * x) for pow(n, x).
1828Value *LibCallSimplifier::replacePowWithExp(CallInst *Pow, IRBuilderBase &B) {
1829 Module *M = Pow->getModule();
1830 Value *Base = Pow->getArgOperand(0), *Expo = Pow->getArgOperand(1);
1831 AttributeList Attrs; // Attributes are only meaningful on the original call
1832 Module *Mod = Pow->getModule();
1833 Type *Ty = Pow->getType();
1834 bool Ignored;
1835
1836 // Evaluate special cases related to a nested function as the base.
1837
1838 // pow(exp(x), y) -> exp(x * y)
1839 // pow(exp2(x), y) -> exp2(x * y)
1840 // If exp{,2}() is used only once, it is better to fold two transcendental
1841 // math functions into one. If used again, exp{,2}() would still have to be
1842 // called with the original argument, then keep both original transcendental
1843 // functions. However, this transformation is only safe with fully relaxed
1844 // math semantics, since, besides rounding differences, it changes overflow
1845 // and underflow behavior quite dramatically. For example:
1846 // pow(exp(1000), 0.001) = pow(inf, 0.001) = inf
1847 // Whereas:
1848 // exp(1000 * 0.001) = exp(1)
1849 // TODO: Loosen the requirement for fully relaxed math semantics.
1850 // TODO: Handle exp10() when more targets have it available.
1851 CallInst *BaseFn = dyn_cast<CallInst>(Base);
1852 if (BaseFn && BaseFn->hasOneUse() && BaseFn->isFast() && Pow->isFast()) {
1853 LibFunc LibFn;
1854
1855 Function *CalleeFn = BaseFn->getCalledFunction();
1856 if (CalleeFn &&
1857 TLI->getLibFunc(CalleeFn->getName(), LibFn) &&
1858 isLibFuncEmittable(M, TLI, LibFn)) {
1859 StringRef ExpName;
1860 Intrinsic::ID ID;
1861 Value *ExpFn;
1862 LibFunc LibFnFloat, LibFnDouble, LibFnLongDouble;
1863
1864 switch (LibFn) {
1865 default:
1866 return nullptr;
1867 case LibFunc_expf: case LibFunc_exp: case LibFunc_expl:
1868 ExpName = TLI->getName(LibFunc_exp);
1869 ID = Intrinsic::exp;
1870 LibFnFloat = LibFunc_expf;
1871 LibFnDouble = LibFunc_exp;
1872 LibFnLongDouble = LibFunc_expl;
1873 break;
1874 case LibFunc_exp2f: case LibFunc_exp2: case LibFunc_exp2l:
1875 ExpName = TLI->getName(LibFunc_exp2);
1876 ID = Intrinsic::exp2;
1877 LibFnFloat = LibFunc_exp2f;
1878 LibFnDouble = LibFunc_exp2;
1879 LibFnLongDouble = LibFunc_exp2l;
1880 break;
1881 }
1882
1883 // Create new exp{,2}() with the product as its argument.
1884 Value *FMul = B.CreateFMul(BaseFn->getArgOperand(0), Expo, "mul");
1885 ExpFn = BaseFn->doesNotAccessMemory()
1886 ? B.CreateCall(Intrinsic::getDeclaration(Mod, ID, Ty),
1887 FMul, ExpName)
1888 : emitUnaryFloatFnCall(FMul, TLI, LibFnDouble, LibFnFloat,
1889 LibFnLongDouble, B,
1890 BaseFn->getAttributes());
1891
1892 // Since the new exp{,2}() is different from the original one, dead code
1893 // elimination cannot be trusted to remove it, since it may have side
1894 // effects (e.g., errno). When the only consumer for the original
1895 // exp{,2}() is pow(), then it has to be explicitly erased.
1896 substituteInParent(BaseFn, ExpFn);
1897 return ExpFn;
1898 }
1899 }
1900
1901 // Evaluate special cases related to a constant base.
1902
1903 const APFloat *BaseF;
1904 if (!match(Pow->getArgOperand(0), m_APFloat(BaseF)))
1905 return nullptr;
1906
1907 // pow(2.0, itofp(x)) -> ldexp(1.0, x)
1908 if (match(Base, m_SpecificFP(2.0)) &&
1909 (isa<SIToFPInst>(Expo) || isa<UIToFPInst>(Expo)) &&
1910 hasFloatFn(M, TLI, Ty, LibFunc_ldexp, LibFunc_ldexpf, LibFunc_ldexpl)) {
1911 if (Value *ExpoI = getIntToFPVal(Expo, B, TLI->getIntSize()))
1912 return copyFlags(*Pow,
1913 emitBinaryFloatFnCall(ConstantFP::get(Ty, 1.0), ExpoI,
1914 TLI, LibFunc_ldexp, LibFunc_ldexpf,
1915 LibFunc_ldexpl, B, Attrs));
1916 }
1917
1918 // pow(2.0 ** n, x) -> exp2(n * x)
1919 if (hasFloatFn(M, TLI, Ty, LibFunc_exp2, LibFunc_exp2f, LibFunc_exp2l)) {
1920 APFloat BaseR = APFloat(1.0);
1921 BaseR.convert(BaseF->getSemantics(), APFloat::rmTowardZero, &Ignored);
1922 BaseR = BaseR / *BaseF;
1923 bool IsInteger = BaseF->isInteger(), IsReciprocal = BaseR.isInteger();
1924 const APFloat *NF = IsReciprocal ? &BaseR : BaseF;
1925 APSInt NI(64, false);
1926 if ((IsInteger || IsReciprocal) &&
1927 NF->convertToInteger(NI, APFloat::rmTowardZero, &Ignored) ==
1928 APFloat::opOK &&
1929 NI > 1 && NI.isPowerOf2()) {
1930 double N = NI.logBase2() * (IsReciprocal ? -1.0 : 1.0);
1931 Value *FMul = B.CreateFMul(Expo, ConstantFP::get(Ty, N), "mul");
1932 if (Pow->doesNotAccessMemory())
1933 return copyFlags(*Pow, B.CreateCall(Intrinsic::getDeclaration(
1934 Mod, Intrinsic::exp2, Ty),
1935 FMul, "exp2"));
1936 else
1937 return copyFlags(*Pow, emitUnaryFloatFnCall(FMul, TLI, LibFunc_exp2,
1938 LibFunc_exp2f,
1939 LibFunc_exp2l, B, Attrs));
1940 }
1941 }
1942
1943 // pow(10.0, x) -> exp10(x)
1944  // TODO: There is no exp10() intrinsic yet, but one may be added eventually.
1945 if (match(Base, m_SpecificFP(10.0)) &&
1946 hasFloatFn(M, TLI, Ty, LibFunc_exp10, LibFunc_exp10f, LibFunc_exp10l))
1947 return copyFlags(*Pow, emitUnaryFloatFnCall(Expo, TLI, LibFunc_exp10,
1948 LibFunc_exp10f, LibFunc_exp10l,
1949 B, Attrs));
1950
1951 // pow(x, y) -> exp2(log2(x) * y)
1952 if (Pow->hasApproxFunc() && Pow->hasNoNaNs() && BaseF->isFiniteNonZero() &&
1953 !BaseF->isNegative()) {
1954 // pow(1, inf) is defined to be 1 but exp2(log2(1) * inf) evaluates to NaN.
1955 // Luckily optimizePow has already handled the x == 1 case.
1956    assert(!match(Base, m_FPOne()) &&
1957           "pow(1.0, y) should have been simplified earlier!");
1958
1959 Value *Log = nullptr;
1960 if (Ty->isFloatTy())
1961 Log = ConstantFP::get(Ty, std::log2(BaseF->convertToFloat()));
1962 else if (Ty->isDoubleTy())
1963 Log = ConstantFP::get(Ty, std::log2(BaseF->convertToDouble()));
1964
1965 if (Log) {
1966 Value *FMul = B.CreateFMul(Log, Expo, "mul");
1967 if (Pow->doesNotAccessMemory())
1968 return copyFlags(*Pow, B.CreateCall(Intrinsic::getDeclaration(
1969 Mod, Intrinsic::exp2, Ty),
1970 FMul, "exp2"));
1971 else if (hasFloatFn(M, TLI, Ty, LibFunc_exp2, LibFunc_exp2f,
1972 LibFunc_exp2l))
1973 return copyFlags(*Pow, emitUnaryFloatFnCall(FMul, TLI, LibFunc_exp2,
1974 LibFunc_exp2f,
1975 LibFunc_exp2l, B, Attrs));
1976 }
1977 }
1978
1979 return nullptr;
1980}
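
A worked instance of the power-of-two case above, purely illustrative: 8.0 = 2^3 and 0.25 = 2^-2, so pow(8.0, x) becomes exp2(3 * x) and pow(0.25, x) becomes exp2(-2 * x).

#include <algorithm>
#include <cassert>
#include <cmath>

static bool approxEq(double A, double B) {
  return std::abs(A - B) <= 1e-9 * std::max(std::abs(A), 1.0);
}

int main() {
  const double Xs[] = {0.0, 0.5, 1.0, 2.0, 3.25};
  for (double X : Xs) {
    assert(approxEq(std::pow(8.0, X), std::exp2(3.0 * X)));   // 8 = 2^3
    assert(approxEq(std::pow(0.25, X), std::exp2(-2.0 * X))); // 0.25 = 2^-2
  }
}
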
1981
1982static Value *getSqrtCall(Value *V, AttributeList Attrs, bool NoErrno,
1983 Module *M, IRBuilderBase &B,
1984 const TargetLibraryInfo *TLI) {
1985 // If errno is never set, then use the intrinsic for sqrt().
1986 if (NoErrno) {
1987 Function *SqrtFn =
1988 Intrinsic::getDeclaration(M, Intrinsic::sqrt, V->getType());
1989 return B.CreateCall(SqrtFn, V, "sqrt");
1990 }
1991
1992 // Otherwise, use the libcall for sqrt().
1993 if (hasFloatFn(M, TLI, V->getType(), LibFunc_sqrt, LibFunc_sqrtf,
1994 LibFunc_sqrtl))
1995 // TODO: We also should check that the target can in fact lower the sqrt()
1996 // libcall. We currently have no way to ask this question, so we ask if
1997 // the target has a sqrt() libcall, which is not exactly the same.
1998 return emitUnaryFloatFnCall(V, TLI, LibFunc_sqrt, LibFunc_sqrtf,
1999 LibFunc_sqrtl, B, Attrs);
2000
2001 return nullptr;
2002}
2003
2004/// Use square root in place of pow(x, +/-0.5).
2005Value *LibCallSimplifier::replacePowWithSqrt(CallInst *Pow, IRBuilderBase &B) {
2006 Value *Sqrt, *Base = Pow->getArgOperand(0), *Expo = Pow->getArgOperand(1);
2007 AttributeList Attrs; // Attributes are only meaningful on the original call
2008 Module *Mod = Pow->getModule();
2009 Type *Ty = Pow->getType();
2010
2011 const APFloat *ExpoF;
2012 if (!match(Expo, m_APFloat(ExpoF)) ||
2013 (!ExpoF->isExactlyValue(0.5) && !ExpoF->isExactlyValue(-0.5)))
2014 return nullptr;
2015
2016 // Converting pow(X, -0.5) to 1/sqrt(X) may introduce an extra rounding step,
2017 // so that requires fast-math-flags (afn or reassoc).
2018 if (ExpoF->isNegative() && (!Pow->hasApproxFunc() && !Pow->hasAllowReassoc()))
2019 return nullptr;
2020
2021 // If we have a pow() library call (accesses memory) and we can't guarantee
2022 // that the base is not an infinity, give up:
2023 // pow(-Inf, 0.5) is optionally required to have a result of +Inf (not setting
2024 // errno), but sqrt(-Inf) is required by various standards to set errno.
2025 if (!Pow->doesNotAccessMemory() && !Pow->hasNoInfs() &&
2026 !isKnownNeverInfinity(Base, TLI))
2027 return nullptr;
2028
2029 Sqrt = getSqrtCall(Base, Attrs, Pow->doesNotAccessMemory(), Mod, B, TLI);
2030 if (!Sqrt)
2031 return nullptr;
2032
2033 // Handle signed zero base by expanding to fabs(sqrt(x)).
2034 if (!Pow->hasNoSignedZeros()) {
2035 Function *FAbsFn = Intrinsic::getDeclaration(Mod, Intrinsic::fabs, Ty);
2036 Sqrt = B.CreateCall(FAbsFn, Sqrt, "abs");
2037 }
2038
2039 Sqrt = copyFlags(*Pow, Sqrt);
2040
2041 // Handle non finite base by expanding to
2042 // (x == -infinity ? +infinity : sqrt(x)).
2043 if (!Pow->hasNoInfs()) {
2044 Value *PosInf = ConstantFP::getInfinity(Ty),
2045 *NegInf = ConstantFP::getInfinity(Ty, true);
2046 Value *FCmp = B.CreateFCmpOEQ(Base, NegInf, "isinf");
2047 Sqrt = B.CreateSelect(FCmp, PosInf, Sqrt);
2048 }
2049
2050 // If the exponent is negative, then get the reciprocal.
2051 if (ExpoF->isNegative())
2052 Sqrt = B.CreateFDiv(ConstantFP::get(Ty, 1.0), Sqrt, "reciprocal");
2053
2054 return Sqrt;
2055}
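
The fabs and the -infinity select above exist because of corner cases where pow and sqrt disagree. A hedged demonstration (exact output formatting is platform dependent):

#include <cmath>
#include <cstdio>
#include <limits>

int main() {
  // pow(-0.0, 0.5) is +0.0 but sqrt(-0.0) is -0.0, hence fabs(sqrt(x)).
  std::printf("%g %g\n", std::pow(-0.0, 0.5), std::sqrt(-0.0));

  // pow(-inf, 0.5) is +inf but sqrt(-inf) is NaN, hence the isinf select.
  double NegInf = -std::numeric_limits<double>::infinity();
  std::printf("%g %g\n", std::pow(NegInf, 0.5), std::sqrt(NegInf));
}
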
2056
2057static Value *createPowWithIntegerExponent(Value *Base, Value *Expo, Module *M,
2058 IRBuilderBase &B) {
2059 Value *Args[] = {Base, Expo};
2060 Type *Types[] = {Base->getType(), Expo->getType()};
2061 Function *F = Intrinsic::getDeclaration(M, Intrinsic::powi, Types);
2062 return B.CreateCall(F, Args);
2063}
2064
2065Value *LibCallSimplifier::optimizePow(CallInst *Pow, IRBuilderBase &B) {
2066 Value *Base = Pow->getArgOperand(0);
2067 Value *Expo = Pow->getArgOperand(1);
2068 Function *Callee = Pow->getCalledFunction();
2069 StringRef Name = Callee->getName();
2070 Type *Ty = Pow->getType();
2071 Module *M = Pow->getModule();
2072 bool AllowApprox = Pow->hasApproxFunc();
2073 bool Ignored;
2074
2075 // Propagate the math semantics from the call to any created instructions.
2076 IRBuilderBase::FastMathFlagGuard Guard(B);
2077 B.setFastMathFlags(Pow->getFastMathFlags());
2078 // Evaluate special cases related to the base.
2079
2080 // pow(1.0, x) -> 1.0
2081 if (match(Base, m_FPOne()))
2082 return Base;
2083
2084 if (Value *Exp = replacePowWithExp(Pow, B))
2085 return Exp;
2086
2087 // Evaluate special cases related to the exponent.
2088
2089 // pow(x, -1.0) -> 1.0 / x
2090 if (match(Expo, m_SpecificFP(-1.0)))
2091 return B.CreateFDiv(ConstantFP::get(Ty, 1.0), Base, "reciprocal");
2092
2093 // pow(x, +/-0.0) -> 1.0
2094 if (match(Expo, m_AnyZeroFP()))
2095 return ConstantFP::get(Ty, 1.0);
2096
2097 // pow(x, 1.0) -> x
2098 if (match(Expo, m_FPOne()))
2099 return Base;
2100
2101 // pow(x, 2.0) -> x * x
2102 if (match(Expo, m_SpecificFP(2.0)))
2103 return B.CreateFMul(Base, Base, "square");
2104
2105 if (Value *Sqrt = replacePowWithSqrt(Pow, B))
2106 return Sqrt;
2107
2108 // If we can approximate pow:
2109 // pow(x, n) -> powi(x, n) * sqrt(x) if n has exactly a 0.5 fraction
2110 // pow(x, n) -> powi(x, n) if n is a constant signed integer value
2111 const APFloat *ExpoF;
2112 if (AllowApprox && match(Expo, m_APFloat(ExpoF)) &&
2113 !ExpoF->isExactlyValue(0.5) && !ExpoF->isExactlyValue(-0.5)) {
2114 APFloat ExpoA(abs(*ExpoF));
2115 APFloat ExpoI(*ExpoF);
2116 Value *Sqrt = nullptr;
2117 if (!ExpoA.isInteger()) {
2118 APFloat Expo2 = ExpoA;
2119 // To check if ExpoA is an integer + 0.5, we add it to itself. If there
2120 // is no floating point exception and the result is an integer, then
2121 // ExpoA == integer + 0.5
2122 if (Expo2.add(ExpoA, APFloat::rmNearestTiesToEven) != APFloat::opOK)
2123 return nullptr;
2124
2125 if (!Expo2.isInteger())
2126 return nullptr;
2127
2128 if (ExpoI.roundToIntegral(APFloat::rmTowardNegative) !=
2129 APFloat::opInexact)
2130 return nullptr;
2131 if (!ExpoI.isInteger())
2132 return nullptr;
2133 ExpoF = &ExpoI;
2134
2135 Sqrt = getSqrtCall(Base, Pow->getCalledFunction()->getAttributes(),
2136 Pow->doesNotAccessMemory(), M, B, TLI);
2137 if (!Sqrt)
2138 return nullptr;
2139 }
2140
2141      // Any 0.5 fraction has been handled above (via Sqrt).
2142      // Fold pow -> powi for the remaining integer exponent.
2143 APSInt IntExpo(TLI->getIntSize(), /*isUnsigned=*/false);
2144 if (ExpoF->isInteger() &&
2145 ExpoF->convertToInteger(IntExpo, APFloat::rmTowardZero, &Ignored) ==
2146 APFloat::opOK) {
2147 Value *PowI = copyFlags(
2148 *Pow,
2149 createPowWithIntegerExponent(
2150 Base, ConstantInt::get(B.getIntNTy(TLI->getIntSize()), IntExpo),
2151 M, B));
2152
2153 if (PowI && Sqrt)
2154 return B.CreateFMul(PowI, Sqrt);
2155
2156 return PowI;
2157 }
2158 }
2159
2160 // powf(x, itofp(y)) -> powi(x, y)
2161 if (AllowApprox && (isa<SIToFPInst>(Expo) || isa<UIToFPInst>(Expo))) {
2162 if (Value *ExpoI = getIntToFPVal(Expo, B, TLI->getIntSize()))
2163 return copyFlags(*Pow, createPowWithIntegerExponent(Base, ExpoI, M, B));
2164 }
2165
2166 // Shrink pow() to powf() if the arguments are single precision,
2167 // unless the result is expected to be double precision.
2168 if (UnsafeFPShrink && Name == TLI->getName(LibFunc_pow) &&
2169 hasFloatVersion(M, Name)) {
2170 if (Value *Shrunk = optimizeBinaryDoubleFP(Pow, B, TLI, true))
2171 return Shrunk;
2172 }
2173
2174 return nullptr;
2175}
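
A worked check of the half-fraction path above (hedged; powi is modeled with a plain loop): for an exponent of 3.5, ExpoA + ExpoA = 7.0 is an integer, ExpoI rounds down to 3, and the final value is powi(x, 3) * sqrt(x).

#include <cassert>
#include <cmath>

// Small model of llvm.powi with a non-negative integer exponent.
static double powiModel(double X, int N) {
  double R = 1.0;
  for (int I = 0; I < N; ++I)
    R *= X;
  return R;
}

int main() {
  const double X = 2.7, E = 3.5;
  // The "add it to itself" test: 3.5 + 3.5 == 7.0 is an integer, so the
  // exponent has exactly a 0.5 fraction.
  assert(std::floor(E + E) == E + E);
  double Approx = powiModel(X, 3) * std::sqrt(X); // powi(x, 3) * sqrt(x)
  assert(std::abs(Approx - std::pow(X, E)) < 1e-9 * std::pow(X, E));
}
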
2176
2177Value *LibCallSimplifier::optimizeExp2(CallInst *CI, IRBuilderBase &B) {
2178 Module *M = CI->getModule();
2179 Function *Callee = CI->getCalledFunction();
2180 AttributeList Attrs; // Attributes are only meaningful on the original call
2181 StringRef Name = Callee->getName();
2182 Value *Ret = nullptr;
2183 if (UnsafeFPShrink && Name == TLI->getName(LibFunc_exp2) &&
2184 hasFloatVersion(M, Name))
2185 Ret = optimizeUnaryDoubleFP(CI, B, TLI, true);
2186
2187 Type *Ty = CI->getType();
2188 Value *Op = CI->getArgOperand(0);
2189
2190 // Turn exp2(sitofp(x)) -> ldexp(1.0, sext(x)) if sizeof(x) <= IntSize
2191 // Turn exp2(uitofp(x)) -> ldexp(1.0, zext(x)) if sizeof(x) < IntSize
2192 if ((isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op)) &&
2193 hasFloatFn(M, TLI, Ty, LibFunc_ldexp, LibFunc_ldexpf, LibFunc_ldexpl)) {
2194 if (Value *Exp = getIntToFPVal(Op, B, TLI->getIntSize()))
2195 return emitBinaryFloatFnCall(ConstantFP::get(Ty, 1.0), Exp, TLI,
2196 LibFunc_ldexp, LibFunc_ldexpf, LibFunc_ldexpl,
2197 B, Attrs);
2198 }
2199
2200 return Ret;
2201}
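
A hedged numeric check of the ldexp rewrite above, assuming a libm whose exp2 is exact on integer inputs (true in practice; ldexp itself is always exact):

#include <cassert>
#include <cmath>

int main() {
  const int Ns[] = {-5, 0, 3, 10, 52};
  for (int N : Ns) {
    // exp2(sitofp(n)) -> ldexp(1.0, sext(n))
    assert(std::exp2(static_cast<double>(N)) == std::ldexp(1.0, N));
  }
}
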
2202
2203Value *LibCallSimplifier::optimizeFMinFMax(CallInst *CI, IRBuilderBase &B) {
2204 Module *M = CI->getModule();
2205
2206 // If we can shrink the call to a float function rather than a double
2207 // function, do that first.
2208 Function *Callee = CI->getCalledFunction();
2209 StringRef Name = Callee->getName();
2210 if ((Name == "fmin" || Name == "fmax") && hasFloatVersion(M, Name))
2211 if (Value *Ret = optimizeBinaryDoubleFP(CI, B, TLI))
2212 return Ret;
2213
2214 // The LLVM intrinsics minnum/maxnum correspond to fmin/fmax. Canonicalize to
2215 // the intrinsics for improved optimization (for example, vectorization).
2216 // No-signed-zeros is implied by the definitions of fmax/fmin themselves.
2217 // From the C standard draft WG14/N1256:
2218 // "Ideally, fmax would be sensitive to the sign of zero, for example
2219 // fmax(-0.0, +0.0) would return +0; however, implementation in software
2220 // might be impractical."
2221 IRBuilderBase::FastMathFlagGuard Guard(B);
2222 FastMathFlags FMF = CI->getFastMathFlags();
2223 FMF.setNoSignedZeros();
2224 B.setFastMathFlags(FMF);
2225
2226 Intrinsic::ID IID = Callee->getName().startswith("fmin") ? Intrinsic::minnum
2227 : Intrinsic::maxnum;
2228 Function *F = Intrinsic::getDeclaration(CI->getModule(), IID, CI->getType());
2229 return copyFlags(
2230 *CI, B.CreateCall(F, {CI->getArgOperand(0), CI->getArgOperand(1)}));
2231}
2232
2233Value *LibCallSimplifier::optimizeLog(CallInst *Log, IRBuilderBase &B) {
2234 Function *LogFn = Log->getCalledFunction();
2235 AttributeList Attrs; // Attributes are only meaningful on the original call
2236 StringRef LogNm = LogFn->getName();
2237 Intrinsic::ID LogID = LogFn->getIntrinsicID();
2238 Module *Mod = Log->getModule();
2239 Type *Ty = Log->getType();
2240 Value *Ret = nullptr;
2241
2242 if (UnsafeFPShrink && hasFloatVersion(Mod, LogNm))
2243 Ret = optimizeUnaryDoubleFP(Log, B, TLI, true);
2244
2245 // The earlier call must also be 'fast' in order to do these transforms.
2246 CallInst *Arg = dyn_cast<CallInst>(Log->getArgOperand(0));
2247 if (!Log->isFast() || !Arg || !Arg->isFast() || !Arg->hasOneUse())
2248 return Ret;
2249
2250 LibFunc LogLb, ExpLb, Exp2Lb, Exp10Lb, PowLb;
2251
2252 // This is only applicable to log(), log2(), log10().
2253 if (TLI->getLibFunc(LogNm, LogLb))
2254 switch (LogLb) {
2255 case LibFunc_logf:
2256 LogID = Intrinsic::log;
2257 ExpLb = LibFunc_expf;
2258 Exp2Lb = LibFunc_exp2f;
2259 Exp10Lb = LibFunc_exp10f;
2260 PowLb = LibFunc_powf;
2261 break;
2262 case LibFunc_log:
2263 LogID = Intrinsic::log;
2264 ExpLb = LibFunc_exp;
2265 Exp2Lb = LibFunc_exp2;
2266 Exp10Lb = LibFunc_exp10;
2267 PowLb = LibFunc_pow;
2268 break;
2269 case LibFunc_logl:
2270 LogID = Intrinsic::log;
2271 ExpLb = LibFunc_expl;
2272 Exp2Lb = LibFunc_exp2l;
2273 Exp10Lb = LibFunc_exp10l;
2274 PowLb = LibFunc_powl;
2275 break;
2276 case LibFunc_log2f:
2277 LogID = Intrinsic::log2;
2278 ExpLb = LibFunc_expf;
2279 Exp2Lb = LibFunc_exp2f;
2280 Exp10Lb = LibFunc_exp10f;
2281 PowLb = LibFunc_powf;
2282 break;
2283 case LibFunc_log2:
2284 LogID = Intrinsic::log2;
2285 ExpLb = LibFunc_exp;
2286 Exp2Lb = LibFunc_exp2;
2287 Exp10Lb = LibFunc_exp10;
2288 PowLb = LibFunc_pow;
2289 break;
2290 case LibFunc_log2l:
2291 LogID = Intrinsic::log2;
2292 ExpLb = LibFunc_expl;
2293 Exp2Lb = LibFunc_exp2l;
2294 Exp10Lb = LibFunc_exp10l;
2295 PowLb = LibFunc_powl;
2296 break;
2297 case LibFunc_log10f:
2298 LogID = Intrinsic::log10;
2299 ExpLb = LibFunc_expf;
2300 Exp2Lb = LibFunc_exp2f;
2301 Exp10Lb = LibFunc_exp10f;
2302 PowLb = LibFunc_powf;
2303 break;
2304 case LibFunc_log10:
2305 LogID = Intrinsic::log10;
2306 ExpLb = LibFunc_exp;
2307 Exp2Lb = LibFunc_exp2;
2308 Exp10Lb = LibFunc_exp10;
2309 PowLb = LibFunc_pow;
2310 break;
2311 case LibFunc_log10l:
2312 LogID = Intrinsic::log10;
2313 ExpLb = LibFunc_expl;
2314 Exp2Lb = LibFunc_exp2l;
2315 Exp10Lb = LibFunc_exp10l;
2316 PowLb = LibFunc_powl;
2317 break;
2318 default:
2319 return Ret;
2320 }
2321 else if (LogID == Intrinsic::log || LogID == Intrinsic::log2 ||
2322 LogID == Intrinsic::log10) {
2323 if (Ty->getScalarType()->isFloatTy()) {
2324 ExpLb = LibFunc_expf;
2325 Exp2Lb = LibFunc_exp2f;
2326 Exp10Lb = LibFunc_exp10f;
2327 PowLb = LibFunc_powf;
2328 } else if (Ty->getScalarType()->isDoubleTy()) {
2329 ExpLb = LibFunc_exp;
2330 Exp2Lb = LibFunc_exp2;
2331 Exp10Lb = LibFunc_exp10;
2332 PowLb = LibFunc_pow;
2333 } else
2334 return Ret;
2335 } else
2336 return Ret;
2337
2338 IRBuilderBase::FastMathFlagGuard Guard(B);
2339 B.setFastMathFlags(FastMathFlags::getFast());
2340
2341 Intrinsic::ID ArgID = Arg->getIntrinsicID();
2342 LibFunc ArgLb = NotLibFunc;
2343 TLI->getLibFunc(*Arg, ArgLb);
2344
2345 // log(pow(x,y)) -> y*log(x)
2346 if (ArgLb == PowLb || ArgID == Intrinsic::pow) {
2347 Value *LogX =
2348 Log->doesNotAccessMemory()
2349 ? B.CreateCall(Intrinsic::getDeclaration(Mod, LogID, Ty),
2350 Arg->getOperand(0), "log")
2351 : emitUnaryFloatFnCall(Arg->getOperand(0), TLI, LogNm, B, Attrs);
2352 Value *MulY = B.CreateFMul(Arg->getArgOperand(1), LogX, "mul");
2353 // Since pow() may have side effects, e.g. errno,
2354 // dead code elimination may not be trusted to remove it.
2355 substituteInParent(Arg, MulY);
2356 return MulY;
2357 }
2358
2359 // log(exp{,2,10}(y)) -> y*log({e,2,10})
2360 // TODO: There is no exp10() intrinsic yet.
2361 if (ArgLb == ExpLb || ArgLb == Exp2Lb || ArgLb == Exp10Lb ||
2362 ArgID == Intrinsic::exp || ArgID == Intrinsic::exp2) {
2363 Constant *Eul;
2364 if (ArgLb == ExpLb || ArgID == Intrinsic::exp)
2365 // FIXME: Add more precise value of e for long double.
2366 Eul = ConstantFP::get(Log->getType(), numbers::e);
2367 else if (ArgLb == Exp2Lb || ArgID == Intrinsic::exp2)
2368 Eul = ConstantFP::get(Log->getType(), 2.0);
2369 else
2370 Eul = ConstantFP::get(Log->getType(), 10.0);
2371 Value *LogE = Log->doesNotAccessMemory()
2372 ? B.CreateCall(Intrinsic::getDeclaration(Mod, LogID, Ty),
2373 Eul, "log")
2374 : emitUnaryFloatFnCall(Eul, TLI, LogNm, B, Attrs);
2375 Value *MulY = B.CreateFMul(Arg->getArgOperand(0), LogE, "mul");
2376 // Since exp() may have side effects, e.g. errno,
2377 // dead code elimination may not be trusted to remove it.
2378 substituteInParent(Arg, MulY);
2379 return MulY;
2380 }
2381
2382 return Ret;
2383}
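
Both folds above are the textbook identities log_b(x^y) = y * log_b(x) and log_b(k^y) = y * log_b(k). A hedged numeric spot check (the helper name is mine):

#include <algorithm>
#include <cassert>
#include <cmath>

static bool approxEq(double A, double B) {
  return std::abs(A - B) <= 1e-12 * std::max(std::abs(A), 1.0);
}

int main() {
  const double X = 3.0, Y = 2.5, E = std::exp(1.0);
  assert(approxEq(std::log(std::pow(X, Y)), Y * std::log(X)));  // log(pow(x,y)) -> y*log(x)
  assert(approxEq(std::log2(std::exp2(Y)), Y));                 // log2(exp2(y)) -> y
  assert(approxEq(std::log10(std::exp(Y)), Y * std::log10(E))); // log10(exp(y)) -> y*log10(e)
}
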
2384
2385Value *LibCallSimplifier::optimizeSqrt(CallInst *CI, IRBuilderBase &B) {
2386 Module *M = CI->getModule();
2387 Function *Callee = CI->getCalledFunction();
2388 Value *Ret = nullptr;
2389  // TODO: Once we have a way (other than checking for the existence of the
2390 // libcall) to tell whether our target can lower @llvm.sqrt, relax the
2391 // condition below.
2392 if (isLibFuncEmittable(M, TLI, LibFunc_sqrtf) &&
2393 (Callee->getName() == "sqrt" ||
2394 Callee->getIntrinsicID() == Intrinsic::sqrt))
2395 Ret = optimizeUnaryDoubleFP(CI, B, TLI, true);
2396
2397 if (!CI->isFast())
2398 return Ret;
2399
2400 Instruction *I = dyn_cast<Instruction>(CI->getArgOperand(0));
2401 if (!I || I->getOpcode() != Instruction::FMul || !I->isFast())
2402 return Ret;
2403
2404 // We're looking for a repeated factor in a multiplication tree,
2405 // so we can do this fold: sqrt(x * x) -> fabs(x);
2406 // or this fold: sqrt((x * x) * y) -> fabs(x) * sqrt(y).
2407 Value *Op0 = I->getOperand(0);
2408 Value *Op1 = I->getOperand(1);
2409 Value *RepeatOp = nullptr;
2410 Value *OtherOp = nullptr;
2411 if (Op0 == Op1) {
2412 // Simple match: the operands of the multiply are identical.
2413 RepeatOp = Op0;
2414 } else {
2415 // Look for a more complicated pattern: one of the operands is itself
2416 // a multiply, so search for a common factor in that multiply.
2417 // Note: We don't bother looking any deeper than this first level or for
2418 // variations of this pattern because instcombine's visitFMUL and/or the
2419 // reassociation pass should give us this form.
2420 Value *OtherMul0, *OtherMul1;
2421 if (match(Op0, m_FMul(m_Value(OtherMul0), m_Value(OtherMul1)))) {
2422 // Pattern: sqrt((x * y) * z)
2423 if (OtherMul0 == OtherMul1 && cast<Instruction>(Op0)->isFast()) {
2424 // Matched: sqrt((x * x) * z)
2425 RepeatOp = OtherMul0;
2426 OtherOp = Op1;
2427 }
2428 }
2429 }
2430 if (!RepeatOp)
2431 return Ret;
2432
2433 // Fast math flags for any created instructions should match the sqrt
2434 // and multiply.
2435 IRBuilderBase::FastMathFlagGuard Guard(B);
2436 B.setFastMathFlags(I->getFastMathFlags());
2437
2438 // If we found a repeated factor, hoist it out of the square root and
2439 // replace it with the fabs of that factor.
2440 Type *ArgType = I->getType();
2441 Function *Fabs = Intrinsic::getDeclaration(M, Intrinsic::fabs, ArgType);
2442 Value *FabsCall = B.CreateCall(Fabs, RepeatOp, "fabs");
2443 if (OtherOp) {
2444 // If we found a non-repeated factor, we still need to get its square
2445 // root. We then multiply that by the value that was simplified out
2446 // of the square root calculation.
2447 Function *Sqrt = Intrinsic::getDeclaration(M, Intrinsic::sqrt, ArgType);
2448 Value *SqrtCall = B.CreateCall(Sqrt, OtherOp, "sqrt");
2449 return copyFlags(*CI, B.CreateFMul(FabsCall, SqrtCall));
2450 }
2451 return copyFlags(*CI, FabsCall);
2452}
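
A hedged check of the repeated-factor folds above. The negative factor shows why fabs is needed, and the operands are chosen so both sides are exact; in general the rewrite can change the last bit, which is part of why it requires fast math:

#include <cassert>
#include <cmath>

int main() {
  const double X = -3.0, Y = 4.0;
  assert(std::sqrt(X * X) == std::fabs(X));                      // sqrt(x*x) -> fabs(x)
  assert(std::sqrt((X * X) * Y) == std::fabs(X) * std::sqrt(Y)); // sqrt((x*x)*y) -> fabs(x)*sqrt(y)
}
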
2453
2454// TODO: Generalize to handle any trig function and its inverse.
2455Value *LibCallSimplifier::optimizeTan(CallInst *CI, IRBuilderBase &B) {
2456 Module *M = CI->getModule();
2457 Function *Callee = CI->getCalledFunction();
2458 Value *Ret = nullptr;
2459 StringRef Name = Callee->getName();
2460 if (UnsafeFPShrink && Name == "tan" && hasFloatVersion(M, Name))
2461 Ret = optimizeUnaryDoubleFP(CI, B, TLI, true);
2462
2463 Value *Op1 = CI->getArgOperand(0);
2464 auto *OpC = dyn_cast<CallInst>(Op1);
2465 if (!OpC)
2466 return Ret;
2467
2468 // Both calls must be 'fast' in order to remove them.
2469 if (!CI->isFast() || !OpC->isFast())
2470 return Ret;
2471
2472 // tan(atan(x)) -> x
2473 // tanf(atanf(x)) -> x
2474 // tanl(atanl(x)) -> x
2475 LibFunc Func;
2476 Function *F = OpC->getCalledFunction();
2477 if (F && TLI->getLibFunc(F->getName(), Func) &&
2478 isLibFuncEmittable(M, TLI, Func) &&
2479 ((Func == LibFunc_atan && Callee->getName() == "tan") ||
2480 (Func == LibFunc_atanf && Callee->getName() == "tanf") ||
2481 (Func == LibFunc_atanl && Callee->getName() == "tanl")))
2482 Ret = OpC->getArgOperand(0);
2483 return Ret;
2484}
2485
2486static bool isTrigLibCall(CallInst *CI) {
2487 // We can only hope to do anything useful if we can ignore things like errno
2488 // and floating-point exceptions.
2489 // We already checked the prototype.
2490 return CI->hasFnAttr(Attribute::NoUnwind) &&
2491 CI->hasFnAttr(Attribute::ReadNone);
2492}
2493
2494static bool insertSinCosCall(IRBuilderBase &B, Function *OrigCallee, Value *Arg,
2495 bool UseFloat, Value *&Sin, Value *&Cos,
2496 Value *&SinCos, const TargetLibraryInfo *TLI) {
2497 Module *M = OrigCallee->getParent();
2498 Type *ArgTy = Arg->getType();
2499 Type *ResTy;
2500 StringRef Name;
2501
2502 Triple T(OrigCallee->getParent()->getTargetTriple());
2503 if (UseFloat) {
2504 Name = "__sincospif_stret";
2505
2506    assert(T.getArch() != Triple::x86 && "x86 messy and unsupported for now");
2507 // x86_64 can't use {float, float} since that would be returned in both
2508 // xmm0 and xmm1, which isn't what a real struct would do.
2509 ResTy = T.getArch() == Triple::x86_64
2510 ? static_cast<Type *>(FixedVectorType::get(ArgTy, 2))
2511 : static_cast<Type *>(StructType::get(ArgTy, ArgTy));
2512 } else {
2513 Name = "__sincospi_stret";
2514 ResTy = StructType::get(ArgTy, ArgTy);
2515 }
2516
2517 if (!isLibFuncEmittable(M, TLI, Name))
2518 return false;
2519 LibFunc TheLibFunc;
2520 TLI->getLibFunc(Name, TheLibFunc);
2521 FunctionCallee Callee = getOrInsertLibFunc(
2522 M, *TLI, TheLibFunc, OrigCallee->getAttributes(), ResTy, ArgTy);
2523
2524 if (Instruction *ArgInst = dyn_cast<Instruction>(Arg)) {
2525 // If the argument is an instruction, it must dominate all uses so put our
2526 // sincos call there.
2527 B.SetInsertPoint(ArgInst->getParent(), ++ArgInst->getIterator());
2528 } else {
2529 // Otherwise (e.g. for a constant) the beginning of the function is as
2530 // good a place as any.
2531 BasicBlock &EntryBB = B.GetInsertBlock()->getParent()->getEntryBlock();
2532 B.SetInsertPoint(&EntryBB, EntryBB.begin());
2533 }
2534
2535 SinCos = B.CreateCall(Callee, Arg, "sincospi");
2536
2537 if (SinCos->getType()->isStructTy()) {
2538 Sin = B.CreateExtractValue(SinCos, 0, "sinpi");
2539 Cos = B.CreateExtractValue(SinCos, 1, "cospi");
2540 } else {
2541 Sin = B.CreateExtractElement(SinCos, ConstantInt::get(B.getInt32Ty(), 0),
2542 "sinpi");
2543 Cos = B.CreateExtractElement(SinCos, ConstantInt::get(B.getInt32Ty(), 1),
2544 "cospi");
2545 }
2546
2547 return true;
2548}
2549
2550Value *LibCallSimplifier::optimizeSinCosPi(CallInst *CI, IRBuilderBase &B) {
2551 // Make sure the prototype is as expected, otherwise the rest of the
2552 // function is probably invalid and likely to abort.
2553 if (!isTrigLibCall(CI))
6. Assuming the condition is false
7. Taking false branch
2554 return nullptr;
2555
2556 Value *Arg = CI->getArgOperand(0);
2557 SmallVector<CallInst *, 1> SinCalls;
2558 SmallVector<CallInst *, 1> CosCalls;
2559 SmallVector<CallInst *, 1> SinCosCalls;
2560
2561 bool IsFloat = Arg->getType()->isFloatTy();
2562
2563 // Look for all compatible sinpi, cospi and sincospi calls with the same
2564 // argument. If there are enough (in some sense) we can make the
2565 // substitution.
2566 Function *F = CI->getFunction();
2567 for (User *U : Arg->users())
2568 classifyArgUse(U, F, IsFloat, SinCalls, CosCalls, SinCosCalls);
8. Calling 'LibCallSimplifier::classifyArgUse'
2569
2570 // It's only worthwhile if both sinpi and cospi are actually used.
2571 if (SinCalls.empty() || CosCalls.empty())
2572 return nullptr;
2573
2574 Value *Sin, *Cos, *SinCos;
2575 if (!insertSinCosCall(B, CI->getCalledFunction(), Arg, IsFloat, Sin, Cos,
2576 SinCos, TLI))
2577 return nullptr;
2578
2579 auto replaceTrigInsts = [this](SmallVectorImpl<CallInst *> &Calls,
2580 Value *Res) {
2581 for (CallInst *C : Calls)
2582 replaceAllUsesWith(C, Res);
2583 };
2584
2585 replaceTrigInsts(SinCalls, Sin);
2586 replaceTrigInsts(CosCalls, Cos);
2587 replaceTrigInsts(SinCosCalls, SinCos);
2588
2589 return nullptr;
2590}
2591
2592void LibCallSimplifier::classifyArgUse(
2593 Value *Val, Function *F, bool IsFloat,
2594 SmallVectorImpl<CallInst *> &SinCalls,
2595 SmallVectorImpl<CallInst *> &CosCalls,
2596 SmallVectorImpl<CallInst *> &SinCosCalls) {
2597 CallInst *CI = dyn_cast<CallInst>(Val);
9. Assuming 'Val' is not a 'CastReturnType'
10. 'CI' initialized to a null pointer value
2598 Module *M = CI->getModule();
11. Called C++ object pointer is null
2599
2600 if (!CI || CI->use_empty())
2601 return;
2602
2603 // Don't consider calls in other functions.
2604 if (CI->getFunction() != F)
2605 return;
2606
2607 Function *Callee = CI->getCalledFunction();
2608 LibFunc Func;
2609 if (!Callee || !TLI->getLibFunc(*Callee, Func) ||
2610 !isLibFuncEmittable(M, TLI, Func) ||
2611 !isTrigLibCall(CI))
2612 return;
2613
2614 if (IsFloat) {
2615 if (Func == LibFunc_sinpif)
2616 SinCalls.push_back(CI);
2617 else if (Func == LibFunc_cospif)
2618 CosCalls.push_back(CI);
2619 else if (Func == LibFunc_sincospif_stret)
2620 SinCosCalls.push_back(CI);
2621 } else {
2622 if (Func == LibFunc_sinpi)
2623 SinCalls.push_back(CI);
2624 else if (Func == LibFunc_cospi)
2625 CosCalls.push_back(CI);
2626 else if (Func == LibFunc_sincospi_stret)
2627 SinCosCalls.push_back(CI);
2628 }
2629}
2630
2631//===----------------------------------------------------------------------===//
2632// Integer Library Call Optimizations
2633//===----------------------------------------------------------------------===//
2634
2635Value *LibCallSimplifier::optimizeFFS(CallInst *CI, IRBuilderBase &B) {
2636  // All variants of ffs return int, which need not be 32 bits wide.
2637 // ffs{,l,ll}(x) -> x != 0 ? (int)llvm.cttz(x)+1 : 0
2638 Type *RetType = CI->getType();
2639 Value *Op = CI->getArgOperand(0);
2640 Type *ArgType = Op->getType();
2641 Function *F = Intrinsic::getDeclaration(CI->getCalledFunction()->getParent(),
2642 Intrinsic::cttz, ArgType);
2643 Value *V = B.CreateCall(F, {Op, B.getTrue()}, "cttz");
2644 V = B.CreateAdd(V, ConstantInt::get(V->getType(), 1));
2645 V = B.CreateIntCast(V, RetType, false);
2646
2647 Value *Cond = B.CreateICmpNE(Op, Constant::getNullValue(ArgType));
2648 return B.CreateSelect(Cond, V, ConstantInt::get(RetType, 0));
2649}
2650
2651Value *LibCallSimplifier::optimizeFls(CallInst *CI, IRBuilderBase &B) {
2652  // All variants of fls return int, which need not be 32 bits wide.
2653 // fls{,l,ll}(x) -> (int)(sizeInBits(x) - llvm.ctlz(x, false))
2654 Value *Op = CI->getArgOperand(0);
2655 Type *ArgType = Op->getType();
2656 Function *F = Intrinsic::getDeclaration(CI->getCalledFunction()->getParent(),
2657 Intrinsic::ctlz, ArgType);
2658 Value *V = B.CreateCall(F, {Op, B.getFalse()}, "ctlz");
2659 V = B.CreateSub(ConstantInt::get(V->getType(), ArgType->getIntegerBitWidth()),
2660 V);
2661 return B.CreateIntCast(V, CI->getType(), false);
2662}
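
A hedged model of the ffs and fls rewrites above using C++20 <bit> (helper names are mine): ffs(x) is x ? cttz(x) + 1 : 0, and fls(x) is the bit width minus ctlz(x).

#include <bit>
#include <cassert>
#include <cstdint>

// ffs{,l,ll}(x) -> x != 0 ? cttz(x) + 1 : 0
static int ffsModel(uint32_t X) {
  return X != 0 ? std::countr_zero(X) + 1 : 0;
}

// fls{,l,ll}(x) -> sizeInBits(x) - ctlz(x); countl_zero(0) is 32 here.
static int flsModel(uint32_t X) {
  return 32 - std::countl_zero(X);
}

int main() {
  assert(ffsModel(0) == 0);
  assert(ffsModel(0b10100) == 3); // lowest set bit is bit 2 -> result 3
  assert(flsModel(0) == 0);
  assert(flsModel(0b10100) == 5); // highest set bit is bit 4 -> result 5
}
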
2663
2664Value *LibCallSimplifier::optimizeAbs(CallInst *CI, IRBuilderBase &B) {
2665 // abs(x) -> x <s 0 ? -x : x
2666 // The negation has 'nsw' because abs of INT_MIN is undefined.
2667 Value *X = CI->getArgOperand(0);
2668 Value *IsNeg = B.CreateIsNeg(X);
2669 Value *NegX = B.CreateNSWNeg(X, "neg");
2670 return B.CreateSelect(IsNeg, NegX, X);
2671}
2672
2673Value *LibCallSimplifier::optimizeIsDigit(CallInst *CI, IRBuilderBase &B) {
2674 // isdigit(c) -> (c-'0') <u 10
2675 Value *Op = CI->getArgOperand(0);
2676 Type *ArgType = Op->getType();
2677 Op = B.CreateSub(Op, ConstantInt::get(ArgType, '0'), "isdigittmp");
2678 Op = B.CreateICmpULT(Op, ConstantInt::get(ArgType, 10), "isdigit");
2679 return B.CreateZExt(Op, CI->getType());
2680}
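
The single unsigned comparison above checks both ends of the digit range at once. A hedged sketch of the trick (the helper name is mine):

#include <cassert>

// isdigit(c) -> (unsigned)(c - '0') < 10: a character below '0' wraps around
// to a huge unsigned value, so one comparison covers both bounds.
static bool isDigitModel(int C) {
  return static_cast<unsigned>(C - '0') < 10u;
}

int main() {
  assert(isDigitModel('0') && isDigitModel('5') && isDigitModel('9'));
  assert(!isDigitModel('/')); // one below '0': wraps under the unsigned cast
  assert(!isDigitModel(':')); // one above '9'
  assert(!isDigitModel('a'));
}
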
2681
2682Value *LibCallSimplifier::optimizeIsAscii(CallInst *CI, IRBuilderBase &B) {
2683 // isascii(c) -> c <u 128
2684 Value *Op = CI->getArgOperand(0);
2685 Type *ArgType = Op->getType();
2686 Op = B.CreateICmpULT(Op, ConstantInt::get(ArgType, 128), "isascii");
2687 return B.CreateZExt(Op, CI->getType());
2688}
2689
2690Value *LibCallSimplifier::optimizeToAscii(CallInst *CI, IRBuilderBase &B) {
2691 // toascii(c) -> c & 0x7f
2692 return B.CreateAnd(CI->getArgOperand(0),
2693 ConstantInt::get(CI->getType(), 0x7F));
2694}
2695
2696// Fold calls to atoi, atol, and atoll.
2697Value *LibCallSimplifier::optimizeAtoi(CallInst *CI, IRBuilderBase &B) {
2698 CI->addParamAttr(0, Attribute::NoCapture);
2699
2700 StringRef Str;
2701 if (!getConstantStringInfo(CI->getArgOperand(0), Str))
2702 return nullptr;
2703
2704 return convertStrToInt(CI, Str, nullptr, 10, /*AsSigned=*/true, B);
2705}
2706
2707// Fold calls to strtol, strtoll, strtoul, and strtoull.
2708Value *LibCallSimplifier::optimizeStrToInt(CallInst *CI, IRBuilderBase &B,
2709 bool AsSigned) {
2710 Value *EndPtr = CI->getArgOperand(1);
2711 if (isa<ConstantPointerNull>(EndPtr)) {
2712 // With a null EndPtr, this function won't capture the main argument.
2713 // It would be readonly too, except that it still may write to errno.
2714 CI->addParamAttr(0, Attribute::NoCapture);
2715 EndPtr = nullptr;
2716 } else if (!isKnownNonZero(EndPtr, DL))
2717 return nullptr;
2718
2719 StringRef Str;
2720 if (!getConstantStringInfo(CI->getArgOperand(0), Str))
2721 return nullptr;
2722
2723 if (ConstantInt *CInt = dyn_cast<ConstantInt>(CI->getArgOperand(2))) {
2724 return convertStrToInt(CI, Str, EndPtr, CInt->getSExtValue(), AsSigned, B);
2725 }
2726
2727 return nullptr;
2728}
2729
2730//===----------------------------------------------------------------------===//
2731// Formatting and IO Library Call Optimizations
2732//===----------------------------------------------------------------------===//
2733
2734static bool isReportingError(Function *Callee, CallInst *CI, int StreamArg);
2735
2736Value *LibCallSimplifier::optimizeErrorReporting(CallInst *CI, IRBuilderBase &B,
2737 int StreamArg) {
2738 Function *Callee = CI->getCalledFunction();
2739 // Error reporting calls should be cold, mark them as such.
2740 // This applies even to non-builtin calls: it is only a hint and applies to
2741 // functions that the frontend might not understand as builtins.
2742
2743 // This heuristic was suggested in:
2744 // Improving Static Branch Prediction in a Compiler
2745 // Brian L. Deitrich, Ben-Chung Cheng, Wen-mei W. Hwu
2746 // Proceedings of PACT'98, Oct. 1998, IEEE
2747 if (!CI->hasFnAttr(Attribute::Cold) &&
2748 isReportingError(Callee, CI, StreamArg)) {
2749 CI->addFnAttr(Attribute::Cold);
2750 }
2751
2752 return nullptr;
2753}
2754
2755static bool isReportingError(Function *Callee, CallInst *CI, int StreamArg) {
2756 if (!Callee || !Callee->isDeclaration())
2757 return false;
2758
2759 if (StreamArg < 0)
2760 return true;
2761
2762 // These functions might be considered cold, but only if their stream
2763 // argument is stderr.
2764
2765 if (StreamArg >= (int)CI->arg_size())
2766 return false;
2767 LoadInst *LI = dyn_cast<LoadInst>(CI->getArgOperand(StreamArg));
2768 if (!LI)
2769 return false;
2770 GlobalVariable *GV = dyn_cast<GlobalVariable>(LI->getPointerOperand());
2771 if (!GV || !GV->isDeclaration())
2772 return false;
2773 return GV->getName() == "stderr";
2774}
2775
2776Value *LibCallSimplifier::optimizePrintFString(CallInst *CI, IRBuilderBase &B) {
2777 // Check for a fixed format string.
2778 StringRef FormatStr;
2779 if (!getConstantStringInfo(CI->getArgOperand(0), FormatStr))
2780 return nullptr;
2781
2782 // Empty format string -> noop.
2783 if (FormatStr.empty()) // Tolerate printf's declared void.
2784 return CI->use_empty() ? (Value *)CI : ConstantInt::get(CI->getType(), 0);
2785
2786 // Do not do any of the following transformations if the printf return value
2787 // is used, in general the printf return value is not compatible with either
2788 // putchar() or puts().
2789 if (!CI->use_empty())
2790 return nullptr;
2791
2792 Type *IntTy = CI->getType();
2793 // printf("x") -> putchar('x'), even for "%" and "%%".
2794 if (FormatStr.size() == 1 || FormatStr == "%%") {
2795 // Convert the character to unsigned char before passing it to putchar
2796 // to avoid host-specific sign extension in the IR. Putchar converts
2797 // it to unsigned char regardless.
2798 Value *IntChar = ConstantInt::get(IntTy, (unsigned char)FormatStr[0]);
2799 return copyFlags(*CI, emitPutChar(IntChar, B, TLI));
2800 }
2801
2802 // Try to remove call or emit putchar/puts.
2803 if (FormatStr == "%s" && CI->arg_size() > 1) {
2804 StringRef OperandStr;
2805 if (!getConstantStringInfo(CI->getOperand(1), OperandStr))
2806 return nullptr;
2807 // printf("%s", "") --> NOP
2808 if (OperandStr.empty())
2809 return (Value *)CI;
2810 // printf("%s", "a") --> putchar('a')
2811 if (OperandStr.size() == 1) {
2812 // Convert the character to unsigned char before passing it to putchar
2813 // to avoid host-specific sign extension in the IR. Putchar converts
2814 // it to unsigned char regardless.
2815 Value *IntChar = ConstantInt::get(IntTy, (unsigned char)OperandStr[0]);
2816 return copyFlags(*CI, emitPutChar(IntChar, B, TLI));
2817 }
2818 // printf("%s", str"\n") --> puts(str)
2819 if (OperandStr.back() == '\n') {
2820 OperandStr = OperandStr.drop_back();
2821 Value *GV = B.CreateGlobalString(OperandStr, "str");
2822 return copyFlags(*CI, emitPutS(GV, B, TLI));
2823 }
2824 return nullptr;
2825 }
2826
2827 // printf("foo\n") --> puts("foo")
2828 if (FormatStr.back() == '\n' &&
2829 !FormatStr.contains('%')) { // No format characters.
2830 // Create a string literal with no \n on it. We expect the constant merge
2831 // pass to be run after this pass, to merge duplicate strings.
2832 FormatStr = FormatStr.drop_back();
2833 Value *GV = B.CreateGlobalString(FormatStr, "str");
2834 return copyFlags(*CI, emitPutS(GV, B, TLI));
2835 }
2836
2837 // Optimize specific format strings.
2838 // printf("%c", chr) --> putchar(chr)
2839 if (FormatStr == "%c" && CI->arg_size() > 1 &&
2840 CI->getArgOperand(1)->getType()->isIntegerTy()) {
2841 // Convert the argument to the type expected by putchar, i.e., int, which
2842 // need not be 32 bits wide but which is the same as printf's return type.
2843 Value *IntChar = B.CreateIntCast(CI->getArgOperand(1), IntTy, false);
2844 return copyFlags(*CI, emitPutChar(IntChar, B, TLI));
2845 }
2846
2847 // printf("%s\n", str) --> puts(str)
2848 if (FormatStr == "%s\n" && CI->arg_size() > 1 &&
2849 CI->getArgOperand(1)->getType()->isPointerTy())
2850 return copyFlags(*CI, emitPutS(CI->getArgOperand(1), B, TLI));
2851 return nullptr;
2852}
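
A hedged source-level summary of the printf rewrites above; each pair prints the same bytes when the return value is unused:

#include <cstdio>

int main() {
  const char *Str = "hello";
  std::printf("x");         // -> putchar('x')
  std::putchar('x');

  std::printf("%c", 'y');   // -> putchar('y')
  std::putchar('y');

  std::printf("%s\n", Str); // -> puts(Str): puts appends the newline itself
  std::puts(Str);

  std::printf("foo\n");     // -> puts("foo") with the trailing \n dropped
  std::puts("foo");
}
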
2853
2854Value *LibCallSimplifier::optimizePrintF(CallInst *CI, IRBuilderBase &B) {
2855
2856 Module *M = CI->getModule();
2857 Function *Callee = CI->getCalledFunction();
2858 FunctionType *FT = Callee->getFunctionType();
2859 if (Value *V = optimizePrintFString(CI, B)) {
2860 return V;
2861 }
2862
2863 // printf(format, ...) -> iprintf(format, ...) if no floating point
2864 // arguments.
2865 if (isLibFuncEmittable(M, TLI, LibFunc_iprintf) &&
2866 !callHasFloatingPointArgument(CI)) {
2867 FunctionCallee IPrintFFn = getOrInsertLibFunc(M, *TLI, LibFunc_iprintf, FT,
2868 Callee->getAttributes());
2869 CallInst *New = cast<CallInst>(CI->clone());
2870 New->setCalledFunction(IPrintFFn);
2871 B.Insert(New);
2872 return New;
2873 }
2874
2875 // printf(format, ...) -> __small_printf(format, ...) if no 128-bit floating point
2876 // arguments.
2877 if (isLibFuncEmittable(M, TLI, LibFunc_small_printf) &&
2878 !callHasFP128Argument(CI)) {
2879 auto SmallPrintFFn = getOrInsertLibFunc(M, *TLI, LibFunc_small_printf, FT,
2880 Callee->getAttributes());
2881 CallInst *New = cast<CallInst>(CI->clone());
2882 New->setCalledFunction(SmallPrintFFn);
2883 B.Insert(New);
2884 return New;
2885 }
2886
2887 annotateNonNullNoUndefBasedOnAccess(CI, 0);
2888 return nullptr;
2889}
2890
2891Value *LibCallSimplifier::optimizeSPrintFString(CallInst *CI,
2892 IRBuilderBase &B) {
2893 // Check for a fixed format string.
2894 StringRef FormatStr;
2895 if (!getConstantStringInfo(CI->getArgOperand(1), FormatStr))
2896 return nullptr;
2897
2898 // If we just have a format string (nothing else crazy) transform it.
2899 Value *Dest = CI->getArgOperand(0);
2900 if (CI->arg_size() == 2) {
2901 // Make sure there's no % in the constant array. We could try to handle
2902 // %% -> % in the future if we cared.
2903 if (FormatStr.contains('%'))
2904 return nullptr; // we found a format specifier, bail out.
2905
2906 // sprintf(str, fmt) -> llvm.memcpy(align 1 str, align 1 fmt, strlen(fmt)+1)
2907 B.CreateMemCpy(
2908 Dest, Align(1), CI->getArgOperand(1), Align(1),
2909 ConstantInt::get(DL.getIntPtrType(CI->getContext()),
2910 FormatStr.size() + 1)); // Copy the null byte.
2911 return ConstantInt::get(CI->getType(), FormatStr.size());
2912 }
2913
2914 // The remaining optimizations require the format string to be "%s" or "%c"
2915 // and have an extra operand.
2916 if (FormatStr.size() != 2 || FormatStr[0] != '%' || CI->arg_size() < 3)
2917 return nullptr;
2918
2919 // Decode the second character of the format string.
2920 if (FormatStr[1] == 'c') {
2921 // sprintf(dst, "%c", chr) --> *(i8*)dst = chr; *((i8*)dst+1) = 0
2922 if (!CI->getArgOperand(2)->getType()->isIntegerTy())
2923 return nullptr;
2924 Value *V = B.CreateTrunc(CI->getArgOperand(2), B.getInt8Ty(), "char");
2925 Value *Ptr = castToCStr(Dest, B);
2926 B.CreateStore(V, Ptr);
2927 Ptr = B.CreateInBoundsGEP(B.getInt8Ty(), Ptr, B.getInt32(1), "nul");
2928 B.CreateStore(B.getInt8(0), Ptr);
2929
2930 return ConstantInt::get(CI->getType(), 1);
2931 }
2932
2933 if (FormatStr[1] == 's') {
2934 // sprintf(dest, "%s", str) -> llvm.memcpy(align 1 dest, align 1 str,
2935 // strlen(str)+1)
2936 if (!CI->getArgOperand(2)->getType()->isPointerTy())
2937 return nullptr;
2938
2939 if (CI->use_empty())
2940 // sprintf(dest, "%s", str) -> strcpy(dest, str)
2941 return copyFlags(*CI, emitStrCpy(Dest, CI->getArgOperand(2), B, TLI));
2942
2943 uint64_t SrcLen = GetStringLength(CI->getArgOperand(2));
2944 if (SrcLen) {
2945 B.CreateMemCpy(
2946 Dest, Align(1), CI->getArgOperand(2), Align(1),
2947 ConstantInt::get(DL.getIntPtrType(CI->getContext()), SrcLen));
2948 // Returns total number of characters written without null-character.
2949 return ConstantInt::get(CI->getType(), SrcLen - 1);
2950 } else if (Value *V = emitStpCpy(Dest, CI->getArgOperand(2), B, TLI)) {
2951 // sprintf(dest, "%s", str) -> stpcpy(dest, str) - dest
2952 // Handle mismatched pointer types (goes away with typeless pointers?).
2953 V = B.CreatePointerCast(V, B.getInt8PtrTy());
2954 Dest = B.CreatePointerCast(Dest, B.getInt8PtrTy());
2955 Value *PtrDiff = B.CreatePtrDiff(B.getInt8Ty(), V, Dest);
2956 return B.CreateIntCast(PtrDiff, CI->getType(), false);
2957 }
2958
2959 bool OptForSize = CI->getFunction()->hasOptSize() ||
2960 llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI,
2961 PGSOQueryType::IRPass);
2962 if (OptForSize)
2963 return nullptr;
2964
2965 Value *Len = emitStrLen(CI->getArgOperand(2), B, DL, TLI);
2966 if (!Len)
2967 return nullptr;
2968 Value *IncLen =
2969 B.CreateAdd(Len, ConstantInt::get(Len->getType(), 1), "leninc");
2970 B.CreateMemCpy(Dest, Align(1), CI->getArgOperand(2), Align(1), IncLen);
2971
2972 // The sprintf result is the unincremented number of bytes in the string.
2973 return B.CreateIntCast(Len, CI->getType(), false);
2974 }
2975 return nullptr;
2976}
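A source-level sketch of the sprintf cases handled above (illustrative only; the pass operates on IR, and the "%s" case additionally depends on whether the source length is known and on the size-optimization settings):

#include <cstdio>

void sprintf_examples(char *Dst, int Chr, const char *Str) {
  sprintf(Dst, "no specifiers");   // --> memcpy(Dst, "no specifiers", 14); folds to 13
  sprintf(Dst, "%c", Chr);         // --> Dst[0] = (char)Chr; Dst[1] = 0; folds to 1
  sprintf(Dst, "%s", Str);         // result unused --> strcpy(Dst, Str)
  int N = sprintf(Dst, "%s", Str); // result used   --> stpcpy/memcpy plus a strlen-based length
  (void)N;
}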
2977
2978Value *LibCallSimplifier::optimizeSPrintF(CallInst *CI, IRBuilderBase &B) {
2979 Module *M = CI->getModule();
2980 Function *Callee = CI->getCalledFunction();
2981 FunctionType *FT = Callee->getFunctionType();
2982 if (Value *V = optimizeSPrintFString(CI, B)) {
2983 return V;
2984 }
2985
2986 // sprintf(str, format, ...) -> siprintf(str, format, ...) if no floating
2987 // point arguments.
2988 if (isLibFuncEmittable(M, TLI, LibFunc_siprintf) &&
2989 !callHasFloatingPointArgument(CI)) {
2990 FunctionCallee SIPrintFFn = getOrInsertLibFunc(M, *TLI, LibFunc_siprintf,
2991 FT, Callee->getAttributes());
2992 CallInst *New = cast<CallInst>(CI->clone());
2993 New->setCalledFunction(SIPrintFFn);
2994 B.Insert(New);
2995 return New;
2996 }
2997
2998 // sprintf(str, format, ...) -> __small_sprintf(str, format, ...) if no 128-bit
2999 // floating point arguments.
3000 if (isLibFuncEmittable(M, TLI, LibFunc_small_sprintf) &&
3001 !callHasFP128Argument(CI)) {
3002 auto SmallSPrintFFn = getOrInsertLibFunc(M, *TLI, LibFunc_small_sprintf, FT,
3003 Callee->getAttributes());
3004 CallInst *New = cast<CallInst>(CI->clone());
3005 New->setCalledFunction(SmallSPrintFFn);
3006 B.Insert(New);
3007 return New;
3008 }
3009
3010 annotateNonNullNoUndefBasedOnAccess(CI, {0, 1});
3011 return nullptr;
3012}
3013
3014// Transform an snprintf call CI with the bound N to format the string Str
3015 // either to a call to memcpy, or to a single character store, or to nothing,
3016// and fold the result to a constant. A nonnull StrArg refers to the string
3017// argument being formatted. Otherwise the call is one with N < 2 and
3018// the "%c" directive to format a single character.
3019Value *LibCallSimplifier::emitSnPrintfMemCpy(CallInst *CI, Value *StrArg,
3020 StringRef Str, uint64_t N,
3021 IRBuilderBase &B) {
3022 assert(StrArg || (N < 2 && Str.size() == 1));
3023
3024 unsigned IntBits = TLI->getIntSize();
3025 uint64_t IntMax = maxIntN(IntBits);
3026 if (Str.size() > IntMax)
3027 // Bail if the string is longer than INT_MAX. POSIX requires
3028 // implementations to set errno to EOVERFLOW in this case, in
3029 // addition to when N is larger than that (checked by the caller).
3030 return nullptr;
3031
3032 Value *StrLen = ConstantInt::get(CI->getType(), Str.size());
3033 if (N == 0)
3034 return StrLen;
3035
3036 // Set to the number of bytes to copy from StrArg, which is also
3037 // the offset of the terminating nul.
3038 uint64_t NCopy;
3039 if (N > Str.size())
3040 // Copy the full string, including the terminating nul (which must
3041 // be present regardless of the bound).
3042 NCopy = Str.size() + 1;
3043 else
3044 NCopy = N - 1;
3045
3046 Value *DstArg = CI->getArgOperand(0);
3047 if (NCopy && StrArg)
3048 // Transform the call to llvm.memcpy(dst, fmt, N).
3049 copyFlags(
3050 *CI,
3051 B.CreateMemCpy(
3052 DstArg, Align(1), StrArg, Align(1),
3053 ConstantInt::get(DL.getIntPtrType(CI->getContext()), NCopy)));
3054
3055 if (N > Str.size())
3056 // Return early when the whole format string, including the final nul,
3057 // has been copied.
3058 return StrLen;
3059
3060 // Otherwise, when truncating the string append a terminating nul.
3061 Type *Int8Ty = B.getInt8Ty();
3062 Value *NulOff = B.getIntN(IntBits, NCopy);
3063 Value *DstEnd = B.CreateInBoundsGEP(Int8Ty, DstArg, NulOff, "endptr");
3064 B.CreateStore(ConstantInt::get(Int8Ty, 0), DstEnd);
3065 return StrLen;
3066}
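The bound handling above can be summarized with a small worked example. Assuming the constant string is "abc" (so Str.size() == 3), the helper produces the following; this is an illustrative C-level view only:

#include <cstdio>

void snprintf_bound_examples(char *Dst) {
  // In every case the folded result is 3 (strlen("abc")), per C semantics.
  snprintf(Dst, 0, "abc"); // nothing stored
  snprintf(Dst, 2, "abc"); // memcpy(Dst, "abc", 1); Dst[1] = '\0'
  snprintf(Dst, 4, "abc"); // memcpy(Dst, "abc", 4)  (includes the terminating nul)
  snprintf(Dst, 9, "abc"); // same as N == 4: the whole string plus nul is copied
}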
3067
3068Value *LibCallSimplifier::optimizeSnPrintFString(CallInst *CI,
3069 IRBuilderBase &B) {
3070 // Check for size
3071 ConstantInt *Size = dyn_cast<ConstantInt>(CI->getArgOperand(1));
3072 if (!Size)
3073 return nullptr;
3074
3075 uint64_t N = Size->getZExtValue();
3076 uint64_t IntMax = maxIntN(TLI->getIntSize());
3077 if (N > IntMax)
3078 // Bail if the bound exceeds INT_MAX. POSIX requires implementations
3079 // to set errno to EOVERFLOW in this case.
3080 return nullptr;
3081
3082 Value *DstArg = CI->getArgOperand(0);
3083 Value *FmtArg = CI->getArgOperand(2);
3084
3085 // Check for a fixed format string.
3086 StringRef FormatStr;
3087 if (!getConstantStringInfo(FmtArg, FormatStr))
3088 return nullptr;
3089
3090 // If we just have a format string (nothing else crazy) transform it.
3091 if (CI->arg_size() == 3) {
3092 if (FormatStr.contains('%'))
3093 // Bail if the format string contains a directive and there are
3094 // no arguments. We could handle "%%" in the future.
3095 return nullptr;
3096
3097 return emitSnPrintfMemCpy(CI, FmtArg, FormatStr, N, B);
3098 }
3099
3100 // The remaining optimizations require the format string to be "%s" or "%c"
3101 // and have an extra operand.
3102 if (FormatStr.size() != 2 || FormatStr[0] != '%' || CI->arg_size() != 4)
3103 return nullptr;
3104
3105 // Decode the second character of the format string.
3106 if (FormatStr[1] == 'c') {
3107 if (N <= 1) {
3108 // Use an arbitrary string of length 1 to transform the call into
3109 // either a nul store (N == 1) or a no-op (N == 0) and fold it
3110 // to one.
3111 StringRef CharStr("*");
3112 return emitSnPrintfMemCpy(CI, nullptr, CharStr, N, B);
3113 }
3114
3115 // snprintf(dst, size, "%c", chr) --> *(i8*)dst = chr; *((i8*)dst+1) = 0
3116 if (!CI->getArgOperand(3)->getType()->isIntegerTy())
3117 return nullptr;
3118 Value *V = B.CreateTrunc(CI->getArgOperand(3), B.getInt8Ty(), "char");
3119 Value *Ptr = castToCStr(DstArg, B);
3120 B.CreateStore(V, Ptr);
3121 Ptr = B.CreateInBoundsGEP(B.getInt8Ty(), Ptr, B.getInt32(1), "nul");
3122 B.CreateStore(B.getInt8(0), Ptr);
3123 return ConstantInt::get(CI->getType(), 1);
3124 }
3125
3126 if (FormatStr[1] != 's')
3127 return nullptr;
3128
3129 Value *StrArg = CI->getArgOperand(3);
3130 // snprintf(dest, size, "%s", str) to llvm.memcpy(dest, str, len+1, 1)
3131 StringRef Str;
3132 if (!getConstantStringInfo(StrArg, Str))
3133 return nullptr;
3134
3135 return emitSnPrintfMemCpy(CI, StrArg, Str, N, B);
3136}
3137
3138Value *LibCallSimplifier::optimizeSnPrintF(CallInst *CI, IRBuilderBase &B) {
3139 if (Value *V = optimizeSnPrintFString(CI, B)) {
3140 return V;
3141 }
3142
3143 if (isKnownNonZero(CI->getOperand(1), DL))
3144 annotateNonNullNoUndefBasedOnAccess(CI, 0);
3145 return nullptr;
3146}
3147
3148Value *LibCallSimplifier::optimizeFPrintFString(CallInst *CI,
3149 IRBuilderBase &B) {
3150 optimizeErrorReporting(CI, B, 0);
3151
3152 // All the optimizations depend on the format string.
3153 StringRef FormatStr;
3154 if (!getConstantStringInfo(CI->getArgOperand(1), FormatStr))
3155 return nullptr;
3156
3157 // Do not do any of the following transformations if the fprintf return
3158 // value is used, in general the fprintf return value is not compatible
3159 // with fwrite(), fputc() or fputs().
3160 if (!CI->use_empty())
3161 return nullptr;
3162
3163 // fprintf(F, "foo") --> fwrite("foo", 3, 1, F)
3164 if (CI->arg_size() == 2) {
3165 // Could handle %% -> % if we cared.
3166 if (FormatStr.contains('%'))
3167 return nullptr; // We found a format specifier.
3168
3169 return copyFlags(
3170 *CI, emitFWrite(CI->getArgOperand(1),
3171 ConstantInt::get(DL.getIntPtrType(CI->getContext()),
3172 FormatStr.size()),
3173 CI->getArgOperand(0), B, DL, TLI));
3174 }
3175
3176 // The remaining optimizations require the format string to be "%s" or "%c"
3177 // and have an extra operand.
3178 if (FormatStr.size() != 2 || FormatStr[0] != '%' || CI->arg_size() < 3)
3179 return nullptr;
3180
3181 // Decode the second character of the format string.
3182 if (FormatStr[1] == 'c') {
3183 // fprintf(F, "%c", chr) --> fputc(chr, F)
3184 if (!CI->getArgOperand(2)->getType()->isIntegerTy())
3185 return nullptr;
3186 return copyFlags(
3187 *CI, emitFPutC(CI->getArgOperand(2), CI->getArgOperand(0), B, TLI));
3188 }
3189
3190 if (FormatStr[1] == 's') {
3191 // fprintf(F, "%s", str) --> fputs(str, F)
3192 if (!CI->getArgOperand(2)->getType()->isPointerTy())
3193 return nullptr;
3194 return copyFlags(
3195 *CI, emitFPutS(CI->getArgOperand(2), CI->getArgOperand(0), B, TLI));
3196 }
3197 return nullptr;
3198}
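For reference, a sketch of the fprintf rewrites above (valid only when the fprintf result is unused, as checked at the top of the routine):

#include <cstdio>

void fprintf_examples(FILE *F, int Chr, const char *Str) {
  fprintf(F, "foo");      // --> fwrite("foo", 3, 1, F)
  fprintf(F, "%c", Chr);  // --> fputc(Chr, F)
  fprintf(F, "%s", Str);  // --> fputs(Str, F)
}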
3199
3200Value *LibCallSimplifier::optimizeFPrintF(CallInst *CI, IRBuilderBase &B) {
3201 Module *M = CI->getModule();
3202 Function *Callee = CI->getCalledFunction();
3203 FunctionType *FT = Callee->getFunctionType();
3204 if (Value *V = optimizeFPrintFString(CI, B)) {
3205 return V;
3206 }
3207
3208 // fprintf(stream, format, ...) -> fiprintf(stream, format, ...) if no
3209 // floating point arguments.
3210 if (isLibFuncEmittable(M, TLI, LibFunc_fiprintf) &&
3211 !callHasFloatingPointArgument(CI)) {
3212 FunctionCallee FIPrintFFn = getOrInsertLibFunc(M, *TLI, LibFunc_fiprintf,
3213 FT, Callee->getAttributes());
3214 CallInst *New = cast<CallInst>(CI->clone());
3215 New->setCalledFunction(FIPrintFFn);
3216 B.Insert(New);
3217 return New;
3218 }
3219
3220 // fprintf(stream, format, ...) -> __small_fprintf(stream, format, ...) if no
3221 // 128-bit floating point arguments.
3222 if (isLibFuncEmittable(M, TLI, LibFunc_small_fprintf) &&
3223 !callHasFP128Argument(CI)) {
3224 auto SmallFPrintFFn =
3225 getOrInsertLibFunc(M, *TLI, LibFunc_small_fprintf, FT,
3226 Callee->getAttributes());
3227 CallInst *New = cast<CallInst>(CI->clone());
3228 New->setCalledFunction(SmallFPrintFFn);
3229 B.Insert(New);
3230 return New;
3231 }
3232
3233 return nullptr;
3234}
3235
3236Value *LibCallSimplifier::optimizeFWrite(CallInst *CI, IRBuilderBase &B) {
3237 optimizeErrorReporting(CI, B, 3);
3238
3239 // Get the element size and count.
3240 ConstantInt *SizeC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
3241 ConstantInt *CountC = dyn_cast<ConstantInt>(CI->getArgOperand(2));
3242 if (SizeC && CountC) {
3243 uint64_t Bytes = SizeC->getZExtValue() * CountC->getZExtValue();
3244
3245 // If this is writing zero records, remove the call (it's a noop).
3246 if (Bytes == 0)
3247 return ConstantInt::get(CI->getType(), 0);
3248
3249 // If this is writing one byte, turn it into fputc.
3250 // This optimisation is only valid if the return value is unused.
3251 if (Bytes == 1 && CI->use_empty()) { // fwrite(S,1,1,F) -> fputc(S[0],F)
3252 Value *Char = B.CreateLoad(B.getInt8Ty(),
3253 castToCStr(CI->getArgOperand(0), B), "char");
3254 Value *NewCI = emitFPutC(Char, CI->getArgOperand(3), B, TLI);
3255 return NewCI ? ConstantInt::get(CI->getType(), 1) : nullptr;
3256 }
3257 }
3258
3259 return nullptr;
3260}
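A sketch of the fwrite folds above (the fputc rewrite additionally requires the fwrite result to be unused):

#include <cstdio>

void fwrite_examples(const char *S, FILE *F) {
  fwrite(S, 0, 4, F); // zero bytes written: the call folds to 0
  fwrite(S, 1, 1, F); // result unused: --> fputc(S[0], F)
}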
3261
3262Value *LibCallSimplifier::optimizeFPuts(CallInst *CI, IRBuilderBase &B) {
3263 optimizeErrorReporting(CI, B, 1);
3264
3265 // Don't rewrite fputs to fwrite when optimising for size because fwrite
3266 // requires more arguments and thus extra MOVs are required.
3267 bool OptForSize = CI->getFunction()->hasOptSize() ||
3268 llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI,
3269 PGSOQueryType::IRPass);
3270 if (OptForSize)
3271 return nullptr;
3272
3273 // We can't optimize if return value is used.
3274 if (!CI->use_empty())
3275 return nullptr;
3276
3277 // fputs(s,F) --> fwrite(s,strlen(s),1,F)
3278 uint64_t Len = GetStringLength(CI->getArgOperand(0));
3279 if (!Len)
3280 return nullptr;
3281
3282 // Known to have no uses (see above).
3283 return copyFlags(
3284 *CI,
3285 emitFWrite(CI->getArgOperand(0),
3286 ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len - 1),
3287 CI->getArgOperand(1), B, DL, TLI));
3288}
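And the converse of the previous fold: when the string length is known, the result is unused, and we are not optimizing for size, fputs becomes a single fwrite. Illustrative only:

#include <cstdio>

void fputs_example(FILE *F) {
  fputs("hello", F); // --> fwrite("hello", 5, 1, F)
}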
3289
3290Value *LibCallSimplifier::optimizePuts(CallInst *CI, IRBuilderBase &B) {
3291 annotateNonNullNoUndefBasedOnAccess(CI, 0);
3292 if (!CI->use_empty())
3293 return nullptr;
3294
3295 // Check for a constant string.
3296 // puts("") -> putchar('\n')
3297 StringRef Str;
3298 if (getConstantStringInfo(CI->getArgOperand(0), Str) && Str.empty()) {
3299 // putchar takes an argument of the same type as puts returns, i.e.,
3300 // int, which need not be 32 bits wide.
3301 Type *IntTy = CI->getType();
3302 return copyFlags(*CI, emitPutChar(ConstantInt::get(IntTy, '\n'), B, TLI));
3303 }
3304
3305 return nullptr;
3306}
3307
3308Value *LibCallSimplifier::optimizeBCopy(CallInst *CI, IRBuilderBase &B) {
3309 // bcopy(src, dst, n) -> llvm.memmove(dst, src, n)
3310 return copyFlags(*CI, B.CreateMemMove(CI->getArgOperand(1), Align(1),
3311 CI->getArgOperand(0), Align(1),
3312 CI->getArgOperand(2)));
3313}
3314
3315bool LibCallSimplifier::hasFloatVersion(const Module *M, StringRef FuncName) {
3316 SmallString<20> FloatFuncName = FuncName;
3317 FloatFuncName += 'f';
3318 return isLibFuncEmittable(M, TLI, FloatFuncName);
3319}
3320
3321Value *LibCallSimplifier::optimizeStringMemoryLibCall(CallInst *CI,
3322 IRBuilderBase &Builder) {
3323 Module *M = CI->getModule();
3324 LibFunc Func;
3325 Function *Callee = CI->getCalledFunction();
3326 // Check for string/memory library functions.
3327 if (TLI->getLibFunc(*Callee, Func) && isLibFuncEmittable(M, TLI, Func)) {
3328 // Make sure we never change the calling convention.
3329 assert(
3330 (ignoreCallingConv(Func) ||
3331 TargetLibraryInfoImpl::isCallingConvCCompatible(CI)) &&
3332 "Optimizing string/memory libcall would change the calling convention");
3333 switch (Func) {
3334 case LibFunc_strcat:
3335 return optimizeStrCat(CI, Builder);
3336 case LibFunc_strncat:
3337 return optimizeStrNCat(CI, Builder);
3338 case LibFunc_strchr:
3339 return optimizeStrChr(CI, Builder);
3340 case LibFunc_strrchr:
3341 return optimizeStrRChr(CI, Builder);
3342 case LibFunc_strcmp:
3343 return optimizeStrCmp(CI, Builder);
3344 case LibFunc_strncmp:
3345 return optimizeStrNCmp(CI, Builder);
3346 case LibFunc_strcpy:
3347 return optimizeStrCpy(CI, Builder);
3348 case LibFunc_stpcpy:
3349 return optimizeStpCpy(CI, Builder);
3350 case LibFunc_strlcpy:
3351 return optimizeStrLCpy(CI, Builder);
3352 case LibFunc_strncpy:
3353 return optimizeStrNCpy(CI, Builder);
3354 case LibFunc_strlen:
3355 return optimizeStrLen(CI, Builder);
3356 case LibFunc_strnlen:
3357 return optimizeStrNLen(CI, Builder);
3358 case LibFunc_strpbrk:
3359 return optimizeStrPBrk(CI, Builder);
3360 case LibFunc_strndup:
3361 return optimizeStrNDup(CI, Builder);
3362 case LibFunc_strtol:
3363 case LibFunc_strtod:
3364 case LibFunc_strtof:
3365 case LibFunc_strtoul:
3366 case LibFunc_strtoll:
3367 case LibFunc_strtold:
3368 case LibFunc_strtoull:
3369 return optimizeStrTo(CI, Builder);
3370 case LibFunc_strspn:
3371 return optimizeStrSpn(CI, Builder);
3372 case LibFunc_strcspn:
3373 return optimizeStrCSpn(CI, Builder);
3374 case LibFunc_strstr:
3375 return optimizeStrStr(CI, Builder);
3376 case LibFunc_memchr:
3377 return optimizeMemChr(CI, Builder);
3378 case LibFunc_memrchr:
3379 return optimizeMemRChr(CI, Builder);
3380 case LibFunc_bcmp:
3381 return optimizeBCmp(CI, Builder);
3382 case LibFunc_memcmp:
3383 return optimizeMemCmp(CI, Builder);
3384 case LibFunc_memcpy:
3385 return optimizeMemCpy(CI, Builder);
3386 case LibFunc_memccpy:
3387 return optimizeMemCCpy(CI, Builder);
3388 case LibFunc_mempcpy:
3389 return optimizeMemPCpy(CI, Builder);
3390 case LibFunc_memmove:
3391 return optimizeMemMove(CI, Builder);
3392 case LibFunc_memset:
3393 return optimizeMemSet(CI, Builder);
3394 case LibFunc_realloc:
3395 return optimizeRealloc(CI, Builder);
3396 case LibFunc_wcslen:
3397 return optimizeWcslen(CI, Builder);
3398 case LibFunc_bcopy:
3399 return optimizeBCopy(CI, Builder);
3400 default:
3401 break;
3402 }
3403 }
3404 return nullptr;
3405}
3406
3407Value *LibCallSimplifier::optimizeFloatingPointLibCall(CallInst *CI,
3408 LibFunc Func,
3409 IRBuilderBase &Builder) {
3410 const Module *M = CI->getModule();
3411
3412 // Don't optimize calls that require strict floating point semantics.
3413 if (CI->isStrictFP())
[1] Assuming the condition is false
[2] Taking false branch
3414 return nullptr;
3415
3416 if (Value *V = optimizeTrigReflections(CI, Func, Builder))
[2.1] 'V' is null
[3] Taking false branch
3417 return V;
3418
3419 switch (Func) {
[4] Control jumps to 'case LibFunc_cospi:' at line 3423
3420 case LibFunc_sinpif:
3421 case LibFunc_sinpi:
3422 case LibFunc_cospif:
3423 case LibFunc_cospi:
3424 return optimizeSinCosPi(CI, Builder);
[5] Calling 'LibCallSimplifier::optimizeSinCosPi'
3425 case LibFunc_powf:
3426 case LibFunc_pow:
3427 case LibFunc_powl:
3428 return optimizePow(CI, Builder);
3429 case LibFunc_exp2l:
3430 case LibFunc_exp2:
3431 case LibFunc_exp2f:
3432 return optimizeExp2(CI, Builder);
3433 case LibFunc_fabsf:
3434 case LibFunc_fabs:
3435 case LibFunc_fabsl:
3436 return replaceUnaryCall(CI, Builder, Intrinsic::fabs);
3437 case LibFunc_sqrtf:
3438 case LibFunc_sqrt:
3439 case LibFunc_sqrtl:
3440 return optimizeSqrt(CI, Builder);
3441 case LibFunc_logf:
3442 case LibFunc_log:
3443 case LibFunc_logl:
3444 case LibFunc_log10f:
3445 case LibFunc_log10:
3446 case LibFunc_log10l:
3447 case LibFunc_log1pf:
3448 case LibFunc_log1p:
3449 case LibFunc_log1pl:
3450 case LibFunc_log2f:
3451 case LibFunc_log2:
3452 case LibFunc_log2l:
3453 case LibFunc_logbf:
3454 case LibFunc_logb:
3455 case LibFunc_logbl:
3456 return optimizeLog(CI, Builder);
3457 case LibFunc_tan:
3458 case LibFunc_tanf:
3459 case LibFunc_tanl:
3460 return optimizeTan(CI, Builder);
3461 case LibFunc_ceil:
3462 return replaceUnaryCall(CI, Builder, Intrinsic::ceil);
3463 case LibFunc_floor:
3464 return replaceUnaryCall(CI, Builder, Intrinsic::floor);
3465 case LibFunc_round:
3466 return replaceUnaryCall(CI, Builder, Intrinsic::round);
3467 case LibFunc_roundeven:
3468 return replaceUnaryCall(CI, Builder, Intrinsic::roundeven);
3469 case LibFunc_nearbyint:
3470 return replaceUnaryCall(CI, Builder, Intrinsic::nearbyint);
3471 case LibFunc_rint:
3472 return replaceUnaryCall(CI, Builder, Intrinsic::rint);
3473 case LibFunc_trunc:
3474 return replaceUnaryCall(CI, Builder, Intrinsic::trunc);
3475 case LibFunc_acos:
3476 case LibFunc_acosh:
3477 case LibFunc_asin:
3478 case LibFunc_asinh:
3479 case LibFunc_atan:
3480 case LibFunc_atanh:
3481 case LibFunc_cbrt:
3482 case LibFunc_cosh:
3483 case LibFunc_exp:
3484 case LibFunc_exp10:
3485 case LibFunc_expm1:
3486 case LibFunc_cos:
3487 case LibFunc_sin:
3488 case LibFunc_sinh:
3489 case LibFunc_tanh:
3490 if (UnsafeFPShrink && hasFloatVersion(M, CI->getCalledFunction()->getName()))
3491 return optimizeUnaryDoubleFP(CI, Builder, TLI, true);
3492 return nullptr;
3493 case LibFunc_copysign:
3494 if (hasFloatVersion(M, CI->getCalledFunction()->getName()))
3495 return optimizeBinaryDoubleFP(CI, Builder, TLI);
3496 return nullptr;
3497 case LibFunc_fminf:
3498 case LibFunc_fmin:
3499 case LibFunc_fminl:
3500 case LibFunc_fmaxf:
3501 case LibFunc_fmax:
3502 case LibFunc_fmaxl:
3503 return optimizeFMinFMax(CI, Builder);
3504 case LibFunc_cabs:
3505 case LibFunc_cabsf:
3506 case LibFunc_cabsl:
3507 return optimizeCAbs(CI, Builder);
3508 default:
3509 return nullptr;
3510 }
3511}
3512
3513Value *LibCallSimplifier::optimizeCall(CallInst *CI, IRBuilderBase &Builder) {
3514 Module *M = CI->getModule();
3515 assert(!CI->isMustTailCall() && "These transforms aren't musttail safe.");
3516
3517 // TODO: Split out the code below that operates on FP calls so that
3518 // we can allow non-FP calls with the StrictFP attribute to be
3519 // optimized.
3520 if (CI->isNoBuiltin())
3521 return nullptr;
3522
3523 LibFunc Func;
3524 Function *Callee = CI->getCalledFunction();
3525 bool IsCallingConvC = TargetLibraryInfoImpl::isCallingConvCCompatible(CI);
3526
3527 SmallVector<OperandBundleDef, 2> OpBundles;
3528 CI->getOperandBundlesAsDefs(OpBundles);
3529
3530 IRBuilderBase::OperandBundlesGuard Guard(Builder);
3531 Builder.setDefaultOperandBundles(OpBundles);
3532
3533 // Command-line parameter overrides instruction attribute.
3534 // This can't be moved to optimizeFloatingPointLibCall() because it may be
3535 // used by the intrinsic optimizations.
3536 if (EnableUnsafeFPShrink.getNumOccurrences() > 0)
3537 UnsafeFPShrink = EnableUnsafeFPShrink;
3538 else if (isa<FPMathOperator>(CI) && CI->isFast())
3539 UnsafeFPShrink = true;
3540
3541 // First, check for intrinsics.
3542 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI)) {
3543 if (!IsCallingConvC)
3544 return nullptr;
3545 // The FP intrinsics have corresponding constrained versions so we don't
3546 // need to check for the StrictFP attribute here.
3547 switch (II->getIntrinsicID()) {
3548 case Intrinsic::pow:
3549 return optimizePow(CI, Builder);
3550 case Intrinsic::exp2:
3551 return optimizeExp2(CI, Builder);
3552 case Intrinsic::log:
3553 case Intrinsic::log2:
3554 case Intrinsic::log10:
3555 return optimizeLog(CI, Builder);
3556 case Intrinsic::sqrt:
3557 return optimizeSqrt(CI, Builder);
3558 case Intrinsic::memset:
3559 return optimizeMemSet(CI, Builder);
3560 case Intrinsic::memcpy:
3561 return optimizeMemCpy(CI, Builder);
3562 case Intrinsic::memmove:
3563 return optimizeMemMove(CI, Builder);
3564 default:
3565 return nullptr;
3566 }
3567 }
3568
3569 // Also try to simplify calls to fortified library functions.
3570 if (Value *SimplifiedFortifiedCI =
3571 FortifiedSimplifier.optimizeCall(CI, Builder)) {
3572 // Try to further simplify the result.
3573 CallInst *SimplifiedCI = dyn_cast<CallInst>(SimplifiedFortifiedCI);
3574 if (SimplifiedCI && SimplifiedCI->getCalledFunction()) {
3575 // Ensure that SimplifiedCI's uses are complete, since some calls have
3576 // their uses analyzed.
3577 replaceAllUsesWith(CI, SimplifiedCI);
3578
3579 // Set insertion point to SimplifiedCI to guarantee we reach all uses
3580 // we might replace later on.
3581 IRBuilderBase::InsertPointGuard Guard(Builder);
3582 Builder.SetInsertPoint(SimplifiedCI);
3583 if (Value *V = optimizeStringMemoryLibCall(SimplifiedCI, Builder)) {
3584 // If we were able to further simplify, remove the now redundant call.
3585 substituteInParent(SimplifiedCI, V);
3586 return V;
3587 }
3588 }
3589 return SimplifiedFortifiedCI;
3590 }
3591
3592 // Then check for known library functions.
3593 if (TLI->getLibFunc(*Callee, Func) && isLibFuncEmittable(M, TLI, Func)) {
3594 // We never change the calling convention.
3595 if (!ignoreCallingConv(Func) && !IsCallingConvC)
3596 return nullptr;
3597 if (Value *V = optimizeStringMemoryLibCall(CI, Builder))
3598 return V;
3599 if (Value *V = optimizeFloatingPointLibCall(CI, Func, Builder))
3600 return V;
3601 switch (Func) {
3602 case LibFunc_ffs:
3603 case LibFunc_ffsl:
3604 case LibFunc_ffsll:
3605 return optimizeFFS(CI, Builder);
3606 case LibFunc_fls:
3607 case LibFunc_flsl:
3608 case LibFunc_flsll:
3609 return optimizeFls(CI, Builder);
3610 case LibFunc_abs:
3611 case LibFunc_labs:
3612 case LibFunc_llabs:
3613 return optimizeAbs(CI, Builder);
3614 case LibFunc_isdigit:
3615 return optimizeIsDigit(CI, Builder);
3616 case LibFunc_isascii:
3617 return optimizeIsAscii(CI, Builder);
3618 case LibFunc_toascii:
3619 return optimizeToAscii(CI, Builder);
3620 case LibFunc_atoi:
3621 case LibFunc_atol:
3622 case LibFunc_atoll:
3623 return optimizeAtoi(CI, Builder);
3624 case LibFunc_strtol:
3625 case LibFunc_strtoll:
3626 return optimizeStrToInt(CI, Builder, /*AsSigned=*/true);
3627 case LibFunc_strtoul:
3628 case LibFunc_strtoull:
3629 return optimizeStrToInt(CI, Builder, /*AsSigned=*/false);
3630 case LibFunc_printf:
3631 return optimizePrintF(CI, Builder);
3632 case LibFunc_sprintf:
3633 return optimizeSPrintF(CI, Builder);
3634 case LibFunc_snprintf:
3635 return optimizeSnPrintF(CI, Builder);
3636 case LibFunc_fprintf:
3637 return optimizeFPrintF(CI, Builder);
3638 case LibFunc_fwrite:
3639 return optimizeFWrite(CI, Builder);
3640 case LibFunc_fputs:
3641 return optimizeFPuts(CI, Builder);
3642 case LibFunc_puts:
3643 return optimizePuts(CI, Builder);
3644 case LibFunc_perror:
3645 return optimizeErrorReporting(CI, Builder);
3646 case LibFunc_vfprintf:
3647 case LibFunc_fiprintf:
3648 return optimizeErrorReporting(CI, Builder, 0);
3649 default:
3650 return nullptr;
3651 }
3652 }
3653 return nullptr;
3654}
3655
3656LibCallSimplifier::LibCallSimplifier(
3657 const DataLayout &DL, const TargetLibraryInfo *TLI,
3658 OptimizationRemarkEmitter &ORE,
3659 BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
3660 function_ref<void(Instruction *, Value *)> Replacer,
3661 function_ref<void(Instruction *)> Eraser)
3662 : FortifiedSimplifier(TLI), DL(DL), TLI(TLI), ORE(ORE), BFI(BFI), PSI(PSI),
3663 Replacer(Replacer), Eraser(Eraser) {}
3664
3665void LibCallSimplifier::replaceAllUsesWith(Instruction *I, Value *With) {
3666 // Indirect through the replacer used in this instance.
3667 Replacer(I, With);
3668}
3669
3670void LibCallSimplifier::eraseFromParent(Instruction *I) {
3671 Eraser(I);
3672}
3673
3674// TODO:
3675// Additional cases that we need to add to this file:
3676//
3677// cbrt:
3678// * cbrt(expN(X)) -> expN(x/3)
3679// * cbrt(sqrt(x)) -> pow(x,1/6)
3680// * cbrt(cbrt(x)) -> pow(x,1/9)
3681//
3682// exp, expf, expl:
3683// * exp(log(x)) -> x
3684//
3685// log, logf, logl:
3686// * log(exp(x)) -> x
3687// * log(exp(y)) -> y*log(e)
3688// * log(exp10(y)) -> y*log(10)
3689// * log(sqrt(x)) -> 0.5*log(x)
3690//
3691// pow, powf, powl:
3692// * pow(sqrt(x),y) -> pow(x,y*0.5)
3693// * pow(pow(x,y),z)-> pow(x,y*z)
3694//
3695// signbit:
3696// * signbit(cnst) -> cnst'
3697// * signbit(nncst) -> 0 (if pstv is a non-negative constant)
3698//
3699// sqrt, sqrtf, sqrtl:
3700// * sqrt(expN(x)) -> expN(x*0.5)
3701// * sqrt(Nroot(x)) -> pow(x,1/(2*N))
3702// * sqrt(pow(x,y)) -> pow(|x|,y*0.5)
3703//
3704
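As a sanity check on two of the identities listed above (assuming the usual real-valued domains on which the library calls are defined, so x >= 0 where a root of x is taken):

\[
\sqrt{\exp_N(x)} = \exp_N(x)^{1/2} = \exp_N\!\left(\tfrac{x}{2}\right), \qquad
\sqrt[3]{\sqrt{x}} = \left(x^{1/2}\right)^{1/3} = x^{1/6} = \operatorname{pow}\!\left(x, \tfrac{1}{6}\right).
\]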
3705//===----------------------------------------------------------------------===//
3706// Fortified Library Call Optimizations
3707//===----------------------------------------------------------------------===//
3708
3709bool
3710FortifiedLibCallSimplifier::isFortifiedCallFoldable(CallInst *CI,
3711 unsigned ObjSizeOp,
3712 Optional<unsigned> SizeOp,
3713 Optional<unsigned> StrOp,
3714 Optional<unsigned> FlagOp) {
3715 // If this function takes a flag argument, the implementation may use it to
3716 // perform extra checks. Don't fold into the non-checking variant.
3717 if (FlagOp) {
3718 ConstantInt *Flag = dyn_cast<ConstantInt>(CI->getArgOperand(*FlagOp));
3719 if (!Flag || !Flag->isZero())
3720 return false;
3721 }
3722
3723 if (SizeOp && CI->getArgOperand(ObjSizeOp) == CI->getArgOperand(*SizeOp))
3724 return true;
3725
3726 if (ConstantInt *ObjSizeCI =
3727 dyn_cast<ConstantInt>(CI->getArgOperand(ObjSizeOp))) {
3728 if (ObjSizeCI->isMinusOne())
3729 return true;
3730 // If the object size wasn't -1 (unknown), bail out if we were asked to.
3731 if (OnlyLowerUnknownSize)
3732 return false;
3733 if (StrOp) {
3734 uint64_t Len = GetStringLength(CI->getArgOperand(*StrOp));
3735 // If the length is 0 we don't know how long it is and so we can't
3736 // remove the check.
3737 if (Len)
3738 annotateDereferenceableBytes(CI, *StrOp, Len);
3739 else
3740 return false;
3741 return ObjSizeCI->getZExtValue() >= Len;
3742 }
3743
3744 if (SizeOp) {
3745 if (ConstantInt *SizeCI =
3746 dyn_cast<ConstantInt>(CI->getArgOperand(*SizeOp)))
3747 return ObjSizeCI->getZExtValue() >= SizeCI->getZExtValue();
3748 }
3749 }
3750 return false;
3751}
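Putting the conditions above together, a __memcpy_chk call (object-size operand 3, size operand 2 in the caller below) folds to plain memcpy in situations like these. This is an illustrative source-level view; __builtin___memcpy_chk and __builtin_object_size are the usual way such calls arise from fortified headers:

#include <cstddef>
#include <cstring>

void chk_fold_examples(char *Dst, const char *Src, size_t N) {
  char Buf[32];
  // Object size known and >= the copy size: the check can never fail,
  // so the call is foldable to memcpy.
  __builtin___memcpy_chk(Buf, Src, 16, __builtin_object_size(Buf, 0));
  // Object size is -1 (unknown): also foldable, the check is a no-op.
  __builtin___memcpy_chk(Dst, Src, N, (size_t)-1);
}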
3752
3753Value *FortifiedLibCallSimplifier::optimizeMemCpyChk(CallInst *CI,
3754 IRBuilderBase &B) {
3755 if (isFortifiedCallFoldable(CI, 3, 2)) {
3756 CallInst *NewCI =
3757 B.CreateMemCpy(CI->getArgOperand(0), Align(1), CI->getArgOperand(1),
3758 Align(1), CI->getArgOperand(2));
3759 NewCI->setAttributes(CI->getAttributes());
3760 NewCI->removeRetAttrs(AttributeFuncs::typeIncompatible(NewCI->getType()));
3761 copyFlags(*CI, NewCI);
3762 return CI->getArgOperand(0);
3763 }
3764 return nullptr;
3765}
3766
3767Value *FortifiedLibCallSimplifier::optimizeMemMoveChk(CallInst *CI,
3768 IRBuilderBase &B) {
3769 if (isFortifiedCallFoldable(CI, 3, 2)) {
3770 CallInst *NewCI =
3771 B.CreateMemMove(CI->getArgOperand(0), Align(1), CI->getArgOperand(1),
3772 Align(1), CI->getArgOperand(2));
3773 NewCI->setAttributes(CI->getAttributes());
3774 NewCI->removeRetAttrs(AttributeFuncs::typeIncompatible(NewCI->getType()));
3775 copyFlags(*CI, NewCI);
3776 return CI->getArgOperand(0);
3777 }
3778 return nullptr;
3779}
3780
3781Value *FortifiedLibCallSimplifier::optimizeMemSetChk(CallInst *CI,
3782 IRBuilderBase &B) {
3783 if (isFortifiedCallFoldable(CI, 3, 2)) {
3784 Value *Val = B.CreateIntCast(CI->getArgOperand(1), B.getInt8Ty(), false);
3785 CallInst *NewCI = B.CreateMemSet(CI->getArgOperand(0), Val,
3786 CI->getArgOperand(2), Align(1));
3787 NewCI->setAttributes(CI->getAttributes());
3788 NewCI->removeRetAttrs(AttributeFuncs::typeIncompatible(NewCI->getType()));
3789 copyFlags(*CI, NewCI);
3790 return CI->getArgOperand(0);
3791 }
3792 return nullptr;
3793}
3794
3795Value *FortifiedLibCallSimplifier::optimizeMemPCpyChk(CallInst *CI,
3796 IRBuilderBase &B) {
3797 const DataLayout &DL = CI->getModule()->getDataLayout();
3798 if (isFortifiedCallFoldable(CI, 3, 2))
3799 if (Value *Call = emitMemPCpy(CI->getArgOperand(0), CI->getArgOperand(1),
3800 CI->getArgOperand(2), B, DL, TLI)) {
3801 CallInst *NewCI = cast<CallInst>(Call);
3802 NewCI->setAttributes(CI->getAttributes());
3803 NewCI->removeRetAttrs(AttributeFuncs::typeIncompatible(NewCI->getType()));
3804 return copyFlags(*CI, NewCI);
3805 }
3806 return nullptr;
3807}
3808
3809Value *FortifiedLibCallSimplifier::optimizeStrpCpyChk(CallInst *CI,
3810 IRBuilderBase &B,
3811 LibFunc Func) {
3812 const DataLayout &DL = CI->getModule()->getDataLayout();
3813 Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1),
3814 *ObjSize = CI->getArgOperand(2);
3815
3816 // __stpcpy_chk(x,x,...) -> x+strlen(x)
3817 if (Func == LibFunc_stpcpy_chk && !OnlyLowerUnknownSize && Dst == Src) {
3818 Value *StrLen = emitStrLen(Src, B, DL, TLI);
3819 return StrLen ? B.CreateInBoundsGEP(B.getInt8Ty(), Dst, StrLen) : nullptr;
3820 }
3821
3822 // If a) we don't have any length information, or b) we know this will
3823 // fit, then just lower to a plain st[rp]cpy. Otherwise we'll keep our
3824 // st[rp]cpy_chk call, which may fail at runtime if the size is too long.
3825 // TODO: It might be nice to get a maximum length out of the possible
3826 // string lengths for varying inputs.
3827 if (isFortifiedCallFoldable(CI, 2, None, 1)) {
3828 if (Func == LibFunc_strcpy_chk)
3829 return copyFlags(*CI, emitStrCpy(Dst, Src, B, TLI));
3830 else
3831 return copyFlags(*CI, emitStpCpy(Dst, Src, B, TLI));
3832 }
3833
3834 if (OnlyLowerUnknownSize)
3835 return nullptr;
3836
3837 // Maybe we can still fold __st[rp]cpy_chk to __memcpy_chk.
3838 uint64_t Len = GetStringLength(Src);
3839 if (Len)
3840 annotateDereferenceableBytes(CI, 1, Len);
3841 else
3842 return nullptr;
3843
3844 // FIXME: There is really no guarantee that sizeof(size_t) is equal to
3845 // sizeof(int*) for every target. So the assumption used here to derive the
3846 // SizeTBits based on the size of an integer pointer in address space zero
3847 // isn't always valid.
3848 Type *SizeTTy = DL.getIntPtrType(CI->getContext(), /*AddressSpace=*/0);
3849 Value *LenV = ConstantInt::get(SizeTTy, Len);
3850 Value *Ret = emitMemCpyChk(Dst, Src, LenV, ObjSize, B, DL, TLI);
3851 // If the function was an __stpcpy_chk, and we were able to fold it into
3852 // a __memcpy_chk, we still need to return the correct end pointer.
3853 if (Ret && Func == LibFunc_stpcpy_chk)
3854 return B.CreateInBoundsGEP(B.getInt8Ty(), Dst,
3855 ConstantInt::get(SizeTTy, Len - 1));
3856 return copyFlags(*CI, cast<CallInst>(Ret));
3857}
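A sketch of the __strcpy_chk / __stpcpy_chk lowering above, again using the compiler builtins that typically produce these calls; illustrative only:

#include <cstddef>
#include <cstring>

char *strcpy_chk_examples(char *Dst) {
  // Object size unknown (-1): the check can be dropped entirely.
  __builtin___strcpy_chk(Dst, "hello", (size_t)-1); // --> strcpy(Dst, "hello")
  // Same source and destination with __stpcpy_chk: fold to Dst + strlen(Dst)
  // (this just illustrates the x == x pattern matched above).
  return __builtin___stpcpy_chk(Dst, Dst, (size_t)-1);
}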
3858
3859Value *FortifiedLibCallSimplifier::optimizeStrLenChk(CallInst *CI,
3860 IRBuilderBase &B) {
3861 if (isFortifiedCallFoldable(CI, 1, None, 0))
3862 return copyFlags(*CI, emitStrLen(CI->getArgOperand(0), B,
3863 CI->getModule()->getDataLayout(), TLI));
3864 return nullptr;
3865}
3866
3867Value *FortifiedLibCallSimplifier::optimizeStrpNCpyChk(CallInst *CI,
3868 IRBuilderBase &B,
3869 LibFunc Func) {
3870 if (isFortifiedCallFoldable(CI, 3, 2)) {
3871 if (Func == LibFunc_strncpy_chk)
3872 return copyFlags(*CI,
3873 emitStrNCpy(CI->getArgOperand(0), CI->getArgOperand(1),
3874 CI->getArgOperand(2), B, TLI));
3875 else
3876 return copyFlags(*CI,
3877 emitStpNCpy(CI->getArgOperand(0), CI->getArgOperand(1),
3878 CI->getArgOperand(2), B, TLI));
3879 }
3880
3881 return nullptr;
3882}
3883
3884Value *FortifiedLibCallSimplifier::optimizeMemCCpyChk(CallInst *CI,
3885 IRBuilderBase &B) {
3886 if (isFortifiedCallFoldable(CI, 4, 3))
3887 return copyFlags(
3888 *CI, emitMemCCpy(CI->getArgOperand(0), CI->getArgOperand(1),
3889 CI->getArgOperand(2), CI->getArgOperand(3), B, TLI));
3890
3891 return nullptr;
3892}
3893
3894Value *FortifiedLibCallSimplifier::optimizeSNPrintfChk(CallInst *CI,
3895 IRBuilderBase &B) {
3896 if (isFortifiedCallFoldable(CI, 3, 1, None, 2)) {
3897 SmallVector<Value *, 8> VariadicArgs(drop_begin(CI->args(), 5));
3898 return copyFlags(*CI,
3899 emitSNPrintf(CI->getArgOperand(0), CI->getArgOperand(1),
3900 CI->getArgOperand(4), VariadicArgs, B, TLI));
3901 }
3902
3903 return nullptr;
3904}
3905
3906Value *FortifiedLibCallSimplifier::optimizeSPrintfChk(CallInst *CI,
3907 IRBuilderBase &B) {
3908 if (isFortifiedCallFoldable(CI, 2, None, None, 1)) {
3909 SmallVector<Value *, 8> VariadicArgs(drop_begin(CI->args(), 4));
3910 return copyFlags(*CI,
3911 emitSPrintf(CI->getArgOperand(0), CI->getArgOperand(3),
3912 VariadicArgs, B, TLI));
3913 }
3914
3915 return nullptr;
3916}
3917
3918Value *FortifiedLibCallSimplifier::optimizeStrCatChk(CallInst *CI,
3919 IRBuilderBase &B) {
3920 if (isFortifiedCallFoldable(CI, 2))
3921 return copyFlags(
3922 *CI, emitStrCat(CI->getArgOperand(0), CI->getArgOperand(1), B, TLI));
3923
3924 return nullptr;
3925}
3926
3927Value *FortifiedLibCallSimplifier::optimizeStrLCat(CallInst *CI,
3928 IRBuilderBase &B) {
3929 if (isFortifiedCallFoldable(CI, 3))
3930 return copyFlags(*CI,
3931 emitStrLCat(CI->getArgOperand(0), CI->getArgOperand(1),
3932 CI->getArgOperand(2), B, TLI));
3933
3934 return nullptr;
3935}
3936
3937Value *FortifiedLibCallSimplifier::optimizeStrNCatChk(CallInst *CI,
3938 IRBuilderBase &B) {
3939 if (isFortifiedCallFoldable(CI, 3))
3940 return copyFlags(*CI,
3941 emitStrNCat(CI->getArgOperand(0), CI->getArgOperand(1),
3942 CI->getArgOperand(2), B, TLI));
3943
3944 return nullptr;
3945}
3946
3947Value *FortifiedLibCallSimplifier::optimizeStrLCpyChk(CallInst *CI,
3948 IRBuilderBase &B) {
3949 if (isFortifiedCallFoldable(CI, 3))
3950 return copyFlags(*CI,
3951 emitStrLCpy(CI->getArgOperand(0), CI->getArgOperand(1),
3952 CI->getArgOperand(2), B, TLI));
3953
3954 return nullptr;
3955}
3956
3957Value *FortifiedLibCallSimplifier::optimizeVSNPrintfChk(CallInst *CI,
3958 IRBuilderBase &B) {
3959 if (isFortifiedCallFoldable(CI, 3, 1, None, 2))
3960 return copyFlags(
3961 *CI, emitVSNPrintf(CI->getArgOperand(0), CI->getArgOperand(1),
3962 CI->getArgOperand(4), CI->getArgOperand(5), B, TLI));
3963
3964 return nullptr;
3965}
3966
3967Value *FortifiedLibCallSimplifier::optimizeVSPrintfChk(CallInst *CI,
3968 IRBuilderBase &B) {
3969 if (isFortifiedCallFoldable(CI, 2, None, None, 1))
3970 return copyFlags(*CI,
3971 emitVSPrintf(CI->getArgOperand(0), CI->getArgOperand(3),
3972 CI->getArgOperand(4), B, TLI));
3973
3974 return nullptr;
3975}
3976
3977Value *FortifiedLibCallSimplifier::optimizeCall(CallInst *CI,
3978 IRBuilderBase &Builder) {
3979 // FIXME: We shouldn't be changing "nobuiltin" or TLI unavailable calls here.
3980 // Some clang users checked for _chk libcall availability using:
3981 // __has_builtin(__builtin___memcpy_chk)
3982 // When compiling with -fno-builtin, this is always true.
3983 // When passing -ffreestanding/-mkernel, which both imply -fno-builtin, we
3984 // end up with fortified libcalls, which isn't acceptable in a freestanding
3985 // environment which only provides their non-fortified counterparts.
3986 //
3987 // Until we change clang and/or teach external users to check for availability
3988 // differently, disregard the "nobuiltin" attribute and TLI::has.
3989 //
3990 // PR23093.
3991
3992 LibFunc Func;
3993 Function *Callee = CI->getCalledFunction();
3994 bool IsCallingConvC = TargetLibraryInfoImpl::isCallingConvCCompatible(CI);
3995
3996 SmallVector<OperandBundleDef, 2> OpBundles;
3997 CI->getOperandBundlesAsDefs(OpBundles);
3998
3999 IRBuilderBase::OperandBundlesGuard Guard(Builder);
4000 Builder.setDefaultOperandBundles(OpBundles);
4001
4002 // First, check that this is a known library function and that the prototype
4003 // is correct.
4004 if (!TLI->getLibFunc(*Callee, Func))
4005 return nullptr;
4006
4007 // We never change the calling convention.
4008 if (!ignoreCallingConv(Func) && !IsCallingConvC)
4009 return nullptr;
4010
4011 switch (Func) {
4012 case LibFunc_memcpy_chk:
4013 return optimizeMemCpyChk(CI, Builder);
4014 case LibFunc_mempcpy_chk:
4015 return optimizeMemPCpyChk(CI, Builder);
4016 case LibFunc_memmove_chk:
4017 return optimizeMemMoveChk(CI, Builder);
4018 case LibFunc_memset_chk:
4019 return optimizeMemSetChk(CI, Builder);
4020 case LibFunc_stpcpy_chk:
4021 case LibFunc_strcpy_chk:
4022 return optimizeStrpCpyChk(CI, Builder, Func);
4023 case LibFunc_strlen_chk:
4024 return optimizeStrLenChk(CI, Builder);
4025 case LibFunc_stpncpy_chk:
4026 case LibFunc_strncpy_chk:
4027 return optimizeStrpNCpyChk(CI, Builder, Func);
4028 case LibFunc_memccpy_chk:
4029 return optimizeMemCCpyChk(CI, Builder);
4030 case LibFunc_snprintf_chk:
4031 return optimizeSNPrintfChk(CI, Builder);
4032 case LibFunc_sprintf_chk:
4033 return optimizeSPrintfChk(CI, Builder);
4034 case LibFunc_strcat_chk:
4035 return optimizeStrCatChk(CI, Builder);
4036 case LibFunc_strlcat_chk:
4037 return optimizeStrLCat(CI, Builder);
4038 case LibFunc_strncat_chk:
4039 return optimizeStrNCatChk(CI, Builder);
4040 case LibFunc_strlcpy_chk:
4041 return optimizeStrLCpyChk(CI, Builder);
4042 case LibFunc_vsnprintf_chk:
4043 return optimizeVSNPrintfChk(CI, Builder);
4044 case LibFunc_vsprintf_chk:
4045 return optimizeVSPrintfChk(CI, Builder);
4046 default:
4047 break;
4048 }
4049 return nullptr;
4050}
4051
4052FortifiedLibCallSimplifier::FortifiedLibCallSimplifier(
4053 const TargetLibraryInfo *TLI, bool OnlyLowerUnknownSize)
4054 : TLI(TLI), OnlyLowerUnknownSize(OnlyLowerUnknownSize) {}