Line data Source code
1 : //===-- Execution.cpp - Implement code to simulate the program ------------===//
2 : //
3 : // The LLVM Compiler Infrastructure
4 : //
5 : // This file is distributed under the University of Illinois Open Source
6 : // License. See LICENSE.TXT for details.
7 : //
8 : //===----------------------------------------------------------------------===//
9 : //
10 : // This file contains the actual instruction interpreter.
11 : //
12 : //===----------------------------------------------------------------------===//
13 :
14 : #include "Interpreter.h"
15 : #include "llvm/ADT/APInt.h"
16 : #include "llvm/ADT/Statistic.h"
17 : #include "llvm/CodeGen/IntrinsicLowering.h"
18 : #include "llvm/IR/Constants.h"
19 : #include "llvm/IR/DerivedTypes.h"
20 : #include "llvm/IR/GetElementPtrTypeIterator.h"
21 : #include "llvm/IR/Instructions.h"
22 : #include "llvm/Support/CommandLine.h"
23 : #include "llvm/Support/Debug.h"
24 : #include "llvm/Support/ErrorHandling.h"
25 : #include "llvm/Support/MathExtras.h"
26 : #include "llvm/Support/raw_ostream.h"
27 : #include <algorithm>
28 : #include <cmath>
29 : using namespace llvm;
30 :
31 : #define DEBUG_TYPE "interpreter"
32 :
33 : STATISTIC(NumDynamicInsts, "Number of dynamic instructions executed");
34 :
35 : static cl::opt<bool> PrintVolatile("interpreter-print-volatile", cl::Hidden,
36 : cl::desc("make the interpreter print every volatile load and store"));
37 :
38 : //===----------------------------------------------------------------------===//
39 : // Various Helper Functions
40 : //===----------------------------------------------------------------------===//
41 :
42 546 : static void SetValue(Value *V, GenericValue Val, ExecutionContext &SF) {
43 546 : SF.Values[V] = Val;
44 546 : }
45 :
46 : //===----------------------------------------------------------------------===//
47 : // Binary Instruction Implementations
48 : //===----------------------------------------------------------------------===//
49 :
50 : #define IMPLEMENT_BINARY_OPERATOR(OP, TY) \
51 : case Type::TY##TyID: \
52 : Dest.TY##Val = Src1.TY##Val OP Src2.TY##Val; \
53 : break
54 :
55 2 : static void executeFAddInst(GenericValue &Dest, GenericValue Src1,
56 : GenericValue Src2, Type *Ty) {
57 2 : switch (Ty->getTypeID()) {
58 1 : IMPLEMENT_BINARY_OPERATOR(+, Float);
59 1 : IMPLEMENT_BINARY_OPERATOR(+, Double);
60 0 : default:
61 0 : dbgs() << "Unhandled type for FAdd instruction: " << *Ty << "\n";
62 0 : llvm_unreachable(nullptr);
63 : }
64 2 : }
65 :
66 0 : static void executeFSubInst(GenericValue &Dest, GenericValue Src1,
67 : GenericValue Src2, Type *Ty) {
68 0 : switch (Ty->getTypeID()) {
69 0 : IMPLEMENT_BINARY_OPERATOR(-, Float);
70 0 : IMPLEMENT_BINARY_OPERATOR(-, Double);
71 0 : default:
72 0 : dbgs() << "Unhandled type for FSub instruction: " << *Ty << "\n";
73 0 : llvm_unreachable(nullptr);
74 : }
75 0 : }
76 :
77 0 : static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
78 : GenericValue Src2, Type *Ty) {
79 0 : switch (Ty->getTypeID()) {
80 0 : IMPLEMENT_BINARY_OPERATOR(*, Float);
81 0 : IMPLEMENT_BINARY_OPERATOR(*, Double);
82 0 : default:
83 0 : dbgs() << "Unhandled type for FMul instruction: " << *Ty << "\n";
84 0 : llvm_unreachable(nullptr);
85 : }
86 0 : }
87 :
88 0 : static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
89 : GenericValue Src2, Type *Ty) {
90 0 : switch (Ty->getTypeID()) {
91 0 : IMPLEMENT_BINARY_OPERATOR(/, Float);
92 0 : IMPLEMENT_BINARY_OPERATOR(/, Double);
93 0 : default:
94 0 : dbgs() << "Unhandled type for FDiv instruction: " << *Ty << "\n";
95 0 : llvm_unreachable(nullptr);
96 : }
97 0 : }
98 :
99 0 : static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
100 : GenericValue Src2, Type *Ty) {
101 0 : switch (Ty->getTypeID()) {
102 0 : case Type::FloatTyID:
103 0 : Dest.FloatVal = fmod(Src1.FloatVal, Src2.FloatVal);
104 0 : break;
105 0 : case Type::DoubleTyID:
106 0 : Dest.DoubleVal = fmod(Src1.DoubleVal, Src2.DoubleVal);
107 0 : break;
108 0 : default:
109 0 : dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
110 0 : llvm_unreachable(nullptr);
111 : }
112 0 : }
113 :
114 : #define IMPLEMENT_INTEGER_ICMP(OP, TY) \
115 : case Type::IntegerTyID: \
116 : Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \
117 : break;
118 :
119 : #define IMPLEMENT_VECTOR_INTEGER_ICMP(OP, TY) \
120 : case Type::VectorTyID: { \
121 : assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \
122 : Dest.AggregateVal.resize( Src1.AggregateVal.size() ); \
123 : for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++) \
124 : Dest.AggregateVal[_i].IntVal = APInt(1, \
125 : Src1.AggregateVal[_i].IntVal.OP(Src2.AggregateVal[_i].IntVal));\
126 : } break;
127 :
128 : // Handle pointers specially because they must be compared with only as much
129 : // width as the host has. We _do not_ want to be comparing 64 bit values when
130 : // running on a 32-bit host, otherwise the upper 32 bits might mess up
131 : // comparisons if they contain garbage.
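       : // (The casts below route both operands through intptr_t so that the
       : // comparison is performed at host pointer width, per the note above.)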
132 : #define IMPLEMENT_POINTER_ICMP(OP) \
133 : case Type::PointerTyID: \
134 : Dest.IntVal = APInt(1,(void*)(intptr_t)Src1.PointerVal OP \
135 : (void*)(intptr_t)Src2.PointerVal); \
136 : break;
137 :
138 8 : static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2,
139 : Type *Ty) {
140 : GenericValue Dest;
141 8 : switch (Ty->getTypeID()) {
142 24 : IMPLEMENT_INTEGER_ICMP(eq,Ty);
143 0 : IMPLEMENT_VECTOR_INTEGER_ICMP(eq,Ty);
144 0 : IMPLEMENT_POINTER_ICMP(==);
145 0 : default:
146 0 : dbgs() << "Unhandled type for ICMP_EQ predicate: " << *Ty << "\n";
147 0 : llvm_unreachable(nullptr);
148 : }
149 8 : return Dest;
150 : }
151 :
152 2 : static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2,
153 : Type *Ty) {
154 : GenericValue Dest;
155 2 : switch (Ty->getTypeID()) {
156 6 : IMPLEMENT_INTEGER_ICMP(ne,Ty);
157 0 : IMPLEMENT_VECTOR_INTEGER_ICMP(ne,Ty);
158 0 : IMPLEMENT_POINTER_ICMP(!=);
159 0 : default:
160 0 : dbgs() << "Unhandled type for ICMP_NE predicate: " << *Ty << "\n";
161 0 : llvm_unreachable(nullptr);
162 : }
163 2 : return Dest;
164 : }
165 :
166 0 : static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2,
167 : Type *Ty) {
168 : GenericValue Dest;
169 0 : switch (Ty->getTypeID()) {
170 0 : IMPLEMENT_INTEGER_ICMP(ult,Ty);
171 0 : IMPLEMENT_VECTOR_INTEGER_ICMP(ult,Ty);
172 0 : IMPLEMENT_POINTER_ICMP(<);
173 0 : default:
174 0 : dbgs() << "Unhandled type for ICMP_ULT predicate: " << *Ty << "\n";
175 0 : llvm_unreachable(nullptr);
176 : }
177 0 : return Dest;
178 : }
179 :
180 0 : static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2,
181 : Type *Ty) {
182 : GenericValue Dest;
183 0 : switch (Ty->getTypeID()) {
184 0 : IMPLEMENT_INTEGER_ICMP(slt,Ty);
185 0 : IMPLEMENT_VECTOR_INTEGER_ICMP(slt,Ty);
186 0 : IMPLEMENT_POINTER_ICMP(<);
187 0 : default:
188 0 : dbgs() << "Unhandled type for ICMP_SLT predicate: " << *Ty << "\n";
189 0 : llvm_unreachable(nullptr);
190 : }
191 0 : return Dest;
192 : }
193 :
194 0 : static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2,
195 : Type *Ty) {
196 : GenericValue Dest;
197 0 : switch (Ty->getTypeID()) {
198 0 : IMPLEMENT_INTEGER_ICMP(ugt,Ty);
199 0 : IMPLEMENT_VECTOR_INTEGER_ICMP(ugt,Ty);
200 0 : IMPLEMENT_POINTER_ICMP(>);
201 0 : default:
202 0 : dbgs() << "Unhandled type for ICMP_UGT predicate: " << *Ty << "\n";
203 0 : llvm_unreachable(nullptr);
204 : }
205 0 : return Dest;
206 : }
207 :
208 0 : static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2,
209 : Type *Ty) {
210 : GenericValue Dest;
211 0 : switch (Ty->getTypeID()) {
212 0 : IMPLEMENT_INTEGER_ICMP(sgt,Ty);
213 0 : IMPLEMENT_VECTOR_INTEGER_ICMP(sgt,Ty);
214 0 : IMPLEMENT_POINTER_ICMP(>);
215 0 : default:
216 0 : dbgs() << "Unhandled type for ICMP_SGT predicate: " << *Ty << "\n";
217 0 : llvm_unreachable(nullptr);
218 : }
219 0 : return Dest;
220 : }
221 :
222 0 : static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2,
223 : Type *Ty) {
224 : GenericValue Dest;
225 0 : switch (Ty->getTypeID()) {
226 0 : IMPLEMENT_INTEGER_ICMP(ule,Ty);
227 0 : IMPLEMENT_VECTOR_INTEGER_ICMP(ule,Ty);
228 0 : IMPLEMENT_POINTER_ICMP(<=);
229 0 : default:
230 0 : dbgs() << "Unhandled type for ICMP_ULE predicate: " << *Ty << "\n";
231 0 : llvm_unreachable(nullptr);
232 : }
233 0 : return Dest;
234 : }
235 :
236 0 : static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2,
237 : Type *Ty) {
238 : GenericValue Dest;
239 0 : switch (Ty->getTypeID()) {
240 0 : IMPLEMENT_INTEGER_ICMP(sle,Ty);
241 0 : IMPLEMENT_VECTOR_INTEGER_ICMP(sle,Ty);
242 0 : IMPLEMENT_POINTER_ICMP(<=);
243 0 : default:
244 0 : dbgs() << "Unhandled type for ICMP_SLE predicate: " << *Ty << "\n";
245 0 : llvm_unreachable(nullptr);
246 : }
247 0 : return Dest;
248 : }
249 :
250 0 : static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2,
251 : Type *Ty) {
252 : GenericValue Dest;
253 0 : switch (Ty->getTypeID()) {
254 0 : IMPLEMENT_INTEGER_ICMP(uge,Ty);
255 0 : IMPLEMENT_VECTOR_INTEGER_ICMP(uge,Ty);
256 0 : IMPLEMENT_POINTER_ICMP(>=);
257 0 : default:
258 0 : dbgs() << "Unhandled type for ICMP_UGE predicate: " << *Ty << "\n";
259 0 : llvm_unreachable(nullptr);
260 : }
261 0 : return Dest;
262 : }
263 :
264 0 : static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2,
265 : Type *Ty) {
266 : GenericValue Dest;
267 0 : switch (Ty->getTypeID()) {
268 0 : IMPLEMENT_INTEGER_ICMP(sge,Ty);
269 0 : IMPLEMENT_VECTOR_INTEGER_ICMP(sge,Ty);
270 0 : IMPLEMENT_POINTER_ICMP(>=);
271 0 : default:
272 0 : dbgs() << "Unhandled type for ICMP_SGE predicate: " << *Ty << "\n";
273 0 : llvm_unreachable(nullptr);
274 : }
275 0 : return Dest;
276 : }
277 :
278 10 : void Interpreter::visitICmpInst(ICmpInst &I) {
279 : ExecutionContext &SF = ECStack.back();
280 10 : Type *Ty = I.getOperand(0)->getType();
281 20 : GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
282 20 : GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
283 10 : GenericValue R; // Result
284 :
285 10 : switch (I.getPredicate()) {
286 8 : case ICmpInst::ICMP_EQ: R = executeICMP_EQ(Src1, Src2, Ty); break;
287 2 : case ICmpInst::ICMP_NE: R = executeICMP_NE(Src1, Src2, Ty); break;
288 0 : case ICmpInst::ICMP_ULT: R = executeICMP_ULT(Src1, Src2, Ty); break;
289 0 : case ICmpInst::ICMP_SLT: R = executeICMP_SLT(Src1, Src2, Ty); break;
290 0 : case ICmpInst::ICMP_UGT: R = executeICMP_UGT(Src1, Src2, Ty); break;
291 0 : case ICmpInst::ICMP_SGT: R = executeICMP_SGT(Src1, Src2, Ty); break;
292 0 : case ICmpInst::ICMP_ULE: R = executeICMP_ULE(Src1, Src2, Ty); break;
293 0 : case ICmpInst::ICMP_SLE: R = executeICMP_SLE(Src1, Src2, Ty); break;
294 0 : case ICmpInst::ICMP_UGE: R = executeICMP_UGE(Src1, Src2, Ty); break;
295 0 : case ICmpInst::ICMP_SGE: R = executeICMP_SGE(Src1, Src2, Ty); break;
296 0 : default:
297 0 : dbgs() << "Don't know how to handle this ICmp predicate!\n-->" << I;
298 0 : llvm_unreachable(nullptr);
299 : }
300 :
301 10 : SetValue(&I, R, SF);
302 10 : }
303 :
304 : #define IMPLEMENT_FCMP(OP, TY) \
305 : case Type::TY##TyID: \
306 : Dest.IntVal = APInt(1,Src1.TY##Val OP Src2.TY##Val); \
307 : break
308 :
309 : #define IMPLEMENT_VECTOR_FCMP_T(OP, TY) \
310 : assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \
311 : Dest.AggregateVal.resize( Src1.AggregateVal.size() ); \
312 : for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++) \
313 : Dest.AggregateVal[_i].IntVal = APInt(1, \
314 : Src1.AggregateVal[_i].TY##Val OP Src2.AggregateVal[_i].TY##Val);\
315 : break;
316 :
317 : #define IMPLEMENT_VECTOR_FCMP(OP) \
318 : case Type::VectorTyID: \
319 : if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) { \
320 : IMPLEMENT_VECTOR_FCMP_T(OP, Float); \
321 : } else { \
322 : IMPLEMENT_VECTOR_FCMP_T(OP, Double); \
323 : }
324 :
325 16 : static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
326 : Type *Ty) {
327 : GenericValue Dest;
328 16 : switch (Ty->getTypeID()) {
329 16 : IMPLEMENT_FCMP(==, Float);
330 16 : IMPLEMENT_FCMP(==, Double);
331 0 : IMPLEMENT_VECTOR_FCMP(==);
332 0 : default:
333 0 : dbgs() << "Unhandled type for FCmp EQ instruction: " << *Ty << "\n";
334 0 : llvm_unreachable(nullptr);
335 : }
336 16 : return Dest;
337 : }
338 :
339 : #define IMPLEMENT_SCALAR_NANS(TY, X,Y) \
340 : if (TY->isFloatTy()) { \
341 : if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \
342 : Dest.IntVal = APInt(1,false); \
343 : return Dest; \
344 : } \
345 : } else { \
346 : if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
347 : Dest.IntVal = APInt(1,false); \
348 : return Dest; \
349 : } \
350 : }
351 :
352 : #define MASK_VECTOR_NANS_T(X,Y, TZ, FLAG) \
353 : assert(X.AggregateVal.size() == Y.AggregateVal.size()); \
354 : Dest.AggregateVal.resize( X.AggregateVal.size() ); \
355 : for( uint32_t _i=0;_i<X.AggregateVal.size();_i++) { \
356 : if (X.AggregateVal[_i].TZ##Val != X.AggregateVal[_i].TZ##Val || \
357 : Y.AggregateVal[_i].TZ##Val != Y.AggregateVal[_i].TZ##Val) \
358 : Dest.AggregateVal[_i].IntVal = APInt(1,FLAG); \
359 : else { \
360 : Dest.AggregateVal[_i].IntVal = APInt(1,!FLAG); \
361 : } \
362 : }
363 :
364 : #define MASK_VECTOR_NANS(TY, X,Y, FLAG) \
365 : if (TY->isVectorTy()) { \
366 : if (cast<VectorType>(TY)->getElementType()->isFloatTy()) { \
367 : MASK_VECTOR_NANS_T(X, Y, Float, FLAG) \
368 : } else { \
369 : MASK_VECTOR_NANS_T(X, Y, Double, FLAG) \
370 : } \
371 : } \
372 :
373 :
374 :
375 0 : static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2,
376 : Type *Ty)
377 : {
378 : GenericValue Dest;
379 : // If the input is a scalar and Src1 or Src2 is NaN, return false.
380 0 : IMPLEMENT_SCALAR_NANS(Ty, Src1, Src2)
381 : // If the input is a vector, detect NaNs and fill the mask.
382 0 : MASK_VECTOR_NANS(Ty, Src1, Src2, false)
383 0 : GenericValue DestMask = Dest;
384 0 : switch (Ty->getTypeID()) {
385 0 : IMPLEMENT_FCMP(!=, Float);
386 0 : IMPLEMENT_FCMP(!=, Double);
387 0 : IMPLEMENT_VECTOR_FCMP(!=);
388 0 : default:
389 0 : dbgs() << "Unhandled type for FCmp NE instruction: " << *Ty << "\n";
390 0 : llvm_unreachable(nullptr);
391 : }
392 : // In the vector case, mask out the NaN elements.
393 0 : if (Ty->isVectorTy())
394 0 : for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
395 0 : if (DestMask.AggregateVal[_i].IntVal == false)
396 0 : Dest.AggregateVal[_i].IntVal = APInt(1,false);
397 :
398 : return Dest;
399 : }
400 :
401 0 : static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2,
402 : Type *Ty) {
403 : GenericValue Dest;
404 0 : switch (Ty->getTypeID()) {
405 0 : IMPLEMENT_FCMP(<=, Float);
406 0 : IMPLEMENT_FCMP(<=, Double);
407 0 : IMPLEMENT_VECTOR_FCMP(<=);
408 0 : default:
409 0 : dbgs() << "Unhandled type for FCmp LE instruction: " << *Ty << "\n";
410 0 : llvm_unreachable(nullptr);
411 : }
412 0 : return Dest;
413 : }
414 :
415 0 : static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2,
416 : Type *Ty) {
417 : GenericValue Dest;
418 0 : switch (Ty->getTypeID()) {
419 0 : IMPLEMENT_FCMP(>=, Float);
420 0 : IMPLEMENT_FCMP(>=, Double);
421 0 : IMPLEMENT_VECTOR_FCMP(>=);
422 0 : default:
423 0 : dbgs() << "Unhandled type for FCmp GE instruction: " << *Ty << "\n";
424 0 : llvm_unreachable(nullptr);
425 : }
426 0 : return Dest;
427 : }
428 :
429 0 : static GenericValue executeFCMP_OLT(GenericValue Src1, GenericValue Src2,
430 : Type *Ty) {
431 : GenericValue Dest;
432 0 : switch (Ty->getTypeID()) {
433 0 : IMPLEMENT_FCMP(<, Float);
434 0 : IMPLEMENT_FCMP(<, Double);
435 0 : IMPLEMENT_VECTOR_FCMP(<);
436 0 : default:
437 0 : dbgs() << "Unhandled type for FCmp LT instruction: " << *Ty << "\n";
438 0 : llvm_unreachable(nullptr);
439 : }
440 0 : return Dest;
441 : }
442 :
443 0 : static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2,
444 : Type *Ty) {
445 : GenericValue Dest;
446 0 : switch (Ty->getTypeID()) {
447 0 : IMPLEMENT_FCMP(>, Float);
448 0 : IMPLEMENT_FCMP(>, Double);
449 0 : IMPLEMENT_VECTOR_FCMP(>);
450 0 : default:
451 0 : dbgs() << "Unhandled type for FCmp GT instruction: " << *Ty << "\n";
452 0 : llvm_unreachable(nullptr);
453 : }
454 0 : return Dest;
455 : }
456 :
457 : #define IMPLEMENT_UNORDERED(TY, X,Y) \
458 : if (TY->isFloatTy()) { \
459 : if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \
460 : Dest.IntVal = APInt(1,true); \
461 : return Dest; \
462 : } \
463 : } else if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
464 : Dest.IntVal = APInt(1,true); \
465 : return Dest; \
466 : }
467 :
468 : #define IMPLEMENT_VECTOR_UNORDERED(TY, X, Y, FUNC) \
469 : if (TY->isVectorTy()) { \
470 : GenericValue DestMask = Dest; \
471 : Dest = FUNC(Src1, Src2, Ty); \
472 : for (size_t _i = 0; _i < Src1.AggregateVal.size(); _i++) \
473 : if (DestMask.AggregateVal[_i].IntVal == true) \
474 : Dest.AggregateVal[_i].IntVal = APInt(1, true); \
475 : return Dest; \
476 : }
477 :
478 0 : static GenericValue executeFCMP_UEQ(GenericValue Src1, GenericValue Src2,
479 : Type *Ty) {
480 0 : GenericValue Dest;
481 0 : IMPLEMENT_UNORDERED(Ty, Src1, Src2)
482 0 : MASK_VECTOR_NANS(Ty, Src1, Src2, true)
483 0 : IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OEQ)
484 0 : return executeFCMP_OEQ(Src1, Src2, Ty);
485 :
486 : }
487 :
488 0 : static GenericValue executeFCMP_UNE(GenericValue Src1, GenericValue Src2,
489 : Type *Ty) {
490 0 : GenericValue Dest;
491 0 : IMPLEMENT_UNORDERED(Ty, Src1, Src2)
492 0 : MASK_VECTOR_NANS(Ty, Src1, Src2, true)
493 0 : IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_ONE)
494 0 : return executeFCMP_ONE(Src1, Src2, Ty);
495 : }
496 :
497 0 : static GenericValue executeFCMP_ULE(GenericValue Src1, GenericValue Src2,
498 : Type *Ty) {
499 0 : GenericValue Dest;
500 0 : IMPLEMENT_UNORDERED(Ty, Src1, Src2)
501 0 : MASK_VECTOR_NANS(Ty, Src1, Src2, true)
502 0 : IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLE)
503 0 : return executeFCMP_OLE(Src1, Src2, Ty);
504 : }
505 :
506 0 : static GenericValue executeFCMP_UGE(GenericValue Src1, GenericValue Src2,
507 : Type *Ty) {
508 0 : GenericValue Dest;
509 0 : IMPLEMENT_UNORDERED(Ty, Src1, Src2)
510 0 : MASK_VECTOR_NANS(Ty, Src1, Src2, true)
511 0 : IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGE)
512 0 : return executeFCMP_OGE(Src1, Src2, Ty);
513 : }
514 :
515 0 : static GenericValue executeFCMP_ULT(GenericValue Src1, GenericValue Src2,
516 : Type *Ty) {
517 0 : GenericValue Dest;
518 0 : IMPLEMENT_UNORDERED(Ty, Src1, Src2)
519 0 : MASK_VECTOR_NANS(Ty, Src1, Src2, true)
520 0 : IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLT)
521 0 : return executeFCMP_OLT(Src1, Src2, Ty);
522 : }
523 :
524 0 : static GenericValue executeFCMP_UGT(GenericValue Src1, GenericValue Src2,
525 : Type *Ty) {
526 0 : GenericValue Dest;
527 0 : IMPLEMENT_UNORDERED(Ty, Src1, Src2)
528 0 : MASK_VECTOR_NANS(Ty, Src1, Src2, true)
529 0 : IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGT)
530 0 : return executeFCMP_OGT(Src1, Src2, Ty);
531 : }
532 :
533 0 : static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2,
534 : Type *Ty) {
535 : GenericValue Dest;
536 0 : if(Ty->isVectorTy()) {
537 : assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
538 0 : Dest.AggregateVal.resize( Src1.AggregateVal.size() );
539 0 : if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
540 0 : for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
541 0 : Dest.AggregateVal[_i].IntVal = APInt(1,
542 0 : ( (Src1.AggregateVal[_i].FloatVal ==
543 0 : Src1.AggregateVal[_i].FloatVal) &&
544 0 : (Src2.AggregateVal[_i].FloatVal ==
545 : Src2.AggregateVal[_i].FloatVal)));
546 : } else {
547 0 : for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
548 0 : Dest.AggregateVal[_i].IntVal = APInt(1,
549 0 : ( (Src1.AggregateVal[_i].DoubleVal ==
550 0 : Src1.AggregateVal[_i].DoubleVal) &&
551 0 : (Src2.AggregateVal[_i].DoubleVal ==
552 : Src2.AggregateVal[_i].DoubleVal)));
553 : }
554 0 : } else if (Ty->isFloatTy())
555 0 : Dest.IntVal = APInt(1,(Src1.FloatVal == Src1.FloatVal &&
556 0 : Src2.FloatVal == Src2.FloatVal));
557 : else {
558 0 : Dest.IntVal = APInt(1,(Src1.DoubleVal == Src1.DoubleVal &&
559 0 : Src2.DoubleVal == Src2.DoubleVal));
560 : }
561 0 : return Dest;
562 : }
563 :
564 0 : static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2,
565 : Type *Ty) {
566 : GenericValue Dest;
567 0 : if(Ty->isVectorTy()) {
568 : assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
569 0 : Dest.AggregateVal.resize( Src1.AggregateVal.size() );
570 0 : if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
571 0 : for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
572 0 : Dest.AggregateVal[_i].IntVal = APInt(1,
573 0 : ( (Src1.AggregateVal[_i].FloatVal !=
574 0 : Src1.AggregateVal[_i].FloatVal) ||
575 0 : (Src2.AggregateVal[_i].FloatVal !=
576 : Src2.AggregateVal[_i].FloatVal)));
577 : } else {
578 0 : for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
579 0 : Dest.AggregateVal[_i].IntVal = APInt(1,
580 0 : ( (Src1.AggregateVal[_i].DoubleVal !=
581 0 : Src1.AggregateVal[_i].DoubleVal) ||
582 0 : (Src2.AggregateVal[_i].DoubleVal !=
583 : Src2.AggregateVal[_i].DoubleVal)));
584 : }
585 0 : } else if (Ty->isFloatTy())
586 0 : Dest.IntVal = APInt(1,(Src1.FloatVal != Src1.FloatVal ||
587 0 : Src2.FloatVal != Src2.FloatVal));
588 : else {
589 0 : Dest.IntVal = APInt(1,(Src1.DoubleVal != Src1.DoubleVal ||
590 0 : Src2.DoubleVal != Src2.DoubleVal));
591 : }
592 0 : return Dest;
593 : }
594 :
595 0 : static GenericValue executeFCMP_BOOL(GenericValue Src1, GenericValue Src2,
596 : Type *Ty, const bool val) {
597 : GenericValue Dest;
598 0 : if(Ty->isVectorTy()) {
599 : assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
600 0 : Dest.AggregateVal.resize( Src1.AggregateVal.size() );
601 0 : for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
602 0 : Dest.AggregateVal[_i].IntVal = APInt(1,val);
603 : } else {
604 0 : Dest.IntVal = APInt(1, val);
605 : }
606 :
607 0 : return Dest;
608 : }
609 :
610 16 : void Interpreter::visitFCmpInst(FCmpInst &I) {
611 : ExecutionContext &SF = ECStack.back();
612 16 : Type *Ty = I.getOperand(0)->getType();
613 32 : GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
614 32 : GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
615 16 : GenericValue R; // Result
616 :
617 16 : switch (I.getPredicate()) {
618 0 : default:
619 0 : dbgs() << "Don't know how to handle this FCmp predicate!\n-->" << I;
620 0 : llvm_unreachable(nullptr);
621 : break;
622 0 : case FCmpInst::FCMP_FALSE: R = executeFCMP_BOOL(Src1, Src2, Ty, false);
623 0 : break;
624 0 : case FCmpInst::FCMP_TRUE: R = executeFCMP_BOOL(Src1, Src2, Ty, true);
625 0 : break;
626 0 : case FCmpInst::FCMP_ORD: R = executeFCMP_ORD(Src1, Src2, Ty); break;
627 0 : case FCmpInst::FCMP_UNO: R = executeFCMP_UNO(Src1, Src2, Ty); break;
628 0 : case FCmpInst::FCMP_UEQ: R = executeFCMP_UEQ(Src1, Src2, Ty); break;
629 16 : case FCmpInst::FCMP_OEQ: R = executeFCMP_OEQ(Src1, Src2, Ty); break;
630 0 : case FCmpInst::FCMP_UNE: R = executeFCMP_UNE(Src1, Src2, Ty); break;
631 0 : case FCmpInst::FCMP_ONE: R = executeFCMP_ONE(Src1, Src2, Ty); break;
632 0 : case FCmpInst::FCMP_ULT: R = executeFCMP_ULT(Src1, Src2, Ty); break;
633 0 : case FCmpInst::FCMP_OLT: R = executeFCMP_OLT(Src1, Src2, Ty); break;
634 0 : case FCmpInst::FCMP_UGT: R = executeFCMP_UGT(Src1, Src2, Ty); break;
635 0 : case FCmpInst::FCMP_OGT: R = executeFCMP_OGT(Src1, Src2, Ty); break;
636 0 : case FCmpInst::FCMP_ULE: R = executeFCMP_ULE(Src1, Src2, Ty); break;
637 0 : case FCmpInst::FCMP_OLE: R = executeFCMP_OLE(Src1, Src2, Ty); break;
638 0 : case FCmpInst::FCMP_UGE: R = executeFCMP_UGE(Src1, Src2, Ty); break;
639 0 : case FCmpInst::FCMP_OGE: R = executeFCMP_OGE(Src1, Src2, Ty); break;
640 : }
641 :
642 16 : SetValue(&I, R, SF);
643 16 : }
644 :
645 0 : static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1,
646 : GenericValue Src2, Type *Ty) {
647 0 : GenericValue Result;
648 0 : switch (predicate) {
649 0 : case ICmpInst::ICMP_EQ: return executeICMP_EQ(Src1, Src2, Ty);
650 0 : case ICmpInst::ICMP_NE: return executeICMP_NE(Src1, Src2, Ty);
651 0 : case ICmpInst::ICMP_UGT: return executeICMP_UGT(Src1, Src2, Ty);
652 0 : case ICmpInst::ICMP_SGT: return executeICMP_SGT(Src1, Src2, Ty);
653 0 : case ICmpInst::ICMP_ULT: return executeICMP_ULT(Src1, Src2, Ty);
654 0 : case ICmpInst::ICMP_SLT: return executeICMP_SLT(Src1, Src2, Ty);
655 0 : case ICmpInst::ICMP_UGE: return executeICMP_UGE(Src1, Src2, Ty);
656 0 : case ICmpInst::ICMP_SGE: return executeICMP_SGE(Src1, Src2, Ty);
657 0 : case ICmpInst::ICMP_ULE: return executeICMP_ULE(Src1, Src2, Ty);
658 0 : case ICmpInst::ICMP_SLE: return executeICMP_SLE(Src1, Src2, Ty);
659 0 : case FCmpInst::FCMP_ORD: return executeFCMP_ORD(Src1, Src2, Ty);
660 0 : case FCmpInst::FCMP_UNO: return executeFCMP_UNO(Src1, Src2, Ty);
661 0 : case FCmpInst::FCMP_OEQ: return executeFCMP_OEQ(Src1, Src2, Ty);
662 0 : case FCmpInst::FCMP_UEQ: return executeFCMP_UEQ(Src1, Src2, Ty);
663 0 : case FCmpInst::FCMP_ONE: return executeFCMP_ONE(Src1, Src2, Ty);
664 0 : case FCmpInst::FCMP_UNE: return executeFCMP_UNE(Src1, Src2, Ty);
665 0 : case FCmpInst::FCMP_OLT: return executeFCMP_OLT(Src1, Src2, Ty);
666 0 : case FCmpInst::FCMP_ULT: return executeFCMP_ULT(Src1, Src2, Ty);
667 0 : case FCmpInst::FCMP_OGT: return executeFCMP_OGT(Src1, Src2, Ty);
668 0 : case FCmpInst::FCMP_UGT: return executeFCMP_UGT(Src1, Src2, Ty);
669 0 : case FCmpInst::FCMP_OLE: return executeFCMP_OLE(Src1, Src2, Ty);
670 0 : case FCmpInst::FCMP_ULE: return executeFCMP_ULE(Src1, Src2, Ty);
671 0 : case FCmpInst::FCMP_OGE: return executeFCMP_OGE(Src1, Src2, Ty);
672 0 : case FCmpInst::FCMP_UGE: return executeFCMP_UGE(Src1, Src2, Ty);
673 0 : case FCmpInst::FCMP_FALSE: return executeFCMP_BOOL(Src1, Src2, Ty, false);
674 0 : case FCmpInst::FCMP_TRUE: return executeFCMP_BOOL(Src1, Src2, Ty, true);
675 0 : default:
676 0 : dbgs() << "Unhandled Cmp predicate\n";
677 0 : llvm_unreachable(nullptr);
678 : }
679 : }
680 :
681 122 : void Interpreter::visitBinaryOperator(BinaryOperator &I) {
682 : ExecutionContext &SF = ECStack.back();
683 122 : Type *Ty = I.getOperand(0)->getType();
684 244 : GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
685 244 : GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
686 122 : GenericValue R; // Result
687 :
688 : // First process vector operation
689 122 : if (Ty->isVectorTy()) {
690 : assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
691 182 : R.AggregateVal.resize(Src1.AggregateVal.size());
692 :
693 : // Macros to execute binary operation 'OP' over integer vectors
694 : #define INTEGER_VECTOR_OPERATION(OP) \
695 : for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \
696 : R.AggregateVal[i].IntVal = \
697 : Src1.AggregateVal[i].IntVal OP Src2.AggregateVal[i].IntVal;
698 :
699 : // Additional macros to execute binary operations udiv/sdiv/urem/srem since
700 : // they have different notation.
701 : #define INTEGER_VECTOR_FUNCTION(OP) \
702 : for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \
703 : R.AggregateVal[i].IntVal = \
704 : Src1.AggregateVal[i].IntVal.OP(Src2.AggregateVal[i].IntVal);
705 :
706 : // Macros to execute binary operation 'OP' over floating point type TY
707 : // (float or double) vectors
708 : #define FLOAT_VECTOR_FUNCTION(OP, TY) \
709 : for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \
710 : R.AggregateVal[i].TY = \
711 : Src1.AggregateVal[i].TY OP Src2.AggregateVal[i].TY;
712 :
713 : // Macros to choose appropriate TY: float or double and run operation
714 : // execution
715 : #define FLOAT_VECTOR_OP(OP) { \
716 : if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) \
717 : FLOAT_VECTOR_FUNCTION(OP, FloatVal) \
718 : else { \
719 : if (cast<VectorType>(Ty)->getElementType()->isDoubleTy()) \
720 : FLOAT_VECTOR_FUNCTION(OP, DoubleVal) \
721 : else { \
722 : dbgs() << "Unhandled type for OP instruction: " << *Ty << "\n"; \
723 : llvm_unreachable(0); \
724 : } \
725 : } \
726 : }
727 :
728 91 : switch(I.getOpcode()){
729 0 : default:
730 0 : dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
731 0 : llvm_unreachable(nullptr);
732 : break;
733 614 : case Instruction::Add: INTEGER_VECTOR_OPERATION(+) break;
734 304 : case Instruction::Sub: INTEGER_VECTOR_OPERATION(-) break;
735 0 : case Instruction::Mul: INTEGER_VECTOR_OPERATION(*) break;
736 0 : case Instruction::UDiv: INTEGER_VECTOR_FUNCTION(udiv) break;
737 0 : case Instruction::SDiv: INTEGER_VECTOR_FUNCTION(sdiv) break;
738 0 : case Instruction::URem: INTEGER_VECTOR_FUNCTION(urem) break;
739 0 : case Instruction::SRem: INTEGER_VECTOR_FUNCTION(srem) break;
740 0 : case Instruction::And: INTEGER_VECTOR_OPERATION(&) break;
741 0 : case Instruction::Or: INTEGER_VECTOR_OPERATION(|) break;
742 0 : case Instruction::Xor: INTEGER_VECTOR_OPERATION(^) break;
743 172 : case Instruction::FAdd: FLOAT_VECTOR_OP(+) break;
744 86 : case Instruction::FSub: FLOAT_VECTOR_OP(-) break;
745 0 : case Instruction::FMul: FLOAT_VECTOR_OP(*) break;
746 0 : case Instruction::FDiv: FLOAT_VECTOR_OP(/) break;
747 : case Instruction::FRem:
748 0 : if (cast<VectorType>(Ty)->getElementType()->isFloatTy())
749 0 : for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
750 0 : R.AggregateVal[i].FloatVal =
751 0 : fmod(Src1.AggregateVal[i].FloatVal, Src2.AggregateVal[i].FloatVal);
752 : else {
753 0 : if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())
754 0 : for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
755 0 : R.AggregateVal[i].DoubleVal =
756 0 : fmod(Src1.AggregateVal[i].DoubleVal, Src2.AggregateVal[i].DoubleVal);
757 : else {
758 0 : dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
759 0 : llvm_unreachable(nullptr);
760 : }
761 : }
762 : break;
763 : }
764 : } else {
765 31 : switch (I.getOpcode()) {
766 0 : default:
767 0 : dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
768 0 : llvm_unreachable(nullptr);
769 : break;
770 2 : case Instruction::Add: R.IntVal = Src1.IntVal + Src2.IntVal; break;
771 1 : case Instruction::Sub: R.IntVal = Src1.IntVal - Src2.IntVal; break;
772 2 : case Instruction::Mul: R.IntVal = Src1.IntVal * Src2.IntVal; break;
773 2 : case Instruction::FAdd: executeFAddInst(R, Src1, Src2, Ty); break;
774 0 : case Instruction::FSub: executeFSubInst(R, Src1, Src2, Ty); break;
775 0 : case Instruction::FMul: executeFMulInst(R, Src1, Src2, Ty); break;
776 0 : case Instruction::FDiv: executeFDivInst(R, Src1, Src2, Ty); break;
777 0 : case Instruction::FRem: executeFRemInst(R, Src1, Src2, Ty); break;
778 0 : case Instruction::UDiv: R.IntVal = Src1.IntVal.udiv(Src2.IntVal); break;
779 0 : case Instruction::SDiv: R.IntVal = Src1.IntVal.sdiv(Src2.IntVal); break;
780 0 : case Instruction::URem: R.IntVal = Src1.IntVal.urem(Src2.IntVal); break;
781 0 : case Instruction::SRem: R.IntVal = Src1.IntVal.srem(Src2.IntVal); break;
782 11 : case Instruction::And: R.IntVal = Src1.IntVal & Src2.IntVal; break;
783 12 : case Instruction::Or: R.IntVal = Src1.IntVal | Src2.IntVal; break;
784 2 : case Instruction::Xor: R.IntVal = Src1.IntVal ^ Src2.IntVal; break;
785 : }
786 : }
787 122 : SetValue(&I, R, SF);
788 122 : }
789 :
790 30 : static GenericValue executeSelectInst(GenericValue Src1, GenericValue Src2,
791 : GenericValue Src3, Type *Ty) {
792 : GenericValue Dest;
793 30 : if(Ty->isVectorTy()) {
794 : assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
795 : assert(Src2.AggregateVal.size() == Src3.AggregateVal.size());
796 60 : Dest.AggregateVal.resize( Src1.AggregateVal.size() );
797 258 : for (size_t i = 0; i < Src1.AggregateVal.size(); ++i)
798 198 : Dest.AggregateVal[i] = (Src1.AggregateVal[i].IntVal == 0) ?
799 198 : Src3.AggregateVal[i] : Src2.AggregateVal[i];
800 : } else {
801 0 : Dest = (Src1.IntVal == 0) ? Src3 : Src2;
802 : }
803 30 : return Dest;
804 : }
805 :
806 30 : void Interpreter::visitSelectInst(SelectInst &I) {
807 : ExecutionContext &SF = ECStack.back();
808 30 : Type * Ty = I.getOperand(0)->getType();
809 60 : GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
810 60 : GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
811 60 : GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
812 60 : GenericValue R = executeSelectInst(Src1, Src2, Src3, Ty);
813 30 : SetValue(&I, R, SF);
814 30 : }
815 :
816 : //===----------------------------------------------------------------------===//
817 : // Terminator Instruction Implementations
818 : //===----------------------------------------------------------------------===//
819 :
820 19 : void Interpreter::exitCalled(GenericValue GV) {
821 : // runAtExitHandlers() assumes there are no stack frames, but
822 : // if exit() was called, then it had a stack frame. Blow away
823 : // the stack before interpreting atexit handlers.
824 : ECStack.clear();
825 19 : runAtExitHandlers();
826 19 : exit(GV.IntVal.zextOrTrunc(32).getZExtValue());
827 : }
828 :
829 : /// Pop the last stack frame off of ECStack and then copy the result
830 : /// back into the result variable if we are not returning void. The
831 : /// result variable may be the ExitValue, or the Value of the calling
832 : /// CallInst if there was a previous stack frame. This method may
833 : /// invalidate any ECStack iterators you have. This method also takes
834 : /// care of switching to the normal destination BB, if we are returning
835 : /// from an invoke.
836 : ///
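       : /// (Sketch of the usual flow: visitReturnInst gathers the return value and
       : /// calls this method; if a calling frame remains, the result is stored into
       : /// the caller's call instruction and execution then continues in that frame.)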
837 53 : void Interpreter::popStackAndReturnValueToCaller(Type *RetTy,
838 : GenericValue Result) {
839 : // Pop the current stack frame.
840 : ECStack.pop_back();
841 :
842 53 : if (ECStack.empty()) { // Finished main. Put result into exit code...
843 19 : if (RetTy && !RetTy->isVoidTy()) { // Nonvoid return type?
844 : ExitValue = Result; // Capture the exit value of the program
845 : } else {
846 0 : memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
847 : }
848 : } else {
849 : // If we have a previous stack frame, and we have a previous call,
850 : // fill in the return value...
851 : ExecutionContext &CallingSF = ECStack.back();
852 34 : if (Instruction *I = CallingSF.Caller.getInstruction()) {
853 : // Save result...
854 34 : if (!CallingSF.Caller.getType()->isVoidTy())
855 33 : SetValue(I, Result, CallingSF);
856 : if (InvokeInst *II = dyn_cast<InvokeInst> (I))
857 0 : SwitchToNewBasicBlock (II->getNormalDest (), CallingSF);
858 34 : CallingSF.Caller = CallSite(); // We returned from the call...
859 : }
860 : }
861 53 : }
862 :
863 25 : void Interpreter::visitReturnInst(ReturnInst &I) {
864 : ExecutionContext &SF = ECStack.back();
865 25 : Type *RetTy = Type::getVoidTy(I.getContext());
866 25 : GenericValue Result;
867 :
868 : // Save away the return value... (if we are not 'ret void')
869 25 : if (I.getNumOperands()) {
870 24 : RetTy = I.getReturnValue()->getType();
871 24 : Result = getOperandValue(I.getReturnValue(), SF);
872 : }
873 :
874 25 : popStackAndReturnValueToCaller(RetTy, Result);
875 25 : }
876 :
877 0 : void Interpreter::visitUnreachableInst(UnreachableInst &I) {
878 0 : report_fatal_error("Program executed an 'unreachable' instruction!");
879 : }
880 :
881 20 : void Interpreter::visitBranchInst(BranchInst &I) {
882 : ExecutionContext &SF = ECStack.back();
883 : BasicBlock *Dest;
884 :
885 : Dest = I.getSuccessor(0); // Uncond branches have a fixed dest...
886 20 : if (!I.isUnconditional()) {
887 : Value *Cond = I.getCondition();
888 7 : if (getOperandValue(Cond, SF).IntVal == 0) // If false cond...
889 : Dest = I.getSuccessor(1);
890 : }
891 20 : SwitchToNewBasicBlock(Dest, SF);
892 20 : }
893 :
894 0 : void Interpreter::visitSwitchInst(SwitchInst &I) {
895 : ExecutionContext &SF = ECStack.back();
896 : Value* Cond = I.getCondition();
897 0 : Type *ElTy = Cond->getType();
898 0 : GenericValue CondVal = getOperandValue(Cond, SF);
899 :
900 : // Check to see if any of the cases match...
901 : BasicBlock *Dest = nullptr;
902 0 : for (auto Case : I.cases()) {
903 0 : GenericValue CaseVal = getOperandValue(Case.getCaseValue(), SF);
904 0 : if (executeICMP_EQ(CondVal, CaseVal, ElTy).IntVal != 0) {
905 0 : Dest = cast<BasicBlock>(Case.getCaseSuccessor());
906 0 : break;
907 : }
908 : }
909 0 : if (!Dest) Dest = I.getDefaultDest(); // No cases matched: use default
910 0 : SwitchToNewBasicBlock(Dest, SF);
911 0 : }
912 :
913 0 : void Interpreter::visitIndirectBrInst(IndirectBrInst &I) {
914 : ExecutionContext &SF = ECStack.back();
915 0 : void *Dest = GVTOP(getOperandValue(I.getAddress(), SF));
916 0 : SwitchToNewBasicBlock((BasicBlock*)Dest, SF);
917 0 : }
918 :
919 :
920 : // SwitchToNewBasicBlock - This method is used to jump to a new basic block.
921 : // This function handles the actual updating of block and instruction iterators
922 : // as well as execution of all of the PHI nodes in the destination block.
923 : //
924 : // This method does this because all of the PHI nodes must be executed
925 : // atomically, reading their inputs before any of the results are updated. Not
926 : // doing this can cause problems if the PHI nodes depend on other PHI nodes for
927 : // their inputs. If the input PHI node is updated before it is read, incorrect
928 : // results can happen. Thus we use a two phase approach.
929 : //
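       : // For example, with two mutually dependent PHI nodes in the destination
       : // block (a hypothetical swap idiom):
       : //   %a = phi i32 [ 0, %entry ], [ %b, %loop ]
       : //   %b = phi i32 [ 1, %entry ], [ %a, %loop ]
       : // both incoming values must be read before either PHI is written;
       : // otherwise %b would observe the freshly updated value of %a.
       : //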
930 20 : void Interpreter::SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF){
931 20 : BasicBlock *PrevBB = SF.CurBB; // Remember where we came from...
932 20 : SF.CurBB = Dest; // Update CurBB to branch destination
933 20 : SF.CurInst = SF.CurBB->begin(); // Update new instruction ptr...
934 :
935 20 : if (!isa<PHINode>(SF.CurInst)) return; // Nothing fancy to do
936 :
937 : // Loop over all of the PHI nodes in the current block, reading their inputs.
938 1 : std::vector<GenericValue> ResultValues;
939 :
940 : for (; PHINode *PN = dyn_cast<PHINode>(SF.CurInst); ++SF.CurInst) {
941 : // Search for the value corresponding to this previous bb...
942 1 : int i = PN->getBasicBlockIndex(PrevBB);
943 : assert(i != -1 && "PHINode doesn't contain entry for predecessor??");
944 1 : Value *IncomingValue = PN->getIncomingValue(i);
945 :
946 : // Save the incoming value for this PHI node...
947 2 : ResultValues.push_back(getOperandValue(IncomingValue, SF));
948 : }
949 :
950 : // Now loop over all of the PHI nodes setting their values...
951 2 : SF.CurInst = SF.CurBB->begin();
952 3 : for (unsigned i = 0; isa<PHINode>(SF.CurInst); ++SF.CurInst, ++i) {
953 : PHINode *PN = cast<PHINode>(SF.CurInst);
954 2 : SetValue(PN, ResultValues[i], SF);
955 : }
956 : }
957 :
958 : //===----------------------------------------------------------------------===//
959 : // Memory Instruction Implementations
960 : //===----------------------------------------------------------------------===//
961 :
962 31 : void Interpreter::visitAllocaInst(AllocaInst &I) {
963 : ExecutionContext &SF = ECStack.back();
964 :
965 31 : Type *Ty = I.getType()->getElementType(); // Type to be allocated
966 :
967 : // Get the number of elements being allocated by the array...
968 : unsigned NumElements =
969 62 : getOperandValue(I.getOperand(0), SF).IntVal.getZExtValue();
970 :
971 31 : unsigned TypeSize = (size_t)getDataLayout().getTypeAllocSize(Ty);
972 :
973 : // Avoid malloc-ing zero bytes, use max()...
974 31 : unsigned MemToAlloc = std::max(1U, NumElements * TypeSize);
975 :
976 : // Allocate enough memory to hold the type...
977 31 : void *Memory = safe_malloc(MemToAlloc);
978 :
979 : LLVM_DEBUG(dbgs() << "Allocated Type: " << *Ty << " (" << TypeSize
980 : << " bytes) x " << NumElements << " (Total: " << MemToAlloc
981 : << ") at " << uintptr_t(Memory) << '\n');
982 :
983 31 : GenericValue Result = PTOGV(Memory);
984 : assert(Result.PointerVal && "Null pointer returned by malloc!");
985 31 : SetValue(&I, Result, SF);
986 :
987 31 : if (I.getOpcode() == Instruction::Alloca)
988 31 : ECStack.back().Allocas.add(Memory);
989 31 : }
990 :
991 : // getElementOffset - The workhorse for getelementptr.
992 : //
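       : // For example (hypothetical), a GEP index selecting element 5 of an array
       : // of i32 adds getTypeAllocSize(i32) * 5 bytes (typically 4 * 5 = 20) to
       : // Total, while a struct index adds that field's offset as reported by the
       : // DataLayout's StructLayout.
       : //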
993 16 : GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
994 : gep_type_iterator E,
995 : ExecutionContext &SF) {
996 : assert(Ptr->getType()->isPointerTy() &&
997 : "Cannot getElementOffset of a nonpointer type!");
998 :
999 : uint64_t Total = 0;
1000 :
1001 48 : for (; I != E; ++I) {
1002 0 : if (StructType *STy = I.getStructTypeOrNull()) {
1003 0 : const StructLayout *SLO = getDataLayout().getStructLayout(STy);
1004 :
1005 0 : const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
1006 0 : unsigned Index = unsigned(CPU->getZExtValue());
1007 :
1008 0 : Total += SLO->getElementOffset(Index);
1009 : } else {
1010 : // Get the index number for the array... which must be long type...
1011 32 : GenericValue IdxGV = getOperandValue(I.getOperand(), SF);
1012 :
1013 : int64_t Idx;
1014 : unsigned BitWidth =
1015 64 : cast<IntegerType>(I.getOperand()->getType())->getBitWidth();
1016 32 : if (BitWidth == 32)
1017 20 : Idx = (int64_t)(int32_t)IdxGV.IntVal.getZExtValue();
1018 : else {
1019 : assert(BitWidth == 64 && "Invalid index type for getelementptr");
1020 12 : Idx = (int64_t)IdxGV.IntVal.getZExtValue();
1021 : }
1022 32 : Total += getDataLayout().getTypeAllocSize(I.getIndexedType()) * Idx;
1023 : }
1024 : }
1025 :
1026 : GenericValue Result;
1027 16 : Result.PointerVal = ((char*)getOperandValue(Ptr, SF).PointerVal) + Total;
1028 : LLVM_DEBUG(dbgs() << "GEP Index " << Total << " bytes.\n");
1029 16 : return Result;
1030 : }
1031 :
1032 10 : void Interpreter::visitGetElementPtrInst(GetElementPtrInst &I) {
1033 : ExecutionContext &SF = ECStack.back();
1034 30 : SetValue(&I, executeGEPOperation(I.getPointerOperand(),
1035 : gep_type_begin(I), gep_type_end(I), SF), SF);
1036 10 : }
1037 :
1038 30 : void Interpreter::visitLoadInst(LoadInst &I) {
1039 : ExecutionContext &SF = ECStack.back();
1040 60 : GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
1041 30 : GenericValue *Ptr = (GenericValue*)GVTOP(SRC);
1042 30 : GenericValue Result;
1043 30 : LoadValueFromMemory(Result, Ptr, I.getType());
1044 30 : SetValue(&I, Result, SF);
1045 30 : if (I.isVolatile() && PrintVolatile)
1046 0 : dbgs() << "Volatile load " << I;
1047 30 : }
1048 :
1049 32 : void Interpreter::visitStoreInst(StoreInst &I) {
1050 : ExecutionContext &SF = ECStack.back();
1051 64 : GenericValue Val = getOperandValue(I.getOperand(0), SF);
1052 64 : GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
1053 32 : StoreValueToMemory(Val, (GenericValue *)GVTOP(SRC),
1054 : I.getOperand(0)->getType());
1055 32 : if (I.isVolatile() && PrintVolatile)
1056 0 : dbgs() << "Volatile store: " << I;
1057 32 : }
1058 :
1059 : //===----------------------------------------------------------------------===//
1060 : // Miscellaneous Instruction Implementations
1061 : //===----------------------------------------------------------------------===//
1062 :
1063 54 : void Interpreter::visitCallSite(CallSite CS) {
1064 : ExecutionContext &SF = ECStack.back();
1065 :
1066 : // Check to see if this is an intrinsic function call...
1067 : Function *F = CS.getCalledFunction();
1068 53 : if (F && F->isDeclaration())
1069 48 : switch (F->getIntrinsicID()) {
1070 : case Intrinsic::not_intrinsic:
1071 : break;
1072 : case Intrinsic::vastart: { // va_start
1073 0 : GenericValue ArgIndex;
1074 0 : ArgIndex.UIntPairVal.first = ECStack.size() - 1;
1075 : ArgIndex.UIntPairVal.second = 0;
1076 0 : SetValue(CS.getInstruction(), ArgIndex, SF);
1077 : return;
1078 : }
1079 : case Intrinsic::vaend: // va_end is a noop for the interpreter
1080 : return;
1081 : case Intrinsic::vacopy: // va_copy: dest = src
1082 0 : SetValue(CS.getInstruction(), getOperandValue(*CS.arg_begin(), SF), SF);
1083 0 : return;
1084 : default:
1085 : // If it is an unknown intrinsic function, use the intrinsic lowering
1086 : // class to transform it into hopefully tasty LLVM code.
1087 : //
1088 : BasicBlock::iterator me(CS.getInstruction());
1089 20 : BasicBlock *Parent = CS.getInstruction()->getParent();
1090 : bool atBegin(Parent->begin() == me);
1091 20 : if (!atBegin)
1092 : --me;
1093 20 : IL->LowerIntrinsicCall(cast<CallInst>(CS.getInstruction()));
1094 :
1095 : // Restore the CurInst pointer to the first instruction newly inserted, if
1096 : // any.
1097 20 : if (atBegin) {
1098 2 : SF.CurInst = Parent->begin();
1099 : } else {
1100 18 : SF.CurInst = me;
1101 : ++SF.CurInst;
1102 : }
1103 20 : return;
1104 : }
1105 :
1106 :
1107 34 : SF.Caller = CS;
1108 34 : std::vector<GenericValue> ArgVals;
1109 34 : const unsigned NumArgs = SF.Caller.arg_size();
1110 34 : ArgVals.reserve(NumArgs);
1111 : uint16_t pNum = 1;
1112 48 : for (CallSite::arg_iterator i = SF.Caller.arg_begin(),
1113 82 : e = SF.Caller.arg_end(); i != e; ++i, ++pNum) {
1114 48 : Value *V = *i;
1115 96 : ArgVals.push_back(getOperandValue(V, SF));
1116 : }
1117 :
1118 : // To handle indirect calls, we must get the pointer value from the argument
1119 : // and treat it as a function pointer.
1120 34 : GenericValue SRC = getOperandValue(SF.Caller.getCalledValue(), SF);
1121 34 : callFunction((Function*)GVTOP(SRC), ArgVals);
1122 : }
1123 :
1124 : // auxiliary function for shift operations
1125 : static unsigned getShiftAmount(uint64_t orgShiftAmount,
1126 : llvm::APInt valueToShift) {
1127 42 : unsigned valueWidth = valueToShift.getBitWidth();
1128 42 : if (orgShiftAmount < (uint64_t)valueWidth)
1129 40 : return orgShiftAmount;
1130 : // According to the LLVM documentation, if orgShiftAmount >= valueWidth
1131 : // the result is undefined; instead, we mask the shift amount as follows:
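       : // For example, for a 32-bit value the mask is NextPowerOf2(31) - 1 = 31,
       : // so a requested shift amount of 35 becomes 35 & 31 = 3.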
1132 4 : return (NextPowerOf2(valueWidth-1) - 1) & orgShiftAmount;
1133 : }
1134 :
1135 :
1136 10 : void Interpreter::visitShl(BinaryOperator &I) {
1137 : ExecutionContext &SF = ECStack.back();
1138 20 : GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
1139 20 : GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1140 10 : GenericValue Dest;
1141 10 : Type *Ty = I.getType();
1142 :
1143 10 : if (Ty->isVectorTy()) {
1144 20 : uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
1145 : assert(src1Size == Src2.AggregateVal.size());
1146 30 : for (unsigned i = 0; i < src1Size; i++) {
1147 20 : GenericValue Result;
1148 20 : uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
1149 40 : llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
1150 40 : Result.IntVal = valueToShift.shl(getShiftAmount(shiftAmount, valueToShift));
1151 20 : Dest.AggregateVal.push_back(Result);
1152 : }
1153 : } else {
1154 : // scalar
1155 : uint64_t shiftAmount = Src2.IntVal.getZExtValue();
1156 : llvm::APInt valueToShift = Src1.IntVal;
1157 0 : Dest.IntVal = valueToShift.shl(getShiftAmount(shiftAmount, valueToShift));
1158 : }
1159 :
1160 10 : SetValue(&I, Dest, SF);
1161 10 : }
1162 :
1163 8 : void Interpreter::visitLShr(BinaryOperator &I) {
1164 : ExecutionContext &SF = ECStack.back();
1165 16 : GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
1166 16 : GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1167 8 : GenericValue Dest;
1168 8 : Type *Ty = I.getType();
1169 :
1170 8 : if (Ty->isVectorTy()) {
1171 8 : uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
1172 : assert(src1Size == Src2.AggregateVal.size());
1173 12 : for (unsigned i = 0; i < src1Size; i++) {
1174 8 : GenericValue Result;
1175 8 : uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
1176 16 : llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
1177 16 : Result.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift));
1178 8 : Dest.AggregateVal.push_back(Result);
1179 : }
1180 : } else {
1181 : // scalar
1182 : uint64_t shiftAmount = Src2.IntVal.getZExtValue();
1183 : llvm::APInt valueToShift = Src1.IntVal;
1184 10 : Dest.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift));
1185 : }
1186 :
1187 8 : SetValue(&I, Dest, SF);
1188 8 : }
1189 :
1190 6 : void Interpreter::visitAShr(BinaryOperator &I) {
1191 : ExecutionContext &SF = ECStack.back();
1192 12 : GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
1193 12 : GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1194 6 : GenericValue Dest;
1195 6 : Type *Ty = I.getType();
1196 :
1197 6 : if (Ty->isVectorTy()) {
1198 4 : size_t src1Size = Src1.AggregateVal.size();
1199 : assert(src1Size == Src2.AggregateVal.size());
1200 12 : for (unsigned i = 0; i < src1Size; i++) {
1201 8 : GenericValue Result;
1202 8 : uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
1203 16 : llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
1204 16 : Result.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount, valueToShift));
1205 8 : Dest.AggregateVal.push_back(Result);
1206 : }
1207 : } else {
1208 : // scalar
1209 : uint64_t shiftAmount = Src2.IntVal.getZExtValue();
1210 : llvm::APInt valueToShift = Src1.IntVal;
1211 6 : Dest.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount, valueToShift));
1212 : }
1213 :
1214 6 : SetValue(&I, Dest, SF);
1215 6 : }
1216 :
1217 17 : GenericValue Interpreter::executeTruncInst(Value *SrcVal, Type *DstTy,
1218 : ExecutionContext &SF) {
1219 34 : GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1220 17 : Type *SrcTy = SrcVal->getType();
1221 17 : if (SrcTy->isVectorTy()) {
1222 : Type *DstVecTy = DstTy->getScalarType();
1223 : unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
1224 12 : unsigned NumElts = Src.AggregateVal.size();
1225 : // the sizes of src and dst vectors must be equal
1226 12 : Dest.AggregateVal.resize(NumElts);
1227 42 : for (unsigned i = 0; i < NumElts; i++)
1228 60 : Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.trunc(DBitWidth);
1229 : } else {
1230 : IntegerType *DITy = cast<IntegerType>(DstTy);
1231 : unsigned DBitWidth = DITy->getBitWidth();
1232 10 : Dest.IntVal = Src.IntVal.trunc(DBitWidth);
1233 : }
1234 17 : return Dest;
1235 : }
1236 :
1237 21 : GenericValue Interpreter::executeSExtInst(Value *SrcVal, Type *DstTy,
1238 : ExecutionContext &SF) {
1239 21 : Type *SrcTy = SrcVal->getType();
1240 42 : GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1241 21 : if (SrcTy->isVectorTy()) {
1242 : Type *DstVecTy = DstTy->getScalarType();
1243 : unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
1244 20 : unsigned size = Src.AggregateVal.size();
1245 : // the sizes of src and dst vectors must be equal.
1246 20 : Dest.AggregateVal.resize(size);
1247 70 : for (unsigned i = 0; i < size; i++)
1248 100 : Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.sext(DBitWidth);
1249 : } else {
1250 : auto *DITy = cast<IntegerType>(DstTy);
1251 : unsigned DBitWidth = DITy->getBitWidth();
1252 2 : Dest.IntVal = Src.IntVal.sext(DBitWidth);
1253 : }
1254 21 : return Dest;
1255 : }
1256 :
1257 28 : GenericValue Interpreter::executeZExtInst(Value *SrcVal, Type *DstTy,
1258 : ExecutionContext &SF) {
1259 28 : Type *SrcTy = SrcVal->getType();
1260 56 : GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1261 28 : if (SrcTy->isVectorTy()) {
1262 : Type *DstVecTy = DstTy->getScalarType();
1263 : unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
1264 :
1265 28 : unsigned size = Src.AggregateVal.size();
1266 : // the sizes of src and dst vectors must be equal.
1267 28 : Dest.AggregateVal.resize(size);
1268 94 : for (unsigned i = 0; i < size; i++)
1269 132 : Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.zext(DBitWidth);
1270 : } else {
1271 : auto *DITy = cast<IntegerType>(DstTy);
1272 : unsigned DBitWidth = DITy->getBitWidth();
1273 0 : Dest.IntVal = Src.IntVal.zext(DBitWidth);
1274 : }
1275 28 : return Dest;
1276 : }
1277 :
1278 2 : GenericValue Interpreter::executeFPTruncInst(Value *SrcVal, Type *DstTy,
1279 : ExecutionContext &SF) {
1280 4 : GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1281 :
1282 4 : if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
1283 : assert(SrcVal->getType()->getScalarType()->isDoubleTy() &&
1284 : DstTy->getScalarType()->isFloatTy() &&
1285 : "Invalid FPTrunc instruction");
1286 :
1287 2 : unsigned size = Src.AggregateVal.size();
1288 : // the sizes of src and dst vectors must be equal.
1289 2 : Dest.AggregateVal.resize(size);
1290 7 : for (unsigned i = 0; i < size; i++)
1291 15 : Dest.AggregateVal[i].FloatVal = (float)Src.AggregateVal[i].DoubleVal;
1292 : } else {
1293 : assert(SrcVal->getType()->isDoubleTy() && DstTy->isFloatTy() &&
1294 : "Invalid FPTrunc instruction");
1295 0 : Dest.FloatVal = (float)Src.DoubleVal;
1296 : }
1297 :
1298 2 : return Dest;
1299 : }
1300 :
1301 3 : GenericValue Interpreter::executeFPExtInst(Value *SrcVal, Type *DstTy,
1302 : ExecutionContext &SF) {
1303 6 : GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1304 :
1305 6 : if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
1306 : assert(SrcVal->getType()->getScalarType()->isFloatTy() &&
1307 : DstTy->getScalarType()->isDoubleTy() && "Invalid FPExt instruction");
1308 :
1309 2 : unsigned size = Src.AggregateVal.size();
1310 : // the sizes of src and dst vectors must be equal.
1311 2 : Dest.AggregateVal.resize(size);
1312 7 : for (unsigned i = 0; i < size; i++)
1313 15 : Dest.AggregateVal[i].DoubleVal = (double)Src.AggregateVal[i].FloatVal;
1314 : } else {
1315 : assert(SrcVal->getType()->isFloatTy() && DstTy->isDoubleTy() &&
1316 : "Invalid FPExt instruction");
1317 1 : Dest.DoubleVal = (double)Src.FloatVal;
1318 : }
1319 :
1320 3 : return Dest;
1321 : }
1322 :
1323 8 : GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, Type *DstTy,
1324 : ExecutionContext &SF) {
1325 8 : Type *SrcTy = SrcVal->getType();
1326 16 : GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1327 :
1328 8 : if (SrcTy->getTypeID() == Type::VectorTyID) {
1329 : Type *DstVecTy = DstTy->getScalarType();
1330 : Type *SrcVecTy = SrcTy->getScalarType();
1331 : uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
1332 8 : unsigned size = Src.AggregateVal.size();
1333 : // the sizes of src and dst vectors must be equal.
1334 8 : Dest.AggregateVal.resize(size);
1335 :
1336 8 : if (SrcVecTy->getTypeID() == Type::FloatTyID) {
1337 : assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToUI instruction");
1338 0 : for (unsigned i = 0; i < size; i++)
1339 0 : Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
1340 0 : Src.AggregateVal[i].FloatVal, DBitWidth);
1341 : } else {
1342 28 : for (unsigned i = 0; i < size; i++)
1343 20 : Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
1344 20 : Src.AggregateVal[i].DoubleVal, DBitWidth);
1345 : }
1346 : } else {
1347 : // scalar
1348 : uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
1349 : assert(SrcTy->isFloatingPointTy() && "Invalid FPToUI instruction");
1350 :
1351 0 : if (SrcTy->getTypeID() == Type::FloatTyID)
1352 0 : Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
1353 : else {
1354 0 : Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
1355 : }
1356 : }
1357 :
1358 8 : return Dest;
1359 : }
1360 :
1361 8 : GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, Type *DstTy,
1362 : ExecutionContext &SF) {
1363 8 : Type *SrcTy = SrcVal->getType();
1364 16 : GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1365 :
1366 8 : if (SrcTy->getTypeID() == Type::VectorTyID) {
1367 : Type *DstVecTy = DstTy->getScalarType();
1368 : Type *SrcVecTy = SrcTy->getScalarType();
1369 : uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
1370 8 : unsigned size = Src.AggregateVal.size();
1371 : // the sizes of src and dst vectors must be equal
1372 8 : Dest.AggregateVal.resize(size);
1373 :
1374 8 : if (SrcVecTy->getTypeID() == Type::FloatTyID) {
1375 : assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToSI instruction");
1376 0 : for (unsigned i = 0; i < size; i++)
1377 0 : Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
1378 0 : Src.AggregateVal[i].FloatVal, DBitWidth);
1379 : } else {
1380 28 : for (unsigned i = 0; i < size; i++)
1381 20 : Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
1382 20 : Src.AggregateVal[i].DoubleVal, DBitWidth);
1383 : }
1384 : } else {
1385 : // scalar
1386 : unsigned DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
1387 : assert(SrcTy->isFloatingPointTy() && "Invalid FPToSI instruction");
1388 :
1389 0 : if (SrcTy->getTypeID() == Type::FloatTyID)
1390 0 : Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
1391 : else {
1392 0 : Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
1393 : }
1394 : }
1395 8 : return Dest;
1396 : }
1397 :
1398 10 : GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, Type *DstTy,
1399 : ExecutionContext &SF) {
1400 20 : GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1401 :
1402 20 : if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
1403 : Type *DstVecTy = DstTy->getScalarType();
1404 10 : unsigned size = Src.AggregateVal.size();
1405 : // the sizes of src and dst vectors must be equal
1406 10 : Dest.AggregateVal.resize(size);
1407 :
1408 10 : if (DstVecTy->getTypeID() == Type::FloatTyID) {
1409 : assert(DstVecTy->isFloatingPointTy() && "Invalid UIToFP instruction");
1410 20 : for (unsigned i = 0; i < size; i++)
1411 15 : Dest.AggregateVal[i].FloatVal =
1412 30 : APIntOps::RoundAPIntToFloat(Src.AggregateVal[i].IntVal);
1413 : } else {
1414 15 : for (unsigned i = 0; i < size; i++)
1415 10 : Dest.AggregateVal[i].DoubleVal =
1416 20 : APIntOps::RoundAPIntToDouble(Src.AggregateVal[i].IntVal);
1417 : }
1418 : } else {
1419 : // scalar
1420 : assert(DstTy->isFloatingPointTy() && "Invalid UIToFP instruction");
1421 0 : if (DstTy->getTypeID() == Type::FloatTyID)
1422 0 : Dest.FloatVal = APIntOps::RoundAPIntToFloat(Src.IntVal);
1423 : else {
1424 0 : Dest.DoubleVal = APIntOps::RoundAPIntToDouble(Src.IntVal);
1425 : }
1426 : }
1427 10 : return Dest;
1428 : }
1429 :
1430 10 : GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, Type *DstTy,
1431 : ExecutionContext &SF) {
1432 20 : GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1433 :
1434 20 : if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
1435 : Type *DstVecTy = DstTy->getScalarType();
1436 10 : unsigned size = Src.AggregateVal.size();
1437 : // the sizes of src and dst vectors must be equal
1438 10 : Dest.AggregateVal.resize(size);
1439 :
1440 10 : if (DstVecTy->getTypeID() == Type::FloatTyID) {
1441 : assert(DstVecTy->isFloatingPointTy() && "Invalid SIToFP instruction");
1442 20 : for (unsigned i = 0; i < size; i++)
1443 15 : Dest.AggregateVal[i].FloatVal =
1444 30 : APIntOps::RoundSignedAPIntToFloat(Src.AggregateVal[i].IntVal);
1445 : } else {
1446 15 : for (unsigned i = 0; i < size; i++)
1447 10 : Dest.AggregateVal[i].DoubleVal =
1448 20 : APIntOps::RoundSignedAPIntToDouble(Src.AggregateVal[i].IntVal);
1449 : }
1450 : } else {
1451 : // scalar
1452 : assert(DstTy->isFloatingPointTy() && "Invalid SIToFP instruction");
1453 :
1454 0 : if (DstTy->getTypeID() == Type::FloatTyID)
1455 0 : Dest.FloatVal = APIntOps::RoundSignedAPIntToFloat(Src.IntVal);
1456 : else {
1457 0 : Dest.DoubleVal = APIntOps::RoundSignedAPIntToDouble(Src.IntVal);
1458 : }
1459 : }
1460 :
1461 10 : return Dest;
1462 : }
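// Illustrative only (not from the original source): hypothetical IR
// exercising the UIToFP/SIToFP handlers above; %a and %b are assumed operands.
//   %fv = uitofp <2 x i64> %a to <2 x float>   ; unsigned, vector branch
//   %sd = sitofp i32 %b to double              ; signed, scalar branch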
1463 :
1464 0 : GenericValue Interpreter::executePtrToIntInst(Value *SrcVal, Type *DstTy,
1465 : ExecutionContext &SF) {
1466 : uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
1467 0 : GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1468 : assert(SrcVal->getType()->isPointerTy() && "Invalid PtrToInt instruction");
1469 :
1470 0 : Dest.IntVal = APInt(DBitWidth, (intptr_t) Src.PointerVal);
1471 0 : return Dest;
1472 : }
1473 :
1474 0 : GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, Type *DstTy,
1475 : ExecutionContext &SF) {
1476 0 : GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1477 : assert(DstTy->isPointerTy() && "Invalid IntToPtr instruction");
1478 :
1479 : uint32_t PtrSize = getDataLayout().getPointerSizeInBits();
1480 0 : if (PtrSize != Src.IntVal.getBitWidth())
1481 0 : Src.IntVal = Src.IntVal.zextOrTrunc(PtrSize);
1482 :
1483 0 : Dest.PointerVal = PointerTy(intptr_t(Src.IntVal.getZExtValue()));
1484 0 : return Dest;
1485 : }
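// Illustrative only (not from the original source): hypothetical IR for the
// pointer casts above; for inttoptr the integer is zero-extended or truncated
// to the target pointer width via zextOrTrunc.
//   %pi = ptrtoint i32* %p to i64
//   %ip = inttoptr i16 %x to i8*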
1486 :
1487 39 : GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
1488 : ExecutionContext &SF) {
1489 :
1490 : // This instruction supports bitwise conversion of vectors to integers and
1491 : // to vectors of other types, as long as the total bit widths are the same.
1492 39 : Type *SrcTy = SrcVal->getType();
1493 78 : GenericValue Dest, Src = getOperandValue(SrcVal, SF);
1494 :
1495 39 : if ((SrcTy->getTypeID() == Type::VectorTyID) ||
1496 : (DstTy->getTypeID() == Type::VectorTyID)) {
1497 : // vector src bitcast to vector dst or vector src bitcast to scalar dst or
1498 : // scalar src bitcast to vector dst
1499 28 : bool isLittleEndian = getDataLayout().isLittleEndian();
1500 28 : GenericValue TempDst, TempSrc, SrcVec;
1501 : Type *SrcElemTy;
1502 : Type *DstElemTy;
1503 : unsigned SrcBitSize;
1504 : unsigned DstBitSize;
1505 : unsigned SrcNum;
1506 : unsigned DstNum;
1507 :
1508 28 : if (SrcTy->getTypeID() == Type::VectorTyID) {
1509 : SrcElemTy = SrcTy->getScalarType();
1510 20 : SrcBitSize = SrcTy->getScalarSizeInBits();
1511 40 : SrcNum = Src.AggregateVal.size();
1512 : SrcVec = Src;
1513 : } else {
1514 : // If src is a scalar value, treat it as a <1 x type> vector.
1515 : SrcElemTy = SrcTy;
1516 8 : SrcBitSize = SrcTy->getPrimitiveSizeInBits();
1517 : SrcNum = 1;
1518 8 : SrcVec.AggregateVal.push_back(Src);
1519 : }
1520 :
1521 28 : if (DstTy->getTypeID() == Type::VectorTyID) {
1522 : DstElemTy = DstTy->getScalarType();
1523 20 : DstBitSize = DstTy->getScalarSizeInBits();
1524 20 : DstNum = (SrcNum * SrcBitSize) / DstBitSize;
1525 : } else {
1526 : DstElemTy = DstTy;
1527 8 : DstBitSize = DstTy->getPrimitiveSizeInBits();
1528 : DstNum = 1;
1529 : }
1530 :
1531 28 : if (SrcNum * SrcBitSize != DstNum * DstBitSize)
1532 0 : llvm_unreachable("Invalid BitCast");
1533 :
1534 : // If src is floating point, cast to integer first.
1535 28 : TempSrc.AggregateVal.resize(SrcNum);
1536 28 : if (SrcElemTy->isFloatTy()) {
1537 15 : for (unsigned i = 0; i < SrcNum; i++)
1538 10 : TempSrc.AggregateVal[i].IntVal =
1539 30 : APInt::floatToBits(SrcVec.AggregateVal[i].FloatVal);
1540 :
1541 23 : } else if (SrcElemTy->isDoubleTy()) {
1542 8 : for (unsigned i = 0; i < SrcNum; i++)
1543 4 : TempSrc.AggregateVal[i].IntVal =
1544 12 : APInt::doubleToBits(SrcVec.AggregateVal[i].DoubleVal);
1545 19 : } else if (SrcElemTy->isIntegerTy()) {
1546 93 : for (unsigned i = 0; i < SrcNum; i++)
1547 222 : TempSrc.AggregateVal[i].IntVal = SrcVec.AggregateVal[i].IntVal;
1548 : } else {
1549 : // Pointers are not allowed as the element type of a vector.
1550 0 : llvm_unreachable("Invalid Bitcast");
1551 : }
1552 :
1553 : // TempSrc is now a vector of integer values.
1554 28 : if (DstNum < SrcNum) {
1555 : // Example: bitcast <4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>
1556 13 : unsigned Ratio = SrcNum / DstNum;
1557 : unsigned SrcElt = 0;
1558 33 : for (unsigned i = 0; i < DstNum; i++) {
1559 20 : GenericValue Elt;
1560 20 : Elt.IntVal = 0;
1561 20 : Elt.IntVal = Elt.IntVal.zext(DstBitSize);
1562 20 : unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize * (Ratio - 1);
1563 84 : for (unsigned j = 0; j < Ratio; j++) {
1564 : APInt Tmp;
1565 64 : Tmp = Tmp.zext(SrcBitSize);
1566 128 : Tmp = TempSrc.AggregateVal[SrcElt++].IntVal;
1567 64 : Tmp = Tmp.zext(DstBitSize);
1568 64 : Tmp <<= ShiftAmt;
1569 64 : ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
1570 : Elt.IntVal |= Tmp;
1571 : }
1572 20 : TempDst.AggregateVal.push_back(Elt);
1573 : }
1574 : } else {
1575 : // Example: bitcast <2 x i64> <i64 0, i64 1> to <4 x i32>
1576 15 : unsigned Ratio = DstNum / SrcNum;
1577 39 : for (unsigned i = 0; i < SrcNum; i++) {
1578 24 : unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize * (Ratio - 1);
1579 92 : for (unsigned j = 0; j < Ratio; j++) {
1580 68 : GenericValue Elt;
1581 68 : Elt.IntVal = Elt.IntVal.zext(SrcBitSize);
1582 136 : Elt.IntVal = TempSrc.AggregateVal[i].IntVal;
1583 : Elt.IntVal.lshrInPlace(ShiftAmt);
1584 : // DstBitSize may equal SrcBitSize, so only truncate when narrowing.
1585 68 : if (DstBitSize < SrcBitSize)
1586 128 : Elt.IntVal = Elt.IntVal.trunc(DstBitSize);
1587 68 : ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
1588 68 : TempDst.AggregateVal.push_back(Elt);
1589 : }
1590 : }
1591 : }
1592 :
1593 : // Convert the integer result back to the requested destination type.
1594 28 : if (DstTy->getTypeID() == Type::VectorTyID) {
1595 20 : if (DstElemTy->isDoubleTy()) {
1596 0 : Dest.AggregateVal.resize(DstNum);
1597 0 : for (unsigned i = 0; i < DstNum; i++)
1598 0 : Dest.AggregateVal[i].DoubleVal =
1599 0 : TempDst.AggregateVal[i].IntVal.bitsToDouble();
1600 20 : } else if (DstElemTy->isFloatTy()) {
1601 5 : Dest.AggregateVal.resize(DstNum);
1602 15 : for (unsigned i = 0; i < DstNum; i++)
1603 20 : Dest.AggregateVal[i].FloatVal =
1604 10 : TempDst.AggregateVal[i].IntVal.bitsToFloat();
1605 : } else {
1606 : Dest = TempDst;
1607 : }
1608 : } else {
1609 8 : if (DstElemTy->isDoubleTy())
1610 8 : Dest.DoubleVal = TempDst.AggregateVal[0].IntVal.bitsToDouble();
1611 4 : else if (DstElemTy->isFloatTy()) {
1612 0 : Dest.FloatVal = TempDst.AggregateVal[0].IntVal.bitsToFloat();
1613 : } else {
1614 4 : Dest.IntVal = TempDst.AggregateVal[0].IntVal;
1615 : }
1616 : }
1617 : } else { // if ((SrcTy->getTypeID() == Type::VectorTyID) ||
1618 : // (DstTy->getTypeID() == Type::VectorTyID))
1619 :
1620 : // scalar src bitcast to scalar dst
1621 11 : if (DstTy->isPointerTy()) {
1622 : assert(SrcTy->isPointerTy() && "Invalid BitCast");
1623 0 : Dest.PointerVal = Src.PointerVal;
1624 11 : } else if (DstTy->isIntegerTy()) {
1625 10 : if (SrcTy->isFloatTy())
1626 0 : Dest.IntVal = APInt::floatToBits(Src.FloatVal);
1627 10 : else if (SrcTy->isDoubleTy()) {
1628 6 : Dest.IntVal = APInt::doubleToBits(Src.DoubleVal);
1629 7 : } else if (SrcTy->isIntegerTy()) {
1630 7 : Dest.IntVal = Src.IntVal;
1631 : } else {
1632 0 : llvm_unreachable("Invalid BitCast");
1633 : }
1634 1 : } else if (DstTy->isFloatTy()) {
1635 0 : if (SrcTy->isIntegerTy())
1636 0 : Dest.FloatVal = Src.IntVal.bitsToFloat();
1637 : else {
1638 0 : Dest.FloatVal = Src.FloatVal;
1639 : }
1640 1 : } else if (DstTy->isDoubleTy()) {
1641 1 : if (SrcTy->isIntegerTy())
1642 1 : Dest.DoubleVal = Src.IntVal.bitsToDouble();
1643 : else {
1644 0 : Dest.DoubleVal = Src.DoubleVal;
1645 : }
1646 : } else {
1647 0 : llvm_unreachable("Invalid Bitcast");
1648 : }
1649 : }
1650 :
1651 39 : return Dest;
1652 : }
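// Illustrative only (not from the original source): hypothetical IR hitting
// the main bitcast paths above.
//   %w = bitcast <4 x i32> %v to <2 x i64>   ; element merging (DstNum < SrcNum)
//   %n = bitcast <2 x i64> %w to <4 x i32>   ; element splitting (DstNum > SrcNum)
//   %b = bitcast double %d to i64            ; scalar FP bits reinterpreted as integer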
1653 :
1654 17 : void Interpreter::visitTruncInst(TruncInst &I) {
1655 : ExecutionContext &SF = ECStack.back();
1656 17 : SetValue(&I, executeTruncInst(I.getOperand(0), I.getType(), SF), SF);
1657 17 : }
1658 :
1659 21 : void Interpreter::visitSExtInst(SExtInst &I) {
1660 : ExecutionContext &SF = ECStack.back();
1661 21 : SetValue(&I, executeSExtInst(I.getOperand(0), I.getType(), SF), SF);
1662 21 : }
1663 :
1664 28 : void Interpreter::visitZExtInst(ZExtInst &I) {
1665 : ExecutionContext &SF = ECStack.back();
1666 28 : SetValue(&I, executeZExtInst(I.getOperand(0), I.getType(), SF), SF);
1667 28 : }
1668 :
1669 2 : void Interpreter::visitFPTruncInst(FPTruncInst &I) {
1670 : ExecutionContext &SF = ECStack.back();
1671 2 : SetValue(&I, executeFPTruncInst(I.getOperand(0), I.getType(), SF), SF);
1672 2 : }
1673 :
1674 3 : void Interpreter::visitFPExtInst(FPExtInst &I) {
1675 : ExecutionContext &SF = ECStack.back();
1676 3 : SetValue(&I, executeFPExtInst(I.getOperand(0), I.getType(), SF), SF);
1677 3 : }
1678 :
1679 10 : void Interpreter::visitUIToFPInst(UIToFPInst &I) {
1680 : ExecutionContext &SF = ECStack.back();
1681 10 : SetValue(&I, executeUIToFPInst(I.getOperand(0), I.getType(), SF), SF);
1682 10 : }
1683 :
1684 10 : void Interpreter::visitSIToFPInst(SIToFPInst &I) {
1685 : ExecutionContext &SF = ECStack.back();
1686 10 : SetValue(&I, executeSIToFPInst(I.getOperand(0), I.getType(), SF), SF);
1687 10 : }
1688 :
1689 8 : void Interpreter::visitFPToUIInst(FPToUIInst &I) {
1690 : ExecutionContext &SF = ECStack.back();
1691 8 : SetValue(&I, executeFPToUIInst(I.getOperand(0), I.getType(), SF), SF);
1692 8 : }
1693 :
1694 8 : void Interpreter::visitFPToSIInst(FPToSIInst &I) {
1695 : ExecutionContext &SF = ECStack.back();
1696 8 : SetValue(&I, executeFPToSIInst(I.getOperand(0), I.getType(), SF), SF);
1697 8 : }
1698 :
1699 0 : void Interpreter::visitPtrToIntInst(PtrToIntInst &I) {
1700 : ExecutionContext &SF = ECStack.back();
1701 0 : SetValue(&I, executePtrToIntInst(I.getOperand(0), I.getType(), SF), SF);
1702 0 : }
1703 :
1704 0 : void Interpreter::visitIntToPtrInst(IntToPtrInst &I) {
1705 : ExecutionContext &SF = ECStack.back();
1706 0 : SetValue(&I, executeIntToPtrInst(I.getOperand(0), I.getType(), SF), SF);
1707 0 : }
1708 :
1709 39 : void Interpreter::visitBitCastInst(BitCastInst &I) {
1710 : ExecutionContext &SF = ECStack.back();
1711 39 : SetValue(&I, executeBitCastInst(I.getOperand(0), I.getType(), SF), SF);
1712 39 : }
1713 :
1714 : #define IMPLEMENT_VAARG(TY) \
1715 : case Type::TY##TyID: Dest.TY##Val = Src.TY##Val; break
1716 :
1717 0 : void Interpreter::visitVAArgInst(VAArgInst &I) {
1718 : ExecutionContext &SF = ECStack.back();
1719 :
1720 : // Get the incoming va_list parameter. LLI represents a va_list as a
1721 : // (ec-stack-depth, var-arg-index) pair.
1722 0 : GenericValue VAList = getOperandValue(I.getOperand(0), SF);
1723 0 : GenericValue Dest;
1724 0 : GenericValue Src = ECStack[VAList.UIntPairVal.first]
1725 0 : .VarArgs[VAList.UIntPairVal.second];
1726 0 : Type *Ty = I.getType();
1727 0 : switch (Ty->getTypeID()) {
1728 0 : case Type::IntegerTyID:
1729 0 : Dest.IntVal = Src.IntVal;
1730 0 : break;
1731 0 : IMPLEMENT_VAARG(Pointer);
1732 0 : IMPLEMENT_VAARG(Float);
1733 0 : IMPLEMENT_VAARG(Double);
1734 0 : default:
1735 0 : dbgs() << "Unhandled dest type for vaarg instruction: " << *Ty << "\n";
1736 0 : llvm_unreachable(nullptr);
1737 : }
1738 :
1739 : // Set the Value of this Instruction.
1740 0 : SetValue(&I, Dest, SF);
1741 :
1742 : // Move the pointer to the next vararg.
1743 0 : ++VAList.UIntPairVal.second;
1744 0 : }
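// Illustrative only (not from the original source): a hypothetical va_arg use;
// the interpreter reads the value at the (stack-depth, index) pair carried in
// the va_list, as described above.
//   %val = va_arg i8** %ap, i32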
1745 :
1746 15 : void Interpreter::visitExtractElementInst(ExtractElementInst &I) {
1747 : ExecutionContext &SF = ECStack.back();
1748 30 : GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
1749 30 : GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1750 15 : GenericValue Dest;
1751 :
1752 15 : Type *Ty = I.getType();
1753 : const unsigned indx = unsigned(Src2.IntVal.getZExtValue());
1754 :
1755 30 : if(Src1.AggregateVal.size() > indx) {
1756 15 : switch (Ty->getTypeID()) {
1757 0 : default:
1758 0 : dbgs() << "Unhandled destination type for extractelement instruction: "
1759 0 : << *Ty << "\n";
1760 0 : llvm_unreachable(nullptr);
1761 : break;
1762 7 : case Type::IntegerTyID:
1763 7 : Dest.IntVal = Src1.AggregateVal[indx].IntVal;
1764 7 : break;
1765 4 : case Type::FloatTyID:
1766 4 : Dest.FloatVal = Src1.AggregateVal[indx].FloatVal;
1767 4 : break;
1768 4 : case Type::DoubleTyID:
1769 4 : Dest.DoubleVal = Src1.AggregateVal[indx].DoubleVal;
1770 4 : break;
1771 : }
1772 : } else {
1773 0 : dbgs() << "Invalid index in extractelement instruction\n";
1774 : }
1775 :
1776 15 : SetValue(&I, Dest, SF);
1777 15 : }
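// Illustrative only (not from the original source): hypothetical IR reaching
// the IntegerTyID case above.
//   %e = extractelement <4 x i32> %v, i32 2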
1778 :
1779 30 : void Interpreter::visitInsertElementInst(InsertElementInst &I) {
1780 : ExecutionContext &SF = ECStack.back();
1781 : Type *Ty = I.getType();
1782 :
1783 30 : if(!(Ty->isVectorTy()) )
1784 0 : llvm_unreachable("Unhandled dest type for insertelement instruction");
1785 :
1786 60 : GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
1787 60 : GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1788 60 : GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
1789 30 : GenericValue Dest;
1790 :
1791 30 : Type *TyContained = Ty->getContainedType(0);
1792 :
1793 : const unsigned indx = unsigned(Src3.IntVal.getZExtValue());
1794 30 : Dest.AggregateVal = Src1.AggregateVal;
1795 :
1796 60 : if(Src1.AggregateVal.size() <= indx)
1797 0 : llvm_unreachable("Invalid index in insertelement instruction");
1798 30 : switch (TyContained->getTypeID()) {
1799 0 : default:
1800 0 : llvm_unreachable("Unhandled dest type for insertelement instruction");
1801 20 : case Type::IntegerTyID:
1802 40 : Dest.AggregateVal[indx].IntVal = Src2.IntVal;
1803 20 : break;
1804 5 : case Type::FloatTyID:
1805 5 : Dest.AggregateVal[indx].FloatVal = Src2.FloatVal;
1806 5 : break;
1807 5 : case Type::DoubleTyID:
1808 5 : Dest.AggregateVal[indx].DoubleVal = Src2.DoubleVal;
1809 5 : break;
1810 : }
1811 30 : SetValue(&I, Dest, SF);
1812 30 : }
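// Illustrative only (not from the original source): hypothetical IR reaching
// the FloatTyID case above.
//   %v2 = insertelement <4 x float> %v, float %f, i32 0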
1813 :
1814 30 : void Interpreter::visitShuffleVectorInst(ShuffleVectorInst &I){
1815 : ExecutionContext &SF = ECStack.back();
1816 :
1817 : Type *Ty = I.getType();
1818 30 : if(!(Ty->isVectorTy()))
1819 0 : llvm_unreachable("Unhandled dest type for shufflevector instruction");
1820 :
1821 60 : GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
1822 60 : GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1823 60 : GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
1824 30 : GenericValue Dest;
1825 :
1826 : // There is no need to check the types of src1 and src2: well-formed IR
1827 : // cannot give src1 and src2 different types in a shufflevector
1828 : // instruction.
1829 :
1830 30 : Type *TyContained = Ty->getContainedType(0);
1831 30 : unsigned src1Size = (unsigned)Src1.AggregateVal.size();
1832 30 : unsigned src2Size = (unsigned)Src2.AggregateVal.size();
1833 30 : unsigned src3Size = (unsigned)Src3.AggregateVal.size();
1834 :
1835 30 : Dest.AggregateVal.resize(src3Size);
1836 :
1837 30 : switch (TyContained->getTypeID()) {
1838 0 : default:
1839 0 : llvm_unreachable("Unhandled dest type for shufflevector instruction");
1840 : break;
1841 : case Type::IntegerTyID:
1842 152 : for( unsigned i=0; i<src3Size; i++) {
1843 132 : unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
1844 132 : if(j < src1Size)
1845 396 : Dest.AggregateVal[i].IntVal = Src1.AggregateVal[j].IntVal;
1846 0 : else if(j < src1Size + src2Size)
1847 0 : Dest.AggregateVal[i].IntVal = Src2.AggregateVal[j-src1Size].IntVal;
1848 : else
1849 : // Each selector index must be less than the combined length of the
1850 : // first and second operands; the LLVM assembler should reject IR like
1851 : // %tmp = shufflevector <2 x i32> <i32 3, i32 4>, <2 x i32> undef,
1852 : // <2 x i32> <i32 0, i32 5>,
1853 : // where i32 5 is out of range, but keep this as an additional check:
1854 0 : llvm_unreachable("Invalid mask in shufflevector instruction");
1855 20 : }
1856 : break;
1857 : case Type::FloatTyID:
1858 38 : for( unsigned i=0; i<src3Size; i++) {
1859 33 : unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
1860 33 : if(j < src1Size)
1861 99 : Dest.AggregateVal[i].FloatVal = Src1.AggregateVal[j].FloatVal;
1862 0 : else if(j < src1Size + src2Size)
1863 0 : Dest.AggregateVal[i].FloatVal = Src2.AggregateVal[j-src1Size].FloatVal;
1864 : else
1865 0 : llvm_unreachable("Invalid mask in shufflevector instruction");
1866 5 : }
1867 : break;
1868 : case Type::DoubleTyID:
1869 38 : for( unsigned i=0; i<src3Size; i++) {
1870 33 : unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
1871 33 : if(j < src1Size)
1872 99 : Dest.AggregateVal[i].DoubleVal = Src1.AggregateVal[j].DoubleVal;
1873 0 : else if(j < src1Size + src2Size)
1874 0 : Dest.AggregateVal[i].DoubleVal =
1875 0 : Src2.AggregateVal[j-src1Size].DoubleVal;
1876 : else
1877 0 : llvm_unreachable("Invalid mask in shufflevector instruction");
1878 5 : }
1879 : break;
1880 : }
1881 30 : SetValue(&I, Dest, SF);
1882 30 : }
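// Illustrative only (not from the original source): a hypothetical well-formed
// shuffle whose mask indices stay below src1Size + src2Size.
//   %s = shufflevector <2 x i32> %a, <2 x i32> %b,
//                      <4 x i32> <i32 0, i32 1, i32 2, i32 3>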
1883 :
1884 6 : void Interpreter::visitExtractValueInst(ExtractValueInst &I) {
1885 : ExecutionContext &SF = ECStack.back();
1886 : Value *Agg = I.getAggregateOperand();
1887 6 : GenericValue Dest;
1888 6 : GenericValue Src = getOperandValue(Agg, SF);
1889 :
1890 : ExtractValueInst::idx_iterator IdxBegin = I.idx_begin();
1891 : unsigned Num = I.getNumIndices();
1892 : GenericValue *pSrc = &Src;
1893 :
1894 14 : for (unsigned i = 0 ; i < Num; ++i) {
1895 8 : pSrc = &pSrc->AggregateVal[*IdxBegin];
1896 8 : ++IdxBegin;
1897 : }
1898 :
1899 6 : Type *IndexedType = ExtractValueInst::getIndexedType(Agg->getType(), I.getIndices());
1900 6 : switch (IndexedType->getTypeID()) {
1901 0 : default:
1902 0 : llvm_unreachable("Unhandled dest type for extractvalue instruction");
1903 : break;
1904 1 : case Type::IntegerTyID:
1905 1 : Dest.IntVal = pSrc->IntVal;
1906 1 : break;
1907 2 : case Type::FloatTyID:
1908 2 : Dest.FloatVal = pSrc->FloatVal;
1909 2 : break;
1910 2 : case Type::DoubleTyID:
1911 2 : Dest.DoubleVal = pSrc->DoubleVal;
1912 2 : break;
1913 1 : case Type::ArrayTyID:
1914 : case Type::StructTyID:
1915 : case Type::VectorTyID:
1916 1 : Dest.AggregateVal = pSrc->AggregateVal;
1917 1 : break;
1918 0 : case Type::PointerTyID:
1919 0 : Dest.PointerVal = pSrc->PointerVal;
1920 0 : break;
1921 : }
1922 :
1923 6 : SetValue(&I, Dest, SF);
1924 6 : }
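// Illustrative only (not from the original source): hypothetical IR with two
// indices, matching the per-index descent into the aggregate above.
//   %x = extractvalue { i32, { float, double } } %agg, 1, 0   ; yields the float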
1925 :
1926 3 : void Interpreter::visitInsertValueInst(InsertValueInst &I) {
1927 :
1928 : ExecutionContext &SF = ECStack.back();
1929 : Value *Agg = I.getAggregateOperand();
1930 :
1931 6 : GenericValue Src1 = getOperandValue(Agg, SF);
1932 6 : GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
1933 3 : GenericValue Dest = Src1; // Dest is a slightly changed Src1
1934 :
1935 : ExtractValueInst::idx_iterator IdxBegin = I.idx_begin();
1936 : unsigned Num = I.getNumIndices();
1937 :
1938 : GenericValue *pDest = &Dest;
1939 8 : for (unsigned i = 0 ; i < Num; ++i) {
1940 5 : pDest = &pDest->AggregateVal[*IdxBegin];
1941 5 : ++IdxBegin;
1942 : }
1943 : // pDest now points to the target element within Dest.
1944 :
1945 3 : Type *IndexedType = ExtractValueInst::getIndexedType(Agg->getType(), I.getIndices());
1946 :
1947 3 : switch (IndexedType->getTypeID()) {
1948 0 : default:
1949 0 : llvm_unreachable("Unhandled dest type for insertvalue instruction");
1950 : break;
1951 1 : case Type::IntegerTyID:
1952 1 : pDest->IntVal = Src2.IntVal;
1953 1 : break;
1954 1 : case Type::FloatTyID:
1955 1 : pDest->FloatVal = Src2.FloatVal;
1956 1 : break;
1957 1 : case Type::DoubleTyID:
1958 1 : pDest->DoubleVal = Src2.DoubleVal;
1959 1 : break;
1960 0 : case Type::ArrayTyID:
1961 : case Type::StructTyID:
1962 : case Type::VectorTyID:
1963 0 : pDest->AggregateVal = Src2.AggregateVal;
1964 0 : break;
1965 0 : case Type::PointerTyID:
1966 0 : pDest->PointerVal = Src2.PointerVal;
1967 0 : break;
1968 : }
1969 :
1970 3 : SetValue(&I, Dest, SF);
1971 3 : }
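// Illustrative only (not from the original source): hypothetical IR that
// replaces a single member and yields the whole aggregate, as Dest does above.
//   %agg2 = insertvalue { i32, float } %agg, float %f, 1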
1972 :
1973 6 : GenericValue Interpreter::getConstantExprValue (ConstantExpr *CE,
1974 : ExecutionContext &SF) {
1975 6 : switch (CE->getOpcode()) {
1976 0 : case Instruction::Trunc:
1977 0 : return executeTruncInst(CE->getOperand(0), CE->getType(), SF);
1978 0 : case Instruction::ZExt:
1979 0 : return executeZExtInst(CE->getOperand(0), CE->getType(), SF);
1980 0 : case Instruction::SExt:
1981 0 : return executeSExtInst(CE->getOperand(0), CE->getType(), SF);
1982 0 : case Instruction::FPTrunc:
1983 0 : return executeFPTruncInst(CE->getOperand(0), CE->getType(), SF);
1984 0 : case Instruction::FPExt:
1985 0 : return executeFPExtInst(CE->getOperand(0), CE->getType(), SF);
1986 0 : case Instruction::UIToFP:
1987 0 : return executeUIToFPInst(CE->getOperand(0), CE->getType(), SF);
1988 0 : case Instruction::SIToFP:
1989 0 : return executeSIToFPInst(CE->getOperand(0), CE->getType(), SF);
1990 0 : case Instruction::FPToUI:
1991 0 : return executeFPToUIInst(CE->getOperand(0), CE->getType(), SF);
1992 0 : case Instruction::FPToSI:
1993 0 : return executeFPToSIInst(CE->getOperand(0), CE->getType(), SF);
1994 0 : case Instruction::PtrToInt:
1995 0 : return executePtrToIntInst(CE->getOperand(0), CE->getType(), SF);
1996 0 : case Instruction::IntToPtr:
1997 0 : return executeIntToPtrInst(CE->getOperand(0), CE->getType(), SF);
1998 0 : case Instruction::BitCast:
1999 0 : return executeBitCastInst(CE->getOperand(0), CE->getType(), SF);
2000 : case Instruction::GetElementPtr:
2001 : return executeGEPOperation(CE->getOperand(0), gep_type_begin(CE),
2002 12 : gep_type_end(CE), SF);
2003 : case Instruction::FCmp:
2004 : case Instruction::ICmp:
2005 : return executeCmpInst(CE->getPredicate(),
2006 0 : getOperandValue(CE->getOperand(0), SF),
2007 0 : getOperandValue(CE->getOperand(1), SF),
2008 0 : CE->getOperand(0)->getType());
2009 : case Instruction::Select:
2010 0 : return executeSelectInst(getOperandValue(CE->getOperand(0), SF),
2011 0 : getOperandValue(CE->getOperand(1), SF),
2012 0 : getOperandValue(CE->getOperand(2), SF),
2013 0 : CE->getOperand(0)->getType());
2014 : default :
2015 : break;
2016 : }
2017 :
2018 : // The remaining cases compute their result into a local GenericValue,
2019 : // so we initialize one, compute it, and then return it.
2020 0 : GenericValue Op0 = getOperandValue(CE->getOperand(0), SF);
2021 0 : GenericValue Op1 = getOperandValue(CE->getOperand(1), SF);
2022 0 : GenericValue Dest;
2023 0 : Type * Ty = CE->getOperand(0)->getType();
2024 0 : switch (CE->getOpcode()) {
2025 0 : case Instruction::Add: Dest.IntVal = Op0.IntVal + Op1.IntVal; break;
2026 0 : case Instruction::Sub: Dest.IntVal = Op0.IntVal - Op1.IntVal; break;
2027 0 : case Instruction::Mul: Dest.IntVal = Op0.IntVal * Op1.IntVal; break;
2028 0 : case Instruction::FAdd: executeFAddInst(Dest, Op0, Op1, Ty); break;
2029 0 : case Instruction::FSub: executeFSubInst(Dest, Op0, Op1, Ty); break;
2030 0 : case Instruction::FMul: executeFMulInst(Dest, Op0, Op1, Ty); break;
2031 0 : case Instruction::FDiv: executeFDivInst(Dest, Op0, Op1, Ty); break;
2032 0 : case Instruction::FRem: executeFRemInst(Dest, Op0, Op1, Ty); break;
2033 0 : case Instruction::SDiv: Dest.IntVal = Op0.IntVal.sdiv(Op1.IntVal); break;
2034 0 : case Instruction::UDiv: Dest.IntVal = Op0.IntVal.udiv(Op1.IntVal); break;
2035 0 : case Instruction::URem: Dest.IntVal = Op0.IntVal.urem(Op1.IntVal); break;
2036 0 : case Instruction::SRem: Dest.IntVal = Op0.IntVal.srem(Op1.IntVal); break;
2037 0 : case Instruction::And: Dest.IntVal = Op0.IntVal & Op1.IntVal; break;
2038 0 : case Instruction::Or: Dest.IntVal = Op0.IntVal | Op1.IntVal; break;
2039 0 : case Instruction::Xor: Dest.IntVal = Op0.IntVal ^ Op1.IntVal; break;
2040 : case Instruction::Shl:
2041 0 : Dest.IntVal = Op0.IntVal.shl(Op1.IntVal.getZExtValue());
2042 0 : break;
2043 : case Instruction::LShr:
2044 0 : Dest.IntVal = Op0.IntVal.lshr(Op1.IntVal.getZExtValue());
2045 0 : break;
2046 : case Instruction::AShr:
2047 0 : Dest.IntVal = Op0.IntVal.ashr(Op1.IntVal.getZExtValue());
2048 0 : break;
2049 0 : default:
2050 0 : dbgs() << "Unhandled ConstantExpr: " << *CE << "\n";
2051 0 : llvm_unreachable("Unhandled ConstantExpr");
2052 : }
2053 : return Dest;
2054 : }
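// Illustrative only (not from the original source): a hypothetical constant
// expression that would reach the GetElementPtr case above when evaluated
// as an operand.
//   @arr = global [4 x i32] zeroinitializer
//   @ptr = global i32* getelementptr inbounds ([4 x i32], [4 x i32]* @arr, i64 0, i64 2)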
2055 :
2056 1089 : GenericValue Interpreter::getOperandValue(Value *V, ExecutionContext &SF) {
2057 1089 : if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
2058 6 : return getConstantExprValue(CE, SF);
2059 : } else if (Constant *CPV = dyn_cast<Constant>(V)) {
2060 708 : return getConstantValue(CPV);
2061 : } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2062 0 : return PTOGV(getPointerToGlobal(GV));
2063 : } else {
2064 375 : return SF.Values[V];
2065 : }
2066 : }
2067 :
2068 : //===----------------------------------------------------------------------===//
2069 : // Dispatch and Execution Code
2070 : //===----------------------------------------------------------------------===//
2071 :
2072 : //===----------------------------------------------------------------------===//
2073 : // callFunction - Execute the specified function...
2074 : //
2075 72 : void Interpreter::callFunction(Function *F, ArrayRef<GenericValue> ArgVals) {
2076 : assert((ECStack.empty() || !ECStack.back().Caller.getInstruction() ||
2077 : ECStack.back().Caller.arg_size() == ArgVals.size()) &&
2078 : "Incorrect number of arguments passed into function call!");
2079 : // Make a new stack frame... and fill it in.
2080 72 : ECStack.emplace_back();
2081 : ExecutionContext &StackFrame = ECStack.back();
2082 72 : StackFrame.CurFunction = F;
2083 :
2084 : // Special handling for external functions.
2085 72 : if (F->isDeclaration()) {
2086 47 : GenericValue Result = callExternalFunction (F, ArgVals);
2087 : // Simulate a 'ret' instruction of the appropriate type.
2088 28 : popStackAndReturnValueToCaller (F->getReturnType (), Result);
2089 : return;
2090 : }
2091 :
2092 : // Get pointers to first LLVM BB & Instruction in function.
2093 25 : StackFrame.CurBB = &F->front();
2094 25 : StackFrame.CurInst = StackFrame.CurBB->begin();
2095 :
2096 : // Run through the function arguments and initialize their values...
2097 : assert((ArgVals.size() == F->arg_size() ||
2098 : (ArgVals.size() > F->arg_size() && F->getFunctionType()->isVarArg()))&&
2099 : "Invalid number of values passed to function invocation!");
2100 :
2101 : // Handle non-varargs arguments...
2102 : unsigned i = 0;
2103 9 : for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
2104 34 : AI != E; ++AI, ++i)
2105 18 : SetValue(&*AI, ArgVals[i], StackFrame);
2106 :
2107 : // Handle varargs arguments...
2108 50 : StackFrame.VarArgs.assign(ArgVals.begin()+i, ArgVals.end());
2109 : }
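// Illustrative only (not from the original source): for a hypothetical
// variadic callee such as
//   define i32 @sum(i32 %n, ...) { ... }
// called as
//   %r = call i32 (i32, ...) @sum(i32 2, i32 10, i32 20)
// the fixed argument binds to %n via SetValue, and the remaining values are
// copied into StackFrame.VarArgs above.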
2110 :
2111 :
2112 19 : void Interpreter::run() {
2113 653 : while (!ECStack.empty()) {
2114 : // Interpret a single instruction & increment the "PC".
2115 : ExecutionContext &SF = ECStack.back(); // Current stack frame
2116 : Instruction &I = *SF.CurInst++; // Increment before execute
2117 :
2118 : // Track the number of dynamic instructions executed.
2119 : ++NumDynamicInsts;
2120 :
2121 : LLVM_DEBUG(dbgs() << "About to interpret: " << I);
2122 634 : visit(I); // Dispatch to one of the visit* methods...
2123 : }
2124 19 : }
|