Diffstat
-rw-r--r--  libraries/luajit-2.0/src/lj_asm_x86.h | 2751
1 files changed, 0 insertions, 2751 deletions
diff --git a/libraries/luajit-2.0/src/lj_asm_x86.h b/libraries/luajit-2.0/src/lj_asm_x86.h
deleted file mode 100644
index 1170b66..0000000
--- a/libraries/luajit-2.0/src/lj_asm_x86.h
+++ /dev/null
@@ -1,2751 +0,0 @@
/*
** x86/x64 IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2011 Mike Pall. See Copyright Notice in luajit.h
*/

/* -- Guard handling ------------------------------------------------------ */

/* Generate an exit stub group at the bottom of the reserved MCode memory. */
static MCode *asm_exitstub_gen(ASMState *as, ExitNo group)
{
  ExitNo i, groupofs = (group*EXITSTUBS_PER_GROUP) & 0xff;
  MCode *mxp = as->mcbot;
  MCode *mxpstart = mxp;
  if (mxp + (2+2)*EXITSTUBS_PER_GROUP+8+5 >= as->mctop)
    asm_mclimit(as);
  /* Push low byte of exitno for each exit stub. */
  *mxp++ = XI_PUSHi8; *mxp++ = (MCode)groupofs;
  for (i = 1; i < EXITSTUBS_PER_GROUP; i++) {
    *mxp++ = XI_JMPs; *mxp++ = (MCode)((2+2)*(EXITSTUBS_PER_GROUP - i) - 2);
    *mxp++ = XI_PUSHi8; *mxp++ = (MCode)(groupofs + i);
  }
  /* Push the high byte of the exitno for each exit stub group. */
  *mxp++ = XI_PUSHi8; *mxp++ = (MCode)((group*EXITSTUBS_PER_GROUP)>>8);
  /* Store DISPATCH at original stack slot 0. Account for the two push ops. */
  *mxp++ = XI_MOVmi;
  *mxp++ = MODRM(XM_OFS8, 0, RID_ESP);
  *mxp++ = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
  *mxp++ = 2*sizeof(void *);
  *(int32_t *)mxp = ptr2addr(J2GG(as->J)->dispatch); mxp += 4;
  /* Jump to exit handler which fills in the ExitState. */
  *mxp++ = XI_JMP; mxp += 4;
  *((int32_t *)(mxp-4)) = jmprel(mxp, (MCode *)(void *)lj_vm_exit_handler);
  /* Commit the code for this group (even if assembly fails later on). */
  lj_mcode_commitbot(as->J, mxp);
  as->mcbot = mxp;
  as->mclim = as->mcbot + MCLIM_REDZONE;
  return mxpstart;
}
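
/* Editor's sketch (not part of the original file): seen from an exit's
** entry point, the group generated above looks like this, with one 2-byte
** push per exit and a shared tail:
**
**   push <groupofs+i>      ; low byte of the exit number
**   jmp  tail              ; omitted for the last stub, which falls through
**   ...
**   tail:
**   push <group high byte>
**   mov  [esp+2*sizeof(void *)], DISPATCH  ; original stack slot 0
**   jmp  lj_vm_exit_handler
*/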

/* Setup all needed exit stubs. */
static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
{
  ExitNo i;
  if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR)
    lj_trace_err(as->J, LJ_TRERR_SNAPOV);
  for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++)
    if (as->J->exitstubgroup[i] == NULL)
      as->J->exitstubgroup[i] = asm_exitstub_gen(as, i);
}

/* Emit conditional branch to exit for guard.
** It's important to emit this *after* all registers have been allocated,
** because rematerializations may invalidate the flags.
*/
static void asm_guardcc(ASMState *as, int cc)
{
  MCode *target = exitstub_addr(as->J, as->snapno);
  MCode *p = as->mcp;
  if (LJ_UNLIKELY(p == as->invmcp)) {
    as->loopinv = 1;
    *(int32_t *)(p+1) = jmprel(p+5, target);
    target = p;
    cc ^= 1;
    if (as->realign) {
      emit_sjcc(as, cc, target);
      return;
    }
  }
  emit_jcc(as, cc, target);
}
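
/* Editor's note (illustrative, not in the original source): when the guard
** coincides with the invertible loop branch (p == as->invmcp), the
** unconditional jmp at p is retargeted at the exit stub, the condition is
** inverted (cc ^= 1) and the guard branches to that jmp instead;
** as->loopinv records the inversion for the later loop fixup.
*/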

/* -- Memory operand fusion ----------------------------------------------- */

/* Limit linear search to this distance. Avoids O(n^2) behavior. */
#define CONFLICT_SEARCH_LIM	31

/* Check if a reference is a signed 32 bit constant. */
static int asm_isk32(ASMState *as, IRRef ref, int32_t *k)
{
  if (irref_isk(ref)) {
    IRIns *ir = IR(ref);
    if (ir->o != IR_KINT64) {
      *k = ir->i;
      return 1;
    } else if (checki32((int64_t)ir_kint64(ir)->u64)) {
      *k = (int32_t)ir_kint64(ir)->u64;
      return 1;
    }
  }
  return 0;
}

/* Check if there's no conflicting instruction between curins and ref.
** Also avoid fusing loads if there are multiple references.
*/
static int noconflict(ASMState *as, IRRef ref, IROp conflict, int noload)
{
  IRIns *ir = as->ir;
  IRRef i = as->curins;
  if (i > ref + CONFLICT_SEARCH_LIM)
    return 0;  /* Give up, ref is too far away. */
  while (--i > ref) {
    if (ir[i].o == conflict)
      return 0;  /* Conflict found. */
    else if (!noload && (ir[i].op1 == ref || ir[i].op2 == ref))
      return 0;
  }
  return 1;  /* Ok, no conflict. */
}
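
/* Editor's example (not in the original source): fusing e.g. an FLOAD into
** a consumer is only safe if no FSTORE occurs between the load and the
** consumer, and (unless noload is set) only if no other instruction also
** references the load, since the fused memory access would then be
** performed more than once. noconflict() checks both, scanning back at
** most CONFLICT_SEARCH_LIM instructions.
*/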

/* Fuse array base into memory operand. */
static IRRef asm_fuseabase(ASMState *as, IRRef ref)
{
  IRIns *irb = IR(ref);
  as->mrm.ofs = 0;
  if (irb->o == IR_FLOAD) {
    IRIns *ira = IR(irb->op1);
    lua_assert(irb->op2 == IRFL_TAB_ARRAY);
    /* We can avoid the FLOAD of t->array for colocated arrays. */
    if (ira->o == IR_TNEW && ira->op1 <= LJ_MAX_COLOSIZE &&
        !neverfuse(as) && noconflict(as, irb->op1, IR_NEWREF, 1)) {
      as->mrm.ofs = (int32_t)sizeof(GCtab);  /* Ofs to colocated array. */
      return irb->op1;  /* Table obj. */
    }
  } else if (irb->o == IR_ADD && irref_isk(irb->op2)) {
    /* Fuse base offset (vararg load). */
    as->mrm.ofs = IR(irb->op2)->i;
    return irb->op1;
  }
  return ref;  /* Otherwise use the given array base. */
}

/* Fuse array reference into memory operand. */
static void asm_fusearef(ASMState *as, IRIns *ir, RegSet allow)
{
  IRIns *irx;
  lua_assert(ir->o == IR_AREF);
  as->mrm.base = (uint8_t)ra_alloc1(as, asm_fuseabase(as, ir->op1), allow);
  irx = IR(ir->op2);
  if (irref_isk(ir->op2)) {
    as->mrm.ofs += 8*irx->i;
    as->mrm.idx = RID_NONE;
  } else {
    rset_clear(allow, as->mrm.base);
    as->mrm.scale = XM_SCALE8;
    /* Fuse a constant ADD (e.g. t[i+1]) into the offset.
    ** Doesn't help much without ABCelim, but reduces register pressure.
    */
    if (!LJ_64 &&  /* Has bad effects with negative index on x64. */
        mayfuse(as, ir->op2) && ra_noreg(irx->r) &&
        irx->o == IR_ADD && irref_isk(irx->op2)) {
      as->mrm.ofs += 8*IR(irx->op2)->i;
      as->mrm.idx = (uint8_t)ra_alloc1(as, irx->op1, allow);
    } else {
      as->mrm.idx = (uint8_t)ra_alloc1(as, ir->op2, allow);
    }
  }
}

/* Fuse array/hash/upvalue reference into memory operand.
** Caveat: this may allocate GPRs for the base/idx registers. Be sure to
** pass the final allow mask, excluding any GPRs used for other inputs.
** In particular: 2-operand GPR instructions need to call ra_dest() first!
*/
static void asm_fuseahuref(ASMState *as, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  if (ra_noreg(ir->r)) {
    switch ((IROp)ir->o) {
    case IR_AREF:
      if (mayfuse(as, ref)) {
        asm_fusearef(as, ir, allow);
        return;
      }
      break;
    case IR_HREFK:
      if (mayfuse(as, ref)) {
        as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
        as->mrm.ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
        as->mrm.idx = RID_NONE;
        return;
      }
      break;
    case IR_UREFC:
      if (irref_isk(ir->op1)) {
        GCfunc *fn = ir_kfunc(IR(ir->op1));
        GCupval *uv = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv;
        as->mrm.ofs = ptr2addr(&uv->tv);
        as->mrm.base = as->mrm.idx = RID_NONE;
        return;
      }
      break;
    default:
      lua_assert(ir->o == IR_HREF || ir->o == IR_NEWREF || ir->o == IR_UREFO ||
                 ir->o == IR_KKPTR);
      break;
    }
  }
  as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
  as->mrm.ofs = 0;
  as->mrm.idx = RID_NONE;
}

/* Fuse FLOAD/FREF reference into memory operand. */
static void asm_fusefref(ASMState *as, IRIns *ir, RegSet allow)
{
  lua_assert(ir->o == IR_FLOAD || ir->o == IR_FREF);
  as->mrm.ofs = field_ofs[ir->op2];
  as->mrm.idx = RID_NONE;
  if (irref_isk(ir->op1)) {
    as->mrm.ofs += IR(ir->op1)->i;
    as->mrm.base = RID_NONE;
  } else {
    as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
  }
}

/* Fuse string reference into memory operand. */
static void asm_fusestrref(ASMState *as, IRIns *ir, RegSet allow)
{
  IRIns *irr;
  lua_assert(ir->o == IR_STRREF);
  as->mrm.base = as->mrm.idx = RID_NONE;
  as->mrm.scale = XM_SCALE1;
  as->mrm.ofs = sizeof(GCstr);
  if (irref_isk(ir->op1)) {
    as->mrm.ofs += IR(ir->op1)->i;
  } else {
    Reg r = ra_alloc1(as, ir->op1, allow);
    rset_clear(allow, r);
    as->mrm.base = (uint8_t)r;
  }
  irr = IR(ir->op2);
  if (irref_isk(ir->op2)) {
    as->mrm.ofs += irr->i;
  } else {
    Reg r;
    /* Fuse a constant add into the offset, e.g. string.sub(s, i+10). */
    if (!LJ_64 &&  /* Has bad effects with negative index on x64. */
        mayfuse(as, ir->op2) && irr->o == IR_ADD && irref_isk(irr->op2)) {
      as->mrm.ofs += IR(irr->op2)->i;
      r = ra_alloc1(as, irr->op1, allow);
    } else {
      r = ra_alloc1(as, ir->op2, allow);
    }
    if (as->mrm.base == RID_NONE)
      as->mrm.base = (uint8_t)r;
    else
      as->mrm.idx = (uint8_t)r;
  }
}

static void asm_fusexref(ASMState *as, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  as->mrm.idx = RID_NONE;
  if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
    as->mrm.ofs = ir->i;
    as->mrm.base = RID_NONE;
  } else if (ir->o == IR_STRREF) {
    asm_fusestrref(as, ir, allow);
  } else {
    as->mrm.ofs = 0;
    if (canfuse(as, ir) && ir->o == IR_ADD && ra_noreg(ir->r)) {
      /* Gather (base+idx*sz)+ofs as emitted by cdata ptr/array indexing. */
      IRIns *irx;
      IRRef idx;
      Reg r;
      if (asm_isk32(as, ir->op2, &as->mrm.ofs)) {  /* Recognize x+ofs. */
        ref = ir->op1;
        ir = IR(ref);
        if (!(ir->o == IR_ADD && canfuse(as, ir) && ra_noreg(ir->r)))
          goto noadd;
      }
      as->mrm.scale = XM_SCALE1;
      idx = ir->op1;
      ref = ir->op2;
      irx = IR(idx);
      if (!(irx->o == IR_BSHL || irx->o == IR_ADD)) {  /* Try other operand. */
        idx = ir->op2;
        ref = ir->op1;
        irx = IR(idx);
      }
      if (canfuse(as, irx) && ra_noreg(irx->r)) {
        if (irx->o == IR_BSHL && irref_isk(irx->op2) && IR(irx->op2)->i <= 3) {
          /* Recognize idx<<b with b = 0-3, corresponding to sz = (1),2,4,8. */
          idx = irx->op1;
          as->mrm.scale = (uint8_t)(IR(irx->op2)->i << 6);
        } else if (irx->o == IR_ADD && irx->op1 == irx->op2) {
          /* FOLD does idx*2 ==> idx<<1 ==> idx+idx. */
          idx = irx->op1;
          as->mrm.scale = XM_SCALE2;
        }
      }
      r = ra_alloc1(as, idx, allow);
      rset_clear(allow, r);
      as->mrm.idx = (uint8_t)r;
    }
  noadd:
    as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
  }
}
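
/* Editor's example (not in the original source): for a cdata access like
** p[i] with 4-byte elements, FOLD typically yields ADD(ADD(p, BSHL(i, 2)),
** ofs); the code above recognizes this shape and folds it into a single
** x86 operand [p+i*4+ofs], so no separate address arithmetic is emitted.
*/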

/* Fuse load into memory operand. */
static Reg asm_fuseload(ASMState *as, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  if (ra_hasreg(ir->r)) {
    if (allow != RSET_EMPTY) {  /* Fast path. */
      ra_noweak(as, ir->r);
      return ir->r;
    }
  fusespill:
    /* Force a spill if only memory operands are allowed (asm_x87load). */
    as->mrm.base = RID_ESP;
    as->mrm.ofs = ra_spill(as, ir);
    as->mrm.idx = RID_NONE;
    return RID_MRM;
  }
  if (ir->o == IR_KNUM) {
    RegSet avail = as->freeset & ~as->modset & RSET_FPR;
    lua_assert(allow != RSET_EMPTY);
    if (!(avail & (avail-1))) {  /* Fuse if less than two regs available. */
      as->mrm.ofs = ptr2addr(ir_knum(ir));
      as->mrm.base = as->mrm.idx = RID_NONE;
      return RID_MRM;
    }
  } else if (mayfuse(as, ref)) {
    RegSet xallow = (allow & RSET_GPR) ? allow : RSET_GPR;
    if (ir->o == IR_SLOAD) {
      if (!(ir->op2 & (IRSLOAD_PARENT|IRSLOAD_CONVERT)) &&
          noconflict(as, ref, IR_RETF, 0)) {
        as->mrm.base = (uint8_t)ra_alloc1(as, REF_BASE, xallow);
        as->mrm.ofs = 8*((int32_t)ir->op1-1) + ((ir->op2&IRSLOAD_FRAME)?4:0);
        as->mrm.idx = RID_NONE;
        return RID_MRM;
      }
    } else if (ir->o == IR_FLOAD) {
      /* Generic fusion is only ok for 32 bit operand (but see asm_comp). */
      if ((irt_isint(ir->t) || irt_isaddr(ir->t)) &&
          noconflict(as, ref, IR_FSTORE, 0)) {
        asm_fusefref(as, ir, xallow);
        return RID_MRM;
      }
    } else if (ir->o == IR_ALOAD || ir->o == IR_HLOAD || ir->o == IR_ULOAD) {
      if (noconflict(as, ref, ir->o + IRDELTA_L2S, 0)) {
        asm_fuseahuref(as, ir->op1, xallow);
        return RID_MRM;
      }
    } else if (ir->o == IR_XLOAD) {
      /* Generic fusion is not ok for 8/16 bit operands (but see asm_comp).
      ** Fusing unaligned memory operands is ok on x86 (except for SIMD types).
      */
      if ((!irt_typerange(ir->t, IRT_I8, IRT_U16)) &&
          noconflict(as, ref, IR_XSTORE, 0)) {
        asm_fusexref(as, ir->op1, xallow);
        return RID_MRM;
      }
    } else if (ir->o == IR_VLOAD) {
      asm_fuseahuref(as, ir->op1, xallow);
      return RID_MRM;
    }
  }
  if (!(as->freeset & allow) &&
      (allow == RSET_EMPTY || ra_hasspill(ir->s) || iscrossref(as, ref)))
    goto fusespill;
  return ra_allocref(as, ref, allow);
}
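
/* Editor's note (illustrative, not in the original source): RID_MRM is a
** pseudo-register telling the caller that the operand was fused into
** as->mrm (base/idx/ofs) rather than allocated to a plain register. The
** fusespill path reuses this to hand out a spill slot [esp+ofs] when only
** memory operands are acceptable, e.g. for x87 loads.
*/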

/* -- Calls --------------------------------------------------------------- */

/* Count the required number of stack slots for a call. */
static int asm_count_call_slots(ASMState *as, const CCallInfo *ci, IRRef *args)
{
  uint32_t i, nargs = CCI_NARGS(ci);
  int nslots = 0;
#if LJ_64
  if (LJ_ABI_WIN) {
    nslots = (int)(nargs*2);  /* Only matters for more than four args. */
  } else {
    int ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
    for (i = 0; i < nargs; i++)
      if (args[i] && irt_isfp(IR(args[i])->t)) {
        if (nfpr > 0) nfpr--; else nslots += 2;
      } else {
        if (ngpr > 0) ngpr--; else nslots += 2;
      }
  }
#else
  int ngpr = 0;
  if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL)
    ngpr = 2;
  else if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL)
    ngpr = 1;
  for (i = 0; i < nargs; i++)
    if (args[i] && irt_isfp(IR(args[i])->t)) {
      nslots += irt_isnum(IR(args[i])->t) ? 2 : 1;
    } else {
      if (ngpr > 0) ngpr--; else nslots++;
    }
#endif
  return nslots;
}
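
/* Editor's worked example (not in the original source), assuming the usual
** six GPR and eight FPR argument registers of the POSIX/x64 ABI: a call
** taking (int, double, int, double) passes everything in registers, so
** nslots stays 0. On x86 with the default cdecl convention ngpr is 0 and
** the same signature needs 1+2+1+2 = 6 stack slots.
*/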

/* Generate a call to a C function. */
static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
{
  uint32_t n, nargs = CCI_NARGS(ci);
  int32_t ofs = STACKARG_OFS;
#if LJ_64
  uint32_t gprs = REGARG_GPRS;
  Reg fpr = REGARG_FIRSTFPR;
#if !LJ_ABI_WIN
  MCode *patchnfpr = NULL;
#endif
#else
  uint32_t gprs = 0;
  if ((ci->flags & CCI_CC_MASK) != CCI_CC_CDECL) {
    if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL)
      gprs = (REGARG_GPRS & 31);
    else if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL)
      gprs = REGARG_GPRS;
  }
#endif
  if ((void *)ci->func)
    emit_call(as, ci->func);
#if LJ_64
  if ((ci->flags & CCI_VARARG)) {  /* Special handling for vararg calls. */
#if LJ_ABI_WIN
    for (n = 0; n < 4 && n < nargs; n++) {
      IRIns *ir = IR(args[n]);
      if (irt_isfp(ir->t))  /* Duplicate FPRs in GPRs. */
        emit_rr(as, XO_MOVDto, (irt_isnum(ir->t) ? REX_64 : 0) | (fpr+n),
                ((gprs >> (n*5)) & 31));  /* Either MOVD or MOVQ. */
    }
#else
    patchnfpr = --as->mcp;  /* Indicate number of used FPRs in register al. */
    *--as->mcp = XI_MOVrib | RID_EAX;
#endif
  }
#endif
  for (n = 0; n < nargs; n++) {  /* Setup args. */
    IRRef ref = args[n];
    IRIns *ir = IR(ref);
    Reg r;
#if LJ_64 && LJ_ABI_WIN
    /* Windows/x64 argument registers are strictly positional. */
    r = irt_isfp(ir->t) ? (fpr <= REGARG_LASTFPR ? fpr : 0) : (gprs & 31);
    fpr++; gprs >>= 5;
#elif LJ_64
    /* POSIX/x64 argument registers are used in order of appearance. */
    if (irt_isfp(ir->t)) {
      r = fpr <= REGARG_LASTFPR ? fpr++ : 0;
    } else {
      r = gprs & 31; gprs >>= 5;
    }
#else
    if (ref && irt_isfp(ir->t)) {
      r = 0;
    } else {
      r = gprs & 31; gprs >>= 5;
      if (!ref) continue;
    }
#endif
    if (r) {  /* Argument is in a register. */
      if (r < RID_MAX_GPR && ref < ASMREF_TMP1) {
#if LJ_64
        if (ir->o == IR_KINT64)
          emit_loadu64(as, r, ir_kint64(ir)->u64);
        else
#endif
          emit_loadi(as, r, ir->i);
      } else {
        lua_assert(rset_test(as->freeset, r));  /* Must have been evicted. */
        if (ra_hasreg(ir->r)) {
          ra_noweak(as, ir->r);
          emit_movrr(as, ir, r, ir->r);
        } else {
          ra_allocref(as, ref, RID2RSET(r));
        }
      }
    } else if (irt_isfp(ir->t)) {  /* FP argument is on stack. */
      lua_assert(!(irt_isfloat(ir->t) && irref_isk(ref)));  /* No float k. */
      if (LJ_32 && (ofs & 4) && irref_isk(ref)) {
        /* Split stores for unaligned FP consts. */
        emit_movmroi(as, RID_ESP, ofs, (int32_t)ir_knum(ir)->u32.lo);
        emit_movmroi(as, RID_ESP, ofs+4, (int32_t)ir_knum(ir)->u32.hi);
      } else {
        r = ra_alloc1(as, ref, RSET_FPR);
        emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto,
                  r, RID_ESP, ofs);
      }
      ofs += (LJ_32 && irt_isfloat(ir->t)) ? 4 : 8;
    } else {  /* Non-FP argument is on stack. */
      if (LJ_32 && ref < ASMREF_TMP1) {
        emit_movmroi(as, RID_ESP, ofs, ir->i);
      } else {
        r = ra_alloc1(as, ref, RSET_GPR);
        emit_movtomro(as, REX_64 + r, RID_ESP, ofs);
      }
      ofs += sizeof(intptr_t);
    }
  }
#if LJ_64 && !LJ_ABI_WIN
  if (patchnfpr) *patchnfpr = fpr - REGARG_FIRSTFPR;
#endif
}

/* Setup result reg/sp for call. Evict scratch regs. */
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  RegSet drop = RSET_SCRATCH;
  if ((ci->flags & CCI_NOFPRCLOBBER))
    drop &= ~RSET_FPR;
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);  /* Evictions must be performed first. */
  if (ra_used(ir)) {
    if (irt_isfp(ir->t)) {
      int32_t ofs = sps_scale(ir->s);  /* Use spill slot or temp slots. */
#if LJ_64
      if ((ci->flags & CCI_CASTU64)) {
        Reg dest = ir->r;
        if (ra_hasreg(dest)) {
          ra_free(as, dest);
          ra_modified(as, dest);
          emit_rr(as, XO_MOVD, dest|REX_64, RID_RET);  /* Really MOVQ. */
        }
        if (ofs) emit_movtomro(as, RID_RET|REX_64, RID_ESP, ofs);
      } else {
        ra_destreg(as, ir, RID_FPRET);
      }
#else
      /* Number result is in x87 st0 for x86 calling convention. */
      Reg dest = ir->r;
      if (ra_hasreg(dest)) {
        ra_free(as, dest);
        ra_modified(as, dest);
        emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS,
                  dest, RID_ESP, ofs);
      }
      if ((ci->flags & CCI_CASTU64)) {
        emit_movtomro(as, RID_RETLO, RID_ESP, ofs);
        emit_movtomro(as, RID_RETHI, RID_ESP, ofs+4);
      } else {
        emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd,
                  irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs);
      }
#endif
    } else {
      lua_assert(!irt_ispri(ir->t));
      ra_destreg(as, ir, RID_RET);
    }
  } else if (LJ_32 && irt_isfp(ir->t)) {
    emit_x87op(as, XI_FPOP);  /* Pop unused result from x87 st0. */
  }
}

static void asm_call(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX];
  const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
  asm_collectargs(as, ir, ci, args);
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}

/* Return a constant function pointer or NULL for indirect calls. */
static void *asm_callx_func(ASMState *as, IRIns *irf, IRRef func)
{
#if LJ_32
  UNUSED(as);
  if (irref_isk(func))
    return (void *)irf->i;
#else
  if (irref_isk(func)) {
    MCode *p;
    if (irf->o == IR_KINT64)
      p = (MCode *)(void *)ir_k64(irf)->u64;
    else
      p = (MCode *)(void *)(uintptr_t)(uint32_t)irf->i;
    if (p - as->mcp == (int32_t)(p - as->mcp))
      return p;  /* Call target is still in +-2GB range. */
    /* Avoid the indirect case of emit_call(). Try to hoist func addr. */
  }
#endif
  return NULL;
}
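
/* Editor's note (illustrative, not in the original source): the check
** p - as->mcp == (int32_t)(p - as->mcp) tests whether the constant target
** is within rel32 (+-2GB) range of the code being generated; only then is
** the pointer returned, since emit_call() would otherwise have to fall
** back to an indirect call.
*/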

static void asm_callx(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX];
  CCallInfo ci;
  IRRef func;
  IRIns *irf;
  int32_t spadj = 0;
  ci.flags = asm_callx_flags(as, ir);
  asm_collectargs(as, ir, &ci, args);
  asm_setupresult(as, ir, &ci);
#if LJ_32
  /* Have to readjust stack after non-cdecl calls due to callee cleanup. */
  if ((ci.flags & CCI_CC_MASK) != CCI_CC_CDECL)
    spadj = 4 * asm_count_call_slots(as, &ci, args);
#endif
  func = ir->op2; irf = IR(func);
  if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
  ci.func = (ASMFunction)asm_callx_func(as, irf, func);
  if (!(void *)ci.func) {
    /* Use a (hoistable) non-scratch register for indirect calls. */
    RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
    Reg r = ra_alloc1(as, func, allow);
    if (LJ_32) emit_spsub(as, spadj);  /* Above code may cause restores! */
    emit_rr(as, XO_GROUP5, XOg_CALL, r);
  } else if (LJ_32) {
    emit_spsub(as, spadj);
  }
  asm_gencall(as, &ci, args);
}

/* -- Returns ------------------------------------------------------------- */

/* Return to lower frame. Guard that it goes to the right spot. */
static void asm_retf(ASMState *as, IRIns *ir)
{
  Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
  void *pc = ir_kptr(IR(ir->op2));
  int32_t delta = 1+bc_a(*((const BCIns *)pc - 1));
  as->topslot -= (BCReg)delta;
  if ((int32_t)as->topslot < 0) as->topslot = 0;
  emit_setgl(as, base, jit_base);
  emit_addptr(as, base, -8*delta);
  asm_guardcc(as, CC_NE);
  emit_gmroi(as, XG_ARITHi(XOg_CMP), base, -4, ptr2addr(pc));
}

/* -- Type conversions ---------------------------------------------------- */

static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
{
  Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_guardcc(as, CC_P);
  asm_guardcc(as, CC_NE);
  emit_rr(as, XO_UCOMISD, left, tmp);
  emit_rr(as, XO_CVTSI2SD, tmp, dest);
  if (!(as->flags & JIT_F_SPLIT_XMM))
    emit_rr(as, XO_XORPS, tmp, tmp);  /* Avoid partial register stall. */
  emit_rr(as, XO_CVTTSD2SI, dest, left);
  /* Can't fuse since left is needed twice. */
}
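
/* Editor's note (illustrative, not in the original source): ucomisd sets
** PF for unordered comparisons, so the CC_P guard above catches NaN input,
** while the CC_NE guard catches values that don't survive the
** cvttsd2si/cvtsi2sd round trip, i.e. non-integral numbers.
*/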

static void asm_tobit(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg tmp = ra_noreg(IR(ir->op1)->r) ?
              ra_alloc1(as, ir->op1, RSET_FPR) :
              ra_scratch(as, RSET_FPR);
  Reg right = asm_fuseload(as, ir->op2, rset_exclude(RSET_FPR, tmp));
  emit_rr(as, XO_MOVDto, tmp, dest);
  emit_mrm(as, XO_ADDSD, tmp, right);
  ra_left(as, tmp, ir->op1);
}

static void asm_conv(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
  int st64 = (st == IRT_I64 || st == IRT_U64 || (LJ_64 && st == IRT_P64));
  int stfp = (st == IRT_NUM || st == IRT_FLOAT);
  IRRef lref = ir->op1;
  lua_assert(irt_type(ir->t) != st);
  lua_assert(!(LJ_32 && (irt_isint64(ir->t) || st64)));  /* Handled by SPLIT. */
  if (irt_isfp(ir->t)) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    if (stfp) {  /* FP to FP conversion. */
      Reg left = asm_fuseload(as, lref, RSET_FPR);
      emit_mrm(as, st == IRT_NUM ? XO_CVTSD2SS : XO_CVTSS2SD, dest, left);
      if (left == dest) return;  /* Avoid the XO_XORPS. */
    } else if (LJ_32 && st == IRT_U32) {  /* U32 to FP conversion on x86. */
      /* number = (2^52+2^51 .. u32) - (2^52+2^51) */
      cTValue *k = lj_ir_k64_find(as->J, U64x(43380000,00000000));
      Reg bias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
      if (irt_isfloat(ir->t))
        emit_rr(as, XO_CVTSD2SS, dest, dest);
      emit_rr(as, XO_SUBSD, dest, bias);  /* Subtract 2^52+2^51 bias. */
      emit_rr(as, XO_XORPS, dest, bias);  /* Merge bias and integer. */
      emit_loadn(as, bias, k);
      emit_mrm(as, XO_MOVD, dest, asm_fuseload(as, lref, RSET_GPR));
      return;
    } else {  /* Integer to FP conversion. */
      Reg left = (LJ_64 && (st == IRT_U32 || st == IRT_U64)) ?
                 ra_alloc1(as, lref, RSET_GPR) :
                 asm_fuseload(as, lref, RSET_GPR);
      if (LJ_64 && st == IRT_U64) {
        MCLabel l_end = emit_label(as);
        const void *k = lj_ir_k64_find(as->J, U64x(43f00000,00000000));
        emit_rma(as, XO_ADDSD, dest, k);  /* Add 2^64 to compensate. */
        emit_sjcc(as, CC_NS, l_end);
        emit_rr(as, XO_TEST, left|REX_64, left);  /* Check if u64 >= 2^63. */
      }
      emit_mrm(as, irt_isnum(ir->t) ? XO_CVTSI2SD : XO_CVTSI2SS,
               dest|((LJ_64 && (st64 || st == IRT_U32)) ? REX_64 : 0), left);
    }
    if (!(as->flags & JIT_F_SPLIT_XMM))
      emit_rr(as, XO_XORPS, dest, dest);  /* Avoid partial register stall. */
  } else if (stfp) {  /* FP to integer conversion. */
    if (irt_isguard(ir->t)) {
      /* Checked conversions are only supported from number to int. */
      lua_assert(irt_isint(ir->t) && st == IRT_NUM);
      asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
    } else {
      Reg dest = ra_dest(as, ir, RSET_GPR);
      x86Op op = st == IRT_NUM ?
                 ((ir->op2 & IRCONV_TRUNC) ? XO_CVTTSD2SI : XO_CVTSD2SI) :
                 ((ir->op2 & IRCONV_TRUNC) ? XO_CVTTSS2SI : XO_CVTSS2SI);
      if (LJ_64 ? irt_isu64(ir->t) : irt_isu32(ir->t)) {
        /* LJ_64: For inputs >= 2^63 add -2^64, convert again. */
        /* LJ_32: For inputs >= 2^31 add -2^31, convert again and add 2^31. */
        Reg tmp = ra_noreg(IR(lref)->r) ? ra_alloc1(as, lref, RSET_FPR) :
                                          ra_scratch(as, RSET_FPR);
        MCLabel l_end = emit_label(as);
        if (LJ_32)
          emit_gri(as, XG_ARITHi(XOg_ADD), dest, (int32_t)0x80000000);
        emit_rr(as, op, dest|REX_64, tmp);
        if (st == IRT_NUM)
          emit_rma(as, XO_ADDSD, tmp, lj_ir_k64_find(as->J,
                   LJ_64 ? U64x(c3f00000,00000000) : U64x(c1e00000,00000000)));
        else
          emit_rma(as, XO_ADDSS, tmp, lj_ir_k64_find(as->J,
                   LJ_64 ? U64x(00000000,df800000) : U64x(00000000,cf000000)));
        emit_sjcc(as, CC_NS, l_end);
        emit_rr(as, XO_TEST, dest|REX_64, dest);  /* Check if dest negative. */
        emit_rr(as, op, dest|REX_64, tmp);
        ra_left(as, tmp, lref);
      } else {
        Reg left = asm_fuseload(as, lref, RSET_FPR);
        if (LJ_64 && irt_isu32(ir->t))
          emit_rr(as, XO_MOV, dest, dest);  /* Zero hiword. */
        emit_mrm(as, op,
                 dest|((LJ_64 &&
                        (irt_is64(ir->t) || irt_isu32(ir->t))) ? REX_64 : 0),
                 left);
      }
    }
  } else if (st >= IRT_I8 && st <= IRT_U16) {  /* Extend to 32 bit integer. */
    Reg left, dest = ra_dest(as, ir, RSET_GPR);
    RegSet allow = RSET_GPR;
    x86Op op;
    lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
    if (st == IRT_I8) {
      op = XO_MOVSXb; allow = RSET_GPR8; dest |= FORCE_REX;
    } else if (st == IRT_U8) {
      op = XO_MOVZXb; allow = RSET_GPR8; dest |= FORCE_REX;
    } else if (st == IRT_I16) {
      op = XO_MOVSXw;
    } else {
      op = XO_MOVZXw;
    }
    left = asm_fuseload(as, lref, allow);
    /* Add extra MOV if source is already in wrong register. */
    if (!LJ_64 && left != RID_MRM && !rset_test(allow, left)) {
      Reg tmp = ra_scratch(as, allow);
      emit_rr(as, op, dest, tmp);
      emit_rr(as, XO_MOV, tmp, left);
    } else {
      emit_mrm(as, op, dest, left);
    }
  } else {  /* 32/64 bit integer conversions. */
    if (LJ_32) {  /* Only need to handle 32/32 bit no-op (cast) on x86. */
      Reg dest = ra_dest(as, ir, RSET_GPR);
      ra_left(as, dest, lref);  /* Do nothing, but may need to move regs. */
    } else if (irt_is64(ir->t)) {
      Reg dest = ra_dest(as, ir, RSET_GPR);
      if (st64 || !(ir->op2 & IRCONV_SEXT)) {
        /* 64/64 bit no-op (cast) or 32 to 64 bit zero extension. */
        ra_left(as, dest, lref);  /* Do nothing, but may need to move regs. */
      } else {  /* 32 to 64 bit sign extension. */
        Reg left = asm_fuseload(as, lref, RSET_GPR);
        emit_mrm(as, XO_MOVSXd, dest|REX_64, left);
      }
    } else {
      Reg dest = ra_dest(as, ir, RSET_GPR);
      if (st64) {
        Reg left = asm_fuseload(as, lref, RSET_GPR);
        /* This is either a 32 bit reg/reg mov which zeroes the hiword
        ** or a load of the loword from a 64 bit address.
        */
        emit_mrm(as, XO_MOV, dest, left);
      } else {  /* 32/32 bit no-op (cast). */
        ra_left(as, dest, lref);  /* Do nothing, but may need to move regs. */
      }
    }
  }
}
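
/* Editor's worked example (not in the original source) for the x86 u32->FP
** path above: 0x4338000000000000 is the double 2^52+2^51. XORPS merges the
** zero-extended u32 into the low mantissa bits of that constant, giving
** exactly 2^52+2^51+u32; subtracting the bias then leaves u32 as a double,
** with no signed-conversion fixup needed.
*/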

#if LJ_32 && LJ_HASFFI
/* No SSE conversions to/from 64 bit on x86, so resort to ugly x87 code. */

/* 64 bit integer to FP conversion in 32 bit mode. */
static void asm_conv_fp_int64(ASMState *as, IRIns *ir)
{
  Reg hi = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg lo = ra_alloc1(as, (ir-1)->op1, rset_exclude(RSET_GPR, hi));
  int32_t ofs = sps_scale(ir->s);  /* Use spill slot or temp slots. */
  Reg dest = ir->r;
  if (ra_hasreg(dest)) {
    ra_free(as, dest);
    ra_modified(as, dest);
    emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS,
              dest, RID_ESP, ofs);
  }
  emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd,
            irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs);
  if (((ir-1)->op2 & IRCONV_SRCMASK) == IRT_U64) {
    /* For inputs in [2^63,2^64-1] add 2^64 to compensate. */
    MCLabel l_end = emit_label(as);
    emit_rma(as, XO_FADDq, XOg_FADDq,
             lj_ir_k64_find(as->J, U64x(43f00000,00000000)));
    emit_sjcc(as, CC_NS, l_end);
    emit_rr(as, XO_TEST, hi, hi);  /* Check if u64 >= 2^63. */
  } else {
    lua_assert(((ir-1)->op2 & IRCONV_SRCMASK) == IRT_I64);
  }
  emit_rmro(as, XO_FILDq, XOg_FILDq, RID_ESP, 0);
  /* NYI: Avoid narrow-to-wide store-to-load forwarding stall. */
  emit_rmro(as, XO_MOVto, hi, RID_ESP, 4);
  emit_rmro(as, XO_MOVto, lo, RID_ESP, 0);
}

/* FP to 64 bit integer conversion in 32 bit mode. */
static void asm_conv_int64_fp(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
  IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
  Reg lo, hi;
  lua_assert(st == IRT_NUM || st == IRT_FLOAT);
  lua_assert(dt == IRT_I64 || dt == IRT_U64);
  lua_assert(((ir-1)->op2 & IRCONV_TRUNC));
  hi = ra_dest(as, ir, RSET_GPR);
  lo = ra_dest(as, ir-1, rset_exclude(RSET_GPR, hi));
  if (ra_used(ir-1)) emit_rmro(as, XO_MOV, lo, RID_ESP, 0);
  /* NYI: Avoid wide-to-narrow store-to-load forwarding stall. */
  if (!(as->flags & JIT_F_SSE3)) {  /* Set FPU rounding mode to default. */
    emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 4);
    emit_rmro(as, XO_MOVto, lo, RID_ESP, 4);
    emit_gri(as, XG_ARITHi(XOg_AND), lo, 0xf3ff);
  }
  if (dt == IRT_U64) {
    /* For inputs in [2^63,2^64-1] add -2^64 and convert again. */
    MCLabel l_pop, l_end = emit_label(as);
    emit_x87op(as, XI_FPOP);
    l_pop = emit_label(as);
    emit_sjmp(as, l_end);
    emit_rmro(as, XO_MOV, hi, RID_ESP, 4);
    if ((as->flags & JIT_F_SSE3))
      emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0);
    else
      emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0);
    emit_rma(as, XO_FADDq, XOg_FADDq,
             lj_ir_k64_find(as->J, U64x(c3f00000,00000000)));
    emit_sjcc(as, CC_NS, l_pop);
    emit_rr(as, XO_TEST, hi, hi);  /* Check if out-of-range (2^63). */
  }
  emit_rmro(as, XO_MOV, hi, RID_ESP, 4);
  if ((as->flags & JIT_F_SSE3)) {  /* Truncation is easy with SSE3. */
    emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0);
  } else {  /* Otherwise set FPU rounding mode to truncate before the store. */
    emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0);
    emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 0);
    emit_rmro(as, XO_MOVtow, lo, RID_ESP, 0);
    emit_rmro(as, XO_ARITHw(XOg_OR), lo, RID_ESP, 0);
    emit_loadi(as, lo, 0xc00);
    emit_rmro(as, XO_FNSTCW, XOg_FNSTCW, RID_ESP, 0);
  }
  if (dt == IRT_U64)
    emit_x87op(as, XI_FDUP);
  emit_mrm(as, st == IRT_NUM ? XO_FLDq : XO_FLDd,
           st == IRT_NUM ? XOg_FLDq: XOg_FLDd,
           asm_fuseload(as, ir->op1, RSET_EMPTY));
}
#endif
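
/* Editor's note (illustrative, not in the original source): the 0xc00 mask
** ORed into the saved x87 control word above sets the rounding-control
** field (bits 10-11) to 11b = truncate toward zero, which is what a
** C-style FP to integer conversion requires when SSE3's FISTTP is not
** available; 0xf3ff clears the same field to restore round-to-nearest.
*/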

static void asm_strto(ASMState *as, IRIns *ir)
{
  /* Force a spill slot for the destination register (if any). */
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_tonum];
  IRRef args[2];
  RegSet drop = RSET_SCRATCH;
  if ((drop & RSET_FPR) != RSET_FPR && ra_hasreg(ir->r))
    rset_set(drop, ir->r);  /* WIN64 doesn't spill all FPRs. */
  ra_evictset(as, drop);
  asm_guardcc(as, CC_E);
  emit_rr(as, XO_TEST, RID_RET, RID_RET);  /* Test return status. */
  args[0] = ir->op1;      /* GCstr *str */
  args[1] = ASMREF_TMP1;  /* TValue *n */
  asm_gencall(as, ci, args);
  /* Store the result to the spill slot or temp slots. */
  emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64,
            RID_ESP, sps_scale(ir->s));
}

static void asm_tostr(ASMState *as, IRIns *ir)
{
  IRIns *irl = IR(ir->op1);
  IRRef args[2];
  args[0] = ASMREF_L;
  as->gcsteps++;
  if (irt_isnum(irl->t)) {
    const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
    args[1] = ASMREF_TMP1;  /* const lua_Number * */
    asm_setupresult(as, ir, ci);  /* GCstr * */
    asm_gencall(as, ci, args);
    emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64,
              RID_ESP, ra_spill(as, irl));
  } else {
    const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
    args[1] = ir->op1;  /* int32_t k */
    asm_setupresult(as, ir, ci);  /* GCstr * */
    asm_gencall(as, ci, args);
  }
}

/* -- Memory references --------------------------------------------------- */

static void asm_aref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_fusearef(as, ir, RSET_GPR);
  if (!(as->mrm.idx == RID_NONE && as->mrm.ofs == 0))
    emit_mrm(as, XO_LEA, dest, RID_MRM);
  else if (as->mrm.base != dest)
    emit_rr(as, XO_MOV, dest, as->mrm.base);
}

/* Merge NE(HREF, niltv) check. */
static MCode *merge_href_niltv(ASMState *as, IRIns *ir)
{
  /* Assumes nothing else generates NE of HREF. */
  if ((ir[1].o == IR_NE || ir[1].o == IR_EQ) && ir[1].op1 == as->curins &&
      ra_hasreg(ir->r)) {
    MCode *p = as->mcp;
    p += (LJ_64 && *p != XI_ARITHi) ? 7+6 : 6+6;
    /* Ensure no loop branch inversion happened. */
    if (p[-6] == 0x0f && p[-5] == XI_JCCn+(CC_NE^(ir[1].o & 1))) {
      as->mcp = p;  /* Kill cmp reg, imm32 + jz exit. */
      return p + *(int32_t *)(p-4);  /* Return exit address. */
    }
  }
  return NULL;
}
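
/* Editor's note (illustrative, not in the original source): the 6+6 / 7+6
** offsets above correspond to the code the merged check would otherwise
** keep: cmp reg, imm32 (6 bytes, 7 with a REX prefix) followed by a
** jz/jnz rel32 (6 bytes). Advancing as->mcp past them deletes both, so the
** in-chain comparison can branch straight to the exit.
*/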

/* Inlined hash lookup. Specialized for key type and for const keys.
** The equivalent C code is:
**   Node *n = hashkey(t, key);
**   do {
**     if (lj_obj_equal(&n->key, key)) return &n->val;
**   } while ((n = nextnode(n)));
**   return niltv(L);
*/
static void asm_href(ASMState *as, IRIns *ir)
{
  MCode *nilexit = merge_href_niltv(as, ir);  /* Do this before any restores. */
  RegSet allow = RSET_GPR;
  Reg dest = ra_dest(as, ir, allow);
  Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
  Reg key = RID_NONE, tmp = RID_NONE;
  IRIns *irkey = IR(ir->op2);
  int isk = irref_isk(ir->op2);
  IRType1 kt = irkey->t;
  uint32_t khash;
  MCLabel l_end, l_loop, l_next;

  if (!isk) {
    rset_clear(allow, tab);
    key = ra_alloc1(as, ir->op2, irt_isnum(kt) ? RSET_FPR : allow);
    if (!irt_isstr(kt))
      tmp = ra_scratch(as, rset_exclude(allow, key));
  }

  /* Key not found in chain: jump to exit (if merged with NE) or load niltv. */
  l_end = emit_label(as);
  if (nilexit && ir[1].o == IR_NE) {
    emit_jcc(as, CC_E, nilexit);  /* XI_JMP is not found by lj_asm_patchexit. */
    nilexit = NULL;
  } else {
    emit_loada(as, dest, niltvg(J2G(as->J)));
  }

  /* Follow hash chain until the end. */
  l_loop = emit_sjcc_label(as, CC_NZ);
  emit_rr(as, XO_TEST, dest, dest);
  emit_rmro(as, XO_MOV, dest, dest, offsetof(Node, next));
  l_next = emit_label(as);

  /* Type and value comparison. */
  if (nilexit)
    emit_jcc(as, CC_E, nilexit);
  else
    emit_sjcc(as, CC_E, l_end);
  if (irt_isnum(kt)) {
    if (isk) {
      /* Assumes -0.0 is already canonicalized to +0.0. */
      emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.lo),
                 (int32_t)ir_knum(irkey)->u32.lo);
      emit_sjcc(as, CC_NE, l_next);
      emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.hi),
                 (int32_t)ir_knum(irkey)->u32.hi);
    } else {
      emit_sjcc(as, CC_P, l_next);
      emit_rmro(as, XO_UCOMISD, key, dest, offsetof(Node, key.n));
      emit_sjcc(as, CC_AE, l_next);
      /* The type check avoids NaN penalties and complaints from Valgrind. */
#if LJ_64
      emit_u32(as, LJ_TISNUM);
      emit_rmro(as, XO_ARITHi, XOg_CMP, dest, offsetof(Node, key.it));
#else
      emit_i8(as, LJ_TISNUM);
      emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it));
#endif
    }
#if LJ_64
  } else if (irt_islightud(kt)) {
    emit_rmro(as, XO_CMP, key|REX_64, dest, offsetof(Node, key.u64));
#endif
  } else {
    if (!irt_ispri(kt)) {
      lua_assert(irt_isaddr(kt));
      if (isk)
        emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.gcr),
                   ptr2addr(ir_kgc(irkey)));
      else
        emit_rmro(as, XO_CMP, key, dest, offsetof(Node, key.gcr));
      emit_sjcc(as, CC_NE, l_next);
    }
    lua_assert(!irt_isnil(kt));
    emit_i8(as, irt_toitype(kt));
    emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it));
  }
  emit_sfixup(as, l_loop);
  checkmclim(as);

  /* Load main position relative to tab->node into dest. */
  khash = isk ? ir_khash(irkey) : 1;
  if (khash == 0) {
    emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, node));
  } else {
    emit_rmro(as, XO_ARITH(XOg_ADD), dest, tab, offsetof(GCtab, node));
    if ((as->flags & JIT_F_PREFER_IMUL)) {
      emit_i8(as, sizeof(Node));
      emit_rr(as, XO_IMULi8, dest, dest);
    } else {
      emit_shifti(as, XOg_SHL, dest, 3);
      emit_rmrxo(as, XO_LEA, dest, dest, dest, XM_SCALE2, 0);
    }
    if (isk) {
      emit_gri(as, XG_ARITHi(XOg_AND), dest, (int32_t)khash);
      emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask));
    } else if (irt_isstr(kt)) {
      emit_rmro(as, XO_ARITH(XOg_AND), dest, key, offsetof(GCstr, hash));
      emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask));
    } else {  /* Must match with hashrot() in lj_tab.c. */
      emit_rmro(as, XO_ARITH(XOg_AND), dest, tab, offsetof(GCtab, hmask));
      emit_rr(as, XO_ARITH(XOg_SUB), dest, tmp);
      emit_shifti(as, XOg_ROL, tmp, HASH_ROT3);
      emit_rr(as, XO_ARITH(XOg_XOR), dest, tmp);
      emit_shifti(as, XOg_ROL, dest, HASH_ROT2);
      emit_rr(as, XO_ARITH(XOg_SUB), tmp, dest);
      emit_shifti(as, XOg_ROL, dest, HASH_ROT1);
      emit_rr(as, XO_ARITH(XOg_XOR), tmp, dest);
      if (irt_isnum(kt)) {
        emit_rr(as, XO_ARITH(XOg_ADD), dest, dest);
#if LJ_64
        emit_shifti(as, XOg_SHR|REX_64, dest, 32);
        emit_rr(as, XO_MOV, tmp, dest);
        emit_rr(as, XO_MOVDto, key|REX_64, dest);
#else
        emit_rmro(as, XO_MOV, dest, RID_ESP, ra_spill(as, irkey)+4);
        emit_rr(as, XO_MOVDto, key, tmp);
#endif
      } else {
        emit_rr(as, XO_MOV, tmp, key);
        emit_rmro(as, XO_LEA, dest, key, HASH_BIAS);
      }
    }
  }
}

static void asm_hrefk(ASMState *as, IRIns *ir)
{
  IRIns *kslot = IR(ir->op2);
  IRIns *irkey = IR(kslot->op1);
  int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
  Reg dest = ra_used(ir) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
  Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
#if !LJ_64
  MCLabel l_exit;
#endif
  lua_assert(ofs % sizeof(Node) == 0);
  if (ra_hasreg(dest)) {
    if (ofs != 0) {
      if (dest == node && !(as->flags & JIT_F_LEA_AGU))
        emit_gri(as, XG_ARITHi(XOg_ADD), dest, ofs);
      else
        emit_rmro(as, XO_LEA, dest, node, ofs);
    } else if (dest != node) {
      emit_rr(as, XO_MOV, dest, node);
    }
  }
  asm_guardcc(as, CC_NE);
#if LJ_64
  if (!irt_ispri(irkey->t)) {
    Reg key = ra_scratch(as, rset_exclude(RSET_GPR, node));
    emit_rmro(as, XO_CMP, key|REX_64, node,
              ofs + (int32_t)offsetof(Node, key.u64));
    lua_assert(irt_isnum(irkey->t) || irt_isgcv(irkey->t));
    /* Assumes -0.0 is already canonicalized to +0.0. */
    emit_loadu64(as, key, irt_isnum(irkey->t) ? ir_knum(irkey)->u64 :
                          ((uint64_t)irt_toitype(irkey->t) << 32) |
                          (uint64_t)(uint32_t)ptr2addr(ir_kgc(irkey)));
  } else {
    lua_assert(!irt_isnil(irkey->t));
    emit_i8(as, irt_toitype(irkey->t));
    emit_rmro(as, XO_ARITHi8, XOg_CMP, node,
              ofs + (int32_t)offsetof(Node, key.it));
  }
#else
  l_exit = emit_label(as);
  if (irt_isnum(irkey->t)) {
    /* Assumes -0.0 is already canonicalized to +0.0. */
    emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
               ofs + (int32_t)offsetof(Node, key.u32.lo),
               (int32_t)ir_knum(irkey)->u32.lo);
    emit_sjcc(as, CC_NE, l_exit);
    emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
               ofs + (int32_t)offsetof(Node, key.u32.hi),
               (int32_t)ir_knum(irkey)->u32.hi);
  } else {
    if (!irt_ispri(irkey->t)) {
      lua_assert(irt_isgcv(irkey->t));
      emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
                 ofs + (int32_t)offsetof(Node, key.gcr),
                 ptr2addr(ir_kgc(irkey)));
      emit_sjcc(as, CC_NE, l_exit);
    }
    lua_assert(!irt_isnil(irkey->t));
    emit_i8(as, irt_toitype(irkey->t));
    emit_rmro(as, XO_ARITHi8, XOg_CMP, node,
              ofs + (int32_t)offsetof(Node, key.it));
  }
#endif
}

static void asm_newref(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
  IRRef args[3];
  IRIns *irkey;
  Reg tmp;
  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ir->op1;      /* GCtab *t */
  args[2] = ASMREF_TMP1;  /* cTValue *key */
  asm_setupresult(as, ir, ci);  /* TValue * */
  asm_gencall(as, ci, args);
  tmp = ra_releasetmp(as, ASMREF_TMP1);
  irkey = IR(ir->op2);
  if (irt_isnum(irkey->t)) {
    /* For numbers use the constant itself or a spill slot as a TValue. */
    if (irref_isk(ir->op2))
      emit_loada(as, tmp, ir_knum(irkey));
    else
      emit_rmro(as, XO_LEA, tmp|REX_64, RID_ESP, ra_spill(as, irkey));
  } else {
    /* Otherwise use g->tmptv to hold the TValue. */
    if (!irref_isk(ir->op2)) {
      Reg src = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, tmp));
      emit_movtomro(as, REX_64IR(irkey, src), tmp, 0);
    } else if (!irt_ispri(irkey->t)) {
      emit_movmroi(as, tmp, 0, irkey->i);
    }
    if (!(LJ_64 && irt_islightud(irkey->t)))
      emit_movmroi(as, tmp, 4, irt_toitype(irkey->t));
    emit_loada(as, tmp, &J2G(as->J)->tmptv);
  }
}

static void asm_uref(ASMState *as, IRIns *ir)
{
  /* NYI: Check that UREFO is still open and not aliasing a slot. */
  Reg dest = ra_dest(as, ir, RSET_GPR);
  if (irref_isk(ir->op1)) {
    GCfunc *fn = ir_kfunc(IR(ir->op1));
    MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
    emit_rma(as, XO_MOV, dest, v);
  } else {
    Reg uv = ra_scratch(as, RSET_GPR);
    Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
    if (ir->o == IR_UREFC) {
      emit_rmro(as, XO_LEA, dest, uv, offsetof(GCupval, tv));
      asm_guardcc(as, CC_NE);
      emit_i8(as, 1);
      emit_rmro(as, XO_ARITHib, XOg_CMP, uv, offsetof(GCupval, closed));
    } else {
      emit_rmro(as, XO_MOV, dest, uv, offsetof(GCupval, v));
    }
    emit_rmro(as, XO_MOV, uv, func,
              (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
  }
}

static void asm_fref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_fusefref(as, ir, RSET_GPR);
  emit_mrm(as, XO_LEA, dest, RID_MRM);
}

static void asm_strref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_fusestrref(as, ir, RSET_GPR);
  if (as->mrm.base == RID_NONE)
    emit_loadi(as, dest, as->mrm.ofs);
  else if (as->mrm.base == dest && as->mrm.idx == RID_NONE)
    emit_gri(as, XG_ARITHi(XOg_ADD), dest, as->mrm.ofs);
  else
    emit_mrm(as, XO_LEA, dest, RID_MRM);
}

/* -- Loads and stores ---------------------------------------------------- */

static void asm_fxload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
  x86Op xo;
  if (ir->o == IR_FLOAD)
    asm_fusefref(as, ir, RSET_GPR);
  else
    asm_fusexref(as, ir->op1, RSET_GPR);
  /* ir->op2 is ignored -- unaligned loads are ok on x86. */
  switch (irt_type(ir->t)) {
  case IRT_I8: xo = XO_MOVSXb; break;
  case IRT_U8: xo = XO_MOVZXb; break;
  case IRT_I16: xo = XO_MOVSXw; break;
  case IRT_U16: xo = XO_MOVZXw; break;
  case IRT_NUM: xo = XMM_MOVRM(as); break;
  case IRT_FLOAT: xo = XO_MOVSS; break;
  default:
    if (LJ_64 && irt_is64(ir->t))
      dest |= REX_64;
    else
      lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
    xo = XO_MOV;
    break;
  }
  emit_mrm(as, xo, dest, RID_MRM);
}

static void asm_fxstore(ASMState *as, IRIns *ir)
{
  RegSet allow = RSET_GPR;
  Reg src = RID_NONE, osrc = RID_NONE;
  int32_t k = 0;
  /* The IRT_I16/IRT_U16 stores should never be simplified for constant
  ** values since mov word [mem], imm16 has a length-changing prefix.
  */
  if (irt_isi16(ir->t) || irt_isu16(ir->t) || irt_isfp(ir->t) ||
      !asm_isk32(as, ir->op2, &k)) {
    RegSet allow8 = irt_isfp(ir->t) ? RSET_FPR :
                    (irt_isi8(ir->t) || irt_isu8(ir->t)) ? RSET_GPR8 : RSET_GPR;
    src = osrc = ra_alloc1(as, ir->op2, allow8);
    if (!LJ_64 && !rset_test(allow8, src)) {  /* Already in wrong register. */
      rset_clear(allow, osrc);
      src = ra_scratch(as, allow8);
    }
    rset_clear(allow, src);
  }
  if (ir->o == IR_FSTORE)
    asm_fusefref(as, IR(ir->op1), allow);
  else
    asm_fusexref(as, ir->op1, allow);
  /* ir->op2 is ignored -- unaligned stores are ok on x86. */
  if (ra_hasreg(src)) {
    x86Op xo;
    switch (irt_type(ir->t)) {
    case IRT_I8: case IRT_U8: xo = XO_MOVtob; src |= FORCE_REX; break;
    case IRT_I16: case IRT_U16: xo = XO_MOVtow; break;
    case IRT_NUM: xo = XO_MOVSDto; break;
    case IRT_FLOAT: xo = XO_MOVSSto; break;
#if LJ_64
    case IRT_LIGHTUD: lua_assert(0);  /* NYI: mask 64 bit lightuserdata. */
#endif
    default:
      if (LJ_64 && irt_is64(ir->t))
        src |= REX_64;
      else
        lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
      xo = XO_MOVto;
      break;
    }
    emit_mrm(as, xo, src, RID_MRM);
    if (!LJ_64 && src != osrc) {
      ra_noweak(as, osrc);
      emit_rr(as, XO_MOV, src, osrc);
    }
  } else {
    if (irt_isi8(ir->t) || irt_isu8(ir->t)) {
      emit_i8(as, k);
      emit_mrm(as, XO_MOVmib, 0, RID_MRM);
    } else {
      lua_assert(irt_is64(ir->t) || irt_isint(ir->t) || irt_isu32(ir->t) ||
                 irt_isaddr(ir->t));
      emit_i32(as, k);
      emit_mrm(as, XO_MOVmi, REX_64IR(ir, 0), RID_MRM);
    }
  }
}
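
/* Editor's note (illustrative, not in the original source): the
** "length-changing prefix" above refers to encodings like
** mov word [mem], imm16 (66 C7 /0 iw), where the 0x66 operand-size prefix
** changes the length of the immediate. Such encodings stall the
** instruction-length pre-decoders on many x86 cores, so 16 bit constant
** stores always go through a register instead.
*/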
1316 | |||
1317 | #if LJ_64 | ||
1318 | static Reg asm_load_lightud64(ASMState *as, IRIns *ir, int typecheck) | ||
1319 | { | ||
1320 | if (ra_used(ir) || typecheck) { | ||
1321 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
1322 | if (typecheck) { | ||
1323 | Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, dest)); | ||
1324 | asm_guardcc(as, CC_NE); | ||
1325 | emit_i8(as, -2); | ||
1326 | emit_rr(as, XO_ARITHi8, XOg_CMP, tmp); | ||
1327 | emit_shifti(as, XOg_SAR|REX_64, tmp, 47); | ||
1328 | emit_rr(as, XO_MOV, tmp|REX_64, dest); | ||
1329 | } | ||
1330 | return dest; | ||
1331 | } else { | ||
1332 | return RID_NONE; | ||
1333 | } | ||
1334 | } | ||
1335 | #endif | ||
1336 | |||
1337 | static void asm_ahuvload(ASMState *as, IRIns *ir) | ||
1338 | { | ||
1339 | lua_assert(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) || | ||
1340 | (LJ_DUALNUM && irt_isint(ir->t))); | ||
1341 | #if LJ_64 | ||
1342 | if (irt_islightud(ir->t)) { | ||
1343 | Reg dest = asm_load_lightud64(as, ir, 1); | ||
1344 | if (ra_hasreg(dest)) { | ||
1345 | asm_fuseahuref(as, ir->op1, RSET_GPR); | ||
1346 | emit_mrm(as, XO_MOV, dest|REX_64, RID_MRM); | ||
1347 | } | ||
1348 | return; | ||
1349 | } else | ||
1350 | #endif | ||
1351 | if (ra_used(ir)) { | ||
1352 | RegSet allow = irt_isnum(ir->t) ? RSET_FPR : RSET_GPR; | ||
1353 | Reg dest = ra_dest(as, ir, allow); | ||
1354 | asm_fuseahuref(as, ir->op1, RSET_GPR); | ||
1355 | emit_mrm(as, dest < RID_MAX_GPR ? XO_MOV : XMM_MOVRM(as), dest, RID_MRM); | ||
1356 | } else { | ||
1357 | asm_fuseahuref(as, ir->op1, RSET_GPR); | ||
1358 | } | ||
1359 | /* Always do the type check, even if the load result is unused. */ | ||
1360 | as->mrm.ofs += 4; | ||
1361 | asm_guardcc(as, irt_isnum(ir->t) ? CC_AE : CC_NE); | ||
1362 | if (LJ_64 && irt_type(ir->t) >= IRT_NUM) { | ||
1363 | lua_assert(irt_isinteger(ir->t) || irt_isnum(ir->t)); | ||
1364 | emit_u32(as, LJ_TISNUM); | ||
1365 | emit_mrm(as, XO_ARITHi, XOg_CMP, RID_MRM); | ||
1366 | } else { | ||
1367 | emit_i8(as, irt_toitype(ir->t)); | ||
1368 | emit_mrm(as, XO_ARITHi8, XOg_CMP, RID_MRM); | ||
1369 | } | ||
1370 | } | ||
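/* Sketch of the type check above, assuming the usual TValue layout
** (value word at ofs, type tag word at ofs+4). The load is valid iff:
*/
static LJ_AINLINE int ahuv_tagcheck_sketch(uint32_t tag, IRType1 t)
{
  if (irt_isnum(t))
    return tag < (uint32_t)LJ_TISNUM;  /* CC_AE exits: non-number tags are higher. */
  return tag == (uint32_t)irt_toitype(t);  /* CC_NE exits on a tag mismatch. */
}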
1371 | |||
1372 | static void asm_ahustore(ASMState *as, IRIns *ir) | ||
1373 | { | ||
1374 | if (irt_isnum(ir->t)) { | ||
1375 | Reg src = ra_alloc1(as, ir->op2, RSET_FPR); | ||
1376 | asm_fuseahuref(as, ir->op1, RSET_GPR); | ||
1377 | emit_mrm(as, XO_MOVSDto, src, RID_MRM); | ||
1378 | #if LJ_64 | ||
1379 | } else if (irt_islightud(ir->t)) { | ||
1380 | Reg src = ra_alloc1(as, ir->op2, RSET_GPR); | ||
1381 | asm_fuseahuref(as, ir->op1, rset_exclude(RSET_GPR, src)); | ||
1382 | emit_mrm(as, XO_MOVto, src|REX_64, RID_MRM); | ||
1383 | #endif | ||
1384 | } else { | ||
1385 | IRIns *irr = IR(ir->op2); | ||
1386 | RegSet allow = RSET_GPR; | ||
1387 | Reg src = RID_NONE; | ||
1388 | if (!irref_isk(ir->op2)) { | ||
1389 | src = ra_alloc1(as, ir->op2, allow); | ||
1390 | rset_clear(allow, src); | ||
1391 | } | ||
1392 | asm_fuseahuref(as, ir->op1, allow); | ||
1393 | if (ra_hasreg(src)) { | ||
1394 | emit_mrm(as, XO_MOVto, src, RID_MRM); | ||
1395 | } else if (!irt_ispri(irr->t)) { | ||
1396 | lua_assert(irt_isaddr(ir->t) || (LJ_DUALNUM && irt_isinteger(ir->t))); | ||
1397 | emit_i32(as, irr->i); | ||
1398 | emit_mrm(as, XO_MOVmi, 0, RID_MRM); | ||
1399 | } | ||
1400 | as->mrm.ofs += 4; | ||
1401 | emit_i32(as, (int32_t)irt_toitype(ir->t)); | ||
1402 | emit_mrm(as, XO_MOVmi, 0, RID_MRM); | ||
1403 | } | ||
1404 | } | ||
1405 | |||
1406 | static void asm_sload(ASMState *as, IRIns *ir) | ||
1407 | { | ||
1408 | int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0); | ||
1409 | IRType1 t = ir->t; | ||
1410 | Reg base; | ||
1411 | lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */ | ||
1412 | lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK)); | ||
1413 | lua_assert(LJ_DUALNUM || | ||
1414 | !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME))); | ||
1415 | if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) { | ||
1416 | Reg left = ra_scratch(as, RSET_FPR); | ||
1417 | asm_tointg(as, ir, left); /* Frees dest reg. Do this before base alloc. */ | ||
1418 | base = ra_alloc1(as, REF_BASE, RSET_GPR); | ||
1419 | emit_rmro(as, XMM_MOVRM(as), left, base, ofs); | ||
1420 | t.irt = IRT_NUM; /* Continue with a regular number type check. */ | ||
1421 | #if LJ_64 | ||
1422 | } else if (irt_islightud(t)) { | ||
1423 | Reg dest = asm_load_lightud64(as, ir, (ir->op2 & IRSLOAD_TYPECHECK)); | ||
1424 | if (ra_hasreg(dest)) { | ||
1425 | base = ra_alloc1(as, REF_BASE, RSET_GPR); | ||
1426 | emit_rmro(as, XO_MOV, dest|REX_64, base, ofs); | ||
1427 | } | ||
1428 | return; | ||
1429 | #endif | ||
1430 | } else if (ra_used(ir)) { | ||
1431 | RegSet allow = irt_isnum(t) ? RSET_FPR : RSET_GPR; | ||
1432 | Reg dest = ra_dest(as, ir, allow); | ||
1433 | base = ra_alloc1(as, REF_BASE, RSET_GPR); | ||
1434 | lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t)); | ||
1435 | if ((ir->op2 & IRSLOAD_CONVERT)) { | ||
1436 | t.irt = irt_isint(t) ? IRT_NUM : IRT_INT; /* Check for original type. */ | ||
1437 | emit_rmro(as, irt_isint(t) ? XO_CVTSI2SD : XO_CVTSD2SI, dest, base, ofs); | ||
1438 | } else if (irt_isnum(t)) { | ||
1439 | emit_rmro(as, XMM_MOVRM(as), dest, base, ofs); | ||
1440 | } else { | ||
1441 | emit_rmro(as, XO_MOV, dest, base, ofs); | ||
1442 | } | ||
1443 | } else { | ||
1444 | if (!(ir->op2 & IRSLOAD_TYPECHECK)) | ||
1445 | return; /* No type check: avoid base alloc. */ | ||
1446 | base = ra_alloc1(as, REF_BASE, RSET_GPR); | ||
1447 | } | ||
1448 | if ((ir->op2 & IRSLOAD_TYPECHECK)) { | ||
1449 | /* Need type check, even if the load result is unused. */ | ||
1450 | asm_guardcc(as, irt_isnum(t) ? CC_AE : CC_NE); | ||
1451 | if (LJ_64 && irt_type(t) >= IRT_NUM) { | ||
1452 | lua_assert(irt_isinteger(t) || irt_isnum(t)); | ||
1453 | emit_u32(as, LJ_TISNUM); | ||
1454 | emit_rmro(as, XO_ARITHi, XOg_CMP, base, ofs+4); | ||
1455 | } else { | ||
1456 | emit_i8(as, irt_toitype(t)); | ||
1457 | emit_rmro(as, XO_ARITHi8, XOg_CMP, base, ofs+4); | ||
1458 | } | ||
1459 | } | ||
1460 | } | ||
1461 | |||
1462 | /* -- Allocations --------------------------------------------------------- */ | ||
1463 | |||
1464 | #if LJ_HASFFI | ||
1465 | static void asm_cnew(ASMState *as, IRIns *ir) | ||
1466 | { | ||
1467 | CTState *cts = ctype_ctsG(J2G(as->J)); | ||
1468 | CTypeID typeid = (CTypeID)IR(ir->op1)->i; | ||
1469 | CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ? | ||
1470 | lj_ctype_size(cts, typeid) : (CTSize)IR(ir->op2)->i; | ||
1471 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; | ||
1472 | IRRef args[2]; | ||
1473 | lua_assert(sz != CTSIZE_INVALID); | ||
1474 | |||
1475 | args[0] = ASMREF_L; /* lua_State *L */ | ||
1476 | args[1] = ASMREF_TMP1; /* MSize size */ | ||
1477 | as->gcsteps++; | ||
1478 | asm_setupresult(as, ir, ci); /* GCcdata * */ | ||
1479 | |||
1480 | /* Initialize immutable cdata object. */ | ||
1481 | if (ir->o == IR_CNEWI) { | ||
1482 | RegSet allow = (RSET_GPR & ~RSET_SCRATCH); | ||
1483 | #if LJ_64 | ||
1484 | Reg r64 = sz == 8 ? REX_64 : 0; | ||
1485 | if (irref_isk(ir->op2)) { | ||
1486 | IRIns *irk = IR(ir->op2); | ||
1487 | uint64_t k = irk->o == IR_KINT64 ? ir_k64(irk)->u64 : | ||
1488 | (uint64_t)(uint32_t)irk->i; | ||
1489 | if (sz == 4 || checki32((int64_t)k)) { | ||
1490 | emit_i32(as, (int32_t)k); | ||
1491 | emit_rmro(as, XO_MOVmi, r64, RID_RET, sizeof(GCcdata)); | ||
1492 | } else { | ||
1493 | emit_movtomro(as, RID_ECX + r64, RID_RET, sizeof(GCcdata)); | ||
1494 | emit_loadu64(as, RID_ECX, k); | ||
1495 | } | ||
1496 | } else { | ||
1497 | Reg r = ra_alloc1(as, ir->op2, allow); | ||
1498 | emit_movtomro(as, r + r64, RID_RET, sizeof(GCcdata)); | ||
1499 | } | ||
1500 | #else | ||
1501 | int32_t ofs = sizeof(GCcdata); | ||
1502 | if (sz == 8) { | ||
1503 | ofs += 4; ir++; | ||
1504 | lua_assert(ir->o == IR_HIOP); | ||
1505 | } | ||
1506 | do { | ||
1507 | if (irref_isk(ir->op2)) { | ||
1508 | emit_movmroi(as, RID_RET, ofs, IR(ir->op2)->i); | ||
1509 | } else { | ||
1510 | Reg r = ra_alloc1(as, ir->op2, allow); | ||
1511 | emit_movtomro(as, r, RID_RET, ofs); | ||
1512 | rset_clear(allow, r); | ||
1513 | } | ||
1514 | if (ofs == sizeof(GCcdata)) break; | ||
1515 | ofs -= 4; ir--; | ||
1516 | } while (1); | ||
1517 | #endif | ||
1518 | lua_assert(sz == 4 || sz == 8); | ||
1519 | } | ||
1520 | |||
1521 | /* Combine initialization of marked, gct and typeid. */ | ||
1522 | emit_movtomro(as, RID_ECX, RID_RET, offsetof(GCcdata, marked)); | ||
1523 | emit_gri(as, XG_ARITHi(XOg_OR), RID_ECX, | ||
1524 | (int32_t)((~LJ_TCDATA<<8)+(typeid<<16))); | ||
1525 | emit_gri(as, XG_ARITHi(XOg_AND), RID_ECX, LJ_GC_WHITES); | ||
1526 | emit_opgl(as, XO_MOVZXb, RID_ECX, gc.currentwhite); | ||
1527 | |||
1528 | asm_gencall(as, ci, args); | ||
1529 | emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)(sz+sizeof(GCcdata))); | ||
1530 | } | ||
1531 | #else | ||
1532 | #define asm_cnew(as, ir) ((void)0) | ||
1533 | #endif | ||
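/* The combined initialization above writes marked, gct and the 16 bit
** ctypeid with one 32 bit store (a sketch; little-endian layout assumed):
**
**   ecx = (g->gc.currentwhite & LJ_GC_WHITES)   -- marked byte
**       | ((uint8_t)~LJ_TCDATA << 8)            -- gct byte
**       | ((uint32_t)typeid << 16);             -- ctypeid halfword
**   *(uint32_t *)&cd->marked = ecx;
*/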
1534 | |||
1535 | /* -- Write barriers ------------------------------------------------------ */ | ||
1536 | |||
1537 | static void asm_tbar(ASMState *as, IRIns *ir) | ||
1538 | { | ||
1539 | Reg tab = ra_alloc1(as, ir->op1, RSET_GPR); | ||
1540 | Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, tab)); | ||
1541 | MCLabel l_end = emit_label(as); | ||
1542 | emit_movtomro(as, tmp, tab, offsetof(GCtab, gclist)); | ||
1543 | emit_setgl(as, tab, gc.grayagain); | ||
1544 | emit_getgl(as, tmp, gc.grayagain); | ||
1545 | emit_i8(as, ~LJ_GC_BLACK); | ||
1546 | emit_rmro(as, XO_ARITHib, XOg_AND, tab, offsetof(GCtab, marked)); | ||
1547 | emit_sjcc(as, CC_Z, l_end); | ||
1548 | emit_i8(as, LJ_GC_BLACK); | ||
1549 | emit_rmro(as, XO_GROUP3b, XOg_TEST, tab, offsetof(GCtab, marked)); | ||
1550 | } | ||
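/* Read in execution order, the barrier above is equivalent to this
** sketch (GCRef wrappers omitted): a black table is turned gray again
** and linked onto the grayagain list, anything else skips to l_end:
**
**   if (tab->marked & LJ_GC_BLACK) {
**     tab->marked &= (uint8_t)~LJ_GC_BLACK;
**     tab->gclist = g->gc.grayagain;
**     g->gc.grayagain = obj2gco(tab);
**   }
*/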
1551 | |||
1552 | static void asm_obar(ASMState *as, IRIns *ir) | ||
1553 | { | ||
1554 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv]; | ||
1555 | IRRef args[2]; | ||
1556 | MCLabel l_end; | ||
1557 | Reg obj; | ||
1558 | /* No need for other object barriers (yet). */ | ||
1559 | lua_assert(IR(ir->op1)->o == IR_UREFC); | ||
1560 | ra_evictset(as, RSET_SCRATCH); | ||
1561 | l_end = emit_label(as); | ||
1562 | args[0] = ASMREF_TMP1; /* global_State *g */ | ||
1563 | args[1] = ir->op1; /* TValue *tv */ | ||
1564 | asm_gencall(as, ci, args); | ||
1565 | emit_loada(as, ra_releasetmp(as, ASMREF_TMP1), J2G(as->J)); | ||
1566 | obj = IR(ir->op1)->r; | ||
1567 | emit_sjcc(as, CC_Z, l_end); | ||
1568 | emit_i8(as, LJ_GC_WHITES); | ||
1569 | if (irref_isk(ir->op2)) { | ||
1570 | GCobj *vp = ir_kgc(IR(ir->op2)); | ||
1571 | emit_rma(as, XO_GROUP3b, XOg_TEST, &vp->gch.marked); | ||
1572 | } else { | ||
1573 | Reg val = ra_alloc1(as, ir->op2, rset_exclude(RSET_SCRATCH&RSET_GPR, obj)); | ||
1574 | emit_rmro(as, XO_GROUP3b, XOg_TEST, val, (int32_t)offsetof(GChead, marked)); | ||
1575 | } | ||
1576 | emit_sjcc(as, CC_Z, l_end); | ||
1577 | emit_i8(as, LJ_GC_BLACK); | ||
1578 | emit_rmro(as, XO_GROUP3b, XOg_TEST, obj, | ||
1579 | (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv)); | ||
1580 | } | ||
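/* In execution order the code above reads as this sketch (GCRef
** wrappers omitted): only a black upvalue holding a white value needs
** the barrier, everything else falls through to l_end:
**
**   if ((uv->marked & LJ_GC_BLACK) && (gcV(tv)->gch.marked & LJ_GC_WHITES))
**     lj_gc_barrieruv(g, tv);
*/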
1581 | |||
1582 | /* -- FP/int arithmetic and logic operations ------------------------------ */ | ||
1583 | |||
1584 | /* Load reference onto x87 stack. Force a spill to memory if needed. */ | ||
1585 | static void asm_x87load(ASMState *as, IRRef ref) | ||
1586 | { | ||
1587 | IRIns *ir = IR(ref); | ||
1588 | if (ir->o == IR_KNUM) { | ||
1589 | cTValue *tv = ir_knum(ir); | ||
1590 | if (tvispzero(tv)) /* Use fldz only for +0. */ | ||
1591 | emit_x87op(as, XI_FLDZ); | ||
1592 | else if (tvispone(tv)) | ||
1593 | emit_x87op(as, XI_FLD1); | ||
1594 | else | ||
1595 | emit_rma(as, XO_FLDq, XOg_FLDq, tv); | ||
1596 | } else if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT && !ra_used(ir) && | ||
1597 | !irref_isk(ir->op1) && mayfuse(as, ir->op1)) { | ||
1598 | IRIns *iri = IR(ir->op1); | ||
1599 | emit_rmro(as, XO_FILDd, XOg_FILDd, RID_ESP, ra_spill(as, iri)); | ||
1600 | } else { | ||
1601 | emit_mrm(as, XO_FLDq, XOg_FLDq, asm_fuseload(as, ref, RSET_EMPTY)); | ||
1602 | } | ||
1603 | } | ||
1604 | |||
1605 | /* Try to rejoin pow from EXP2, MUL and LOG2 (if still unsplit). */ | ||
1606 | static int fpmjoin_pow(ASMState *as, IRIns *ir) | ||
1607 | { | ||
1608 | IRIns *irp = IR(ir->op1); | ||
1609 | if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) { | ||
1610 | IRIns *irpp = IR(irp->op1); | ||
1611 | if (irpp == ir-2 && irpp->o == IR_FPMATH && | ||
1612 | irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) { | ||
1613 | /* The modified regs must match the *.dasc implementation. */ | ||
1614 | RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM2+1)|RID2RSET(RID_EAX); | ||
1615 | IRIns *irx; | ||
1616 | if (ra_hasreg(ir->r)) | ||
1617 | rset_clear(drop, ir->r); /* Dest reg handled below. */ | ||
1618 | ra_evictset(as, drop); | ||
1619 | ra_destreg(as, ir, RID_XMM0); | ||
1620 | emit_call(as, lj_vm_pow_sse); | ||
1621 | irx = IR(irpp->op1); | ||
1622 | if (ra_noreg(irx->r) && ra_gethint(irx->r) == RID_XMM1) | ||
1623 | irx->r = RID_INIT; /* Avoid allocating xmm1 for x. */ | ||
1624 | ra_left(as, RID_XMM0, irpp->op1); | ||
1625 | ra_left(as, RID_XMM1, irp->op2); | ||
1626 | return 1; | ||
1627 | } | ||
1628 | } | ||
1629 | return 0; | ||
1630 | } | ||
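/* The rejoin relies on the identity x^y = 2^(y*log2(x)). A minimal
** sketch of what the folded sequence computes (assuming <math.h>; the
** emitted code calls lj_vm_pow_sse with x in xmm0 and y in xmm1):
*/
static double pow_via_exp2_sketch(double x, double y)
{
  return exp2(y * log2(x));  /* EXP2(MUL(LOG2(x), y)) for finite x > 0. */
}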
1631 | |||
1632 | static void asm_fpmath(ASMState *as, IRIns *ir) | ||
1633 | { | ||
1634 | IRFPMathOp fpm = ir->o == IR_FPMATH ? (IRFPMathOp)ir->op2 : IRFPM_OTHER; | ||
1635 | if (fpm == IRFPM_SQRT) { | ||
1636 | Reg dest = ra_dest(as, ir, RSET_FPR); | ||
1637 | Reg left = asm_fuseload(as, ir->op1, RSET_FPR); | ||
1638 | emit_mrm(as, XO_SQRTSD, dest, left); | ||
1639 | } else if (fpm <= IRFPM_TRUNC) { | ||
1640 | if (as->flags & JIT_F_SSE4_1) { /* SSE4.1 has a rounding instruction. */ | ||
1641 | Reg dest = ra_dest(as, ir, RSET_FPR); | ||
1642 | Reg left = asm_fuseload(as, ir->op1, RSET_FPR); | ||
1643 | /* ROUNDSD has a 4-byte opcode which doesn't fit in x86Op. | ||
1644 | ** Let's pretend it's a 3-byte opcode, and compensate afterwards. | ||
1645 | ** This is atrocious, but the alternatives are much worse. | ||
1646 | */ | ||
1647 | /* Round down/up/trunc == 1001/1010/1011. */ | ||
1648 | emit_i8(as, 0x09 + fpm); | ||
1649 | emit_mrm(as, XO_ROUNDSD, dest, left); | ||
1650 | if (LJ_64 && as->mcp[1] != (MCode)(XO_ROUNDSD >> 16)) { | ||
1651 | as->mcp[0] = as->mcp[1]; as->mcp[1] = 0x0f; /* Swap 0F and REX. */ | ||
1652 | } | ||
1653 | *--as->mcp = 0x66; /* 1st byte of ROUNDSD opcode. */ | ||
1654 | } else { /* Call helper functions for SSE2 variant. */ | ||
1655 | /* The modified regs must match the *.dasc implementation. */ | ||
1656 | RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX); | ||
1657 | if (ra_hasreg(ir->r)) | ||
1658 | rset_clear(drop, ir->r); /* Dest reg handled below. */ | ||
1659 | ra_evictset(as, drop); | ||
1660 | ra_destreg(as, ir, RID_XMM0); | ||
1661 | emit_call(as, fpm == IRFPM_FLOOR ? lj_vm_floor_sse : | ||
1662 | fpm == IRFPM_CEIL ? lj_vm_ceil_sse : lj_vm_trunc_sse); | ||
1663 | ra_left(as, RID_XMM0, ir->op1); | ||
1664 | } | ||
1665 | } else if (fpm == IRFPM_EXP2 && fpmjoin_pow(as, ir)) { | ||
1666 | /* Rejoined to pow(). */ | ||
1667 | } else { /* Handle x87 ops. */ | ||
1668 | int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */ | ||
1669 | Reg dest = ir->r; | ||
1670 | if (ra_hasreg(dest)) { | ||
1671 | ra_free(as, dest); | ||
1672 | ra_modified(as, dest); | ||
1673 | emit_rmro(as, XMM_MOVRM(as), dest, RID_ESP, ofs); | ||
1674 | } | ||
1675 | emit_rmro(as, XO_FSTPq, XOg_FSTPq, RID_ESP, ofs); | ||
1676 | switch (fpm) { /* st0 = lj_vm_*(st0) */ | ||
1677 | case IRFPM_EXP: emit_call(as, lj_vm_exp_x87); break; | ||
1678 | case IRFPM_EXP2: emit_call(as, lj_vm_exp2_x87); break; | ||
1679 | case IRFPM_SIN: emit_x87op(as, XI_FSIN); break; | ||
1680 | case IRFPM_COS: emit_x87op(as, XI_FCOS); break; | ||
1681 | case IRFPM_TAN: emit_x87op(as, XI_FPOP); emit_x87op(as, XI_FPTAN); break; | ||
1682 | case IRFPM_LOG: case IRFPM_LOG2: case IRFPM_LOG10: | ||
1683 | /* Note: the use of fyl2xp1 would be pointless here. When computing | ||
1684 | ** log(1.0+eps) the precision is already lost after 1.0 is added. | ||
1685 | ** Subtracting 1.0 won't recover it. OTOH math.log1p would make sense. | ||
1686 | */ | ||
1687 | emit_x87op(as, XI_FYL2X); break; | ||
1688 | case IRFPM_OTHER: | ||
1689 | switch (ir->o) { | ||
1690 | case IR_ATAN2: | ||
1691 | emit_x87op(as, XI_FPATAN); asm_x87load(as, ir->op2); break; | ||
1692 | case IR_LDEXP: | ||
1693 | emit_x87op(as, XI_FPOP1); emit_x87op(as, XI_FSCALE); break; | ||
1694 | default: lua_assert(0); break; | ||
1695 | } | ||
1696 | break; | ||
1697 | default: lua_assert(0); break; | ||
1698 | } | ||
1699 | asm_x87load(as, ir->op1); | ||
1700 | switch (fpm) { | ||
1701 | case IRFPM_LOG: emit_x87op(as, XI_FLDLN2); break; | ||
1702 | case IRFPM_LOG2: emit_x87op(as, XI_FLD1); break; | ||
1703 | case IRFPM_LOG10: emit_x87op(as, XI_FLDLG2); break; | ||
1704 | case IRFPM_OTHER: | ||
1705 | if (ir->o == IR_LDEXP) asm_x87load(as, ir->op2); | ||
1706 | break; | ||
1707 | default: break; | ||
1708 | } | ||
1709 | } | ||
1710 | } | ||
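/* All log variants above go through fyl2x, which computes st1*log2(st0);
** the constant loaded last selects the base (a sketch):
**
**   log(x)   = ln(2)    * log2(x)   -- XI_FLDLN2
**   log2(x)  = 1.0      * log2(x)   -- XI_FLD1
**   log10(x) = log10(2) * log2(x)   -- XI_FLDLG2
*/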
1711 | |||
1712 | static void asm_fppowi(ASMState *as, IRIns *ir) | ||
1713 | { | ||
1714 | /* The modified regs must match the *.dasc implementation. */ | ||
1715 | RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM1+1)|RID2RSET(RID_EAX); | ||
1716 | if (ra_hasreg(ir->r)) | ||
1717 | rset_clear(drop, ir->r); /* Dest reg handled below. */ | ||
1718 | ra_evictset(as, drop); | ||
1719 | ra_destreg(as, ir, RID_XMM0); | ||
1720 | emit_call(as, lj_vm_powi_sse); | ||
1721 | ra_left(as, RID_XMM0, ir->op1); | ||
1722 | ra_left(as, RID_EAX, ir->op2); | ||
1723 | } | ||
1724 | |||
1725 | #if LJ_64 && LJ_HASFFI | ||
1726 | static void asm_arith64(ASMState *as, IRIns *ir, IRCallID id) | ||
1727 | { | ||
1728 | const CCallInfo *ci = &lj_ir_callinfo[id]; | ||
1729 | IRRef args[2]; | ||
1730 | args[0] = ir->op1; | ||
1731 | args[1] = ir->op2; | ||
1732 | asm_setupresult(as, ir, ci); | ||
1733 | asm_gencall(as, ci, args); | ||
1734 | } | ||
1735 | #endif | ||
1736 | |||
1737 | static void asm_intmod(ASMState *as, IRIns *ir) | ||
1738 | { | ||
1739 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_vm_modi]; | ||
1740 | IRRef args[2]; | ||
1741 | args[0] = ir->op1; | ||
1742 | args[1] = ir->op2; | ||
1743 | asm_setupresult(as, ir, ci); | ||
1744 | asm_gencall(as, ci, args); | ||
1745 | } | ||
1746 | |||
1747 | static int asm_swapops(ASMState *as, IRIns *ir) | ||
1748 | { | ||
1749 | IRIns *irl = IR(ir->op1); | ||
1750 | IRIns *irr = IR(ir->op2); | ||
1751 | lua_assert(ra_noreg(irr->r)); | ||
1752 | if (!irm_iscomm(lj_ir_mode[ir->o])) | ||
1753 | return 0; /* Can't swap non-commutative operations. */ | ||
1754 | if (irref_isk(ir->op2)) | ||
1755 | return 0; /* Don't swap constants to the left. */ | ||
1756 | if (ra_hasreg(irl->r)) | ||
1757 | return 1; /* Swap if left already has a register. */ | ||
1758 | if (ra_samehint(ir->r, irr->r)) | ||
1759 | return 1; /* Swap if dest and right have matching hints. */ | ||
1760 | if (as->curins > as->loopref) { /* In variant part? */ | ||
1761 | if (ir->op2 < as->loopref && !irt_isphi(irr->t)) | ||
1762 | return 0; /* Keep invariants on the right. */ | ||
1763 | if (ir->op1 < as->loopref && !irt_isphi(irl->t)) | ||
1764 | return 1; /* Swap invariants to the right. */ | ||
1765 | } | ||
1766 | if (opisfusableload(irl->o)) | ||
1767 | return 1; /* Swap fusable loads to the right. */ | ||
1768 | return 0; /* Otherwise don't swap. */ | ||
1769 | } | ||
1770 | |||
1771 | static void asm_fparith(ASMState *as, IRIns *ir, x86Op xo) | ||
1772 | { | ||
1773 | IRRef lref = ir->op1; | ||
1774 | IRRef rref = ir->op2; | ||
1775 | RegSet allow = RSET_FPR; | ||
1776 | Reg dest; | ||
1777 | Reg right = IR(rref)->r; | ||
1778 | if (ra_hasreg(right)) { | ||
1779 | rset_clear(allow, right); | ||
1780 | ra_noweak(as, right); | ||
1781 | } | ||
1782 | dest = ra_dest(as, ir, allow); | ||
1783 | if (lref == rref) { | ||
1784 | right = dest; | ||
1785 | } else if (ra_noreg(right)) { | ||
1786 | if (asm_swapops(as, ir)) { | ||
1787 | IRRef tmp = lref; lref = rref; rref = tmp; | ||
1788 | } | ||
1789 | right = asm_fuseload(as, rref, rset_clear(allow, dest)); | ||
1790 | } | ||
1791 | emit_mrm(as, xo, dest, right); | ||
1792 | ra_left(as, dest, lref); | ||
1793 | } | ||
1794 | |||
1795 | static void asm_intarith(ASMState *as, IRIns *ir, x86Arith xa) | ||
1796 | { | ||
1797 | IRRef lref = ir->op1; | ||
1798 | IRRef rref = ir->op2; | ||
1799 | RegSet allow = RSET_GPR; | ||
1800 | Reg dest, right; | ||
1801 | int32_t k = 0; | ||
1802 | if (as->flagmcp == as->mcp) { /* Drop test r,r instruction. */ | ||
1803 | as->flagmcp = NULL; | ||
1804 | as->mcp += (LJ_64 && *as->mcp != XI_TEST) ? 3 : 2; | ||
1805 | } | ||
1806 | right = IR(rref)->r; | ||
1807 | if (ra_hasreg(right)) { | ||
1808 | rset_clear(allow, right); | ||
1809 | ra_noweak(as, right); | ||
1810 | } | ||
1811 | dest = ra_dest(as, ir, allow); | ||
1812 | if (lref == rref) { | ||
1813 | right = dest; | ||
1814 | } else if (ra_noreg(right) && !asm_isk32(as, rref, &k)) { | ||
1815 | if (asm_swapops(as, ir)) { | ||
1816 | IRRef tmp = lref; lref = rref; rref = tmp; | ||
1817 | } | ||
1818 | right = asm_fuseload(as, rref, rset_clear(allow, dest)); | ||
1819 | } | ||
1820 | if (irt_isguard(ir->t)) /* For IR_ADDOV etc. */ | ||
1821 | asm_guardcc(as, CC_O); | ||
1822 | if (xa != XOg_X_IMUL) { | ||
1823 | if (ra_hasreg(right)) | ||
1824 | emit_mrm(as, XO_ARITH(xa), REX_64IR(ir, dest), right); | ||
1825 | else | ||
1826 | emit_gri(as, XG_ARITHi(xa), REX_64IR(ir, dest), k); | ||
1827 | } else if (ra_hasreg(right)) { /* IMUL r, mrm. */ | ||
1828 | emit_mrm(as, XO_IMUL, REX_64IR(ir, dest), right); | ||
1829 | } else { /* IMUL r, r, k. */ | ||
1830 | /* NYI: use lea/shl/add/sub (FOLD only does 2^k) depending on CPU. */ | ||
1831 | Reg left = asm_fuseload(as, lref, RSET_GPR); | ||
1832 | x86Op xo; | ||
1833 | if (checki8(k)) { emit_i8(as, k); xo = XO_IMULi8; | ||
1834 | } else { emit_i32(as, k); xo = XO_IMULi; } | ||
1835 | emit_mrm(as, xo, REX_64IR(ir, dest), left); | ||
1836 | return; | ||
1837 | } | ||
1838 | ra_left(as, dest, lref); | ||
1839 | } | ||
1840 | |||
1841 | /* LEA is really a 4-operand ADD with an independent destination register, | ||
1842 | ** up to two source registers and an immediate. One register can be scaled | ||
1843 | ** by 1, 2, 4 or 8. This can be used to avoid moves or to fuse several | ||
1844 | ** instructions. | ||
1845 | ** | ||
1846 | ** Currently only a few common cases are supported: | ||
1847 | ** - 3-operand ADD: y = a+b; y = a+k with a and b already allocated | ||
1848 | ** - Left ADD fusion: y = (a+b)+k; y = (a+k)+b | ||
1849 | ** - Right ADD fusion: y = a+(b+k) | ||
1850 | ** The omitted variants have already been reduced by FOLD. | ||
1851 | ** | ||
1852 | ** There are more fusion opportunities, like gathering shifts or joining | ||
1853 | ** common references. But these are probably not worth the trouble, since | ||
1854 | ** array indexing is not decomposed and already makes use of all fields | ||
1855 | ** of the ModRM operand. | ||
1856 | */ | ||
1857 | static int asm_lea(ASMState *as, IRIns *ir) | ||
1858 | { | ||
1859 | IRIns *irl = IR(ir->op1); | ||
1860 | IRIns *irr = IR(ir->op2); | ||
1861 | RegSet allow = RSET_GPR; | ||
1862 | Reg dest; | ||
1863 | as->mrm.base = as->mrm.idx = RID_NONE; | ||
1864 | as->mrm.scale = XM_SCALE1; | ||
1865 | as->mrm.ofs = 0; | ||
1866 | if (ra_hasreg(irl->r)) { | ||
1867 | rset_clear(allow, irl->r); | ||
1868 | ra_noweak(as, irl->r); | ||
1869 | as->mrm.base = irl->r; | ||
1870 | if (irref_isk(ir->op2) || ra_hasreg(irr->r)) { | ||
1871 | /* The PHI renaming logic does a better job in some cases. */ | ||
1872 | if (ra_hasreg(ir->r) && | ||
1873 | ((irt_isphi(irl->t) && as->phireg[ir->r] == ir->op1) || | ||
1874 | (irt_isphi(irr->t) && as->phireg[ir->r] == ir->op2))) | ||
1875 | return 0; | ||
1876 | if (irref_isk(ir->op2)) { | ||
1877 | as->mrm.ofs = irr->i; | ||
1878 | } else { | ||
1879 | rset_clear(allow, irr->r); | ||
1880 | ra_noweak(as, irr->r); | ||
1881 | as->mrm.idx = irr->r; | ||
1882 | } | ||
1883 | } else if (irr->o == IR_ADD && mayfuse(as, ir->op2) && | ||
1884 | irref_isk(irr->op2)) { | ||
1885 | Reg idx = ra_alloc1(as, irr->op1, allow); | ||
1886 | rset_clear(allow, idx); | ||
1887 | as->mrm.idx = (uint8_t)idx; | ||
1888 | as->mrm.ofs = IR(irr->op2)->i; | ||
1889 | } else { | ||
1890 | return 0; | ||
1891 | } | ||
1892 | } else if (ir->op1 != ir->op2 && irl->o == IR_ADD && mayfuse(as, ir->op1) && | ||
1893 | (irref_isk(ir->op2) || irref_isk(irl->op2))) { | ||
1894 | Reg idx, base = ra_alloc1(as, irl->op1, allow); | ||
1895 | rset_clear(allow, base); | ||
1896 | as->mrm.base = (uint8_t)base; | ||
1897 | if (irref_isk(ir->op2)) { | ||
1898 | as->mrm.ofs = irr->i; | ||
1899 | idx = ra_alloc1(as, irl->op2, allow); | ||
1900 | } else { | ||
1901 | as->mrm.ofs = IR(irl->op2)->i; | ||
1902 | idx = ra_alloc1(as, ir->op2, allow); | ||
1903 | } | ||
1904 | rset_clear(allow, idx); | ||
1905 | as->mrm.idx = (uint8_t)idx; | ||
1906 | } else { | ||
1907 | return 0; | ||
1908 | } | ||
1909 | dest = ra_dest(as, ir, allow); | ||
1910 | emit_mrm(as, XO_LEA, dest, RID_MRM); | ||
1911 | return 1; /* Success. */ | ||
1912 | } | ||
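/* The fused cases above map onto these addressing forms (a sketch; the
** index register is left unscaled here):
**
**   y = a+b          ->  lea y, [a+b]
**   y = a+k          ->  lea y, [a+k]
**   y = (a+b)+k etc. ->  lea y, [a+b+k]
*/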
1913 | |||
1914 | static void asm_add(ASMState *as, IRIns *ir) | ||
1915 | { | ||
1916 | if (irt_isnum(ir->t)) | ||
1917 | asm_fparith(as, ir, XO_ADDSD); | ||
1918 | else if ((as->flags & JIT_F_LEA_AGU) || as->flagmcp == as->mcp || | ||
1919 | irt_is64(ir->t) || !asm_lea(as, ir)) | ||
1920 | asm_intarith(as, ir, XOg_ADD); | ||
1921 | } | ||
1922 | |||
1923 | static void asm_neg_not(ASMState *as, IRIns *ir, x86Group3 xg) | ||
1924 | { | ||
1925 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
1926 | emit_rr(as, XO_GROUP3, REX_64IR(ir, xg), dest); | ||
1927 | ra_left(as, dest, ir->op1); | ||
1928 | } | ||
1929 | |||
1930 | static void asm_min_max(ASMState *as, IRIns *ir, int cc) | ||
1931 | { | ||
1932 | Reg right, dest = ra_dest(as, ir, RSET_GPR); | ||
1933 | IRRef lref = ir->op1, rref = ir->op2; | ||
1934 | if (irref_isk(rref)) { lref = rref; rref = ir->op1; } | ||
1935 | right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, dest)); | ||
1936 | emit_rr(as, XO_CMOV + (cc<<24), REX_64IR(ir, dest), right); | ||
1937 | emit_rr(as, XO_CMP, REX_64IR(ir, dest), right); | ||
1938 | ra_left(as, dest, lref); | ||
1939 | } | ||
1940 | |||
1941 | static void asm_bitswap(ASMState *as, IRIns *ir) | ||
1942 | { | ||
1943 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
1944 | as->mcp = emit_op(XO_BSWAP + ((dest&7) << 24), | ||
1945 | REX_64IR(ir, 0), dest, 0, as->mcp, 1); | ||
1946 | ra_left(as, dest, ir->op1); | ||
1947 | } | ||
1948 | |||
1949 | static void asm_bitshift(ASMState *as, IRIns *ir, x86Shift xs) | ||
1950 | { | ||
1951 | IRRef rref = ir->op2; | ||
1952 | IRIns *irr = IR(rref); | ||
1953 | Reg dest; | ||
1954 | if (irref_isk(rref)) { /* Constant shifts. */ | ||
1955 | int shift; | ||
1956 | dest = ra_dest(as, ir, RSET_GPR); | ||
1957 | shift = irr->i & (irt_is64(ir->t) ? 63 : 31); | ||
1958 | switch (shift) { | ||
1959 | case 0: break; | ||
1960 | case 1: emit_rr(as, XO_SHIFT1, REX_64IR(ir, xs), dest); break; | ||
1961 | default: emit_shifti(as, REX_64IR(ir, xs), dest, shift); break; | ||
1962 | } | ||
1963 | } else { /* Variable shifts implicitly use register cl (i.e. ecx). */ | ||
1964 | Reg right; | ||
1965 | dest = ra_dest(as, ir, rset_exclude(RSET_GPR, RID_ECX)); | ||
1966 | if (dest == RID_ECX) { | ||
1967 | dest = ra_scratch(as, rset_exclude(RSET_GPR, RID_ECX)); | ||
1968 | emit_rr(as, XO_MOV, RID_ECX, dest); | ||
1969 | } | ||
1970 | right = irr->r; | ||
1971 | if (ra_noreg(right)) | ||
1972 | right = ra_allocref(as, rref, RID2RSET(RID_ECX)); | ||
1973 | else if (right != RID_ECX) | ||
1974 | ra_scratch(as, RID2RSET(RID_ECX)); | ||
1975 | emit_rr(as, XO_SHIFTcl, REX_64IR(ir, xs), dest); | ||
1976 | if (right != RID_ECX) { | ||
1977 | ra_noweak(as, right); | ||
1978 | emit_rr(as, XO_MOV, RID_ECX, right); | ||
1979 | } | ||
1980 | } | ||
1981 | ra_left(as, dest, ir->op1); | ||
1982 | /* | ||
1983 | ** Note: avoid using the flags resulting from a shift or rotate! | ||
1984 | ** All of them cause a partial flag stall, except for r,1 shifts | ||
1985 | ** (but not rotates). And a shift count of 0 leaves the flags unmodified. | ||
1986 | */ | ||
1987 | } | ||
1988 | |||
1989 | /* -- Comparisons --------------------------------------------------------- */ | ||
1990 | |||
1991 | /* Virtual flags for unordered FP comparisons. */ | ||
1992 | #define VCC_U 0x1000 /* Unordered. */ | ||
1993 | #define VCC_P 0x2000 /* Needs extra CC_P branch. */ | ||
1994 | #define VCC_S 0x4000 /* Swap avoids CC_P branch. */ | ||
1995 | #define VCC_PS (VCC_P|VCC_S) | ||
1996 | |||
1997 | /* Map of comparisons to flags. ORDER IR. */ | ||
1998 | #define COMPFLAGS(ci, cin, cu, cf) ((ci)+((cu)<<4)+((cin)<<8)+(cf)) | ||
1999 | static const uint16_t asm_compmap[IR_ABC+1] = { | ||
2000 | /* signed non-eq unsigned flags */ | ||
2001 | /* LT */ COMPFLAGS(CC_GE, CC_G, CC_AE, VCC_PS), | ||
2002 | /* GE */ COMPFLAGS(CC_L, CC_L, CC_B, 0), | ||
2003 | /* LE */ COMPFLAGS(CC_G, CC_G, CC_A, VCC_PS), | ||
2004 | /* GT */ COMPFLAGS(CC_LE, CC_L, CC_BE, 0), | ||
2005 | /* ULT */ COMPFLAGS(CC_AE, CC_A, CC_AE, VCC_U), | ||
2006 | /* UGE */ COMPFLAGS(CC_B, CC_B, CC_B, VCC_U|VCC_PS), | ||
2007 | /* ULE */ COMPFLAGS(CC_A, CC_A, CC_A, VCC_U), | ||
2008 | /* UGT */ COMPFLAGS(CC_BE, CC_B, CC_BE, VCC_U|VCC_PS), | ||
2009 | /* EQ */ COMPFLAGS(CC_NE, CC_NE, CC_NE, VCC_P), | ||
2010 | /* NE */ COMPFLAGS(CC_E, CC_E, CC_E, VCC_U|VCC_P), | ||
2011 | /* ABC */ COMPFLAGS(CC_BE, CC_B, CC_BE, VCC_U|VCC_PS) /* Same as UGT. */ | ||
2012 | }; | ||
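/* Decoding of the packed entries, as used by asm_comp and
** asm_comp_int64 below (a sketch):
**
**   cc & 15        -- exit condition for signed integer compares
**   (cc >> 4) & 15 -- exit condition for unsigned and FP compares
**   (cc >> 8) & 15 -- exit condition without equality (64 bit hiword)
**   cc & 0xf000    -- VCC_* virtual flags for unordered FP handling
*/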
2013 | |||
2014 | /* FP and integer comparisons. */ | ||
2015 | static void asm_comp(ASMState *as, IRIns *ir, uint32_t cc) | ||
2016 | { | ||
2017 | if (irt_isnum(ir->t)) { | ||
2018 | IRRef lref = ir->op1; | ||
2019 | IRRef rref = ir->op2; | ||
2020 | Reg left, right; | ||
2021 | MCLabel l_around; | ||
2022 | /* | ||
2023 | ** An extra CC_P branch is required to preserve ordered/unordered | ||
2024 | ** semantics for FP comparisons. This can be avoided by swapping | ||
2025 | ** the operands and inverting the condition (except for EQ and UNE). | ||
2026 | ** So always try to swap if possible. | ||
2027 | ** | ||
2028 | ** Another option would be to swap operands to achieve better memory | ||
2029 | ** operand fusion. But it's unlikely that this outweighs the cost | ||
2030 | ** of the extra branches. | ||
2031 | */ | ||
2032 | if (cc & VCC_S) { /* Swap? */ | ||
2033 | IRRef tmp = lref; lref = rref; rref = tmp; | ||
2034 | cc ^= (VCC_PS|(5<<4)); /* A <-> B, AE <-> BE, PS <-> none */ | ||
2035 | } | ||
2036 | left = ra_alloc1(as, lref, RSET_FPR); | ||
2037 | right = asm_fuseload(as, rref, rset_exclude(RSET_FPR, left)); | ||
2038 | l_around = emit_label(as); | ||
2039 | asm_guardcc(as, cc >> 4); | ||
2040 | if (cc & VCC_P) { /* Extra CC_P branch required? */ | ||
2041 | if (!(cc & VCC_U)) { | ||
2042 | asm_guardcc(as, CC_P); /* Branch to exit for ordered comparisons. */ | ||
2043 | } else if (l_around != as->invmcp) { | ||
2044 | emit_sjcc(as, CC_P, l_around); /* Branch around for unordered. */ | ||
2045 | } else { | ||
2046 | /* Patched to mcloop by asm_loop_fixup. */ | ||
2047 | as->loopinv = 2; | ||
2048 | if (as->realign) | ||
2049 | emit_sjcc(as, CC_P, as->mcp); | ||
2050 | else | ||
2051 | emit_jcc(as, CC_P, as->mcp); | ||
2052 | } | ||
2053 | } | ||
2054 | emit_mrm(as, XO_UCOMISD, left, right); | ||
2055 | } else { | ||
2056 | IRRef lref = ir->op1, rref = ir->op2; | ||
2057 | IROp leftop = (IROp)(IR(lref)->o); | ||
2058 | Reg r64 = REX_64IR(ir, 0); | ||
2059 | int32_t imm = 0; | ||
2060 | lua_assert(irt_is64(ir->t) || irt_isint(ir->t) || irt_isaddr(ir->t)); | ||
2061 | /* Swap constants (only for ABC) and fusable loads to the right. */ | ||
2062 | if (irref_isk(lref) || (!irref_isk(rref) && opisfusableload(leftop))) { | ||
2063 | if ((cc & 0xc) == 0xc) cc ^= 3; /* L <-> G, LE <-> GE */ | ||
2064 | else if ((cc & 0xa) == 0x2) cc ^= 5; /* A <-> B, AE <-> BE */ | ||
2065 | lref = ir->op2; rref = ir->op1; | ||
2066 | } | ||
2067 | if (asm_isk32(as, rref, &imm)) { | ||
2068 | IRIns *irl = IR(lref); | ||
2069 | /* Check whether we can use the test instruction. Not for unsigned, since CF=0. */ | ||
2070 | int usetest = (imm == 0 && (cc & 0xa) != 0x2); | ||
2071 | if (usetest && irl->o == IR_BAND && irl+1 == ir && !ra_used(irl)) { | ||
2072 | /* Combine comp(BAND(ref, r/imm), 0) into test mrm, r/imm. */ | ||
2073 | Reg right, left = RID_NONE; | ||
2074 | RegSet allow = RSET_GPR; | ||
2075 | if (!asm_isk32(as, irl->op2, &imm)) { | ||
2076 | left = ra_alloc1(as, irl->op2, allow); | ||
2077 | rset_clear(allow, left); | ||
2078 | } else { /* Try to fuse IRT_I8/IRT_U8 loads, too. See below. */ | ||
2079 | IRIns *irll = IR(irl->op1); | ||
2080 | if (opisfusableload((IROp)irll->o) && | ||
2081 | (irt_isi8(irll->t) || irt_isu8(irll->t))) { | ||
2082 | IRType1 origt = irll->t; /* Temporarily flip types. */ | ||
2083 | irll->t.irt = (irll->t.irt & ~IRT_TYPE) | IRT_INT; | ||
2084 | as->curins--; /* Skip to BAND to avoid failing in noconflict(). */ | ||
2085 | right = asm_fuseload(as, irl->op1, RSET_GPR); | ||
2086 | as->curins++; | ||
2087 | irll->t = origt; | ||
2088 | if (right != RID_MRM) goto test_nofuse; | ||
2089 | /* Fusion succeeded, emit test byte mrm, imm8. */ | ||
2090 | asm_guardcc(as, cc); | ||
2091 | emit_i8(as, (imm & 0xff)); | ||
2092 | emit_mrm(as, XO_GROUP3b, XOg_TEST, RID_MRM); | ||
2093 | return; | ||
2094 | } | ||
2095 | } | ||
2096 | as->curins--; /* Skip to BAND to avoid failing in noconflict(). */ | ||
2097 | right = asm_fuseload(as, irl->op1, allow); | ||
2098 | as->curins++; /* Undo the above. */ | ||
2099 | test_nofuse: | ||
2100 | asm_guardcc(as, cc); | ||
2101 | if (ra_noreg(left)) { | ||
2102 | emit_i32(as, imm); | ||
2103 | emit_mrm(as, XO_GROUP3, r64 + XOg_TEST, right); | ||
2104 | } else { | ||
2105 | emit_mrm(as, XO_TEST, r64 + left, right); | ||
2106 | } | ||
2107 | } else { | ||
2108 | Reg left; | ||
2109 | if (opisfusableload((IROp)irl->o) && | ||
2110 | ((irt_isu8(irl->t) && checku8(imm)) || | ||
2111 | ((irt_isi8(irl->t) || irt_isi16(irl->t)) && checki8(imm)) || | ||
2112 | (irt_isu16(irl->t) && checku16(imm) && checki8((int16_t)imm)))) { | ||
2113 | /* Only the IRT_INT case is fused by asm_fuseload. | ||
2114 | ** The IRT_I8/IRT_U8 loads and some IRT_I16/IRT_U16 loads | ||
2115 | ** are handled here. | ||
2116 | ** Note that cmp word [mem], imm16 should not be generated, | ||
2117 | ** since it has a length-changing prefix. Compares of a word | ||
2118 | ** against a sign-extended imm8 are ok, however. | ||
2119 | */ | ||
2120 | IRType1 origt = irl->t; /* Temporarily flip types. */ | ||
2121 | irl->t.irt = (irl->t.irt & ~IRT_TYPE) | IRT_INT; | ||
2122 | left = asm_fuseload(as, lref, RSET_GPR); | ||
2123 | irl->t = origt; | ||
2124 | if (left == RID_MRM) { /* Fusion succeeded? */ | ||
2125 | if (irt_isu8(irl->t) || irt_isu16(irl->t)) | ||
2126 | cc >>= 4; /* Need unsigned compare. */ | ||
2127 | asm_guardcc(as, cc); | ||
2128 | emit_i8(as, imm); | ||
2129 | emit_mrm(as, (irt_isi8(origt) || irt_isu8(origt)) ? | ||
2130 | XO_ARITHib : XO_ARITHiw8, r64 + XOg_CMP, RID_MRM); | ||
2131 | return; | ||
2132 | } /* Otherwise handle register case as usual. */ | ||
2133 | } else { | ||
2134 | left = asm_fuseload(as, lref, RSET_GPR); | ||
2135 | } | ||
2136 | asm_guardcc(as, cc); | ||
2137 | if (usetest && left != RID_MRM) { | ||
2138 | /* Use test r,r instead of cmp r,0. */ | ||
2139 | emit_rr(as, XO_TEST, r64 + left, left); | ||
2140 | if (irl+1 == ir) /* Referencing previous ins? */ | ||
2141 | as->flagmcp = as->mcp; /* Set flag to drop test r,r if possible. */ | ||
2142 | } else { | ||
2143 | emit_gmrmi(as, XG_ARITHi(XOg_CMP), r64 + left, imm); | ||
2144 | } | ||
2145 | } | ||
2146 | } else { | ||
2147 | Reg left = ra_alloc1(as, lref, RSET_GPR); | ||
2148 | Reg right = asm_fuseload(as, rref, rset_exclude(RSET_GPR, left)); | ||
2149 | asm_guardcc(as, cc); | ||
2150 | emit_mrm(as, XO_CMP, r64 + left, right); | ||
2151 | } | ||
2152 | } | ||
2153 | } | ||
2154 | |||
2155 | #if LJ_32 && LJ_HASFFI | ||
2156 | /* 64 bit integer comparisons in 32 bit mode. */ | ||
2157 | static void asm_comp_int64(ASMState *as, IRIns *ir) | ||
2158 | { | ||
2159 | uint32_t cc = asm_compmap[(ir-1)->o]; | ||
2160 | RegSet allow = RSET_GPR; | ||
2161 | Reg lefthi = RID_NONE, leftlo = RID_NONE; | ||
2162 | Reg righthi = RID_NONE, rightlo = RID_NONE; | ||
2163 | MCLabel l_around; | ||
2164 | x86ModRM mrm; | ||
2165 | |||
2166 | as->curins--; /* Skip loword ins. Avoids failing in noconflict(), too. */ | ||
2167 | |||
2168 | /* Allocate/fuse hiword operands. */ | ||
2169 | if (irref_isk(ir->op2)) { | ||
2170 | lefthi = asm_fuseload(as, ir->op1, allow); | ||
2171 | } else { | ||
2172 | lefthi = ra_alloc1(as, ir->op1, allow); | ||
2173 | righthi = asm_fuseload(as, ir->op2, allow); | ||
2174 | if (righthi == RID_MRM) { | ||
2175 | if (as->mrm.base != RID_NONE) rset_clear(allow, as->mrm.base); | ||
2176 | if (as->mrm.idx != RID_NONE) rset_clear(allow, as->mrm.idx); | ||
2177 | } else { | ||
2178 | rset_clear(allow, righthi); | ||
2179 | } | ||
2180 | } | ||
2181 | mrm = as->mrm; /* Save state for hiword instruction. */ | ||
2182 | |||
2183 | /* Allocate/fuse loword operands. */ | ||
2184 | if (irref_isk((ir-1)->op2)) { | ||
2185 | leftlo = asm_fuseload(as, (ir-1)->op1, allow); | ||
2186 | } else { | ||
2187 | leftlo = ra_alloc1(as, (ir-1)->op1, allow); | ||
2188 | rightlo = asm_fuseload(as, (ir-1)->op2, allow); | ||
2189 | if (rightlo == RID_MRM) { | ||
2190 | if (as->mrm.base != RID_NONE) rset_clear(allow, as->mrm.base); | ||
2191 | if (as->mrm.idx != RID_NONE) rset_clear(allow, as->mrm.idx); | ||
2192 | } else { | ||
2193 | rset_clear(allow, rightlo); | ||
2194 | } | ||
2195 | } | ||
2196 | |||
2197 | /* All register allocations must be performed _before_ this point. */ | ||
2198 | l_around = emit_label(as); | ||
2199 | as->invmcp = as->flagmcp = NULL; /* Cannot use these optimizations. */ | ||
2200 | |||
2201 | /* Loword comparison and branch. */ | ||
2202 | asm_guardcc(as, cc >> 4); /* Always use unsigned compare for loword. */ | ||
2203 | if (ra_noreg(rightlo)) { | ||
2204 | int32_t imm = IR((ir-1)->op2)->i; | ||
2205 | if (imm == 0 && ((cc >> 4) & 0xa) != 0x2 && leftlo != RID_MRM) | ||
2206 | emit_rr(as, XO_TEST, leftlo, leftlo); | ||
2207 | else | ||
2208 | emit_gmrmi(as, XG_ARITHi(XOg_CMP), leftlo, imm); | ||
2209 | } else { | ||
2210 | emit_mrm(as, XO_CMP, leftlo, rightlo); | ||
2211 | } | ||
2212 | |||
2213 | /* Hiword comparison and branches. */ | ||
2214 | if ((cc & 15) != CC_NE) | ||
2215 | emit_sjcc(as, CC_NE, l_around); /* Hiword unequal: skip loword compare. */ | ||
2216 | if ((cc & 15) != CC_E) | ||
2217 | asm_guardcc(as, cc >> 8); /* Hiword compare without equality check. */ | ||
2218 | as->mrm = mrm; /* Restore state. */ | ||
2219 | if (ra_noreg(righthi)) { | ||
2220 | int32_t imm = IR(ir->op2)->i; | ||
2221 | if (imm == 0 && (cc & 0xa) != 0x2 && lefthi != RID_MRM) | ||
2222 | emit_rr(as, XO_TEST, lefthi, lefthi); | ||
2223 | else | ||
2224 | emit_gmrmi(as, XG_ARITHi(XOg_CMP), lefthi, imm); | ||
2225 | } else { | ||
2226 | emit_mrm(as, XO_CMP, lefthi, righthi); | ||
2227 | } | ||
2228 | } | ||
2229 | #endif | ||
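/* A self-contained sketch of the decomposition used above, here for a
** signed 64 bit a < b built from 32 bit halves:
*/
static int lt64_sketch(int64_t a, int64_t b)
{
  int32_t ah = (int32_t)(a >> 32), bh = (int32_t)(b >> 32);
  if (ah != bh) return ah < bh;      /* Hiword compare, signed, sans equality. */
  return (uint32_t)a < (uint32_t)b;  /* Loword compare, always unsigned. */
}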
2230 | |||
2231 | /* -- Support for 64 bit ops in 32 bit mode ------------------------------- */ | ||
2232 | |||
2233 | /* Hiword op of a split 64 bit op. Previous op must be the loword op. */ | ||
2234 | static void asm_hiop(ASMState *as, IRIns *ir) | ||
2235 | { | ||
2236 | #if LJ_32 && LJ_HASFFI | ||
2237 | /* HIOP is marked as a store because it needs its own DCE logic. */ | ||
2238 | int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */ | ||
2239 | if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1; | ||
2240 | if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */ | ||
2241 | if (usehi || uselo) { | ||
2242 | if (irt_isfp(ir->t)) | ||
2243 | asm_conv_fp_int64(as, ir); | ||
2244 | else | ||
2245 | asm_conv_int64_fp(as, ir); | ||
2246 | } | ||
2247 | as->curins--; /* Always skip the CONV. */ | ||
2248 | return; | ||
2249 | } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */ | ||
2250 | asm_comp_int64(as, ir); | ||
2251 | return; | ||
2252 | } | ||
2253 | if (!usehi) return; /* Skip unused hiword op for all remaining ops. */ | ||
2254 | switch ((ir-1)->o) { | ||
2255 | case IR_ADD: | ||
2256 | as->flagmcp = NULL; | ||
2257 | as->curins--; | ||
2258 | asm_intarith(as, ir, XOg_ADC); | ||
2259 | asm_intarith(as, ir-1, XOg_ADD); | ||
2260 | break; | ||
2261 | case IR_SUB: | ||
2262 | as->flagmcp = NULL; | ||
2263 | as->curins--; | ||
2264 | asm_intarith(as, ir, XOg_SBB); | ||
2265 | asm_intarith(as, ir-1, XOg_SUB); | ||
2266 | break; | ||
2267 | case IR_NEG: { | ||
2268 | Reg dest = ra_dest(as, ir, RSET_GPR); | ||
2269 | emit_rr(as, XO_GROUP3, XOg_NEG, dest); | ||
2270 | emit_i8(as, 0); | ||
2271 | emit_rr(as, XO_ARITHi8, XOg_ADC, dest); | ||
2272 | ra_left(as, dest, ir->op1); | ||
2273 | as->curins--; | ||
2274 | asm_neg_not(as, ir-1, XOg_NEG); | ||
2275 | break; | ||
2276 | } | ||
2277 | case IR_CALLN: | ||
2278 | case IR_CALLXS: | ||
2279 | ra_destreg(as, ir, RID_RETHI); | ||
2280 | if (!uselo) | ||
2281 | ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark call as used. */ | ||
2282 | break; | ||
2283 | case IR_CNEWI: | ||
2284 | /* Nothing to do here. Handled by CNEWI itself. */ | ||
2285 | break; | ||
2286 | default: lua_assert(0); break; | ||
2287 | } | ||
2288 | #else | ||
2289 | UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused on x64 or without FFI. */ | ||
2290 | #endif | ||
2291 | } | ||
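/* Sketch of the carry chain behind the split 64 bit add above: ADD sets
** CF on the loword, ADC consumes it on the hiword (SUB/SBB mirror this):
*/
static void add64_sketch(uint32_t alo, uint32_t ahi, uint32_t blo,
                         uint32_t bhi, uint32_t *rlo, uint32_t *rhi)
{
  uint32_t lo = alo + blo;
  *rlo = lo;                      /* ADD: may produce a carry. */
  *rhi = ahi + bhi + (lo < alo);  /* ADC: adds the carry back in. */
}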
2292 | |||
2293 | /* -- Stack handling ------------------------------------------------------ */ | ||
2294 | |||
2295 | /* Check Lua stack size for overflow. Use exit handler as fallback. */ | ||
2296 | static void asm_stack_check(ASMState *as, BCReg topslot, | ||
2297 | IRIns *irp, RegSet allow, ExitNo exitno) | ||
2298 | { | ||
2299 | /* Try to get an unused temp. register, otherwise spill/restore eax. */ | ||
2300 | Reg pbase = irp ? irp->r : RID_BASE; | ||
2301 | Reg r = allow ? rset_pickbot(allow) : RID_EAX; | ||
2302 | emit_jcc(as, CC_B, exitstub_addr(as->J, exitno)); | ||
2303 | if (allow == RSET_EMPTY) /* Restore temp. register. */ | ||
2304 | emit_rmro(as, XO_MOV, r|REX_64, RID_ESP, 0); | ||
2305 | else | ||
2306 | ra_modified(as, r); | ||
2307 | emit_gri(as, XG_ARITHi(XOg_CMP), r, (int32_t)(8*topslot)); | ||
2308 | if (ra_hasreg(pbase) && pbase != r) | ||
2309 | emit_rr(as, XO_ARITH(XOg_SUB), r, pbase); | ||
2310 | else | ||
2311 | emit_rmro(as, XO_ARITH(XOg_SUB), r, RID_NONE, | ||
2312 | ptr2addr(&J2G(as->J)->jit_base)); | ||
2313 | emit_rmro(as, XO_MOV, r, r, offsetof(lua_State, maxstack)); | ||
2314 | emit_getgl(as, r, jit_L); | ||
2315 | if (allow == RSET_EMPTY) /* Spill temp. register. */ | ||
2316 | emit_rmro(as, XO_MOVto, r|REX_64, RID_ESP, 0); | ||
2317 | } | ||
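/* Read top-down, the emitted sequence is equivalent to this sketch
** (r is spilled/restored around it when no free register is available):
**
**   r = G(J)->jit_L->maxstack - base;
**   if ((uint32_t)r < 8*topslot)  -- one stack slot is 8 bytes
**     goto exit_stub;             -- CC_B, handled by the exit handler
*/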
2318 | |||
2319 | /* Restore Lua stack from on-trace state. */ | ||
2320 | static void asm_stack_restore(ASMState *as, SnapShot *snap) | ||
2321 | { | ||
2322 | SnapEntry *map = &as->T->snapmap[snap->mapofs]; | ||
2323 | SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1]; | ||
2324 | MSize n, nent = snap->nent; | ||
2325 | /* Store the value of all modified slots to the Lua stack. */ | ||
2326 | for (n = 0; n < nent; n++) { | ||
2327 | SnapEntry sn = map[n]; | ||
2328 | BCReg s = snap_slot(sn); | ||
2329 | int32_t ofs = 8*((int32_t)s-1); | ||
2330 | IRRef ref = snap_ref(sn); | ||
2331 | IRIns *ir = IR(ref); | ||
2332 | if ((sn & SNAP_NORESTORE)) | ||
2333 | continue; | ||
2334 | if (irt_isnum(ir->t)) { | ||
2335 | Reg src = ra_alloc1(as, ref, RSET_FPR); | ||
2336 | emit_rmro(as, XO_MOVSDto, src, RID_BASE, ofs); | ||
2337 | } else { | ||
2338 | lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || | ||
2339 | (LJ_DUALNUM && irt_isinteger(ir->t))); | ||
2340 | if (!irref_isk(ref)) { | ||
2341 | Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE)); | ||
2342 | emit_movtomro(as, REX_64IR(ir, src), RID_BASE, ofs); | ||
2343 | } else if (!irt_ispri(ir->t)) { | ||
2344 | emit_movmroi(as, RID_BASE, ofs, ir->i); | ||
2345 | } | ||
2346 | if ((sn & (SNAP_CONT|SNAP_FRAME))) { | ||
2347 | if (s != 0) /* Do not overwrite link to previous frame. */ | ||
2348 | emit_movmroi(as, RID_BASE, ofs+4, (int32_t)(*flinks--)); | ||
2349 | } else { | ||
2350 | if (!(LJ_64 && irt_islightud(ir->t))) | ||
2351 | emit_movmroi(as, RID_BASE, ofs+4, irt_toitype(ir->t)); | ||
2352 | } | ||
2353 | } | ||
2354 | checkmclim(as); | ||
2355 | } | ||
2356 | lua_assert(map + nent == flinks); | ||
2357 | } | ||
2358 | |||
2359 | /* -- GC handling --------------------------------------------------------- */ | ||
2360 | |||
2361 | /* Check GC threshold and do one or more GC steps. */ | ||
2362 | static void asm_gc_check(ASMState *as) | ||
2363 | { | ||
2364 | const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit]; | ||
2365 | IRRef args[2]; | ||
2366 | MCLabel l_end; | ||
2367 | Reg tmp; | ||
2368 | ra_evictset(as, RSET_SCRATCH); | ||
2369 | l_end = emit_label(as); | ||
2370 | /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */ | ||
2371 | asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */ | ||
2372 | emit_rr(as, XO_TEST, RID_RET, RID_RET); | ||
2373 | args[0] = ASMREF_TMP1; /* global_State *g */ | ||
2374 | args[1] = ASMREF_TMP2; /* MSize steps */ | ||
2375 | asm_gencall(as, ci, args); | ||
2376 | tmp = ra_releasetmp(as, ASMREF_TMP1); | ||
2377 | emit_loada(as, tmp, J2G(as->J)); | ||
2378 | emit_loadi(as, ra_releasetmp(as, ASMREF_TMP2), (int32_t)as->gcsteps); | ||
2379 | /* Jump around GC step if GC total < GC threshold. */ | ||
2380 | emit_sjcc(as, CC_B, l_end); | ||
2381 | emit_opgl(as, XO_ARITH(XOg_CMP), tmp, gc.threshold); | ||
2382 | emit_getgl(as, tmp, gc.total); | ||
2383 | as->gcsteps = 0; | ||
2384 | checkmclim(as); | ||
2385 | } | ||
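/* In execution order the code above reads as this sketch:
**
**   if (g->gc.total >= g->gc.threshold) {  -- CC_B jumps to l_end otherwise
**     if (lj_gc_step_jit(g, steps))        -- nonzero in GCSatomic/GCSfinalize
**       goto exit;                         -- guard CC_NE leaves the trace
**   }
*/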
2386 | |||
2387 | /* -- Loop handling ------------------------------------------------------- */ | ||
2388 | |||
2389 | /* Fixup the loop branch. */ | ||
2390 | static void asm_loop_fixup(ASMState *as) | ||
2391 | { | ||
2392 | MCode *p = as->mctop; | ||
2393 | MCode *target = as->mcp; | ||
2394 | if (as->realign) { /* Realigned loops use short jumps. */ | ||
2395 | as->realign = NULL; /* Stop another retry. */ | ||
2396 | lua_assert(((intptr_t)target & 15) == 0); | ||
2397 | if (as->loopinv) { /* Inverted loop branch? */ | ||
2398 | p -= 5; | ||
2399 | p[0] = XI_JMP; | ||
2400 | lua_assert(target - p >= -128); | ||
2401 | p[-1] = (MCode)(target - p); /* Patch sjcc. */ | ||
2402 | if (as->loopinv == 2) | ||
2403 | p[-3] = (MCode)(target - p + 2); /* Patch opt. short jp. */ | ||
2404 | } else { | ||
2405 | lua_assert(target - p >= -128); | ||
2406 | p[-1] = (MCode)(int8_t)(target - p); /* Patch short jmp. */ | ||
2407 | p[-2] = XI_JMPs; | ||
2408 | } | ||
2409 | } else { | ||
2410 | MCode *newloop; | ||
2411 | p[-5] = XI_JMP; | ||
2412 | if (as->loopinv) { /* Inverted loop branch? */ | ||
2413 | /* asm_guardcc already inverted the jcc and patched the jmp. */ | ||
2414 | p -= 5; | ||
2415 | newloop = target+4; | ||
2416 | *(int32_t *)(p-4) = (int32_t)(target - p); /* Patch jcc. */ | ||
2417 | if (as->loopinv == 2) { | ||
2418 | *(int32_t *)(p-10) = (int32_t)(target - p + 6); /* Patch opt. jp. */ | ||
2419 | newloop = target+8; | ||
2420 | } | ||
2421 | } else { /* Otherwise just patch jmp. */ | ||
2422 | *(int32_t *)(p-4) = (int32_t)(target - p); | ||
2423 | newloop = target+3; | ||
2424 | } | ||
2425 | /* Realign small loops and shorten the loop branch. */ | ||
2426 | if (newloop >= p - 128) { | ||
2427 | as->realign = newloop; /* Force a retry and remember alignment. */ | ||
2428 | as->curins = as->stopins; /* Abort asm_trace now. */ | ||
2429 | as->T->nins = as->orignins; /* Remove any added renames. */ | ||
2430 | } | ||
2431 | } | ||
2432 | } | ||
2433 | |||
2434 | /* -- Head of trace ------------------------------------------------------- */ | ||
2435 | |||
2436 | /* Coalesce BASE register for a root trace. */ | ||
2437 | static void asm_head_root_base(ASMState *as) | ||
2438 | { | ||
2439 | IRIns *ir = IR(REF_BASE); | ||
2440 | Reg r = ir->r; | ||
2441 | if (ra_hasreg(r)) { | ||
2442 | ra_free(as, r); | ||
2443 | if (rset_test(as->modset, r)) | ||
2444 | ir->r = RID_INIT; /* No inheritance for modified BASE register. */ | ||
2445 | if (r != RID_BASE) | ||
2446 | emit_rr(as, XO_MOV, r, RID_BASE); | ||
2447 | } | ||
2448 | } | ||
2449 | |||
2450 | /* Coalesce or reload BASE register for a side trace. */ | ||
2451 | static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow) | ||
2452 | { | ||
2453 | IRIns *ir = IR(REF_BASE); | ||
2454 | Reg r = ir->r; | ||
2455 | if (ra_hasreg(r)) { | ||
2456 | ra_free(as, r); | ||
2457 | if (rset_test(as->modset, r)) | ||
2458 | ir->r = RID_INIT; /* No inheritance for modified BASE register. */ | ||
2459 | if (irp->r == r) { | ||
2460 | rset_clear(allow, r); /* Mark same BASE register as coalesced. */ | ||
2461 | } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) { | ||
2462 | rset_clear(allow, irp->r); | ||
2463 | emit_rr(as, XO_MOV, r, irp->r); /* Move from coalesced parent reg. */ | ||
2464 | } else { | ||
2465 | emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */ | ||
2466 | } | ||
2467 | } | ||
2468 | return allow; | ||
2469 | } | ||
2470 | |||
2471 | /* -- Tail of trace ------------------------------------------------------- */ | ||
2472 | |||
2473 | /* Fixup the tail code. */ | ||
2474 | static void asm_tail_fixup(ASMState *as, TraceNo lnk) | ||
2475 | { | ||
2476 | /* Note: don't use as->mcp swap + emit_*: emit_op overwrites more bytes. */ | ||
2477 | MCode *p = as->mctop; | ||
2478 | MCode *target, *q; | ||
2479 | int32_t spadj = as->T->spadjust; | ||
2480 | if (spadj == 0) { | ||
2481 | p -= ((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0); | ||
2482 | } else { | ||
2483 | MCode *p1; | ||
2484 | /* Patch stack adjustment. */ | ||
2485 | if (checki8(spadj)) { | ||
2486 | p -= 3; | ||
2487 | p1 = p-6; | ||
2488 | *p1 = (MCode)spadj; | ||
2489 | } else { | ||
2490 | p1 = p-9; | ||
2491 | *(int32_t *)p1 = spadj; | ||
2492 | } | ||
2493 | if ((as->flags & JIT_F_LEA_AGU)) { | ||
2494 | #if LJ_64 | ||
2495 | p1[-4] = 0x48; | ||
2496 | #endif | ||
2497 | p1[-3] = (MCode)XI_LEA; | ||
2498 | p1[-2] = MODRM(checki8(spadj) ? XM_OFS8 : XM_OFS32, RID_ESP, RID_ESP); | ||
2499 | p1[-1] = MODRM(XM_SCALE1, RID_ESP, RID_ESP); | ||
2500 | } else { | ||
2501 | #if LJ_64 | ||
2502 | p1[-3] = 0x48; | ||
2503 | #endif | ||
2504 | p1[-2] = (MCode)(checki8(spadj) ? XI_ARITHi8 : XI_ARITHi); | ||
2505 | p1[-1] = MODRM(XM_REG, XOg_ADD, RID_ESP); | ||
2506 | } | ||
2507 | } | ||
2508 | /* Patch exit branch. */ | ||
2509 | target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp; | ||
2510 | *(int32_t *)(p-4) = jmprel(p, target); | ||
2511 | p[-5] = XI_JMP; | ||
2512 | /* Drop unused mcode tail. Fill with NOPs to make the prefetcher happy. */ | ||
2513 | for (q = as->mctop-1; q >= p; q--) | ||
2514 | *q = XI_NOP; | ||
2515 | as->mctop = p; | ||
2516 | } | ||
2517 | |||
2518 | /* Prepare tail of code. */ | ||
2519 | static void asm_tail_prep(ASMState *as) | ||
2520 | { | ||
2521 | MCode *p = as->mctop; | ||
2522 | /* Realign and leave room for backwards loop branch or exit branch. */ | ||
2523 | if (as->realign) { | ||
2524 | int i = ((int)(intptr_t)as->realign) & 15; | ||
2525 | /* Fill unused mcode tail with NOPs to make the prefetcher happy. */ | ||
2526 | while (i-- > 0) | ||
2527 | *--p = XI_NOP; | ||
2528 | as->mctop = p; | ||
2529 | p -= (as->loopinv ? 5 : 2); /* Space for short/near jmp. */ | ||
2530 | } else { | ||
2531 | p -= 5; /* Space for exit branch (near jmp). */ | ||
2532 | } | ||
2533 | if (as->loopref) { | ||
2534 | as->invmcp = as->mcp = p; | ||
2535 | } else { | ||
2536 | /* Leave room for ESP adjustment: add esp, imm or lea esp, [esp+imm] */ | ||
2537 | as->mcp = p - (((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0)); | ||
2538 | as->invmcp = NULL; | ||
2539 | } | ||
2540 | } | ||
2541 | |||
2542 | /* -- Instruction dispatch ------------------------------------------------ */ | ||
2543 | |||
2544 | /* Assemble a single instruction. */ | ||
2545 | static void asm_ir(ASMState *as, IRIns *ir) | ||
2546 | { | ||
2547 | switch ((IROp)ir->o) { | ||
2548 | /* Miscellaneous ops. */ | ||
2549 | case IR_LOOP: asm_loop(as); break; | ||
2550 | case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break; | ||
2551 | case IR_USE: | ||
2552 | ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break; | ||
2553 | case IR_PHI: asm_phi(as, ir); break; | ||
2554 | case IR_HIOP: asm_hiop(as, ir); break; | ||
2555 | |||
2556 | /* Guarded assertions. */ | ||
2557 | case IR_LT: case IR_GE: case IR_LE: case IR_GT: | ||
2558 | case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT: | ||
2559 | case IR_EQ: case IR_NE: case IR_ABC: | ||
2560 | asm_comp(as, ir, asm_compmap[ir->o]); | ||
2561 | break; | ||
2562 | |||
2563 | case IR_RETF: asm_retf(as, ir); break; | ||
2564 | |||
2565 | /* Bit ops. */ | ||
2566 | case IR_BNOT: asm_neg_not(as, ir, XOg_NOT); break; | ||
2567 | case IR_BSWAP: asm_bitswap(as, ir); break; | ||
2568 | |||
2569 | case IR_BAND: asm_intarith(as, ir, XOg_AND); break; | ||
2570 | case IR_BOR: asm_intarith(as, ir, XOg_OR); break; | ||
2571 | case IR_BXOR: asm_intarith(as, ir, XOg_XOR); break; | ||
2572 | |||
2573 | case IR_BSHL: asm_bitshift(as, ir, XOg_SHL); break; | ||
2574 | case IR_BSHR: asm_bitshift(as, ir, XOg_SHR); break; | ||
2575 | case IR_BSAR: asm_bitshift(as, ir, XOg_SAR); break; | ||
2576 | case IR_BROL: asm_bitshift(as, ir, XOg_ROL); break; | ||
2577 | case IR_BROR: asm_bitshift(as, ir, XOg_ROR); break; | ||
2578 | |||
2579 | /* Arithmetic ops. */ | ||
2580 | case IR_ADD: asm_add(as, ir); break; | ||
2581 | case IR_SUB: | ||
2582 | if (irt_isnum(ir->t)) | ||
2583 | asm_fparith(as, ir, XO_SUBSD); | ||
2584 | else /* Note: no need for LEA trick here. i-k is encoded as i+(-k). */ | ||
2585 | asm_intarith(as, ir, XOg_SUB); | ||
2586 | break; | ||
2587 | case IR_MUL: | ||
2588 | if (irt_isnum(ir->t)) | ||
2589 | asm_fparith(as, ir, XO_MULSD); | ||
2590 | else | ||
2591 | asm_intarith(as, ir, XOg_X_IMUL); | ||
2592 | break; | ||
2593 | case IR_DIV: | ||
2594 | #if LJ_64 && LJ_HASFFI | ||
2595 | if (!irt_isnum(ir->t)) | ||
2596 | asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 : | ||
2597 | IRCALL_lj_carith_divu64); | ||
2598 | else | ||
2599 | #endif | ||
2600 | asm_fparith(as, ir, XO_DIVSD); | ||
2601 | break; | ||
2602 | case IR_MOD: | ||
2603 | #if LJ_64 && LJ_HASFFI | ||
2604 | if (!irt_isint(ir->t)) | ||
2605 | asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 : | ||
2606 | IRCALL_lj_carith_modu64); | ||
2607 | else | ||
2608 | #endif | ||
2609 | asm_intmod(as, ir); | ||
2610 | break; | ||
2611 | |||
2612 | case IR_NEG: | ||
2613 | if (irt_isnum(ir->t)) | ||
2614 | asm_fparith(as, ir, XO_XORPS); | ||
2615 | else | ||
2616 | asm_neg_not(as, ir, XOg_NEG); | ||
2617 | break; | ||
2618 | case IR_ABS: asm_fparith(as, ir, XO_ANDPS); break; | ||
2619 | |||
2620 | case IR_MIN: | ||
2621 | if (irt_isnum(ir->t)) | ||
2622 | asm_fparith(as, ir, XO_MINSD); | ||
2623 | else | ||
2624 | asm_min_max(as, ir, CC_G); | ||
2625 | break; | ||
2626 | case IR_MAX: | ||
2627 | if (irt_isnum(ir->t)) | ||
2628 | asm_fparith(as, ir, XO_MAXSD); | ||
2629 | else | ||
2630 | asm_min_max(as, ir, CC_L); | ||
2631 | break; | ||
2632 | |||
2633 | case IR_FPMATH: case IR_ATAN2: case IR_LDEXP: | ||
2634 | asm_fpmath(as, ir); | ||
2635 | break; | ||
2636 | case IR_POW: | ||
2637 | #if LJ_64 && LJ_HASFFI | ||
2638 | if (!irt_isnum(ir->t)) | ||
2639 | asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 : | ||
2640 | IRCALL_lj_carith_powu64); | ||
2641 | else | ||
2642 | #endif | ||
2643 | asm_fppowi(as, ir); | ||
2644 | break; | ||
2645 | |||
2646 | /* Overflow-checking arithmetic ops. Note: don't use LEA here! */ | ||
2647 | case IR_ADDOV: asm_intarith(as, ir, XOg_ADD); break; | ||
2648 | case IR_SUBOV: asm_intarith(as, ir, XOg_SUB); break; | ||
2649 | case IR_MULOV: asm_intarith(as, ir, XOg_X_IMUL); break; | ||
2650 | |||
2651 | /* Memory references. */ | ||
2652 | case IR_AREF: asm_aref(as, ir); break; | ||
2653 | case IR_HREF: asm_href(as, ir); break; | ||
2654 | case IR_HREFK: asm_hrefk(as, ir); break; | ||
2655 | case IR_NEWREF: asm_newref(as, ir); break; | ||
2656 | case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break; | ||
2657 | case IR_FREF: asm_fref(as, ir); break; | ||
2658 | case IR_STRREF: asm_strref(as, ir); break; | ||
2659 | |||
2660 | /* Loads and stores. */ | ||
2661 | case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD: | ||
2662 | asm_ahuvload(as, ir); | ||
2663 | break; | ||
2664 | case IR_FLOAD: case IR_XLOAD: asm_fxload(as, ir); break; | ||
2665 | case IR_SLOAD: asm_sload(as, ir); break; | ||
2666 | |||
2667 | case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break; | ||
2668 | case IR_FSTORE: case IR_XSTORE: asm_fxstore(as, ir); break; | ||
2669 | |||
2670 | /* Allocations. */ | ||
2671 | case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break; | ||
2672 | case IR_TNEW: asm_tnew(as, ir); break; | ||
2673 | case IR_TDUP: asm_tdup(as, ir); break; | ||
2674 | case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break; | ||
2675 | |||
2676 | /* Write barriers. */ | ||
2677 | case IR_TBAR: asm_tbar(as, ir); break; | ||
2678 | case IR_OBAR: asm_obar(as, ir); break; | ||
2679 | |||
2680 | /* Type conversions. */ | ||
2681 | case IR_TOBIT: asm_tobit(as, ir); break; | ||
2682 | case IR_CONV: asm_conv(as, ir); break; | ||
2683 | case IR_TOSTR: asm_tostr(as, ir); break; | ||
2684 | case IR_STRTO: asm_strto(as, ir); break; | ||
2685 | |||
2686 | /* Calls. */ | ||
2687 | case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break; | ||
2688 | case IR_CALLXS: asm_callx(as, ir); break; | ||
2689 | case IR_CARG: break; | ||
2690 | |||
2691 | default: | ||
2692 | setintV(&as->J->errinfo, ir->o); | ||
2693 | lj_trace_err_info(as->J, LJ_TRERR_NYIIR); | ||
2694 | break; | ||
2695 | } | ||
2696 | } | ||
2697 | |||
2698 | /* -- Trace setup --------------------------------------------------------- */ | ||
2699 | |||
2700 | /* Ensure there are enough stack slots for call arguments. */ | ||
2701 | static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci) | ||
2702 | { | ||
2703 | IRRef args[CCI_NARGS_MAX]; | ||
2704 | int nslots; | ||
2705 | asm_collectargs(as, ir, ci, args); | ||
2706 | nslots = asm_count_call_slots(as, ci, args); | ||
2707 | if (nslots > as->evenspill) /* Leave room for args in stack slots. */ | ||
2708 | as->evenspill = nslots; | ||
2709 | #if LJ_64 | ||
2710 | return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET); | ||
2711 | #else | ||
2712 | return irt_isfp(ir->t) ? REGSP_INIT : REGSP_HINT(RID_RET); | ||
2713 | #endif | ||
2714 | } | ||
2715 | |||
2716 | /* Target-specific setup. */ | ||
2717 | static void asm_setup_target(ASMState *as) | ||
2718 | { | ||
2719 | asm_exitstub_setup(as, as->T->nsnap); | ||
2720 | } | ||
2721 | |||
2722 | /* -- Trace patching ------------------------------------------------------ */ | ||
2723 | |||
2724 | /* Patch exit jumps of existing machine code to a new target. */ | ||
2725 | void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target) | ||
2726 | { | ||
2727 | MCode *p = T->mcode; | ||
2728 | MCode *mcarea = lj_mcode_patch(J, p, 0); | ||
2729 | MSize len = T->szmcode; | ||
2730 | MCode *px = exitstub_addr(J, exitno) - 6; | ||
2731 | MCode *pe = p+len-6; | ||
2732 | uint32_t stateaddr = u32ptr(&J2G(J)->vmstate); | ||
2733 | if (len > 5 && p[len-5] == XI_JMP && p+len-6 + *(int32_t *)(p+len-4) == px) | ||
2734 | *(int32_t *)(p+len-4) = jmprel(p+len, target); | ||
2735 | /* Do not patch parent exit for a stack check. Skip beyond vmstate update. */ | ||
2736 | for (; p < pe; p++) | ||
2737 | if (*(uint32_t *)(p+(LJ_64 ? 3 : 2)) == stateaddr && p[0] == XI_MOVmi) { | ||
2738 | p += LJ_64 ? 11 : 10; | ||
2739 | break; | ||
2740 | } | ||
2741 | lua_assert(p < pe); | ||
2742 | for (; p < pe; p++) { | ||
2743 | if ((*(uint16_t *)p & 0xf0ff) == 0x800f && p + *(int32_t *)(p+2) == px) { | ||
2744 | *(int32_t *)(p+2) = jmprel(p+6, target); | ||
2745 | p += 5; | ||
2746 | } | ||
2747 | } | ||
2748 | lj_mcode_sync(T->mcode, T->mcode + T->szmcode); | ||
2749 | lj_mcode_patch(J, mcarea, 1); | ||
2750 | } | ||
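/* Note on the opcode test above: read little-endian, the two bytes of a
** near jcc (0F 80..0F 8F) load as 0x8x0f, so masking with 0xf0ff and
** comparing against 0x800f matches any of the sixteen conditions.
*/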
2751 | |||