/[pearpc]/src/cpu/cpu_jitc_x86/ppc_alu.cc
This is a repository of my old source code which isn't updated any more. Go to git.rot13.org for current projects!
ViewVC logotype

Contents of /src/cpu/cpu_jitc_x86/ppc_alu.cc

Parent Directory Parent Directory | Revision Log Revision Log


Revision 1 - (show annotations)
Wed Sep 5 17:11:21 2007 UTC (16 years, 6 months ago) by dpavlin
File size: 69455 byte(s)
import upstream CVS
1 /*
2 * PearPC
3 * ppc_alu.cc
4 *
5 * Copyright (C) 2003-2005 Sebastian Biallas (sb@biallas.net)
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21 #include "debug/tracers.h"
22 #include "ppc_alu.h"
23 #include "ppc_dec.h"
24 #include "ppc_exc.h"
25 #include "ppc_cpu.h"
26 #include "ppc_opc.h"
27 #include "ppc_tools.h"
28
29 #include "jitc.h"
30 #include "jitc_asm.h"
31 #include "jitc_debug.h"
32 #include "x86asm.h"
33
34 static inline uint32 ppc_mask(int MB, int ME)
35 {
36 uint32 mask;
37 if (MB <= ME) {
38 if (ME-MB == 31) {
39 mask = 0xffffffff;
40 } else {
41 mask = ((1<<(ME-MB+1))-1)<<(31-ME);
42 }
43 } else {
44 mask = ppc_word_rotl((1<<(32-MB+ME+1))-1, 31-ME);
45 }
46 return mask;
47 }
48
/*
 * Shared code generator for ori/oris/xori/xoris.
 * opc is the native ALU operation to emit, imm the (possibly already
 * shifted) immediate, rS the source and rA the destination GPR.
 * With imm == 0 the PPC operation degenerates to a plain register move
 * (or a complete no-op when rA == rS), so no ALU instruction is emitted
 * and the cached native carry/flags are left alone.
 */
static JITCFlow ppc_opc_gen_ori_oris_xori_xoris(X86ALUopc opc, uint32 imm, int rS, int rA)
{
	if (imm) {
		// the ALU op overwrites the native flags and carry
		jitcClobberCarryAndFlags();
		if (rA == rS) {
			// in-place: op directly on the mapped register
			NativeReg a = jitcGetClientRegisterDirty(PPC_GPR(rA));
			asmALURegImm(opc, a, imm);
		} else {
			// copy rS into rA's register, then apply the op
			NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
			NativeReg a = jitcMapClientRegisterDirty(PPC_GPR(rA));
			asmALURegReg(X86_MOV, a, s);
			asmALURegImm(opc, a, imm);
		}
	} else {
		if (rA == rS) {
			/* nop */
		} else {
			/* mov */
			NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
			NativeReg a = jitcMapClientRegisterDirty(PPC_GPR(rA));
			asmALURegReg(X86_MOV, a, s);
		}
	}
	return flowContinue;
}
74
75 /*
76 * addx Add
77 * .422
78 */
79 void ppc_opc_addx()
80 {
81 int rD, rA, rB;
82 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
83 gCPU.gpr[rD] = gCPU.gpr[rA] + gCPU.gpr[rB];
84 if (gCPU.current_opc & PPC_OPC_Rc) {
85 // update cr0 flags
86 ppc_update_cr0(gCPU.gpr[rD]);
87 }
88 }
/*
 * Code generator for add without Rc: rD <- rA + rB, no CR0/carry
 * side effects in the PPC model.
 */
static JITCFlow ppc_opc_gen_add()
{
	int rD, rA, rB;
	PPC_OPC_TEMPL_XO(gJITC.current_opc, rD, rA, rB);
	NativeReg a = jitcGetClientRegister(PPC_GPR(rA));
	NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
	if (rD == rA) {
		// add rA, rA, rB
		// in-place ADD overwrites the native carry/flags
		jitcClobberCarryAndFlags();
		asmALURegReg(X86_ADD, a, b);
		jitcDirtyRegister(a);
	} else if (rD == rB) {
		// add rB, rA, rB
		jitcClobberCarryAndFlags();
		asmALURegReg(X86_ADD, b, a);
		jitcDirtyRegister(b);
	} else {
		// add rD, rA, rB
		// three-operand case: LEA leaves the native flags untouched,
		// so nothing needs to be clobbered here
		NativeReg result = jitcMapClientRegisterDirty(PPC_GPR(rD));
		// lea result, [a+1*b+0]
		byte modrm[6];
		asmLEA(result, modrm, x86_mem_sib_r(modrm, a, 1, b, 0));
		// result already is dirty
	}
	return flowContinue;
}
/*
 * Code generator for add. (Rc set): rD <- rA + rB, with CR0 mapped
 * from the native flags the ADD produces.
 */
static JITCFlow ppc_opc_gen_addp()
{
	int rD, rA, rB;
	PPC_OPC_TEMPL_XO(gJITC.current_opc, rD, rA, rB);
	// the ADD overwrites the native carry; its flags become CR0
	jitcClobberCarry();
	NativeReg a = jitcGetClientRegister(PPC_GPR(rA));
	NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
	if (rD == rA) {
		// add r1, r1, r2
		asmALURegReg(X86_ADD, a, b);
		jitcDirtyRegister(a);
	} else if (rD == rB) {
		// add r1, r2, r1
		asmALURegReg(X86_ADD, b, a);
		jitcDirtyRegister(b);
	} else {
		// add r3, r1, r2
		NativeReg result = jitcMapClientRegisterDirty(PPC_GPR(rD));
		// lea doesn't update the flags
		asmALURegReg(X86_MOV, result, a);
		asmALURegReg(X86_ADD, result, b);
	}
	jitcMapFlagsDirty();
	return flowContinue;
}
140 JITCFlow ppc_opc_gen_addx()
141 {
142 if (gJITC.current_opc & PPC_OPC_Rc) {
143 return ppc_opc_gen_addp();
144 } else {
145 return ppc_opc_gen_add();
146 }
147 }
148 /*
149 * addox Add with Overflow
150 * .422
151 */
152 void ppc_opc_addox()
153 {
154 int rD, rA, rB;
155 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
156 gCPU.gpr[rD] = gCPU.gpr[rA] + gCPU.gpr[rB];
157 if (gCPU.current_opc & PPC_OPC_Rc) {
158 // update cr0 flags
159 ppc_update_cr0(gCPU.gpr[rD]);
160 }
161 // update XER flags
162 PPC_ALU_ERR("addox unimplemented\n");
163 }
164 /*
165 * addcx Add Carrying
166 * .423
167 */
168 void ppc_opc_addcx()
169 {
170 int rD, rA, rB;
171 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
172 uint32 a = gCPU.gpr[rA];
173 gCPU.gpr[rD] = a + gCPU.gpr[rB];
174 gCPU.xer_ca = (gCPU.gpr[rD] < a);
175 if (gCPU.current_opc & PPC_OPC_Rc) {
176 // update cr0 flags
177 ppc_update_cr0(gCPU.gpr[rD]);
178 }
179 }
/*
 * Code generator for addc[.]: rD <- rA + rB with XER[CA] taken from
 * the native carry flag; with Rc also CR0 from the native flags.
 */
JITCFlow ppc_opc_gen_addcx()
{
	int rD, rA, rB;
	PPC_OPC_TEMPL_XO(gJITC.current_opc, rD, rA, rB);
	// without Rc the flags produced by the ADD aren't needed
	if (!(gJITC.current_opc & PPC_OPC_Rc)) jitcClobberFlags();
	NativeReg a = jitcGetClientRegister(PPC_GPR(rA));
	NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
	if (rD == rA) {
		// add r1, r1, r2
		asmALURegReg(X86_ADD, a, b);
		jitcDirtyRegister(a);
	} else if (rD == rB) {
		// add r1, r2, r1
		asmALURegReg(X86_ADD, b, a);
		jitcDirtyRegister(b);
	} else {
		// add r3, r1, r2
		NativeReg result = jitcMapClientRegisterDirty(PPC_GPR(rD));
		// lea doesn't update the carry
		asmALURegReg(X86_MOV, result, a);
		asmALURegReg(X86_ADD, result, b);
	}
	jitcMapCarryDirty();
	if (gJITC.current_opc & PPC_OPC_Rc) jitcMapFlagsDirty();
	return flowContinue;
}
206 /*
207 * addcox Add Carrying with Overflow
208 * .423
209 */
210 void ppc_opc_addcox()
211 {
212 int rD, rA, rB;
213 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
214 uint32 a = gCPU.gpr[rA];
215 gCPU.gpr[rD] = a + gCPU.gpr[rB];
216 gCPU.xer_ca = (gCPU.gpr[rD] < a);
217 if (gCPU.current_opc & PPC_OPC_Rc) {
218 // update cr0 flags
219 ppc_update_cr0(gCPU.gpr[rD]);
220 }
221 // update XER flags
222 PPC_ALU_ERR("addcox unimplemented\n");
223 }
224 /*
225 * addex Add Extended
226 * .424
227 */
228 void ppc_opc_addex()
229 {
230 int rD, rA, rB;
231 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
232 uint32 a = gCPU.gpr[rA];
233 uint32 b = gCPU.gpr[rB];
234 uint32 ca = gCPU.xer_ca;
235 gCPU.gpr[rD] = a + b + ca;
236 gCPU.xer_ca = ppc_carry_3(a, b, ca);
237 if (gCPU.current_opc & PPC_OPC_Rc) {
238 // update cr0 flags
239 ppc_update_cr0(gCPU.gpr[rD]);
240 }
241 }
/*
 * Code generator for adde[.]: rD <- rA + rB + XER[CA] via native ADC;
 * the resulting native carry becomes the new XER[CA], and with Rc the
 * native flags become CR0.
 */
JITCFlow ppc_opc_gen_addex()
{
	int rD, rA, rB;
	PPC_OPC_TEMPL_XO(gJITC.current_opc, rD, rA, rB);
	// without Rc the flags produced by the ADC aren't needed
	if (!(gJITC.current_opc & PPC_OPC_Rc)) jitcClobberFlags();
	// load the client XER[CA] into the native carry flag for ADC
	jitcGetClientCarry();
	NativeReg a = jitcGetClientRegister(PPC_GPR(rA));
	NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
	if (rD == rA) {
		// add r1, r1, r2
		asmALURegReg(X86_ADC, a, b);
		jitcDirtyRegister(a);
	} else if (rD == rB) {
		// add r1, r2, r1
		asmALURegReg(X86_ADC, b, a);
		jitcDirtyRegister(b);
	} else {
		// add r3, r1, r2
		NativeReg result = jitcMapClientRegisterDirty(PPC_GPR(rD));
		// lea doesn't update the carry
		asmALURegReg(X86_MOV, result, a);
		asmALURegReg(X86_ADC, result, b);
	}
	jitcMapCarryDirty();
	if (gJITC.current_opc & PPC_OPC_Rc) jitcMapFlagsDirty();
	return flowContinue;
}
269 /*
270 * addeox Add Extended with Overflow
271 * .424
272 */
273 void ppc_opc_addeox()
274 {
275 int rD, rA, rB;
276 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
277 uint32 a = gCPU.gpr[rA];
278 uint32 b = gCPU.gpr[rB];
279 uint32 ca = gCPU.xer_ca;
280 gCPU.gpr[rD] = a + b + ca;
281 gCPU.xer_ca = ppc_carry_3(a, b, ca);
282 if (gCPU.current_opc & PPC_OPC_Rc) {
283 // update cr0 flags
284 ppc_update_cr0(gCPU.gpr[rD]);
285 }
286 // update XER flags
287 PPC_ALU_ERR("addeox unimplemented\n");
288 }
289 /*
290 * addi Add Immediate
291 * .425
292 */
293 void ppc_opc_addi()
294 {
295 int rD, rA;
296 uint32 imm;
297 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, rD, rA, imm);
298 gCPU.gpr[rD] = (rA ? gCPU.gpr[rA] : 0) + imm;
299 }
/*
 * Shared code generator for addi/addis: rD <- (rA|0) + imm.
 * Picks the cheapest native sequence per case, and prefers LEA when
 * the native flags/carry are currently mapped so they survive.
 */
JITCFlow ppc_opc_gen_addi_addis(int rD, int rA, uint32 imm)
{
	if (rA == 0) {
		// rA == 0 encodes the literal zero: rD <- imm
		NativeReg d = jitcMapClientRegisterDirty(PPC_GPR(rD));
		if (imm == 0 && !jitcFlagsMapped() && !jitcCarryMapped()) {
			// XOR is the shortest way to zero a register, but it
			// destroys the native flags -- only used when neither
			// flags nor carry are live
			jitcClobberCarryAndFlags();
			asmALURegReg(X86_XOR, d, d);
		} else {
			asmMOVRegImm_NoFlags(d, imm);
		}
	} else {
		if (rD == rA) {
			NativeReg a = jitcGetClientRegisterDirty(PPC_GPR(rA));
			if (!imm) {
				// empty
			} else if (imm == 1) {
				// inc / dec doesn't clobber carry
				jitcClobberFlags();
				asmINCReg(a);
			} else if (imm == 0xffffffff) {
				// imm == -1: use DEC (also keeps the carry)
				jitcClobberFlags();
				asmDECReg(a);
			} else {
				if (jitcFlagsMapped() || jitcCarryMapped()) {
					// lea rA, [rB+imm]
					// LEA preserves both flags and carry
					byte modrm[6];
					asmLEA(a, modrm, x86_mem_r(modrm, a, imm));
				} else {
					// nothing live: plain ADD is fine
					jitcClobberCarryAndFlags();
					asmALURegImm(X86_ADD, a, imm);
				}
			}
		} else {
			if (imm) {
				NativeReg a = jitcGetClientRegister(PPC_GPR(rA));
				// lea d, [a+imm]
				NativeReg d = jitcMapClientRegisterDirty(PPC_GPR(rD));
				byte modrm[6];
				asmLEA(d, modrm, x86_mem_r(modrm, a, imm));
			} else {
				NativeReg a = jitcGetClientRegister(PPC_GPR(rA));
				// mov d, a
				NativeReg d = jitcMapClientRegisterDirty(PPC_GPR(rD));
				asmALURegReg(X86_MOV, d, a);
			}
		}
	}
	return flowContinue;
}
349 JITCFlow ppc_opc_gen_addi()
350 {
351 int rD, rA;
352 uint32 imm;
353 PPC_OPC_TEMPL_D_SImm(gJITC.current_opc, rD, rA, imm);
354 return ppc_opc_gen_addi_addis(rD, rA, imm);
355 }
356 /*
357 * addic Add Immediate Carrying
358 * .426
359 */
360 void ppc_opc_addic()
361 {
362 int rD, rA;
363 uint32 imm;
364 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, rD, rA, imm);
365 uint32 a = gCPU.gpr[rA];
366 gCPU.gpr[rD] = a + imm;
367 gCPU.xer_ca = (gCPU.gpr[rD] < a);
368 }
/*
 * Code generator for addic: rD <- rA + SIMM, XER[CA] from the native
 * carry; CR0 untouched, so the native flags are simply clobbered.
 */
JITCFlow ppc_opc_gen_addic()
{
	int rD, rA;
	uint32 imm;
	PPC_OPC_TEMPL_D_SImm(gJITC.current_opc, rD, rA, imm);
	jitcClobberFlags();
	if (rD == rA) {
		// in-place add
		NativeReg a = jitcGetClientRegisterDirty(PPC_GPR(rA));
		asmALURegImm(X86_ADD, a, imm);
	} else {
		// copy rA, then add the immediate
		NativeReg a = jitcGetClientRegister(PPC_GPR(rA));
		NativeReg d = jitcMapClientRegisterDirty(PPC_GPR(rD));
		asmALURegReg(X86_MOV, d, a);
		asmALURegImm(X86_ADD, d, imm);
	}
	jitcMapCarryDirty();
	return flowContinue;
}
387 /*
388 * addic. Add Immediate Carrying and Record
389 * .427
390 */
391 void ppc_opc_addic_()
392 {
393 int rD, rA;
394 uint32 imm;
395 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, rD, rA, imm);
396 uint32 a = gCPU.gpr[rA];
397 gCPU.gpr[rD] = a + imm;
398 gCPU.xer_ca = (gCPU.gpr[rD] < a);
399 // update cr0 flags
400 ppc_update_cr0(gCPU.gpr[rD]);
401 }
/*
 * Code generator for addic.: rD <- rA + SIMM, XER[CA] from the
 * native carry and CR0 from the native flags of the same ADD.
 */
JITCFlow ppc_opc_gen_addic_()
{
	int rD, rA;
	uint32 imm;
	PPC_OPC_TEMPL_D_SImm(gJITC.current_opc, rD, rA, imm);
	if (rD == rA) {
		// in-place add
		NativeReg a = jitcGetClientRegisterDirty(PPC_GPR(rA));
		asmALURegImm(X86_ADD, a, imm);
	} else {
		// copy rA, then add the immediate
		NativeReg a = jitcGetClientRegister(PPC_GPR(rA));
		NativeReg d = jitcMapClientRegisterDirty(PPC_GPR(rD));
		asmALURegReg(X86_MOV, d, a);
		asmALURegImm(X86_ADD, d, imm);
	}
	jitcMapCarryDirty();
	jitcMapFlagsDirty();
	return flowContinue;
}
420 /*
421 * addis Add Immediate Shifted
422 * .428
423 */
424 void ppc_opc_addis()
425 {
426 int rD, rA;
427 uint32 imm;
428 PPC_OPC_TEMPL_D_Shift16(gCPU.current_opc, rD, rA, imm);
429 gCPU.gpr[rD] = (rA ? gCPU.gpr[rA] : 0) + imm;
430 }
431 JITCFlow ppc_opc_gen_addis()
432 {
433 int rD, rA;
434 uint32 imm;
435 PPC_OPC_TEMPL_D_Shift16(gJITC.current_opc, rD, rA, imm);
436 return ppc_opc_gen_addi_addis(rD, rA, imm);
437 }
438 /*
439 * addmex Add to Minus One Extended
440 * .429
441 */
442 void ppc_opc_addmex()
443 {
444 int rD, rA, rB;
445 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
446 PPC_OPC_ASSERT(rB == 0);
447 uint32 a = gCPU.gpr[rA];
448 uint32 ca = gCPU.xer_ca;
449 gCPU.gpr[rD] = a + ca + 0xffffffff;
450 gCPU.xer_ca = a || ca;
451 if (gCPU.current_opc & PPC_OPC_Rc) {
452 // update cr0 flags
453 ppc_update_cr0(gCPU.gpr[rD]);
454 }
455 }
456 JITCFlow ppc_opc_gen_addmex()
457 {
458 ppc_opc_gen_interpret(ppc_opc_addmex);
459 return flowEndBlock;
460
461 }
462 /*
463 * addmeox Add to Minus One Extended with Overflow
464 * .429
465 */
466 void ppc_opc_addmeox()
467 {
468 int rD, rA, rB;
469 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
470 PPC_OPC_ASSERT(rB == 0);
471 uint32 a = gCPU.gpr[rA];
472 uint32 ca = (gCPU.xer_ca);
473 gCPU.gpr[rD] = a + ca + 0xffffffff;
474 gCPU.xer_ca = (a || ca);
475 if (gCPU.current_opc & PPC_OPC_Rc) {
476 // update cr0 flags
477 ppc_update_cr0(gCPU.gpr[rD]);
478 }
479 // update XER flags
480 PPC_ALU_ERR("addmeox unimplemented\n");
481 }
482 /*
483 * addzex Add to Zero Extended
484 * .430
485 */
486 void ppc_opc_addzex()
487 {
488 int rD, rA, rB;
489 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
490 PPC_OPC_ASSERT(rB == 0);
491 uint32 a = gCPU.gpr[rA];
492 uint32 ca = gCPU.xer_ca;
493 gCPU.gpr[rD] = a + ca;
494 gCPU.xer_ca = ((a == 0xffffffff) && ca);
495 if (gCPU.current_opc & PPC_OPC_Rc) {
496 // update cr0 flags
497 ppc_update_cr0(gCPU.gpr[rD]);
498 }
499 }
/*
 * Code generator for addze[.]: rD <- rA + XER[CA], implemented as
 * ADC with immediate 0 after loading the client carry; the native
 * carry out becomes the new XER[CA], the flags CR0 when Rc is set.
 */
JITCFlow ppc_opc_gen_addzex()
{
	int rD, rA, rB;
	PPC_OPC_TEMPL_XO(gJITC.current_opc, rD, rA, rB);
	// without Rc the flags produced by the ADC aren't needed
	if (!(gJITC.current_opc & PPC_OPC_Rc)) jitcClobberFlags();
	// load the client XER[CA] into the native carry flag
	jitcGetClientCarry();
	NativeReg d;
	if (rA == rD) {
		d = jitcGetClientRegisterDirty(PPC_GPR(rD));
	} else {
		NativeReg a = jitcGetClientRegister(PPC_GPR(rA));
		d = jitcMapClientRegisterDirty(PPC_GPR(rD));
		asmALURegReg(X86_MOV, d, a);
	}
	asmALURegImm(X86_ADC, d, 0);
	jitcMapCarryDirty();
	if (gJITC.current_opc & PPC_OPC_Rc) jitcMapFlagsDirty();
	return flowContinue;
}
519 /*
520 * addzeox Add to Zero Extended with Overflow
521 * .430
522 */
523 void ppc_opc_addzeox()
524 {
525 int rD, rA, rB;
526 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
527 PPC_OPC_ASSERT(rB == 0);
528 uint32 a = gCPU.gpr[rA];
529 uint32 ca = gCPU.xer_ca;
530 gCPU.gpr[rD] = a + ca;
531 gCPU.xer_ca = ((a == 0xffffffff) && ca);
532 if (gCPU.current_opc & PPC_OPC_Rc) {
533 // update cr0 flags
534 ppc_update_cr0(gCPU.gpr[rD]);
535 }
536 // update XER flags
537 PPC_ALU_ERR("addzeox unimplemented\n");
538 }
539
540 /*
541 * andx AND
542 * .431
543 */
544 void ppc_opc_andx()
545 {
546 int rS, rA, rB;
547 PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
548 gCPU.gpr[rA] = gCPU.gpr[rS] & gCPU.gpr[rB];
549 if (gCPU.current_opc & PPC_OPC_Rc) {
550 // update cr0 flags
551 ppc_update_cr0(gCPU.gpr[rA]);
552 }
553 }
/*
 * Code generator for and[.]: rA <- rS & rB; with Rc the native flags
 * of the AND are mapped into CR0.
 */
JITCFlow ppc_opc_gen_andx()
{
	int rS, rA, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, rS, rA, rB);
	if (gJITC.current_opc & PPC_OPC_Rc) {
		// AND's flags become CR0, only the carry must go
		jitcClobberCarry();
	} else {
		jitcClobberCarryAndFlags();
	}
	if (rA == rS) {
		// in-place: and a, b
		NativeReg a = jitcGetClientRegisterDirty(PPC_GPR(rA));
		NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
		asmALURegReg(X86_AND, a, b);
	} else if (rA == rB) {
		// AND is commutative: and a, s
		NativeReg a = jitcGetClientRegisterDirty(PPC_GPR(rA));
		NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
		asmALURegReg(X86_AND, a, s);
	} else {
		// three distinct registers: copy rS first
		NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
		NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
		NativeReg a = jitcMapClientRegisterDirty(PPC_GPR(rA));
		asmALURegReg(X86_MOV, a, s);
		asmALURegReg(X86_AND, a, b);
	}
	if (gJITC.current_opc & PPC_OPC_Rc) {
		jitcMapFlagsDirty();
	}
	return flowContinue;
}
583 /*
584 * andcx AND with Complement
585 * .432
586 */
587 void ppc_opc_andcx()
588 {
589 int rS, rA, rB;
590 PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
591 gCPU.gpr[rA] = gCPU.gpr[rS] & ~gCPU.gpr[rB];
592 if (gCPU.current_opc & PPC_OPC_Rc) {
593 // update cr0 flags
594 ppc_update_cr0(gCPU.gpr[rA]);
595 }
596 }
/*
 * Code generator for andc[.]: rA <- rS & ~rB.  x86 has no and-with-
 * complement, so the complement is formed with NOT first; with Rc the
 * flags of the final AND become CR0.
 */
JITCFlow ppc_opc_gen_andcx()
{
	int rS, rA, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, rS, rA, rB);
	if (gJITC.current_opc & PPC_OPC_Rc) {
		// AND's flags become CR0, only the carry must go
		jitcClobberCarry();
	} else {
		jitcClobberCarryAndFlags();
	}
	if (rA == rS) {
		// rB must not be modified: complement a scratch copy
		NativeReg a = jitcGetClientRegisterDirty(PPC_GPR(rA));
		NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
		NativeReg tmp = jitcAllocRegister();
		asmALURegReg(X86_MOV, tmp, b);
		asmALUReg(X86_NOT, tmp);
		asmALURegReg(X86_AND, a, tmp);
	} else if (rA == rB) {
		// destination aliases rB: complement it in place, then AND
		NativeReg a = jitcGetClientRegisterDirty(PPC_GPR(rA));
		NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
		asmALUReg(X86_NOT, a);
		asmALURegReg(X86_AND, a, s);
	} else {
		// three distinct registers: build ~rB in rA, then AND rS
		NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
		NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
		NativeReg a = jitcMapClientRegisterDirty(PPC_GPR(rA));
		asmALURegReg(X86_MOV, a, b);
		asmALUReg(X86_NOT, a);
		asmALURegReg(X86_AND, a, s);
	}
	if (gJITC.current_opc & PPC_OPC_Rc) {
		jitcMapFlagsDirty();
	}
	return flowContinue;
}
631 /*
632 * andi. AND Immediate
633 * .433
634 */
635 void ppc_opc_andi_()
636 {
637 int rS, rA;
638 uint32 imm;
639 PPC_OPC_TEMPL_D_UImm(gCPU.current_opc, rS, rA, imm);
640 gCPU.gpr[rA] = gCPU.gpr[rS] & imm;
641 // update cr0 flags
642 ppc_update_cr0(gCPU.gpr[rA]);
643 }
/*
 * Code generator for andi.: rA <- rS & UIMM; the flags of the native
 * AND are always mapped into CR0 (this form always records).
 */
JITCFlow ppc_opc_gen_andi_()
{
	int rS, rA;
	uint32 imm;
	PPC_OPC_TEMPL_D_UImm(gJITC.current_opc, rS, rA, imm);
	// AND's flags become CR0, only the carry must go
	jitcClobberCarry();
	if (rS == rA) {
		// in-place AND
		NativeReg a = jitcGetClientRegisterDirty(PPC_GPR(rA));
		asmALURegImm(X86_AND, a, imm);
	} else {
		// copy rS, then AND the immediate
		NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
		NativeReg a = jitcMapClientRegisterDirty(PPC_GPR(rA));
		asmALURegReg(X86_MOV, a, s);
		asmALURegImm(X86_AND, a, imm);
	}
	jitcMapFlagsDirty();
	return flowContinue;
}
662 /*
663 * andis. AND Immediate Shifted
664 * .434
665 */
666 void ppc_opc_andis_()
667 {
668 int rS, rA;
669 uint32 imm;
670 PPC_OPC_TEMPL_D_Shift16(gCPU.current_opc, rS, rA, imm);
671 gCPU.gpr[rA] = gCPU.gpr[rS] & imm;
672 // update cr0 flags
673 ppc_update_cr0(gCPU.gpr[rA]);
674 }
/*
 * Code generator for andis.: rA <- rS & (UIMM << 16); the flags of
 * the native AND are always mapped into CR0.
 */
JITCFlow ppc_opc_gen_andis_()
{
	int rS, rA;
	uint32 imm;
	PPC_OPC_TEMPL_D_Shift16(gJITC.current_opc, rS, rA, imm);
	// AND's flags become CR0, only the carry must go
	jitcClobberCarry();
	if (rS == rA) {
		// in-place AND
		NativeReg a = jitcGetClientRegisterDirty(PPC_GPR(rA));
		asmALURegImm(X86_AND, a, imm);
	} else {
		// copy rS, then AND the immediate
		NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
		NativeReg a = jitcMapClientRegisterDirty(PPC_GPR(rA));
		asmALURegReg(X86_MOV, a, s);
		asmALURegImm(X86_AND, a, imm);
	}
	jitcMapFlagsDirty();
	return flowContinue;
}
693
694 /*
695 * cmp Compare
696 * .442
697 */
/*
 * Per-field clear masks for the condition register: entry n has the
 * four bits of CR nibble n (counted from the least significant end)
 * cleared.  Used by the cmp* interpreters below to wipe a CR field
 * before OR-ing in the new comparison result.
 */
static uint32 ppc_cmp_and_mask[8] = {
	0xfffffff0,
	0xffffff0f,
	0xfffff0ff,
	0xffff0fff,
	0xfff0ffff,
	0xff0fffff,
	0xf0ffffff,
	0x0fffffff,
};
708
709 void ppc_opc_cmp()
710 {
711 uint32 cr;
712 int rA, rB;
713 PPC_OPC_TEMPL_X(gCPU.current_opc, cr, rA, rB);
714 cr >>= 2;
715 sint32 a = gCPU.gpr[rA];
716 sint32 b = gCPU.gpr[rB];
717 uint32 c;
718 if (a < b) {
719 c = 8;
720 } else if (a > b) {
721 c = 4;
722 } else {
723 c = 2;
724 }
725 if (gCPU.xer & XER_SO) c |= 1;
726 cr = 7-cr;
727 gCPU.cr &= ppc_cmp_and_mask[cr];
728 gCPU.cr |= c<<(cr*4);
729 }
/*
 * Code generator for cmp: emit a native signed CMP and flush the
 * resulting flags into the addressed CR field.
 */
JITCFlow ppc_opc_gen_cmp()
{
	uint32 cr;
	int rA, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, cr, rA, rB);
	cr >>= 2;
	jitcClobberCarryAndFlags();
	NativeReg a = jitcGetClientRegister(PPC_GPR(rA));
	NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
	asmALURegReg(X86_CMP, a, b);
#if 0
	if (cr == 0) {
		asmCALL((NativeAddress)ppc_flush_flags_signed_0_asm);
	} else {
		jitcClobberRegister(EAX | NATIVE_REG);
		asmMOVRegImm_NoFlags(EAX, (7-cr)/2);
		asmCALL((cr & 1) ? (NativeAddress)ppc_flush_flags_signed_odd_asm : (NativeAddress)ppc_flush_flags_signed_even_asm);
	}
#else
	// the flush helpers address CR in 16-bit halves, hence the
	// odd/even split on the field number
	if (cr & 1) {
		jitcFlushFlagsAfterCMP_L((7-cr)/2);
	} else {
		jitcFlushFlagsAfterCMP_U((7-cr)/2);
	}
#endif
	return flowContinue;
}
757 /*
758 * cmpi Compare Immediate
759 * .443
760 */
761 void ppc_opc_cmpi()
762 {
763 uint32 cr;
764 int rA;
765 uint32 imm;
766 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, cr, rA, imm);
767 cr >>= 2;
768 sint32 a = gCPU.gpr[rA];
769 sint32 b = imm;
770 uint32 c;
771 if (a < b) {
772 c = 8;
773 } else if (a > b) {
774 c = 4;
775 } else {
776 c = 2;
777 }
778 if (gCPU.xer & XER_SO) c |= 1;
779 cr = 7-cr;
780 gCPU.cr &= ppc_cmp_and_mask[cr];
781 gCPU.cr |= c<<(cr*4);
782 }
/*
 * Code generator for cmpi: native signed CMP against the immediate,
 * then flush the flags into the addressed CR field.
 */
JITCFlow ppc_opc_gen_cmpi()
{
	uint32 cr;
	int rA;
	uint32 imm;
	PPC_OPC_TEMPL_D_SImm(gJITC.current_opc, cr, rA, imm);
	cr >>= 2;
	jitcClobberCarryAndFlags();
	NativeReg a = jitcGetClientRegister(PPC_GPR(rA));
	asmALURegImm(X86_CMP, a, imm);
#if 0
	if (cr == 0) {
		asmCALL((NativeAddress)ppc_flush_flags_signed_0_asm);
	} else {
		jitcClobberRegister(EAX | NATIVE_REG);
		asmMOVRegImm_NoFlags(EAX, (7-cr)/2);
		asmCALL((cr & 1) ? (NativeAddress)ppc_flush_flags_signed_odd_asm : (NativeAddress)ppc_flush_flags_signed_even_asm);
	}
#else
	// odd/even split selects the lower/upper half of the CR word
	if (cr & 1) {
		jitcFlushFlagsAfterCMP_L((7-cr)/2);
	} else {
		jitcFlushFlagsAfterCMP_U((7-cr)/2);
	}
#endif
	return flowContinue;
}
810 /*
811 * cmpl Compare Logical
812 * .444
813 */
814 void ppc_opc_cmpl()
815 {
816 uint32 cr;
817 int rA, rB;
818 PPC_OPC_TEMPL_X(gCPU.current_opc, cr, rA, rB);
819 cr >>= 2;
820 uint32 a = gCPU.gpr[rA];
821 uint32 b = gCPU.gpr[rB];
822 uint32 c;
823 if (a < b) {
824 c = 8;
825 } else if (a > b) {
826 c = 4;
827 } else {
828 c = 2;
829 }
830 if (gCPU.xer & XER_SO) c |= 1;
831 cr = 7-cr;
832 gCPU.cr &= ppc_cmp_and_mask[cr];
833 gCPU.cr |= c<<(cr*4);
834 }
/*
 * Code generator for cmpl: native CMP, then flush via the unsigned-
 * comparison flag helpers into the addressed CR field.
 */
JITCFlow ppc_opc_gen_cmpl()
{
	uint32 cr;
	int rA, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, cr, rA, rB);
	cr >>= 2;
	jitcClobberCarryAndFlags();
	NativeReg a = jitcGetClientRegister(PPC_GPR(rA));
	NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
	asmALURegReg(X86_CMP, a, b);
#if 0
	if (cr == 0) {
		asmCALL((NativeAddress)ppc_flush_flags_unsigned_0_asm);
	} else {
		jitcClobberRegister(EAX | NATIVE_REG);
		asmMOVRegImm_NoFlags(EAX, (7-cr)/2);
		asmCALL((cr & 1) ? (NativeAddress)ppc_flush_flags_unsigned_odd_asm : (NativeAddress)ppc_flush_flags_unsigned_even_asm);
	}
#else
	// odd/even split selects the lower/upper half of the CR word
	if (cr & 1) {
		jitcFlushFlagsAfterCMPL_L((7-cr)/2);
	} else {
		jitcFlushFlagsAfterCMPL_U((7-cr)/2);
	}
#endif
	return flowContinue;
}
862 /*
863 * cmpli Compare Logical Immediate
864 * .445
865 */
866 void ppc_opc_cmpli()
867 {
868 uint32 cr;
869 int rA;
870 uint32 imm;
871 PPC_OPC_TEMPL_D_UImm(gCPU.current_opc, cr, rA, imm);
872 cr >>= 2;
873 uint32 a = gCPU.gpr[rA];
874 uint32 b = imm;
875 uint32 c;
876 if (a < b) {
877 c = 8;
878 } else if (a > b) {
879 c = 4;
880 } else {
881 c = 2;
882 }
883 if (gCPU.xer & XER_SO) c |= 1;
884 cr = 7-cr;
885 gCPU.cr &= ppc_cmp_and_mask[cr];
886 gCPU.cr |= c<<(cr*4);
887 }
/*
 * Code generator for cmpli: native CMP against the immediate, then
 * flush via the unsigned-comparison flag helpers.
 */
JITCFlow ppc_opc_gen_cmpli()
{
	uint32 cr;
	int rA;
	uint32 imm;
	PPC_OPC_TEMPL_D_UImm(gJITC.current_opc, cr, rA, imm);
	cr >>= 2;
	jitcClobberCarryAndFlags();
	NativeReg a = jitcGetClientRegister(PPC_GPR(rA));
	asmALURegImm(X86_CMP, a, imm);
#if 0
	if (cr == 0) {
		asmCALL((NativeAddress)ppc_flush_flags_unsigned_0_asm);
	} else {
		jitcClobberRegister(EAX | NATIVE_REG);
		asmMOVRegImm_NoFlags(EAX, (7-cr)/2);
		asmCALL((cr & 1) ? (NativeAddress)ppc_flush_flags_unsigned_odd_asm : (NativeAddress)ppc_flush_flags_unsigned_even_asm);
	}
#else
	// odd/even split selects the lower/upper half of the CR word
	if (cr & 1) {
		jitcFlushFlagsAfterCMPL_L((7-cr)/2);
	} else {
		jitcFlushFlagsAfterCMPL_U((7-cr)/2);
	}
#endif
	return flowContinue;
}
915
916 /*
917 * cntlzwx Count Leading Zeros Word
918 * .447
919 */
920 void ppc_opc_cntlzwx()
921 {
922 int rS, rA, rB;
923 PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
924 PPC_OPC_ASSERT(rB==0);
925 uint32 n=0;
926 uint32 x=0x80000000;
927 uint32 v=gCPU.gpr[rS];
928 while (!(v & x)) {
929 n++;
930 if (n==32) break;
931 x>>=1;
932 }
933 gCPU.gpr[rA] = n;
934 if (gCPU.current_opc & PPC_OPC_Rc) {
935 // update cr0 flags
936 ppc_update_cr0(gCPU.gpr[rA]);
937 }
938 }
/*
 * Code generator for cntlzw[.]: leading-zero count via BSR.
 * BSR yields the index of the highest set bit and sets ZF for a zero
 * source; CMOVZ then substitutes -1 so that the final 31 - result
 * (computed as NEG + ADD 31) yields 32 for input 0.
 */
JITCFlow ppc_opc_gen_cntlzwx()
{
	int rS, rA, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, rS, rA, rB);
	jitcClobberCarryAndFlags();
	NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
	NativeReg a = jitcMapClientRegisterDirty(PPC_GPR(rA));
	NativeReg z = jitcAllocRegister();
	asmALURegImm(X86_MOV, z, 0xffffffff);
	asmBSxRegReg(X86_BSR, a, s);
	asmCMOVRegReg(X86_Z, a, z);	// s == 0 -> a = -1
	asmALUReg(X86_NEG, a);
	asmALURegImm(X86_ADD, a, 31);	// a = 31 - bsr(s)
	if (gJITC.current_opc & PPC_OPC_Rc) {
		// flags of the final ADD become CR0
		jitcMapFlagsDirty();
	}
	return flowContinue;
}
957
958 /*
959 * crand Condition Register AND
960 * .448
961 */
962 void ppc_opc_crand()
963 {
964 int crD, crA, crB;
965 PPC_OPC_TEMPL_X(gCPU.current_opc, crD, crA, crB);
966 if ((gCPU.cr & (1<<(31-crA))) && (gCPU.cr & (1<<(31-crB)))) {
967 gCPU.cr |= (1<<(31-crD));
968 } else {
969 gCPU.cr &= ~(1<<(31-crD));
970 }
971 }
/*
 * Code generator for crand: crD <- crA & crB, operating directly on
 * gCPU.cr in memory with TEST / OR / AND.
 */
JITCFlow ppc_opc_gen_crand()
{
	int crD, crA, crB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, crD, crA, crB);
	jitcClobberCarryAndFlags();
	asmTESTDMemImm((uint32)&gCPU.cr, 1<<(31-crA));
	NativeAddress nocrA = asmJxxFixup(X86_Z);	// crA clear -> result 0
	asmTESTDMemImm((uint32)&gCPU.cr, 1<<(31-crB));
	NativeAddress nocrB = asmJxxFixup(X86_Z);	// crB clear -> result 0
	asmORDMemImm((uint32)&gCPU.cr, 1<<(31-crD));	// both set: set crD
	NativeAddress end1 = asmJMPFixup();
	asmResolveFixup(nocrB, asmHERE());
	asmResolveFixup(nocrA, asmHERE());
	asmANDDMemImm((uint32)&gCPU.cr, ~(1<<(31-crD)));	// clear crD
	asmResolveFixup(end1, asmHERE());
	return flowContinue;
}
989 /*
990 * crandc Condition Register AND with Complement
991 * .449
992 */
993 void ppc_opc_crandc()
994 {
995 int crD, crA, crB;
996 PPC_OPC_TEMPL_X(gCPU.current_opc, crD, crA, crB);
997 if ((gCPU.cr & (1<<(31-crA))) && !(gCPU.cr & (1<<(31-crB)))) {
998 gCPU.cr |= (1<<(31-crD));
999 } else {
1000 gCPU.cr &= ~(1<<(31-crD));
1001 }
1002 }
/*
 * Code generator for crandc: crD <- crA & ~crB; same structure as
 * crand, but the crB test branches away when the bit IS set.
 */
JITCFlow ppc_opc_gen_crandc()
{
	int crD, crA, crB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, crD, crA, crB);
	jitcClobberCarryAndFlags();
	asmTESTDMemImm((uint32)&gCPU.cr, 1<<(31-crA));
	NativeAddress nocrA = asmJxxFixup(X86_Z);	// crA clear -> result 0
	asmTESTDMemImm((uint32)&gCPU.cr, 1<<(31-crB));
	NativeAddress nocrB = asmJxxFixup(X86_NZ);	// crB set -> result 0
	asmORDMemImm((uint32)&gCPU.cr, 1<<(31-crD));	// crA & !crB: set crD
	NativeAddress end1 = asmJMPFixup();
	asmResolveFixup(nocrB, asmHERE());
	asmResolveFixup(nocrA, asmHERE());
	asmANDDMemImm((uint32)&gCPU.cr, ~(1<<(31-crD)));	// clear crD
	asmResolveFixup(end1, asmHERE());
	return flowContinue;
}
1020 /*
1021 * creqv Condition Register Equivalent
1022 * .450
1023 */
1024 void ppc_opc_creqv()
1025 {
1026 int crD, crA, crB;
1027 PPC_OPC_TEMPL_X(gCPU.current_opc, crD, crA, crB);
1028 if (((gCPU.cr & (1<<(31-crA))) && (gCPU.cr & (1<<(31-crB))))
1029 || (!(gCPU.cr & (1<<(31-crA))) && !(gCPU.cr & (1<<(31-crB))))) {
1030 gCPU.cr |= (1<<(31-crD));
1031 } else {
1032 gCPU.cr &= ~(1<<(31-crD));
1033 }
1034 }
/*
 * Code generator for creqv: crD <- ~(crA ^ crB).  With crA == crB the
 * result is identically 1, so crD is simply set; otherwise both bits
 * are tested and the four outcomes are handled with jump fixups.
 */
JITCFlow ppc_opc_gen_creqv()
{
	int crD, crA, crB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, crD, crA, crB);
	jitcClobberCarryAndFlags();
	if (crA == crB) {
		// a bit is always equal to itself
		asmORDMemImm((uint32)&gCPU.cr, 1<<(31-crD));
	} else {
		// crD = crA ? (crB ? 1 : 0) : (crB ? 0 : 1)
		asmTESTDMemImm((uint32)&gCPU.cr, 1<<(31-crA));
		NativeAddress nocrA = asmJxxFixup(X86_Z);
		asmTESTDMemImm((uint32)&gCPU.cr, 1<<(31-crB));
		NativeAddress nocrB1 = asmJxxFixup(X86_Z);
		asmORDMemImm((uint32)&gCPU.cr, 1<<(31-crD));	// 1,1 -> set
		NativeAddress end1 = asmJMPFixup();
		asmResolveFixup(nocrB1, asmHERE());
		asmANDDMemImm((uint32)&gCPU.cr, ~(1<<(31-crD)));	// 1,0 -> clear
		NativeAddress end2 = asmJMPFixup();
		asmResolveFixup(nocrA, asmHERE());
		asmTESTDMemImm((uint32)&gCPU.cr, 1<<(31-crB));
		NativeAddress nocrB2 = asmJxxFixup(X86_Z);
		asmANDDMemImm((uint32)&gCPU.cr, ~(1<<(31-crD)));	// 0,1 -> clear
		NativeAddress end3 = asmJMPFixup();
		asmResolveFixup(nocrB2, asmHERE());
		asmORDMemImm((uint32)&gCPU.cr, 1<<(31-crD));	// 0,0 -> set
		asmResolveFixup(end1, asmHERE());
		asmResolveFixup(end2, asmHERE());
		asmResolveFixup(end3, asmHERE());
	}
	return flowContinue;
}
1066 /*
1067 * crnand Condition Register NAND
1068 * .451
1069 */
1070 void ppc_opc_crnand()
1071 {
1072 int crD, crA, crB;
1073 PPC_OPC_TEMPL_X(gCPU.current_opc, crD, crA, crB);
1074 if (!((gCPU.cr & (1<<(31-crA))) && (gCPU.cr & (1<<(31-crB))))) {
1075 gCPU.cr |= (1<<(31-crD));
1076 } else {
1077 gCPU.cr &= ~(1<<(31-crD));
1078 }
1079 }
/*
 * Code generator for crnand: crD <- ~(crA & crB); identical control
 * flow to crand with the set/clear actions swapped.
 */
JITCFlow ppc_opc_gen_crnand()
{
	int crD, crA, crB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, crD, crA, crB);
	jitcClobberCarryAndFlags();
	asmTESTDMemImm((uint32)&gCPU.cr, 1<<(31-crA));
	NativeAddress nocrA = asmJxxFixup(X86_Z);	// crA clear -> result 1
	asmTESTDMemImm((uint32)&gCPU.cr, 1<<(31-crB));
	NativeAddress nocrB = asmJxxFixup(X86_Z);	// crB clear -> result 1
	asmANDDMemImm((uint32)&gCPU.cr, ~(1<<(31-crD)));	// both set: clear crD
	NativeAddress end1 = asmJMPFixup();
	asmResolveFixup(nocrB, asmHERE());
	asmResolveFixup(nocrA, asmHERE());
	asmORDMemImm((uint32)&gCPU.cr, 1<<(31-crD));	// set crD
	asmResolveFixup(end1, asmHERE());
	return flowContinue;
}
1097 /*
1098 * crnor Condition Register NOR
1099 * .452
1100 */
1101 void ppc_opc_crnor()
1102 {
1103 int crD, crA, crB;
1104 PPC_OPC_TEMPL_X(gCPU.current_opc, crD, crA, crB);
1105 uint32 t = (1<<(31-crA)) | (1<<(31-crB));
1106 if (!(gCPU.cr & t)) {
1107 gCPU.cr |= (1<<(31-crD));
1108 } else {
1109 gCPU.cr &= ~(1<<(31-crD));
1110 }
1111 }
/*
 * Code generator for crnor: crD <- ~(crA | crB); a single TEST with
 * the combined mask decides the outcome.
 */
JITCFlow ppc_opc_gen_crnor()
{
	int crD, crA, crB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, crD, crA, crB);
	jitcClobberCarryAndFlags();
	asmTESTDMemImm((uint32)&gCPU.cr, (1<<(31-crA)) | (1<<(31-crB)));
	NativeAddress notset = asmJxxFixup(X86_Z);	// neither set -> result 1
	asmANDDMemImm((uint32)&gCPU.cr, ~(1<<(31-crD)));	// clear crD
	NativeAddress end1 = asmJMPFixup();
	asmResolveFixup(notset, asmHERE());
	asmORDMemImm((uint32)&gCPU.cr, 1<<(31-crD));	// set crD
	asmResolveFixup(end1, asmHERE());
	return flowContinue;
}
1126 /*
1127 * cror Condition Register OR
1128 * .453
1129 */
1130 void ppc_opc_cror()
1131 {
1132 int crD, crA, crB;
1133 PPC_OPC_TEMPL_X(gCPU.current_opc, crD, crA, crB);
1134 uint32 t = (1<<(31-crA)) | (1<<(31-crB));
1135 if (gCPU.cr & t) {
1136 gCPU.cr |= (1<<(31-crD));
1137 } else {
1138 gCPU.cr &= ~(1<<(31-crD));
1139 }
1140 }
/*
 * Code generator for cror: crD <- crA | crB; a single TEST with the
 * combined mask decides the outcome.
 */
JITCFlow ppc_opc_gen_cror()
{
	int crD, crA, crB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, crD, crA, crB);
	jitcClobberCarryAndFlags();
	asmTESTDMemImm((uint32)&gCPU.cr, (1<<(31-crA)) | (1<<(31-crB)));
	NativeAddress notset = asmJxxFixup(X86_Z);	// neither set -> result 0
	asmORDMemImm((uint32)&gCPU.cr, 1<<(31-crD));	// set crD
	NativeAddress end1 = asmJMPFixup();
	asmResolveFixup(notset, asmHERE());
	asmANDDMemImm((uint32)&gCPU.cr, ~(1<<(31-crD)));	// clear crD
	asmResolveFixup(end1, asmHERE());
	return flowContinue;
}
1155 /*
1156 * crorc Condition Register OR with Complement
1157 * .454
1158 */
1159 void ppc_opc_crorc()
1160 {
1161 int crD, crA, crB;
1162 PPC_OPC_TEMPL_X(gCPU.current_opc, crD, crA, crB);
1163 if ((gCPU.cr & (1<<(31-crA))) || !(gCPU.cr & (1<<(31-crB)))) {
1164 gCPU.cr |= (1<<(31-crD));
1165 } else {
1166 gCPU.cr &= ~(1<<(31-crD));
1167 }
1168 }
1169
/*
 * Code generator for crorc: crD <- crA | ~crB; a set crA short-
 * circuits straight to setting crD.
 */
JITCFlow ppc_opc_gen_crorc()
{
	int crD, crA, crB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, crD, crA, crB);
	jitcClobberCarryAndFlags();
	asmTESTDMemImm((uint32)&gCPU.cr, 1<<(31-crA));
	NativeAddress crAset = asmJxxFixup(X86_NZ);	// crA set -> result 1
	asmTESTDMemImm((uint32)&gCPU.cr, 1<<(31-crB));
	NativeAddress nocrB = asmJxxFixup(X86_Z);	// crB clear -> result 1
	asmANDDMemImm((uint32)&gCPU.cr, ~(1<<(31-crD)));	// !crA & crB: clear crD
	NativeAddress end1 = asmJMPFixup();
	asmResolveFixup(nocrB, asmHERE());
	asmResolveFixup(crAset, asmHERE());
	asmORDMemImm((uint32)&gCPU.cr, 1<<(31-crD));	// set crD
	asmResolveFixup(end1, asmHERE());
	return flowContinue;
}
1187 /*
1188 * crxor Condition Register XOR
1189 * .448
1190 */
1191 void ppc_opc_crxor()
1192 {
1193 int crD, crA, crB;
1194 PPC_OPC_TEMPL_X(gCPU.current_opc, crD, crA, crB);
1195 if ((!(gCPU.cr & (1<<(31-crA))) && (gCPU.cr & (1<<(31-crB))))
1196 || ((gCPU.cr & (1<<(31-crA))) && !(gCPU.cr & (1<<(31-crB))))) {
1197 gCPU.cr |= (1<<(31-crD));
1198 } else {
1199 gCPU.cr &= ~(1<<(31-crD));
1200 }
1201 }
JITCFlow ppc_opc_gen_crxor()
{
	// JIT version of crxor: crD <- crA ^ crB.
	int crD, crA, crB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, crD, crA, crB);
	jitcClobberCarryAndFlags();
	if (crA == crB) {
		// x ^ x == 0: just clear crD, no tests needed
		asmANDDMemImm((uint32)&gCPU.cr, ~(1<<(31-crD)));
	} else {
		// crD = crA ? (crB ? 0 : 1) : (crB ? 1 : 0)
		asmTESTDMemImm((uint32)&gCPU.cr, 1<<(31-crA));
		NativeAddress nocrA = asmJxxFixup(X86_Z);
		// crA set: crD = !crB
		asmTESTDMemImm((uint32)&gCPU.cr, 1<<(31-crB));
		NativeAddress nocrB1 = asmJxxFixup(X86_Z);
		asmANDDMemImm((uint32)&gCPU.cr, ~(1<<(31-crD)));
		NativeAddress end1 = asmJMPFixup();
		asmResolveFixup(nocrB1, asmHERE());
		asmORDMemImm((uint32)&gCPU.cr, 1<<(31-crD));
		NativeAddress end2 = asmJMPFixup();
		asmResolveFixup(nocrA, asmHERE());
		// crA clear: crD = crB
		asmTESTDMemImm((uint32)&gCPU.cr, 1<<(31-crB));
		NativeAddress nocrB2 = asmJxxFixup(X86_Z);
		asmORDMemImm((uint32)&gCPU.cr, 1<<(31-crD));
		NativeAddress end3 = asmJMPFixup();
		asmResolveFixup(nocrB2, asmHERE());
		asmANDDMemImm((uint32)&gCPU.cr, ~(1<<(31-crD)));
		asmResolveFixup(end1, asmHERE());
		asmResolveFixup(end2, asmHERE());
		asmResolveFixup(end3, asmHERE());
	}
	return flowContinue;
}
1233
1234 /*
1235 * divwx Divide Word
1236 * .470
1237 */
1238 void ppc_opc_divwx()
1239 {
1240 int rD, rA, rB;
1241 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
1242 if (!gCPU.gpr[rB]) {
1243 PPC_ALU_WARN("division by zero @%08x\n", gCPU.pc);
1244 SINGLESTEP("");
1245 } else {
1246 sint32 a = gCPU.gpr[rA];
1247 sint32 b = gCPU.gpr[rB];
1248 gCPU.gpr[rD] = a / b;
1249 }
1250 if (gCPU.current_opc & PPC_OPC_Rc) {
1251 // update cr0 flags
1252 ppc_update_cr0(gCPU.gpr[rD]);
1253 }
1254 }
JITCFlow ppc_opc_gen_divwx()
{
	// JIT version of divw[.]: signed division via x86 IDIV,
	// which divides EDX:EAX by the operand register.
	int rD, rA, rB;
	PPC_OPC_TEMPL_XO(gJITC.current_opc, rD, rA, rB);
	jitcClobberCarryAndFlags();
	// reserve EDX (high half of the dividend) and load rA into EAX
	jitcAllocRegister(NATIVE_REG | EDX);
	jitcGetClientRegister(PPC_GPR(rA), NATIVE_REG | EAX);
	NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
	jitcClobberRegister(NATIVE_REG | EAX);
	// CDQ sign-extends EAX into EDX:EAX
	asmSimple(X86_CDQ);
	asmALURegReg(X86_TEST, EAX, EAX);
	// NOTE(review): this tests the dividend (EAX), not the divisor (b);
	// a zero divisor still reaches IDIV and would raise #DE on the host.
	// Verify whether this is handled elsewhere (e.g. a SIGFPE handler)
	// or is a latent bug -- the interpreter guards the divisor instead.
	NativeAddress na = asmJxxFixup(X86_Z);
	asmALUReg(X86_IDIV, b);
	asmResolveFixup(na, asmHERE());
	// quotient is in EAX -> becomes rD
	jitcMapClientRegisterDirty(PPC_GPR(rD), NATIVE_REG | EAX);
	if (gJITC.current_opc & PPC_OPC_Rc) {
		// IDIV leaves the flags undefined; re-test for cr0
		asmALURegReg(X86_TEST, EAX, EAX);
		jitcMapFlagsDirty();
	}
	return flowContinue;
}
1276 /*
1277 * divwox Divide Word with Overflow
1278 * .470
1279 */
1280 void ppc_opc_divwox()
1281 {
1282 int rD, rA, rB;
1283 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
1284 if (!gCPU.gpr[rB]) {
1285 PPC_ALU_WARN("division by zero\n");
1286 } else {
1287 sint32 a = gCPU.gpr[rA];
1288 sint32 b = gCPU.gpr[rB];
1289 gCPU.gpr[rD] = a / b;
1290 }
1291 if (gCPU.current_opc & PPC_OPC_Rc) {
1292 // update cr0 flags
1293 ppc_update_cr0(gCPU.gpr[rD]);
1294 }
1295 // update XER flags
1296 PPC_ALU_ERR("divwox unimplemented\n");
1297 }
1298 /*
1299 * divwux Divide Word Unsigned
1300 * .472
1301 */
1302 void ppc_opc_divwux()
1303 {
1304 int rD, rA, rB;
1305 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
1306 if (!gCPU.gpr[rB]) {
1307 PPC_ALU_WARN("division by zero @%08x\n", gCPU.pc);
1308 SINGLESTEP("");
1309 } else {
1310 gCPU.gpr[rD] = gCPU.gpr[rA] / gCPU.gpr[rB];
1311 }
1312 if (gCPU.current_opc & PPC_OPC_Rc) {
1313 // update cr0 flags
1314 ppc_update_cr0(gCPU.gpr[rD]);
1315 }
1316 }
JITCFlow ppc_opc_gen_divwux()
{
	// JIT version of divwu[.]: unsigned division via x86 DIV,
	// which divides EDX:EAX by the operand register.
	int rD, rA, rB;
	PPC_OPC_TEMPL_XO(gJITC.current_opc, rD, rA, rB);
	jitcClobberCarryAndFlags();
	jitcAllocRegister(NATIVE_REG | EDX);
	jitcGetClientRegister(PPC_GPR(rA), NATIVE_REG | EAX);
	NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
	jitcClobberRegister(NATIVE_REG | EAX);
	// zero the high half of the dividend (unsigned extend)
	asmALURegReg(X86_XOR, EDX, EDX);
	asmALURegReg(X86_TEST, EAX, EAX);
	// NOTE(review): as in gen_divwx, this tests the dividend (EAX)
	// rather than the divisor (b); a zero divisor still reaches DIV
	// and would raise #DE on the host -- verify intent.
	NativeAddress na = asmJxxFixup(X86_Z);
	asmALUReg(X86_DIV, b);
	asmResolveFixup(na, asmHERE());
	// quotient (EAX) becomes rD
	jitcMapClientRegisterDirty(PPC_GPR(rD), NATIVE_REG | EAX);
	if (gJITC.current_opc & PPC_OPC_Rc) {
		// DIV leaves the flags undefined; re-test for cr0
		asmALURegReg(X86_TEST, EAX, EAX);
		jitcMapFlagsDirty();
	}
	return flowContinue;
}
1338 /*
1339 * divwuox Divide Word Unsigned with Overflow
1340 * .472
1341 */
1342 void ppc_opc_divwuox()
1343 {
1344 int rD, rA, rB;
1345 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
1346 if (!gCPU.gpr[rB]) {
1347 PPC_ALU_WARN("division by zero @%08x\n", gCPU.pc);
1348 } else {
1349 gCPU.gpr[rD] = gCPU.gpr[rA] / gCPU.gpr[rB];
1350 }
1351 if (gCPU.current_opc & PPC_OPC_Rc) {
1352 // update cr0 flags
1353 ppc_update_cr0(gCPU.gpr[rD]);
1354 }
1355 // update XER flags
1356 PPC_ALU_ERR("divwuox unimplemented\n");
1357 }
1358
1359 /*
1360 * eqvx Equivalent
1361 * .480
1362 */
1363 void ppc_opc_eqvx()
1364 {
1365 int rS, rA, rB;
1366 PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
1367 gCPU.gpr[rA] = ~(gCPU.gpr[rS] ^ gCPU.gpr[rB]);
1368 if (gCPU.current_opc & PPC_OPC_Rc) {
1369 // update cr0 flags
1370 ppc_update_cr0(gCPU.gpr[rA]);
1371 }
1372 }
JITCFlow ppc_opc_gen_eqvx()
{
	// JIT version of eqv[.]: rA <- ~(rS ^ rB), emitted as XOR + NOT.
	int rS, rA, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, rS, rA, rB);
	if (gJITC.current_opc & PPC_OPC_Rc) {
		jitcClobberCarry();
	} else {
		jitcClobberCarryAndFlags();
	}
	// pick the cheapest register assignment depending on aliasing
	NativeReg a;
	if (rA == rS) {
		a = jitcGetClientRegisterDirty(PPC_GPR(rA));
		NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
		asmALURegReg(X86_XOR, a, b);
	} else if (rA == rB) {
		a = jitcGetClientRegisterDirty(PPC_GPR(rA));
		NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
		asmALURegReg(X86_XOR, a, s);
	} else {
		NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
		NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
		a = jitcMapClientRegisterDirty(PPC_GPR(rA));
		asmALURegReg(X86_MOV, a, s);
		asmALURegReg(X86_XOR, a, b);
	}
	asmALUReg(X86_NOT, a);
	if (gJITC.current_opc & PPC_OPC_Rc) {
		// "NOT" doesn't update the flags
		asmALURegReg(X86_TEST, a, a);
		jitcMapFlagsDirty();
	}
	return flowContinue;
}
1406
1407 /*
1408 * extsbx Extend Sign Byte
1409 * .481
1410 */
1411 void ppc_opc_extsbx()
1412 {
1413 int rS, rA, rB;
1414 PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
1415 PPC_OPC_ASSERT(rB==0);
1416 gCPU.gpr[rA] = gCPU.gpr[rS];
1417 if (gCPU.gpr[rA] & 0x80) {
1418 gCPU.gpr[rA] |= 0xffffff00;
1419 } else {
1420 gCPU.gpr[rA] &= ~0xffffff00;
1421 }
1422 if (gCPU.current_opc & PPC_OPC_Rc) {
1423 // update cr0 flags
1424 ppc_update_cr0(gCPU.gpr[rA]);
1425 }
1426 }
JITCFlow ppc_opc_gen_extsbx()
{
	// JIT version of extsb[.]: MOVSX from the 8-bit view of rS into rA.
	int rS, rA, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, rS, rA, rB);
	// rS must live in a register with an addressable low byte
	NativeReg8 s = (NativeReg8)jitcGetClientRegister(PPC_GPR(rS), NATIVE_REG_8);
	NativeReg a = jitcMapClientRegisterDirty(PPC_GPR(rA));
	asmMOVxxRegReg8(X86_MOVSX, a, s);
	if (gJITC.current_opc & PPC_OPC_Rc) {
		// MOVSX doesn't set flags; re-test for cr0
		jitcClobberCarry();
		asmALURegReg(X86_TEST, a, a);
		jitcMapFlagsDirty();
	}
	return flowContinue;
}
1441 /*
1442 * extshx Extend Sign Half Word
1443 * .482
1444 */
1445 void ppc_opc_extshx()
1446 {
1447 int rS, rA, rB;
1448 PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
1449 PPC_OPC_ASSERT(rB==0);
1450 gCPU.gpr[rA] = gCPU.gpr[rS];
1451 if (gCPU.gpr[rA] & 0x8000) {
1452 gCPU.gpr[rA] |= 0xffff0000;
1453 } else {
1454 gCPU.gpr[rA] &= ~0xffff0000;
1455 }
1456 if (gCPU.current_opc & PPC_OPC_Rc) {
1457 // update cr0 flags
1458 ppc_update_cr0(gCPU.gpr[rA]);
1459 }
1460 }
JITCFlow ppc_opc_gen_extshx()
{
	// JIT version of extsh[.]: MOVSX from the 16-bit view of rS into rA.
	int rS, rA, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, rS, rA, rB);
	NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
	NativeReg a = jitcMapClientRegisterDirty(PPC_GPR(rA));
	asmMOVxxRegReg16(X86_MOVSX, a, s);
	if (gJITC.current_opc & PPC_OPC_Rc) {
		// MOVSX doesn't set flags; re-test for cr0
		jitcClobberCarry();
		asmALURegReg(X86_TEST, a, a);
		jitcMapFlagsDirty();
	}
	return flowContinue;
}
1475
1476 /*
1477 * mulhwx Multiply High Word
1478 * .595
1479 */
1480 void ppc_opc_mulhwx()
1481 {
1482 int rD, rA, rB;
1483 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
1484 sint64 a = (sint32)gCPU.gpr[rA];
1485 sint64 b = (sint32)gCPU.gpr[rB];
1486 sint64 c = a*b;
1487 gCPU.gpr[rD] = ((uint64)c)>>32;
1488 if (gCPU.current_opc & PPC_OPC_Rc) {
1489 // update cr0 flags
1490 ppc_update_cr0(gCPU.gpr[rD]);
1491 // PPC_ALU_WARN("mulhw. correct?\n");
1492 }
1493 }
JITCFlow ppc_opc_gen_mulhwx()
{
	// JIT version of mulhw[.]: one-operand IMUL computes EDX:EAX =
	// EAX * reg; rD takes EDX (the high half of the product).
	int rD, rA, rB;
	PPC_OPC_TEMPL_XO(gJITC.current_opc, rD, rA, rB);
	jitcClobberCarryAndFlags();
	NativeReg a, b;
	if (jitcGetClientRegisterMapping(PPC_GPR(rB)) == EAX) {
		// swapped by incident
		a = EAX;
		b = jitcGetClientRegister(PPC_GPR(rA));
	} else {
		a = jitcGetClientRegister(PPC_GPR(rA), NATIVE_REG | EAX);
		b = jitcGetClientRegister(PPC_GPR(rB));
	}
	// IMUL overwrites both EAX and EDX
	jitcClobberRegister(NATIVE_REG | EAX);
	jitcClobberRegister(NATIVE_REG | EDX);
	asmALUReg(X86_IMUL, b);
	jitcMapClientRegisterDirty(PPC_GPR(rD), NATIVE_REG | EDX);
	if (gJITC.current_opc & PPC_OPC_Rc) {
		// IMUL leaves the flags in an unusable state; re-test for cr0
		asmALURegReg(X86_TEST, EDX, EDX);
		jitcMapFlagsDirty();
	}
	return flowContinue;
}
1518 /*
1519 * mulhwux Multiply High Word Unsigned
1520 * .596
1521 */
1522 void ppc_opc_mulhwux()
1523 {
1524 int rD, rA, rB;
1525 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
1526 uint64 a = gCPU.gpr[rA];
1527 uint64 b = gCPU.gpr[rB];
1528 uint64 c = a*b;
1529 gCPU.gpr[rD] = c>>32;
1530 if (gCPU.current_opc & PPC_OPC_Rc) {
1531 // update cr0 flags
1532 ppc_update_cr0(gCPU.gpr[rD]);
1533 }
1534 }
JITCFlow ppc_opc_gen_mulhwux()
{
	// JIT version of mulhwu[.]: one-operand MUL computes EDX:EAX =
	// EAX * reg (unsigned); rD takes EDX (the high half).
	int rD, rA, rB;
	PPC_OPC_TEMPL_XO(gJITC.current_opc, rD, rA, rB);
	jitcClobberCarryAndFlags();
	NativeReg a, b;
	if (jitcGetClientRegisterMapping(PPC_GPR(rB)) == EAX) {
		// swapped by incident
		a = EAX;
		b = jitcGetClientRegister(PPC_GPR(rA));
	} else {
		a = jitcGetClientRegister(PPC_GPR(rA), NATIVE_REG | EAX);
		b = jitcGetClientRegister(PPC_GPR(rB));
	}
	// MUL overwrites both EAX and EDX
	jitcClobberRegister(NATIVE_REG | EAX);
	jitcClobberRegister(NATIVE_REG | EDX);
	asmALUReg(X86_MUL, b);
	jitcMapClientRegisterDirty(PPC_GPR(rD), NATIVE_REG | EDX);
	if (gJITC.current_opc & PPC_OPC_Rc) {
		// MUL leaves the flags in an unusable state; re-test for cr0
		asmALURegReg(X86_TEST, EDX, EDX);
		jitcMapFlagsDirty();
	}
	return flowContinue;
}
1559 /*
1560 * mulli Multiply Low Immediate
1561 * .598
1562 */
1563 void ppc_opc_mulli()
1564 {
1565 int rD, rA;
1566 uint32 imm;
1567 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, rD, rA, imm);
1568 // FIXME: signed / unsigned correct?
1569 gCPU.gpr[rD] = gCPU.gpr[rA] * imm;
1570 }
JITCFlow ppc_opc_gen_mulli()
{
	// JIT version of mulli: three-operand IMUL (d = a * imm).
	int rD, rA;
	uint32 imm;
	PPC_OPC_TEMPL_D_SImm(gJITC.current_opc, rD, rA, imm);
	// IMUL clobbers the flags; mulli never updates cr0
	jitcClobberCarryAndFlags();
	NativeReg a = jitcGetClientRegister(PPC_GPR(rA));
	NativeReg d = jitcMapClientRegisterDirty(PPC_GPR(rD));
	asmIMULRegRegImm(d, a, imm);
	return flowContinue;
}
1582 /*
1583 * mullwx Multiply Low Word
1584 * .599
1585 */
1586 void ppc_opc_mullwx()
1587 {
1588 int rD, rA, rB;
1589 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
1590 gCPU.gpr[rD] = gCPU.gpr[rA] * gCPU.gpr[rB];
1591 if (gCPU.current_opc & PPC_OPC_Rc) {
1592 // update cr0 flags
1593 ppc_update_cr0(gCPU.gpr[rD]);
1594 }
1595 if (gCPU.current_opc & PPC_OPC_OE) {
1596 // update XER flags
1597 PPC_ALU_ERR("mullwox unimplemented\n");
1598 }
1599 }
JITCFlow ppc_opc_gen_mullwx()
{
	// JIT version of mullw[.]: two-operand IMUL (d *= b); the low
	// 32 bits are the same for signed and unsigned multiplication.
	int rD, rA, rB;
	PPC_OPC_TEMPL_XO(gJITC.current_opc, rD, rA, rB);
	if (gJITC.current_opc & PPC_OPC_Rc) {
		jitcClobberCarry();
	} else {
		jitcClobberCarryAndFlags();
	}
	NativeReg a = jitcGetClientRegister(PPC_GPR(rA));
	NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
	NativeReg d;
	if (rA == rD) {
		d = a;
		jitcDirtyRegister(a);
	} else if (rB == rD) {
		// multiply in place in rB's register; the second operand
		// becomes rA's register
		d = b;
		jitcDirtyRegister(b);
		b = a;
	} else {
		d = jitcMapClientRegisterDirty(PPC_GPR(rD));
		asmALURegReg(X86_MOV, d, a);
	}
	// now: d *= b
	asmIMULRegReg(d, b);
	if (gJITC.current_opc & PPC_OPC_Rc) {
		// IMUL leaves SF/ZF undefined; OR d,d re-derives them for cr0
		asmALURegReg(X86_OR, d, d);
		jitcMapFlagsDirty();
	}
	return flowContinue;
}
1631
1632 /*
1633 * nandx NAND
1634 * .600
1635 */
1636 void ppc_opc_nandx()
1637 {
1638 int rS, rA, rB;
1639 PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
1640 gCPU.gpr[rA] = ~(gCPU.gpr[rS] & gCPU.gpr[rB]);
1641 if (gCPU.current_opc & PPC_OPC_Rc) {
1642 // update cr0 flags
1643 ppc_update_cr0(gCPU.gpr[rA]);
1644 }
1645 }
JITCFlow ppc_opc_gen_nandx()
{
	// JIT version of nand[.]: rA <- ~(rS & rB), emitted as AND + NOT.
	int rS, rA, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, rS, rA, rB);
	if (gJITC.current_opc & PPC_OPC_Rc) {
		jitcClobberCarry();
	} else {
		jitcClobberCarryAndFlags();
	}
	NativeReg a;
	if (rS == rB) {
		// nand rA, rS, rS degenerates to not rA, rS -- skip the AND
		if (rA == rS) {
			a = jitcGetClientRegisterDirty(PPC_GPR(rA));
		} else {
			NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
			a = jitcMapClientRegisterDirty(PPC_GPR(rA));
			asmALURegReg(X86_MOV, a, s);
		}
	} else if (rA == rS) {
		a = jitcGetClientRegisterDirty(PPC_GPR(rA));
		NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
		asmALURegReg(X86_AND, a, b);
	} else if (rA == rB) {
		a = jitcGetClientRegisterDirty(PPC_GPR(rA));
		NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
		asmALURegReg(X86_AND, a, s);
	} else {
		NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
		NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
		a = jitcMapClientRegisterDirty(PPC_GPR(rA));
		asmALURegReg(X86_MOV, a, s);
		asmALURegReg(X86_AND, a, b);
	}
	asmALUReg(X86_NOT, a);
	if (gJITC.current_opc & PPC_OPC_Rc) {
		// "NOT" doesn't update the flags
		asmALURegReg(X86_TEST, a, a);
		jitcMapFlagsDirty();
	}
	return flowContinue;
}
1687
1688 /*
1689 * negx Negate
1690 * .601
1691 */
1692 void ppc_opc_negx()
1693 {
1694 int rD, rA, rB;
1695 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
1696 PPC_OPC_ASSERT(rB == 0);
1697 gCPU.gpr[rD] = -gCPU.gpr[rA];
1698 if (gCPU.current_opc & PPC_OPC_Rc) {
1699 // update cr0 flags
1700 ppc_update_cr0(gCPU.gpr[rD]);
1701 }
1702 }
JITCFlow ppc_opc_gen_negx()
{
	// JIT version of neg[.]: x86 NEG, in place when rA == rD.
	int rD, rA, rB;
	PPC_OPC_TEMPL_XO(gJITC.current_opc, rD, rA, rB);
	if (gJITC.current_opc & PPC_OPC_Rc) {
		jitcClobberCarry();
	} else {
		jitcClobberCarryAndFlags();
	}
	if (rA == rD) {
		NativeReg d = jitcGetClientRegisterDirty(PPC_GPR(rD));
		asmALUReg(X86_NEG, d);
	} else {
		NativeReg a = jitcGetClientRegister(PPC_GPR(rA));
		NativeReg d = jitcMapClientRegisterDirty(PPC_GPR(rD));
		asmALURegReg(X86_MOV, d, a);
		asmALUReg(X86_NEG, d);
	}
	if (gJITC.current_opc & PPC_OPC_Rc) {
		// NEG sets SF/ZF directly, so no extra TEST is needed
		jitcMapFlagsDirty();
	}
	return flowContinue;
}
1726 /*
1727 * negox Negate with Overflow
1728 * .601
1729 */
1730 void ppc_opc_negox()
1731 {
1732 int rD, rA, rB;
1733 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
1734 PPC_OPC_ASSERT(rB == 0);
1735 gCPU.gpr[rD] = -gCPU.gpr[rA];
1736 if (gCPU.current_opc & PPC_OPC_Rc) {
1737 // update cr0 flags
1738 ppc_update_cr0(gCPU.gpr[rD]);
1739 }
1740 // update XER flags
1741 PPC_ALU_ERR("negox unimplemented\n");
1742 }
1743 /*
1744 * norx NOR
1745 * .602
1746 */
1747 void ppc_opc_norx()
1748 {
1749 int rS, rA, rB;
1750 PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
1751 gCPU.gpr[rA] = ~(gCPU.gpr[rS] | gCPU.gpr[rB]);
1752 if (gCPU.current_opc & PPC_OPC_Rc) {
1753 // update cr0 flags
1754 ppc_update_cr0(gCPU.gpr[rA]);
1755 }
1756 }
JITCFlow ppc_opc_gen_norx()
{
	// JIT version of nor[.]: rA <- ~(rS | rB), emitted as OR + NOT.
	int rS, rA, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, rS, rA, rB);
	NativeReg a;
	if (rS == rB) {
		// norx rA, rS, rS == not rA, rS
		// not doen't clobber the flags
		if (gJITC.current_opc & PPC_OPC_Rc) {
			jitcClobberCarry();
		}
		if (rA == rS) {
			a = jitcGetClientRegisterDirty(PPC_GPR(rA));
		} else {
			NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
			a = jitcMapClientRegisterDirty(PPC_GPR(rA));
			asmALURegReg(X86_MOV, a, s);
		}
	} else {
		if (gJITC.current_opc & PPC_OPC_Rc) {
			jitcClobberCarry();
		} else {
			jitcClobberCarryAndFlags();
		}
		// choose operand order to avoid an extra MOV when rA aliases
		if (rA == rS) {
			a = jitcGetClientRegisterDirty(PPC_GPR(rA));
			NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
			asmALURegReg(X86_OR, a, b);
		} else if (rA == rB) {
			a = jitcGetClientRegisterDirty(PPC_GPR(rA));
			NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
			asmALURegReg(X86_OR, a, s);
		} else {
			NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
			NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
			a = jitcMapClientRegisterDirty(PPC_GPR(rA));
			asmALURegReg(X86_MOV, a, s);
			asmALURegReg(X86_OR, a, b);
		}
	}
	asmALUReg(X86_NOT, a);
	if (gJITC.current_opc & PPC_OPC_Rc) {
		// "NOT" doesn't update the flags
		asmALURegReg(X86_TEST, a, a);
		jitcMapFlagsDirty();
	}
	return flowContinue;
}
1805
1806 /*
1807 * orx OR
1808 * .603
1809 */
1810 void ppc_opc_orx()
1811 {
1812 int rS, rA, rB;
1813 PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
1814 gCPU.gpr[rA] = gCPU.gpr[rS] | gCPU.gpr[rB];
1815 if (gCPU.current_opc & PPC_OPC_Rc) {
1816 // update cr0 flags
1817 ppc_update_cr0(gCPU.gpr[rA]);
1818 }
1819 }
JITCFlow ppc_opc_gen_or()
{
	// JIT version of or (without the record bit, see ppc_opc_gen_orp).
	// Special-cases the register-move idioms: or rA,rS,rS is "mr" and
	// or rA,rA,rA is a no-op.
	int rS, rA, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, rS, rA, rB);
	if (rS == rB) {
		if (rS == rA) {
			/* nop */
		} else {
			/* mr rA, rS*/
			NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
			NativeReg a = jitcMapClientRegisterDirty(PPC_GPR(rA));
			asmALURegReg(X86_MOV, a, s);
		}
	} else {
		// flags only need clobbering when an OR is actually emitted
		if (rA == rS) {
			// or a, a, b
			NativeReg a = jitcGetClientRegisterDirty(PPC_GPR(rA));
			jitcClobberCarryAndFlags();
			NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
			asmALURegReg(X86_OR, a, b);
		} else if (rA == rB) {
			// or a, s, a
			NativeReg a = jitcGetClientRegisterDirty(PPC_GPR(rA));
			jitcClobberCarryAndFlags();
			NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
			asmALURegReg(X86_OR, a, s);
		} else {
			// or a, s, b
			NativeReg a = jitcMapClientRegisterDirty(PPC_GPR(rA));
			jitcClobberCarryAndFlags();
			NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
			NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
			asmALURegReg(X86_MOV, a, s);
			asmALURegReg(X86_OR, a, b);
		}
	}
	return flowContinue;
}
JITCFlow ppc_opc_gen_orp()
{
	// JIT version of or. (record form): like ppc_opc_gen_or but the
	// native flags are always produced for cr0 -- the mr cases need an
	// explicit TEST because MOV doesn't set flags.
	int rS, rA, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, rS, rA, rB);
	jitcClobberCarry();
	if (rS == rB) {
		if (rS == rA) {
			/* mr. rA, rA */
			NativeReg a = jitcGetClientRegister(PPC_GPR(rA));
			asmALURegReg(X86_TEST, a, a);
		} else {
			/* mr. rA, rS*/
			NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
			NativeReg a = jitcMapClientRegisterDirty(PPC_GPR(rA));
			asmALURegReg(X86_MOV, a, s);
			asmALURegReg(X86_TEST, a, a);
		}
	} else {
		// the OR itself sets the flags in these cases
		if (rA == rS) {
			// or a, a, b
			NativeReg a = jitcGetClientRegisterDirty(PPC_GPR(rA));
			NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
			asmALURegReg(X86_OR, a, b);
		} else if (rA == rB) {
			// or a, s, a
			NativeReg a = jitcGetClientRegisterDirty(PPC_GPR(rA));
			NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
			asmALURegReg(X86_OR, a, s);
		} else {
			// or a, s, b
			NativeReg a = jitcMapClientRegisterDirty(PPC_GPR(rA));
			NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
			NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
			asmALURegReg(X86_MOV, a, s);
			asmALURegReg(X86_OR, a, b);
		}
	}
	jitcMapFlagsDirty();
	return flowContinue;
}
1898 JITCFlow ppc_opc_gen_orx()
1899 {
1900 if (gJITC.current_opc & PPC_OPC_Rc) {
1901 return ppc_opc_gen_orp();
1902 } else {
1903 return ppc_opc_gen_or();
1904 }
1905 }
1906 /*
1907 * orcx OR with Complement
1908 * .604
1909 */
1910 void ppc_opc_orcx()
1911 {
1912 int rS, rA, rB;
1913 PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
1914 gCPU.gpr[rA] = gCPU.gpr[rS] | ~gCPU.gpr[rB];
1915 if (gCPU.current_opc & PPC_OPC_Rc) {
1916 // update cr0 flags
1917 ppc_update_cr0(gCPU.gpr[rA]);
1918 }
1919 }
JITCFlow ppc_opc_gen_orcx()
{
	// JIT version of orc[.]: rA <- rS | ~rB. rB must be complemented
	// first, so the aliasing cases decide where the NOT happens.
	int rS, rA, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, rS, rA, rB);
	if (gJITC.current_opc & PPC_OPC_Rc) {
		jitcClobberCarry();
	} else {
		jitcClobberCarryAndFlags();
	}
	if (rA == rS) {
		// rB must not be modified -> complement a scratch copy
		NativeReg a = jitcGetClientRegisterDirty(PPC_GPR(rA));
		NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
		NativeReg tmp = jitcAllocRegister();
		asmALURegReg(X86_MOV, tmp, b);
		asmALUReg(X86_NOT, tmp);
		asmALURegReg(X86_OR, a, tmp);
	} else if (rA == rB) {
		// rA holds rB's value, complement it in place then OR in rS
		NativeReg a = jitcGetClientRegisterDirty(PPC_GPR(rA));
		NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
		asmALUReg(X86_NOT, a);
		asmALURegReg(X86_OR, a, s);
	} else {
		NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
		NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
		NativeReg a = jitcMapClientRegisterDirty(PPC_GPR(rA));
		asmALURegReg(X86_MOV, a, b);
		asmALUReg(X86_NOT, a);
		asmALURegReg(X86_OR, a, s);
	}
	if (gJITC.current_opc & PPC_OPC_Rc) {
		// the final OR already produced the flags for cr0
		jitcMapFlagsDirty();
	}
	return flowContinue;
}
1954 /*
1955 * ori OR Immediate
1956 * .605
1957 */
1958 void ppc_opc_ori()
1959 {
1960 int rS, rA;
1961 uint32 imm;
1962 PPC_OPC_TEMPL_D_UImm(gCPU.current_opc, rS, rA, imm);
1963 gCPU.gpr[rA] = gCPU.gpr[rS] | imm;
1964 }
1965 JITCFlow ppc_opc_gen_ori()
1966 {
1967 int rS, rA;
1968 uint32 imm;
1969 PPC_OPC_TEMPL_D_UImm(gJITC.current_opc, rS, rA, imm);
1970 return ppc_opc_gen_ori_oris_xori_xoris(X86_OR, imm, rS, rA);
1971 }
1972 /*
1973 * oris OR Immediate Shifted
1974 * .606
1975 */
1976 void ppc_opc_oris()
1977 {
1978 int rS, rA;
1979 uint32 imm;
1980 PPC_OPC_TEMPL_D_Shift16(gCPU.current_opc, rS, rA, imm);
1981 gCPU.gpr[rA] = gCPU.gpr[rS] | imm;
1982 }
1983 JITCFlow ppc_opc_gen_oris()
1984 {
1985 int rS, rA;
1986 uint32 imm;
1987 PPC_OPC_TEMPL_D_Shift16(gJITC.current_opc, rS, rA, imm);
1988 return ppc_opc_gen_ori_oris_xori_xoris(X86_OR, imm, rS, rA);
1989 }
1990 /*
1991 * rlwimix Rotate Left Word Immediate then Mask Insert
1992 * .617
1993 */
1994 void ppc_opc_rlwimix()
1995 {
1996 int rS, rA, SH, MB, ME;
1997 PPC_OPC_TEMPL_M(gCPU.current_opc, rS, rA, SH, MB, ME);
1998 uint32 v = ppc_word_rotl(gCPU.gpr[rS], SH);
1999 uint32 mask = ppc_mask(MB, ME);
2000 gCPU.gpr[rA] = (v & mask) | (gCPU.gpr[rA] & ~mask);
2001 if (gCPU.current_opc & PPC_OPC_Rc) {
2002 // update cr0 flags
2003 ppc_update_cr0(gCPU.gpr[rA]);
2004 }
2005 }
// Emit the cheapest x86 code whose result, after the caller ANDs with
// `mask`, equals (r rotl SH) & mask:
//  - mask has bits both below and above position SH -> a real rotate is
//    needed (ROR 1 is a shorter encoding than ROL 31 for SH == 31);
//  - mask has only bits below SH -> only the wrapped-around high bits
//    survive the mask, so a logical SHR by (32-SH) suffices;
//  - mask has no bits below SH -> the wrapped bits are masked away,
//    so a plain SHL works.
// Note: the caller must still apply the AND; this only does the rotate.
static void ppc_opc_gen_rotl_and(NativeReg r, int SH, uint32 mask)
{
	SH &= 0x1f;
	if (SH) {
		if (mask & ((1<<SH)-1)) {
			if (mask & ~((1<<SH)-1)) {
				if (SH == 31) {
					asmShiftRegImm(X86_ROR, r, 1);
				} else {
					asmShiftRegImm(X86_ROL, r, SH);
				}
			} else {
				asmShiftRegImm(X86_SHR, r, 32-SH);
			}
		} else {
			asmShiftRegImm(X86_SHL, r, SH);
		}
	}
}
JITCFlow ppc_opc_gen_rlwimix()
{
	// JIT version of rlwimi[.]: rotate rS into a scratch register, then
	// merge it into rA under the mask (rA keeps its bits outside it).
	int rS, rA, SH, MB, ME;
	PPC_OPC_TEMPL_M(gJITC.current_opc, rS, rA, SH, MB, ME);
	if (gJITC.current_opc & PPC_OPC_Rc) {
		jitcClobberCarry();
	} else {
		jitcClobberCarryAndFlags();
	}
	uint32 mask = ppc_mask(MB, ME);
	NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
	NativeReg a = jitcGetClientRegisterDirty(PPC_GPR(rA));
	NativeReg tmp = jitcAllocRegister();
	asmALURegReg(X86_MOV, tmp, s);
	ppc_opc_gen_rotl_and(tmp, SH, mask);
	asmALURegImm(X86_AND, a, ~mask);
	asmALURegImm(X86_AND, tmp, mask);
	asmALURegReg(X86_OR, a, tmp);
	if (gJITC.current_opc & PPC_OPC_Rc) {
		// the final OR produced the flags for cr0
		jitcMapFlagsDirty();
	}
	return flowContinue;
}
2048 /*
2049 * rlwinmx Rotate Left Word Immediate then AND with Mask
2050 * .618
2051 */
2052 void ppc_opc_rlwinmx()
2053 {
2054 int rS, rA, SH;
2055 uint32 MB, ME;
2056 PPC_OPC_TEMPL_M(gCPU.current_opc, rS, rA, SH, MB, ME);
2057 uint32 v = ppc_word_rotl(gCPU.gpr[rS], SH);
2058 uint32 mask = ppc_mask(MB, ME);
2059 gCPU.gpr[rA] = v & mask;
2060 if (gCPU.current_opc & PPC_OPC_Rc) {
2061 // update cr0 flags
2062 ppc_update_cr0(gCPU.gpr[rA]);
2063 }
2064 }
JITCFlow ppc_opc_gen_rlwinmx()
{
	// JIT version of rlwinm[.]: rotate-by-immediate then AND with mask.
	int rS, rA, SH;
	uint32 MB, ME;
	PPC_OPC_TEMPL_M(gJITC.current_opc, rS, rA, SH, MB, ME);
	if (gJITC.current_opc & PPC_OPC_Rc) {
		jitcClobberCarry();
	} else {
		jitcClobberCarryAndFlags();
	}
	NativeReg a;
	if (rS == rA) {
		a = jitcGetClientRegisterDirty(PPC_GPR(rA));
	} else {
		NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
		a = jitcMapClientRegisterDirty(PPC_GPR(rA));
		asmALURegReg(X86_MOV, a, s);
	}
	uint32 mask = ppc_mask(MB, ME);
	ppc_opc_gen_rotl_and(a, SH, mask);
	asmALURegImm(X86_AND, a, mask);
	if (gJITC.current_opc & PPC_OPC_Rc) {
		/*
		 * Important side-node:
		 * ROL doesn't update the flags, so beware if you want to
		 * get rid of the above AND
		 */
		jitcMapFlagsDirty();
	}
	return flowContinue;
}
2096 /*
2097 * rlwnmx Rotate Left Word then AND with Mask
2098 * .620
2099 */
2100 void ppc_opc_rlwnmx()
2101 {
2102 int rS, rA, rB, MB, ME;
2103 PPC_OPC_TEMPL_M(gCPU.current_opc, rS, rA, rB, MB, ME);
2104 uint32 v = ppc_word_rotl(gCPU.gpr[rS], gCPU.gpr[rB]);
2105 uint32 mask = ppc_mask(MB, ME);
2106 gCPU.gpr[rA] = v & mask;
2107 if (gCPU.current_opc & PPC_OPC_Rc) {
2108 // update cr0 flags
2109 ppc_update_cr0(gCPU.gpr[rA]);
2110 }
2111 }
JITCFlow ppc_opc_gen_rlwnmx()
{
	// JIT version of rlwnm[.]: variable rotate (count in ECX, as x86
	// requires) followed by AND with the mask.
	int rS, rA, rB, MB, ME;
	PPC_OPC_TEMPL_M(gJITC.current_opc, rS, rA, rB, MB, ME);
	if (gJITC.current_opc & PPC_OPC_Rc) {
		jitcClobberCarry();
	} else {
		jitcClobberCarryAndFlags();
	}
	NativeReg a;
	jitcGetClientRegister(PPC_GPR(rB), NATIVE_REG | ECX);
	if (rS == rA) {
		a = jitcGetClientRegisterDirty(PPC_GPR(rA));
	} else {
		NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
		if (rA == rB) {
			// rA is mapped to ECX (the count) -- work in a scratch
			// register and remap rA afterwards
			a = jitcAllocRegister();
		} else {
			a = jitcMapClientRegisterDirty(PPC_GPR(rA));
		}
		asmALURegReg(X86_MOV, a, s);
	}
	asmShiftRegCL(X86_ROL, a);
	if (rA != rS && rA == rB) {
		jitcMapClientRegisterDirty(PPC_GPR(rA), NATIVE_REG | a);
	}
	uint32 mask = ppc_mask(MB, ME);
	asmALURegImm(X86_AND, a, mask);
	if (gJITC.current_opc & PPC_OPC_Rc) {
		/*
		 * Important side-node:
		 * ROL doesn't update the flags, so beware if you want to
		 * get rid of the above AND
		 */
		jitcMapFlagsDirty();
	}
	return flowContinue;
}
2150
2151 /*
2152 * slwx Shift Left Word
2153 * .625
2154 */
2155 void ppc_opc_slwx()
2156 {
2157 int rS, rA, rB;
2158 PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
2159 uint32 s = gCPU.gpr[rB] & 0x3f;
2160 if (s > 31) {
2161 gCPU.gpr[rA] = 0;
2162 } else {
2163 gCPU.gpr[rA] = gCPU.gpr[rS] << s;
2164 }
2165 if (gCPU.current_opc & PPC_OPC_Rc) {
2166 // update cr0 flags
2167 ppc_update_cr0(gCPU.gpr[rA]);
2168 }
2169 }
JITCFlow ppc_opc_gen_slwx()
{
	// JIT version of slw[.]: x86 SHL masks its count to 5 bits, but PPC
	// uses 6 bits, so bit 5 of the count (>= 32) must zero the result
	// explicitly before the SHL (which then shifts 0 by count&31).
	int rS, rA, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, rS, rA, rB);
	if (gJITC.current_opc & PPC_OPC_Rc) {
		jitcClobberCarry();
	} else {
		jitcClobberCarryAndFlags();
	}
	NativeReg a;
	NativeReg b = jitcGetClientRegister(PPC_GPR(rB), NATIVE_REG | ECX);
	asmALURegImm(X86_TEST, b, 0x20);
	if (rA == rS) {
		a = jitcGetClientRegisterDirty(PPC_GPR(rA));
	} else {
		NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
		if (rA == rB) {
			// rA is mapped to ECX (the count) -- use a scratch register
			a = jitcAllocRegister();
		} else {
			a = jitcMapClientRegisterDirty(PPC_GPR(rA));
		}
		asmALURegReg(X86_MOV, a, s);
	}
	NativeAddress fixup = asmJxxFixup(X86_Z);
	asmALURegImm(X86_MOV, a, 0);
	asmResolveFixup(fixup, asmHERE());
	asmShiftRegCL(X86_SHL, a);
	if (rA != rS && rA == rB) {
		jitcMapClientRegisterDirty(PPC_GPR(rA), NATIVE_REG | a);
	}
	if (gJITC.current_opc & PPC_OPC_Rc) {
		/*
		 * Welcome to the wonderful world of braindead
		 * processor design.
		 * (shl x, cl doesn't update the flags in case of cl==0)
		 */
		asmALURegReg(X86_TEST, a, a);
		jitcMapFlagsDirty();
	}
	return flowContinue;
}
2211 /*
2212 * srawx Shift Right Algebraic Word
2213 * .628
2214 */
void ppc_opc_srawx()
{
	// Interpreter for sraw[.]: arithmetic shift right by the 6-bit
	// count in rB. XER[CA] is set iff the value is negative and any
	// 1-bits are shifted out.
	int rS, rA, rB;
	PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
	uint32 SH = gCPU.gpr[rB] & 0x3f;
	gCPU.gpr[rA] = gCPU.gpr[rS];
	gCPU.xer_ca = 0;
	if (gCPU.gpr[rA] & 0x80000000) {
		// negative: shift bit by bit, re-inserting the sign and
		// remembering whether a 1 dropped out (carry).
		// For SH up to 63 this converges to 0xffffffff.
		uint32 ca = 0;
		for (uint i=0; i < SH; i++) {
			if (gCPU.gpr[rA] & 1) ca = 1;
			gCPU.gpr[rA] >>= 1;
			gCPU.gpr[rA] |= 0x80000000;
		}
		if (ca) gCPU.xer_ca = 1;
	} else {
		// non-negative values never set the carry
		if (SH > 31) {
			gCPU.gpr[rA] = 0;
		} else {
			gCPU.gpr[rA] >>= SH;
		}
	}
	if (gCPU.current_opc & PPC_OPC_Rc) {
		// update cr0 flags
		ppc_update_cr0(gCPU.gpr[rA]);
	}
}
JITCFlow ppc_opc_gen_srawx()
{
	// The native code-gen path below is disabled (#if 0); sraw currently
	// falls back to calling the interpreter (ppc_opc_srawx) and ends
	// the translated block.
#if 0
	int rS, rA, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, rS, rA, rB);
	if (!(gJITC.current_opc & PPC_OPC_Rc)) {
		jitcClobberFlags();
	}
	NativeReg a = REG_NO;
	jitcGetClientRegister(PPC_GPR(rB), NATIVE_REG | ECX);
	byte modrm[6];
	// dispatch on bit 5 of the count: SH > 31 needs special handling
	asmALURegImm(X86_TEST, ECX, 0x20);
	NativeAddress ecx_gt_1f = asmJxxFixup(X86_NZ);

	// 0 <= SH <= 31
	// t = sign-mask of the value ANDed with the value: non-zero low
	// bits of t below SH mean a 1 was shifted out of a negative value
	NativeReg t;
	if (rS != rA) {
		NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
		a = jitcMapClientRegisterDirty(PPC_GPR(rA));
		asmALURegReg(X86_MOV, a, s);
		t = jitcAllocRegister();
		asmALURegReg(X86_MOV, t, s);
	} else {
		a = jitcGetClientRegisterDirty(PPC_GPR(rA));
		t = jitcAllocRegister();
		asmALURegReg(X86_MOV, t, a);
	}
	asmShiftRegImm(X86_SAR, t, 31);
	asmALURegReg(X86_AND, t, a);
	asmShiftRegCL(X86_SAR, a);
	// table of (1<<SH)-1 masks, indexed by the shift count in ECX
	static int test_values[] = {
		0x00000000, 0x00000001, 0x00000003, 0x00000007,
		0x0000000f, 0x0000001f, 0x0000003f, 0x0000007f,
		0x000000ff, 0x000001ff, 0x000003ff, 0x000007ff,
		0x00000fff, 0x00001fff, 0x00003fff, 0x00007fff,
		0x0000ffff, 0x0001ffff, 0x0003ffff, 0x0007ffff,
		0x000fffff, 0x001fffff, 0x003fffff, 0x007fffff,
		0x00ffffff, 0x01ffffff, 0x03ffffff, 0x07ffffff,
		0x0fffffff, 0x1fffffff, 0x3fffffff, 0x7fffffff,
	};
	asmALURegMem(X86_TEST, t, modrm, x86_mem_sib(modrm, REG_NO, 4, ECX, (uint32)&test_values));
	asmSETMem(X86_NZ, modrm, x86_mem(modrm, REG_NO, (uint32)&gCPU.xer_ca));
	if (gJITC.current_opc & PPC_OPC_Rc) {
		asmALURegReg(X86_TEST, a, a);
	}
	NativeAddress end = asmJMPFixup();


	asmResolveFixup(ecx_gt_1f, asmHERE());
	// SH > 31: result is 0 or ~0 (sign fill); SAR by 31 then by 1 also
	// leaves the carry flag = sign bit for XER[CA]
	if (rS != rA) {
		NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
		a = jitcMapClientRegisterDirty(PPC_GPR(rA));
		asmALURegReg(X86_MOV, a, s);
	} else {
		a = jitcGetClientRegister(PPC_GPR(rA));
	}
	asmShiftRegImm(X86_SAR, a, 31);
	asmShiftRegImm(X86_SAR, a, 1);
	jitcMapCarryDirty();
	asmResolveFixup(end, asmHERE());

	if (gJITC.current_opc & PPC_OPC_Rc) {
		jitcMapFlagsDirty();
	}

	return flowContinue;
#endif
	ppc_opc_gen_interpret(ppc_opc_srawx);
	return flowEndBlock;
}
2313 /*
2314 * srawix Shift Right Algebraic Word Immediate
2315 * .629
2316 */
2317 void ppc_opc_srawix()
2318 {
2319 int rS, rA;
2320 uint32 SH;
2321 PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, SH);
2322 gCPU.gpr[rA] = gCPU.gpr[rS];
2323 gCPU.xer_ca = 0;
2324 if (gCPU.gpr[rA] & 0x80000000) {
2325 uint32 ca = 0;
2326 for (uint i=0; i < SH; i++) {
2327 if (gCPU.gpr[rA] & 1) ca = 1;
2328 gCPU.gpr[rA] >>= 1;
2329 gCPU.gpr[rA] |= 0x80000000;
2330 }
2331 if (ca) gCPU.xer_ca = 1;
2332 } else {
2333 gCPU.gpr[rA] >>= SH;
2334 }
2335 if (gCPU.current_opc & PPC_OPC_Rc) {
2336 // update cr0 flags
2337 ppc_update_cr0(gCPU.gpr[rA]);
2338 }
2339 }
/*
 *	Generate native code for srawix: rA = rS >>(arithmetic) SH,
 *	XER[CA] = source negative and 1-bits shifted out.
 */
JITCFlow ppc_opc_gen_srawix()
{
	int rS, rA;
	uint32 SH;
	PPC_OPC_TEMPL_X(gJITC.current_opc, rS, rA, SH);
	// for the Rc form the flags of the final TEST are mapped into
	// CR0 below, so only clobber them for the non-Rc form
	if (!(gJITC.current_opc & PPC_OPC_Rc)) {
		jitcClobberFlags();
	}
	NativeReg a = REG_NO;
	if (SH) {
		// t will hold the source value if negative, else 0;
		// its low SH bits are exactly the bits shifted out that
		// matter for CA (CA is only set for negative sources)
		NativeReg t;
		if (rS != rA) {
			NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
			a = jitcMapClientRegisterDirty(PPC_GPR(rA));
			asmALURegReg(X86_MOV, a, s);
			t = jitcAllocRegister();
			asmALURegReg(X86_MOV, t, s);
		} else {
			a = jitcGetClientRegisterDirty(PPC_GPR(rA));
			t = jitcAllocRegister();
			asmALURegReg(X86_MOV, t, a);
		}
		asmShiftRegImm(X86_SAR, t, 31);	// t = (source < 0) ? ~0 : 0
		asmALURegReg(X86_AND, t, a);	// t = negative ? source : 0
		asmShiftRegImm(X86_SAR, a, SH);	// arithmetic shift = result
		// CA := any of the discarded low SH bits set (negative only)
		asmALURegImm(X86_TEST, t, (1<<SH)-1);
		byte modrm[6];
		asmSETMem(X86_NZ, modrm, x86_mem(modrm, REG_NO, (uint32)&gCPU.xer_ca));
	} else {
		// SH == 0: value is copied unchanged and CA is cleared
		if (rS != rA) {
			NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
			a = jitcMapClientRegisterDirty(PPC_GPR(rA));
			asmALURegReg(X86_MOV, a, s);
		} else if (gJITC.current_opc & PPC_OPC_Rc) {
			// only needed to TEST it below; a stays REG_NO otherwise
			a = jitcGetClientRegister(PPC_GPR(rA));
		}
		byte modrm[6];
		asmALUMemImm8(X86_MOV, modrm, x86_mem(modrm, REG_NO, (uint32)&gCPU.xer_ca), 0);
	}
	if (gJITC.current_opc & PPC_OPC_Rc) {
		// SAR set flags, but the SETcc path may have changed them;
		// re-establish SF/ZF from the result before mapping CR0
		asmALURegReg(X86_TEST, a, a);
		jitcMapFlagsDirty();
	}
	return flowContinue;
}
2385 /*
2386 * srwx Shift Right Word
2387 * .631
2388 */
2389 void ppc_opc_srwx()
2390 {
2391 int rS, rA, rB;
2392 PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
2393 uint32 v = gCPU.gpr[rB] & 0x3f;
2394 if (v > 31) {
2395 gCPU.gpr[rA] = 0;
2396 } else {
2397 gCPU.gpr[rA] = gCPU.gpr[rS] >> v;
2398 }
2399 if (gCPU.current_opc & PPC_OPC_Rc) {
2400 // update cr0 flags
2401 ppc_update_cr0(gCPU.gpr[rA]);
2402 }
2403 }
/*
 *	Generate native code for srwx: rA = rS >> (rB & 0x3f),
 *	result forced to 0 when rB bit 5 is set.
 */
JITCFlow ppc_opc_gen_srwx()
{
	int rS, rA, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, rS, rA, rB);
	// the final TEST supplies the flags for the Rc form
	if (gJITC.current_opc & PPC_OPC_Rc) {
		jitcClobberCarry();
	} else {
		jitcClobberCarryAndFlags();
	}
	NativeReg a;
	// the variable shift count must live in CL (ECX) for shr a, cl
	NativeReg b = jitcGetClientRegister(PPC_GPR(rB), NATIVE_REG | ECX);
	// bit 5 of rB decides whether the result is forced to zero
	asmALURegImm(X86_TEST, b, 0x20);
	if (rA == rS) {
		a = jitcGetClientRegisterDirty(PPC_GPR(rA));
	} else {
		NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
		if (rA == rB) {
			// rA currently maps to ECX (the count); work in a
			// temporary and remap rA to it afterwards
			a = jitcAllocRegister();
		} else {
			a = jitcMapClientRegisterDirty(PPC_GPR(rA));
		}
		asmALURegReg(X86_MOV, a, s);
	}
	NativeAddress fixup = asmJxxFixup(X86_Z);
	asmALURegImm(X86_MOV, a, 0);	// shift count >= 32: result is 0
	asmResolveFixup(fixup, asmHERE());
	asmShiftRegCL(X86_SHR, a);	// x86 shifts by cl & 0x1f
	if (rA != rS && rA == rB) {
		jitcMapClientRegisterDirty(PPC_GPR(rA), NATIVE_REG | a);
	}
	if (gJITC.current_opc & PPC_OPC_Rc) {
		/*
		 * Welcome to the wonderful world of braindead
		 * processor design.
		 * (shr x, cl doesn't update the flags in case of cl==0)
		 */
		asmALURegReg(X86_TEST, a, a);
		jitcMapFlagsDirty();
	}
	return flowContinue;
}
2447
2448 /*
2449 * subfx Subtract From
2450 * .666
2451 */
2452 void ppc_opc_subfx()
2453 {
2454 int rD, rA, rB;
2455 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
2456 gCPU.gpr[rD] = ~gCPU.gpr[rA] + gCPU.gpr[rB] + 1;
2457 if (gCPU.current_opc & PPC_OPC_Rc) {
2458 // update cr0 flags
2459 ppc_update_cr0(gCPU.gpr[rD]);
2460 }
2461 }
2462 JITCFlow ppc_opc_gen_subfx()
2463 {
2464 int rD, rA, rB;
2465 PPC_OPC_TEMPL_XO(gJITC.current_opc, rD, rA, rB);
2466 if (gJITC.current_opc & PPC_OPC_Rc) {
2467 jitcClobberCarry();
2468 } else {
2469 jitcClobberCarryAndFlags();
2470 }
2471 NativeReg a = jitcGetClientRegister(PPC_GPR(rA));
2472 NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
2473 if (rD == rA) {
2474 if (rD == rB) {
2475 asmALURegImm(X86_MOV, a, 0);
2476 } else {
2477 // subf rA, rA, rB (a = b - a)
2478 asmALUReg(X86_NEG, a);
2479 asmALURegReg(X86_ADD, a, b);
2480 }
2481 jitcDirtyRegister(a);
2482 } else if (rD == rB) {
2483 // subf rB, rA, rB (b = b - a)
2484 asmALURegReg(X86_SUB, b, a);
2485 jitcDirtyRegister(b);
2486 } else {
2487 // subf rD, rA, rB (d = b - a)
2488 NativeReg d = jitcMapClientRegisterDirty(PPC_GPR(rD));
2489 asmALURegReg(X86_MOV, d, b);
2490 asmALURegReg(X86_SUB, d, a);
2491 }
2492 if (gJITC.current_opc & PPC_OPC_Rc) {
2493 jitcMapFlagsDirty();
2494 }
2495 return flowContinue;
2496 }
2497 /*
2498 * subfox Subtract From with Overflow
2499 * .666
2500 */
2501 void ppc_opc_subfox()
2502 {
2503 int rD, rA, rB;
2504 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
2505 gCPU.gpr[rD] = ~gCPU.gpr[rA] + gCPU.gpr[rB] + 1;
2506 if (gCPU.current_opc & PPC_OPC_Rc) {
2507 // update cr0 flags
2508 ppc_update_cr0(gCPU.gpr[rD]);
2509 }
2510 // update XER flags
2511 PPC_ALU_ERR("subfox unimplemented\n");
2512 }
2513 /*
2514 * subfcx Subtract From Carrying
2515 * .667
2516 */
2517 void ppc_opc_subfcx()
2518 {
2519 int rD, rA, rB;
2520 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
2521 uint32 a = gCPU.gpr[rA];
2522 uint32 b = gCPU.gpr[rB];
2523 gCPU.gpr[rD] = ~a + b + 1;
2524 gCPU.xer_ca = ppc_carry_3(~a, b, 1);
2525 if (gCPU.current_opc & PPC_OPC_Rc) {
2526 // update cr0 flags
2527 ppc_update_cr0(gCPU.gpr[rD]);
2528 }
2529 }
/*
 *	Generate native code for subfcx: rD = rB - rA with XER[CA].
 */
JITCFlow ppc_opc_gen_subfcx()
{
	int rD, rA, rB;
	PPC_OPC_TEMPL_XO(gJITC.current_opc, rD, rA, rB);
	// the SUB below supplies the flags for the Rc form
	if (!(gJITC.current_opc & PPC_OPC_Rc)) {
		jitcClobberFlags();
	}
	NativeReg a = jitcGetClientRegister(PPC_GPR(rA));
	NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
	if (rA != rD) {
		if (rD == rB) {
			// b = b - a
			asmALURegReg(X86_SUB, b, a);
			jitcDirtyRegister(b);
		} else {
			// d = b - a
			NativeReg d = jitcMapClientRegisterDirty(PPC_GPR(rD));
			asmALURegReg(X86_MOV, d, b);
			asmALURegReg(X86_SUB, d, a);
		}
	} else {
		// a = b - a: use a temporary to keep the SUB operand
		// order (minuend must be the destination)
		NativeReg tmp = jitcAllocRegister();
		asmALURegReg(X86_MOV, tmp, b);
		asmALURegReg(X86_SUB, tmp, a);
		jitcMapClientRegisterDirty(PPC_GPR(rA), NATIVE_REG | tmp);
	}
	// x86 CF after SUB is the borrow; PPC CA is the inverse of the
	// borrow (carry of ~a + b + 1), hence the complement.
	// CMC only touches CF, so the Rc flags stay valid.
	asmSimple(X86_CMC);
	jitcMapCarryDirty();
	if (gJITC.current_opc & PPC_OPC_Rc) {
		jitcMapFlagsDirty();
	}
	return flowContinue;
}
2564 /*
2565 * subfcox Subtract From Carrying with Overflow
2566 * .667
2567 */
2568 void ppc_opc_subfcox()
2569 {
2570 int rD, rA, rB;
2571 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
2572 uint32 a = gCPU.gpr[rA];
2573 uint32 b = gCPU.gpr[rB];
2574 gCPU.gpr[rD] = ~a + b + 1;
2575 gCPU.xer_ca = ppc_carry_3(~a, b, 1);
2576 if (gCPU.current_opc & PPC_OPC_Rc) {
2577 // update cr0 flags
2578 ppc_update_cr0(gCPU.gpr[rD]);
2579 }
2580 // update XER flags
2581 PPC_ALU_ERR("subfcox unimplemented\n");
2582 }
2583 /*
2584 * subfex Subtract From Extended
2585 * .668
2586 */
2587 void ppc_opc_subfex()
2588 {
2589 int rD, rA, rB;
2590 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
2591 uint32 a = gCPU.gpr[rA];
2592 uint32 b = gCPU.gpr[rB];
2593 uint32 ca = (gCPU.xer_ca);
2594 gCPU.gpr[rD] = ~a + b + ca;
2595 gCPU.xer_ca = ppc_carry_3(~a, b, ca);
2596 if (gCPU.current_opc & PPC_OPC_Rc) {
2597 // update cr0 flags
2598 ppc_update_cr0(gCPU.gpr[rD]);
2599 }
2600 }
/*
 *	Generate native code for subfex: rD = ~rA + rB + CA.
 */
JITCFlow ppc_opc_gen_subfex()
{
	int rD, rA, rB;
	PPC_OPC_TEMPL_XO(gJITC.current_opc, rD, rA, rB);
	// the SBB below supplies the flags for the Rc form
	if (!(gJITC.current_opc & PPC_OPC_Rc)) {
		jitcClobberFlags();
	}
	NativeReg a = jitcGetClientRegister(PPC_GPR(rA));
	NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
	// load PPC CA into x86 CF, then invert: SBB subtracts the
	// borrow, and PPC CA is the complement of the x86 borrow
	jitcGetClientCarry();
	asmSimple(X86_CMC);
	if (rA != rD) {
		if (rD == rB) {
			// b = b - a - borrow
			asmALURegReg(X86_SBB, b, a);
			jitcDirtyRegister(b);
		} else {
			// d = b - a - borrow
			NativeReg d = jitcMapClientRegisterDirty(PPC_GPR(rD));
			asmALURegReg(X86_MOV, d, b);
			asmALURegReg(X86_SBB, d, a);
		}
	} else {
		// a = b - a - borrow: temporary keeps operand order right
		NativeReg tmp = jitcAllocRegister();
		asmALURegReg(X86_MOV, tmp, b);
		asmALURegReg(X86_SBB, tmp, a);
		jitcMapClientRegisterDirty(PPC_GPR(rA), NATIVE_REG | tmp);
	}
	// convert the resulting x86 borrow back into PPC CA;
	// CMC only touches CF, so the Rc flags stay valid
	asmSimple(X86_CMC);
	jitcMapCarryDirty();
	if (gJITC.current_opc & PPC_OPC_Rc) {
		jitcMapFlagsDirty();
	}
	return flowContinue;
}
2637 /*
2638 * subfeox Subtract From Extended with Overflow
2639 * .668
2640 */
2641 void ppc_opc_subfeox()
2642 {
2643 int rD, rA, rB;
2644 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
2645 uint32 a = gCPU.gpr[rA];
2646 uint32 b = gCPU.gpr[rB];
2647 uint32 ca = gCPU.xer_ca;
2648 gCPU.gpr[rD] = ~a + b + ca;
2649 gCPU.xer_ca = (ppc_carry_3(~a, b, ca));
2650 if (gCPU.current_opc & PPC_OPC_Rc) {
2651 // update cr0 flags
2652 ppc_update_cr0(gCPU.gpr[rD]);
2653 }
2654 // update XER flags
2655 PPC_ALU_ERR("subfeox unimplemented\n");
2656 }
2657 /*
2658 * subfic Subtract From Immediate Carrying
2659 * .669
2660 */
2661 void ppc_opc_subfic()
2662 {
2663 int rD, rA;
2664 uint32 imm;
2665 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, rD, rA, imm);
2666 uint32 a = gCPU.gpr[rA];
2667 gCPU.gpr[rD] = ~a + imm + 1;
2668 gCPU.xer_ca = (ppc_carry_3(~a, imm, 1));
2669 }
/*
 *	Generate native code for subfic: rD = ~rA + imm + 1 with XER[CA].
 */
JITCFlow ppc_opc_gen_subfic()
{
	int rD, rA;
	uint32 imm;
	PPC_OPC_TEMPL_D_SImm(gJITC.current_opc, rD, rA, imm);
	// subfic has no Rc form, so the flags are never needed
	jitcClobberFlags();
	NativeReg d;
	if (rA == rD) {
		d = jitcGetClientRegisterDirty(PPC_GPR(rD));
	} else {
		NativeReg a = jitcGetClientRegister(PPC_GPR(rA));
		d = jitcMapClientRegisterDirty(PPC_GPR(rD));
		asmALURegReg(X86_MOV, d, a);
	}
	// rD = ~rA + (imm + 1); CF after the ADD is exactly PPC CA
	asmALUReg(X86_NOT, d);
	if (imm == 0xffffffff) {
		// imm+1 wraps to 0: ADD d, 0 would clear CF, but
		// ~a + 0x100000000 always carries, so force CF instead
		asmSimple(X86_STC);
	} else {
		asmALURegImm(X86_ADD, d, imm+1);
	}
	jitcMapCarryDirty();
	return flowContinue;
}
2693 /*
2694 * subfmex Subtract From Minus One Extended
2695 * .670
2696 */
2697 void ppc_opc_subfmex()
2698 {
2699 int rD, rA, rB;
2700 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
2701 PPC_OPC_ASSERT(rB == 0);
2702 uint32 a = gCPU.gpr[rA];
2703 uint32 ca = gCPU.xer_ca;
2704 gCPU.gpr[rD] = ~a + ca + 0xffffffff;
2705 gCPU.xer_ca = ((a!=0xffffffff) || ca);
2706 if (gCPU.current_opc & PPC_OPC_Rc) {
2707 // update cr0 flags
2708 ppc_update_cr0(gCPU.gpr[rD]);
2709 }
2710 }
JITCFlow ppc_opc_gen_subfmex()
{
	// no native translation yet: fall back to the interpreter and
	// end the translated block
	ppc_opc_gen_interpret(ppc_opc_subfmex);
	return flowEndBlock;
}
2716 /*
2717 * subfmeox Subtract From Minus One Extended with Overflow
2718 * .670
2719 */
2720 void ppc_opc_subfmeox()
2721 {
2722 int rD, rA, rB;
2723 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
2724 PPC_OPC_ASSERT(rB == 0);
2725 uint32 a = gCPU.gpr[rA];
2726 uint32 ca = gCPU.xer_ca;
2727 gCPU.gpr[rD] = ~a + ca + 0xffffffff;
2728 gCPU.xer_ca = ((a!=0xffffffff) || ca);
2729 if (gCPU.current_opc & PPC_OPC_Rc) {
2730 // update cr0 flags
2731 ppc_update_cr0(gCPU.gpr[rD]);
2732 }
2733 // update XER flags
2734 PPC_ALU_ERR("subfmeox unimplemented\n");
2735 }
2736 /*
2737 * subfzex Subtract From Zero Extended
2738 * .671
2739 */
2740 void ppc_opc_subfzex()
2741 {
2742 int rD, rA, rB;
2743 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
2744 PPC_OPC_ASSERT(rB == 0);
2745 uint32 a = gCPU.gpr[rA];
2746 uint32 ca = gCPU.xer_ca;
2747 gCPU.gpr[rD] = ~a + ca;
2748 gCPU.xer_ca = (!a && ca);
2749 if (gCPU.current_opc & PPC_OPC_Rc) {
2750 // update cr0 flags
2751 ppc_update_cr0(gCPU.gpr[rD]);
2752 }
2753 }
/*
 *	Generate native code for subfzex: rD = ~rA + CA.
 */
JITCFlow ppc_opc_gen_subfzex()
{
	int rD, rA, rB;
	PPC_OPC_TEMPL_XO(gJITC.current_opc, rD, rA, rB);
	// the ADC below supplies the flags for the Rc form
	if (!(gJITC.current_opc & PPC_OPC_Rc)) {
		jitcClobberFlags();
	}
	NativeReg a = jitcGetClientRegister(PPC_GPR(rA));
	if (rD != rA) {
		NativeReg d = jitcMapClientRegisterDirty(PPC_GPR(rD));
		asmALURegReg(X86_MOV, d, a);
		a = d;
	}
	// load PPC CA into x86 CF; NOT doesn't touch CF, so
	// ADC a, 0 then computes ~rA + CA and yields the new carry
	jitcGetClientCarry();
	asmALUReg(X86_NOT, a);
	asmALURegImm(X86_ADC, a, 0);
	jitcDirtyRegister(a);
	jitcMapCarryDirty();
	if (gJITC.current_opc & PPC_OPC_Rc) {
		jitcMapFlagsDirty();
	}
	return flowContinue;
}
2777 /*
2778 * subfzeox Subtract From Zero Extended with Overflow
2779 * .671
2780 */
2781 void ppc_opc_subfzeox()
2782 {
2783 int rD, rA, rB;
2784 PPC_OPC_TEMPL_XO(gCPU.current_opc, rD, rA, rB);
2785 PPC_OPC_ASSERT(rB == 0);
2786 uint32 a = gCPU.gpr[rA];
2787 uint32 ca = gCPU.xer_ca;
2788 gCPU.gpr[rD] = ~a + ca;
2789 gCPU.xer_ca = (!a && ca);
2790 if (gCPU.current_opc & PPC_OPC_Rc) {
2791 // update cr0 flags
2792 ppc_update_cr0(gCPU.gpr[rD]);
2793 }
2794 // update XER flags
2795 PPC_ALU_ERR("subfzeox unimplemented\n");
2796 }
2797
2798 /*
2799 * xorx XOR
2800 * .680
2801 */
2802 void ppc_opc_xorx()
2803 {
2804 int rS, rA, rB;
2805 PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
2806 gCPU.gpr[rA] = gCPU.gpr[rS] ^ gCPU.gpr[rB];
2807 if (gCPU.current_opc & PPC_OPC_Rc) {
2808 // update cr0 flags
2809 ppc_update_cr0(gCPU.gpr[rA]);
2810 }
2811 }
/*
 *	Generate native code for xorx: rA = rS ^ rB.
 */
JITCFlow ppc_opc_gen_xorx()
{
	int rS, rA, rB;
	PPC_OPC_TEMPL_X(gJITC.current_opc, rS, rA, rB);
	// the x86 XOR sets SF/ZF for the Rc form; CF is never needed
	if (gJITC.current_opc & PPC_OPC_Rc) {
		jitcClobberCarry();
	} else {
		jitcClobberCarryAndFlags();
	}
	if (rA == rS) {
		// rA ^= rB in place
		NativeReg a = jitcGetClientRegisterDirty(PPC_GPR(rA));
		NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
		asmALURegReg(X86_XOR, a, b);
	} else if (rA == rB) {
		// rA ^= rS in place (XOR is commutative)
		NativeReg a = jitcGetClientRegisterDirty(PPC_GPR(rA));
		NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
		asmALURegReg(X86_XOR, a, s);
	} else {
		// all distinct: rA = rS, then rA ^= rB
		NativeReg s = jitcGetClientRegister(PPC_GPR(rS));
		NativeReg b = jitcGetClientRegister(PPC_GPR(rB));
		NativeReg a = jitcMapClientRegisterDirty(PPC_GPR(rA));
		asmALURegReg(X86_MOV, a, s);
		asmALURegReg(X86_XOR, a, b);
	}
	if (gJITC.current_opc & PPC_OPC_Rc) {
		// map the flags left by the XOR into CR0
		jitcMapFlagsDirty();
	}
	return flowContinue;
}
2841 /*
2842 * xori XOR Immediate
2843 * .681
2844 */
2845 void ppc_opc_xori()
2846 {
2847 int rS, rA;
2848 uint32 imm;
2849 PPC_OPC_TEMPL_D_UImm(gCPU.current_opc, rS, rA, imm);
2850 gCPU.gpr[rA] = gCPU.gpr[rS] ^ imm;
2851 }
2852 JITCFlow ppc_opc_gen_xori()
2853 {
2854 int rS, rA;
2855 uint32 imm;
2856 PPC_OPC_TEMPL_D_UImm(gJITC.current_opc, rS, rA, imm);
2857 return ppc_opc_gen_ori_oris_xori_xoris(X86_XOR, imm, rS, rA);
2858 }
2859 /*
2860 * xoris XOR Immediate Shifted
2861 * .682
2862 */
2863 void ppc_opc_xoris()
2864 {
2865 int rS, rA;
2866 uint32 imm;
2867 PPC_OPC_TEMPL_D_Shift16(gCPU.current_opc, rS, rA, imm);
2868 gCPU.gpr[rA] = gCPU.gpr[rS] ^ imm;
2869 }
2870 JITCFlow ppc_opc_gen_xoris()
2871 {
2872 int rS, rA;
2873 uint32 imm;
2874 PPC_OPC_TEMPL_D_Shift16(gJITC.current_opc, rS, rA, imm);
2875 return ppc_opc_gen_ori_oris_xori_xoris(X86_XOR, imm, rS, rA);
2876 }

  ViewVC Help
Powered by ViewVC 1.1.26