This is a repository of my old source code, which isn't updated any more. Go to git.rot13.org for current projects!

Contents of /src/cpu/cpu_generic/ppc_mmu.cc



Revision 1
Wed Sep 5 17:11:21 2007 UTC by dpavlin
File size: 49031 byte(s)
import upstream CVS
1 /*
2 * PearPC
3 * ppc_mmu.cc
4 *
5 * Copyright (C) 2003, 2004 Sebastian Biallas (sb@biallas.net)
6 * Portions Copyright (C) 2004 Daniel Foesch (dfoesch@cs.nmsu.edu)
7 * Portions Copyright (C) 2004 Apple Computer, Inc.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23 /* Pages marked: v.???
24 * From: IBM PowerPC MicroProcessor Family: Altivec(tm) Technology...
25 * Programming Environments Manual
26 */
27
28 #include <cstdlib>
29 #include <cstring>
30 #include "system/arch/sysendian.h"
31 #include "tools/snprintf.h"
32 #include "debug/tracers.h"
33 #include "io/prom/prom.h"
34 #include "io/io.h"
35 #include "ppc_cpu.h"
36 #include "ppc_fpu.h"
37 #include "ppc_vec.h"
38 #include "ppc_mmu.h"
39 #include "ppc_exc.h"
40 #include "ppc_tools.h"
41
42 byte *gMemory = NULL;
43 uint32 gMemorySize;
44
45 #undef TLB
46
47 static int ppc_pte_protection[] = {
48 // read(0)/write(1) key pp
49
50 // read
51 1, // r/w
52 1, // r/w
53 1, // r/w
54 1, // r
55 0, // -
56 1, // r
57 1, // r/w
58 1, // r
59
60 // write
61 1, // r/w
62 1, // r/w
63 1, // r/w
64 0, // r
65 0, // -
66 0, // r
67 1, // r/w
68 0, // r
69 };
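/*
 * How the table above is indexed (a sketch, derived from the lookups in
 * ppc_effective_to_physical below):
 *
 *     ppc_pte_protection[(write ? 8 : 0) + (key ? 4 : 0) + pp]
 *
 * where "write" is set for store accesses, "key" is the Ks or Kp bit of
 * the segment register (chosen by MSR[PR]) and "pp" are the two PP bits
 * of the PTE. Hypothetical example: a store (8) under key=1 (4) to a
 * page with pp=01 selects entry 13, which is 0, so the access is denied
 * and a DSI protection exception is raised.
 */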
70
71 inline int FASTCALL ppc_effective_to_physical(uint32 addr, int flags, uint32 &result)
72 {
73 if (flags & PPC_MMU_CODE) {
74 if (!(gCPU.msr & MSR_IR)) {
75 result = addr;
76 return PPC_MMU_OK;
77 }
78 /*
79 * BAT translation .329
80 */
81
82 uint32 batu = (gCPU.msr & MSR_PR ? BATU_Vp : BATU_Vs);
83
84 for (int i=0; i<4; i++) {
85 uint32 bl17 = gCPU.ibat_bl17[i];
86 uint32 addr2 = addr & (bl17 | 0xf001ffff);
87 if (BATU_BEPI(addr2) == BATU_BEPI(gCPU.ibatu[i])) {
88 // bat applies to this address
89 if (gCPU.ibatu[i] & batu) {
90 // bat entry valid
91 uint32 offset = BAT_EA_OFFSET(addr);
92 uint32 page = BAT_EA_11(addr);
93 page &= ~bl17;
94 page |= BATL_BRPN(gCPU.ibatl[i]);
95 // fixme: check access rights
96 result = page | offset;
97 return PPC_MMU_OK;
98 }
99 }
100 }
101 } else {
102 if (!(gCPU.msr & MSR_DR)) {
103 result = addr;
104 return PPC_MMU_OK;
105 }
106 /*
107 * BAT translation .329
108 */
109
110 uint32 batu = (gCPU.msr & MSR_PR ? BATU_Vp : BATU_Vs);
111
112 for (int i=0; i<4; i++) {
113 uint32 bl17 = gCPU.dbat_bl17[i];
114 uint32 addr2 = addr & (bl17 | 0xf001ffff);
115 if (BATU_BEPI(addr2) == BATU_BEPI(gCPU.dbatu[i])) {
116 // bat applies to this address
117 if (gCPU.dbatu[i] & batu) {
118 // bat entry valid
119 uint32 offset = BAT_EA_OFFSET(addr);
120 uint32 page = BAT_EA_11(addr);
121 page &= ~bl17;
122 page |= BATL_BRPN(gCPU.dbatl[i]);
123 // fixme: check access rights
124 result = page | offset;
125 return PPC_MMU_OK;
126 }
127 }
128 }
129 }
130
131 /*
132 * Address translation with segment register
133 */
134 uint32 sr = gCPU.sr[EA_SR(addr)];
135
136 if (sr & SR_T) {
137 // woea
138 // FIXME: implement me
139 PPC_MMU_ERR("sr & T\n");
140 } else {
141 #ifdef TLB
142 for (int i=0; i<4; i++) {
143 if ((addr & ~0xfff) == (gCPU.tlb_va[i])) {
144 gCPU.tlb_last = i;
145 // ht_printf("TLB: %d: %08x -> %08x\n", i, addr, gCPU.tlb_pa[i] | (addr & 0xfff));
146 result = gCPU.tlb_pa[i] | (addr & 0xfff);
147 return PPC_MMU_OK;
148 }
149 }
150 #endif
151 // page address translation
152 if ((flags & PPC_MMU_CODE) && (sr & SR_N)) {
153 // segment isn't executable
154 if (!(flags & PPC_MMU_NO_EXC)) {
155 ppc_exception(PPC_EXC_ISI, PPC_EXC_SRR1_GUARD);
156 return PPC_MMU_EXC;
157 }
158 return PPC_MMU_FATAL;
159 }
160 uint32 offset = EA_Offset(addr); // 12 bit
161 uint32 page_index = EA_PageIndex(addr); // 16 bit
162 uint32 VSID = SR_VSID(sr); // 24 bit
163 uint32 api = EA_API(addr); // 6 bit (part of page_index)
164 // VSID.page_index = Virtual Page Number (VPN)
165
166 // Hashfunction no 1 "xor" .360
167 uint32 hash1 = (VSID ^ page_index);
168 uint32 pteg_addr = ((hash1 & gCPU.pagetable_hashmask)<<6) | gCPU.pagetable_base;
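// A sketch of the computation above with hypothetical values: with the
// minimum hash mask 0x3ff and pagetable_base 0x00300000, hash1 selects
// one of 1024 primary PTEGs; (hash1 & 0x3ff) << 6 is the byte offset of
// that 64-byte PTEG, so hash1 = 0x123 would give pteg_addr =
// 0x00300000 | (0x123 << 6) = 0x003048c0.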
169 int key;
170 if (gCPU.msr & MSR_PR) {
171 key = (sr & SR_Kp) ? 4 : 0;
172 } else {
173 key = (sr & SR_Ks) ? 4 : 0;
174 }
175
176 uint32 pte_protection_offset = ((flags&PPC_MMU_WRITE) ? 8:0) + key;
177
178 for (int i=0; i<8; i++) {
179 uint32 pte;
180 if (ppc_read_physical_word(pteg_addr, pte)) {
181 if (!(flags & PPC_MMU_NO_EXC)) {
182 PPC_MMU_ERR("read physical in address translate failed\n");
183 return PPC_MMU_EXC;
184 }
185 return PPC_MMU_FATAL;
186 }
187 if ((pte & PTE1_V) && (!(pte & PTE1_H))) {
188 if (VSID == PTE1_VSID(pte) && (api == PTE1_API(pte))) {
189 // page found
190 if (ppc_read_physical_word(pteg_addr+4, pte)) {
191 if (!(flags & PPC_MMU_NO_EXC)) {
192 PPC_MMU_ERR("read physical in address translate failed\n");
193 return PPC_MMU_EXC;
194 }
195 return PPC_MMU_FATAL;
196 }
197 // check accessmode .346
198 if (!ppc_pte_protection[pte_protection_offset + PTE2_PP(pte)]) {
199 if (!(flags & PPC_MMU_NO_EXC)) {
200 if (flags & PPC_MMU_CODE) {
201 PPC_MMU_WARN("correct impl? code + read protection\n");
202 ppc_exception(PPC_EXC_ISI, PPC_EXC_SRR1_PROT, addr);
203 return PPC_MMU_EXC;
204 } else {
205 if (flags & PPC_MMU_WRITE) {
206 ppc_exception(PPC_EXC_DSI, PPC_EXC_DSISR_PROT | PPC_EXC_DSISR_STORE, addr);
207 } else {
208 ppc_exception(PPC_EXC_DSI, PPC_EXC_DSISR_PROT, addr);
209 }
210 return PPC_MMU_EXC;
211 }
212 }
213 return PPC_MMU_FATAL;
214 }
215 // ok..
216 uint32 pap = PTE2_RPN(pte);
217 result = pap | offset;
218 #ifdef TLB
219 gCPU.tlb_last++;
220 gCPU.tlb_last &= 3;
221 gCPU.tlb_pa[gCPU.tlb_last] = pap;
222 gCPU.tlb_va[gCPU.tlb_last] = addr & ~0xfff;
223 // ht_printf("TLB: STORE %d: %08x -> %08x\n", gCPU.tlb_last, addr, pap);
224 #endif
225 // update access bits
226 if (flags & PPC_MMU_WRITE) {
227 pte |= PTE2_C | PTE2_R;
228 } else {
229 pte |= PTE2_R;
230 }
231 ppc_write_physical_word(pteg_addr+4, pte);
232 return PPC_MMU_OK;
233 }
234 }
235 pteg_addr+=8;
236 }
237
238 // Hashfunction no 2 "not" .360
239 hash1 = ~hash1;
240 pteg_addr = ((hash1 & gCPU.pagetable_hashmask)<<6) | gCPU.pagetable_base;
241 for (int i=0; i<8; i++) {
242 uint32 pte;
243 if (ppc_read_physical_word(pteg_addr, pte)) {
244 if (!(flags & PPC_MMU_NO_EXC)) {
245 PPC_MMU_ERR("read physical in address translate failed\n");
246 return PPC_MMU_EXC;
247 }
248 return PPC_MMU_FATAL;
249 }
250 if ((pte & PTE1_V) && (pte & PTE1_H)) {
251 if (VSID == PTE1_VSID(pte) && (api == PTE1_API(pte))) {
252 // page found
253 if (ppc_read_physical_word(pteg_addr+4, pte)) {
254 if (!(flags & PPC_MMU_NO_EXC)) {
255 PPC_MMU_ERR("read physical in address translate failed\n");
256 return PPC_MMU_EXC;
257 }
258 return PPC_MMU_FATAL;
259 }
260 // check accessmode
261 int key;
262 if (gCPU.msr & MSR_PR) {
263 key = (sr & SR_Kp) ? 4 : 0;
264 } else {
265 key = (sr & SR_Ks) ? 4 : 0;
266 }
267 if (!ppc_pte_protection[((flags&PPC_MMU_WRITE)?8:0) + key + PTE2_PP(pte)]) {
268 if (!(flags & PPC_MMU_NO_EXC)) {
269 if (flags & PPC_MMU_CODE) {
270 PPC_MMU_WARN("correct impl? code + read protection\n");
271 ppc_exception(PPC_EXC_ISI, PPC_EXC_SRR1_PROT, addr);
272 return PPC_MMU_EXC;
273 } else {
274 if (flags & PPC_MMU_WRITE) {
275 ppc_exception(PPC_EXC_DSI, PPC_EXC_DSISR_PROT | PPC_EXC_DSISR_STORE, addr);
276 } else {
277 ppc_exception(PPC_EXC_DSI, PPC_EXC_DSISR_PROT, addr);
278 }
279 return PPC_MMU_EXC;
280 }
281 }
282 return PPC_MMU_FATAL;
283 }
284 // ok..
285 result = PTE2_RPN(pte) | offset;
286
287 // update access bits
288 if (flags & PPC_MMU_WRITE) {
289 pte |= PTE2_C | PTE2_R;
290 } else {
291 pte |= PTE2_R;
292 }
293 ppc_write_physical_word(pteg_addr+4, pte);
294 // PPC_MMU_WARN("hash function 2 used!\n");
295 // gSinglestep = true;
296 return PPC_MMU_OK;
297 }
298 }
299 pteg_addr+=8;
300 }
301 }
302 // page fault
303 if (!(flags & PPC_MMU_NO_EXC)) {
304 if (flags & PPC_MMU_CODE) {
305 ppc_exception(PPC_EXC_ISI, PPC_EXC_SRR1_PAGE);
306 } else {
307 if (flags & PPC_MMU_WRITE) {
308 ppc_exception(PPC_EXC_DSI, PPC_EXC_DSISR_PAGE | PPC_EXC_DSISR_STORE, addr);
309 } else {
310 ppc_exception(PPC_EXC_DSI, PPC_EXC_DSISR_PAGE, addr);
311 }
312 }
313 return PPC_MMU_EXC;
314 }
315 return PPC_MMU_FATAL;
316 }
317
318 void ppc_mmu_tlb_invalidate()
319 {
320 gCPU.effective_code_page = 0xffffffff;
321 }
322
323 /*
324 pagetable:
325 min. 2^10 PTEGs (= 64k page table)
326 PTEG = 64byte
327 The page table can be any size 2^n where 16 <= n <= 25.
328
329 A PTEG contains eight
330 PTEs of eight bytes each; therefore, each PTEG is 64 bytes long.
331 */
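/*
 * Worked example for ppc_mmu_set_sdr1 below (the SDR1 value is made up,
 * assuming the architectural layout with HTABORG in the upper 16 bits
 * and HTABMASK in the low 9 bits): newval = 0x00300003 gives
 * htaborg = 0x0030 and htabmask = 0x003, i.e. n = 2. Both checks pass
 * (htabmask is a contiguous run of ones, htaborg has the masked bits
 * clear), so pagetable_base becomes 0x00300000 and pagetable_hashmask
 * becomes (0x3 << 10) | 0x3ff = 0xfff: a 2^18 = 256 KiB page table with
 * 2^12 PTEGs.
 */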
332
333 bool FASTCALL ppc_mmu_set_sdr1(uint32 newval, bool quiesce)
334 {
335 /* if (newval == gCPU.sdr1)*/ quiesce = false;
336 PPC_MMU_TRACE("new pagetable: sdr1 = 0x%08x\n", newval);
337 uint32 htabmask = SDR1_HTABMASK(newval);
338 uint32 x = 1;
339 uint32 xx = 0;
340 int n = 0;
341 while ((htabmask & x) && (n < 9)) {
342 n++;
343 xx|=x;
344 x<<=1;
345 }
346 if (htabmask & ~xx) {
347 PPC_MMU_TRACE("new pagetable: broken htabmask (%05x)\n", htabmask);
348 return false;
349 }
350 uint32 htaborg = SDR1_HTABORG(newval);
351 if (htaborg & xx) {
352 PPC_MMU_TRACE("new pagetable: broken htaborg (%05x)\n", htaborg);
353 return false;
354 }
355 gCPU.pagetable_base = htaborg<<16;
356 gCPU.sdr1 = newval;
357 gCPU.pagetable_hashmask = ((xx<<10)|0x3ff);
358 PPC_MMU_TRACE("new pagetable: sdr1 accepted\n");
359 PPC_MMU_TRACE("number of pages: 2^%d pagetable_start: 0x%08x size: 2^%d\n", n+13, gCPU.pagetable_base, n+16);
360 if (quiesce) {
361 prom_quiesce();
362 }
363 return true;
364 }
365
366 bool FASTCALL ppc_mmu_page_create(uint32 ea, uint32 pa)
367 {
368 uint32 sr = gCPU.sr[EA_SR(ea)];
369 uint32 page_index = EA_PageIndex(ea); // 16 bit
370 uint32 VSID = SR_VSID(sr); // 24 bit
371 uint32 api = EA_API(ea); // 6 bit (part of page_index)
372 uint32 hash1 = (VSID ^ page_index);
373 uint32 pte, pte2;
374 uint32 h = 0;
375 for (int j=0; j<2; j++) {
376 uint32 pteg_addr = ((hash1 & gCPU.pagetable_hashmask)<<6) | gCPU.pagetable_base;
377 for (int i=0; i<8; i++) {
378 if (ppc_read_physical_word(pteg_addr, pte)) {
379 PPC_MMU_ERR("read physical in address translate failed\n");
380 return false;
381 }
382 if (!(pte & PTE1_V)) {
383 // free pagetable entry found
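// The PTE pair built below follows the standard 32-bit PTE layout (a
// sketch): word 0 holds the valid bit in the MSB, the 24-bit VSID
// shifted left by 7, the secondary-hash bit H (set on the second pass)
// and the 6-bit API in the low bits; word 1 holds the physical page
// number in its upper 20 bits, with the R/C/WIMG/PP bits left at 0.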
384 pte = PTE1_V | (VSID << 7) | h | api;
385 pte2 = (PA_RPN(pa) << 12) | 0;
386 if (ppc_write_physical_word(pteg_addr, pte)
387 || ppc_write_physical_word(pteg_addr+4, pte2)) {
388 return false;
389 } else {
390 // ok
391 return true;
392 }
393 }
394 pteg_addr+=8;
395 }
396 hash1 = ~hash1;
397 h = PTE1_H;
398 }
399 return false;
400 }
401
402 inline bool FASTCALL ppc_mmu_page_free(uint32 ea)
403 {
404 return true;
405 }
406
407 inline int FASTCALL ppc_direct_physical_memory_handle(uint32 addr, byte *&ptr)
408 {
409 if (addr < gMemorySize) {
410 ptr = &gMemory[addr];
411 return PPC_MMU_OK;
412 }
413 return PPC_MMU_FATAL;
414 }
415
416 int FASTCALL ppc_direct_effective_memory_handle(uint32 addr, byte *&ptr)
417 {
418 uint32 ea;
419 int r;
420 if (!((r = ppc_effective_to_physical(addr, PPC_MMU_READ, ea)))) {
421 return ppc_direct_physical_memory_handle(ea, ptr);
422 }
423 return r;
424 }
425
426 int FASTCALL ppc_direct_effective_memory_handle_code(uint32 addr, byte *&ptr)
427 {
428 uint32 ea;
429 int r;
430 if (!((r = ppc_effective_to_physical(addr, PPC_MMU_READ | PPC_MMU_CODE, ea)))) {
431 return ppc_direct_physical_memory_handle(ea, ptr);
432 }
433 return r;
434 }
435
436 inline int FASTCALL ppc_read_physical_qword(uint32 addr, Vector_t &result)
437 {
438 if (addr < gMemorySize) {
439 // big endian
440 VECT_D(result,0) = ppc_dword_from_BE(*((uint64*)(gMemory+addr)));
441 VECT_D(result,1) = ppc_dword_from_BE(*((uint64*)(gMemory+addr+8)));
442 return PPC_MMU_OK;
443 }
444 return io_mem_read128(addr, (uint128 *)&result);
445 }
446
447 inline int FASTCALL ppc_read_physical_dword(uint32 addr, uint64 &result)
448 {
449 if (addr < gMemorySize) {
450 // big endian
451 result = ppc_dword_from_BE(*((uint64*)(gMemory+addr)));
452 return PPC_MMU_OK;
453 }
454 int ret = io_mem_read64(addr, result);
455 result = ppc_bswap_dword(result);
456 return ret;
457 }
458
459 inline int FASTCALL ppc_read_physical_word(uint32 addr, uint32 &result)
460 {
461 if (addr < gMemorySize) {
462 // big endian
463 result = ppc_word_from_BE(*((uint32*)(gMemory+addr)));
464 return PPC_MMU_OK;
465 }
466 int ret = io_mem_read(addr, result, 4);
467 result = ppc_bswap_word(result);
468 return ret;
469 }
470
471 inline int FASTCALL ppc_read_physical_half(uint32 addr, uint16 &result)
472 {
473 if (addr < gMemorySize) {
474 // big endian
475 result = ppc_half_from_BE(*((uint16*)(gMemory+addr)));
476 return PPC_MMU_OK;
477 }
478 uint32 r;
479 int ret = io_mem_read(addr, r, 2);
480 result = ppc_bswap_half(r);
481 return ret;
482 }
483
484 inline int FASTCALL ppc_read_physical_byte(uint32 addr, uint8 &result)
485 {
486 if (addr < gMemorySize) {
487 // big endian
488 result = gMemory[addr];
489 return PPC_MMU_OK;
490 }
491 uint32 r;
492 int ret = io_mem_read(addr, r, 1);
493 result = r;
494 return ret;
495 }
496
497 inline int FASTCALL ppc_read_effective_code(uint32 addr, uint32 &result)
498 {
499 if (addr & 3) {
500 // EXC..bla
501 return PPC_MMU_FATAL;
502 }
503 uint32 p;
504 int r;
505 if (!((r=ppc_effective_to_physical(addr, PPC_MMU_READ | PPC_MMU_CODE, p)))) {
506 return ppc_read_physical_word(p, result);
507 }
508 return r;
509 }
510
511 inline int FASTCALL ppc_read_effective_qword(uint32 addr, Vector_t &result)
512 {
513 uint32 p;
514 int r;
515
516 addr &= ~0x0f;
517
518 if (!(r = ppc_effective_to_physical(addr, PPC_MMU_READ, p))) {
519 return ppc_read_physical_qword(p, result);
520 }
521
522 return r;
523 }
524
525 inline int FASTCALL ppc_read_effective_dword(uint32 addr, uint64 &result)
526 {
527 uint32 p;
528 int r;
529 if (!(r = ppc_effective_to_physical(addr, PPC_MMU_READ, p))) {
530 if (EA_Offset(addr) > 4088) {
531 // read overlaps two pages.. tricky
532 byte *r1, *r2;
533 byte b[14];
534 ppc_effective_to_physical((addr & ~0xfff)+4089, PPC_MMU_READ, p);
535 if ((r = ppc_direct_physical_memory_handle(p, r1))) return r;
536 if ((r = ppc_effective_to_physical((addr & ~0xfff)+4096, PPC_MMU_READ, p))) return r;
537 if ((r = ppc_direct_physical_memory_handle(p, r2))) return r;
538 memmove(&b[0], r1, 7);
539 memmove(&b[7], r2, 7);
540 memmove(&result, &b[EA_Offset(addr)-4089], 8);
541 result = ppc_dword_from_BE(result);
542 return PPC_MMU_OK;
543 } else {
544 return ppc_read_physical_dword(p, result);
545 }
546 }
547 return r;
548 }
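/*
 * Example of the page-crossing path above (offset chosen for
 * illustration): an 8-byte read at page offset 4090 spans two pages.
 * The code maps offset 4089 of the current page and offset 0 of the
 * next, copies 7 bytes from each side into b[0..6] and b[7..13], and
 * returns the 8 bytes starting at b[4090-4089] = b[1], i.e. offsets
 * 4090..4095 of the first page followed by bytes 0..1 of the second.
 */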
549
550 inline int FASTCALL ppc_read_effective_word(uint32 addr, uint32 &result)
551 {
552 uint32 p;
553 int r;
554 if (!(r = ppc_effective_to_physical(addr, PPC_MMU_READ, p))) {
555 if (EA_Offset(addr) > 4092) {
556 // read overlaps two pages.. tricky
557 byte *r1, *r2;
558 byte b[6];
559 ppc_effective_to_physical((addr & ~0xfff)+4093, PPC_MMU_READ, p);
560 if ((r = ppc_direct_physical_memory_handle(p, r1))) return r;
561 if ((r = ppc_effective_to_physical((addr & ~0xfff)+4096, PPC_MMU_READ, p))) return r;
562 if ((r = ppc_direct_physical_memory_handle(p, r2))) return r;
563 memmove(&b[0], r1, 3);
564 memmove(&b[3], r2, 3);
565 memmove(&result, &b[EA_Offset(addr)-4093], 4);
566 result = ppc_word_from_BE(result);
567 return PPC_MMU_OK;
568 } else {
569 return ppc_read_physical_word(p, result);
570 }
571 }
572 return r;
573 }
574
575 inline int FASTCALL ppc_read_effective_half(uint32 addr, uint16 &result)
576 {
577 uint32 p;
578 int r;
579 if (!((r = ppc_effective_to_physical(addr, PPC_MMU_READ, p)))) {
580 if (EA_Offset(addr) > 4094) {
581 // read overlaps two pages.. tricky
582 byte b1, b2;
583 ppc_effective_to_physical((addr & ~0xfff)+4095, PPC_MMU_READ, p);
584 if ((r = ppc_read_physical_byte(p, b1))) return r;
585 if ((r = ppc_effective_to_physical((addr & ~0xfff)+4096, PPC_MMU_READ, p))) return r;
586 if ((r = ppc_read_physical_byte(p, b2))) return r;
587 result = (b1<<8)|b2;
588 return PPC_MMU_OK;
589 } else {
590 return ppc_read_physical_half(p, result);
591 }
592 }
593 return r;
594 }
595
596 inline int FASTCALL ppc_read_effective_byte(uint32 addr, uint8 &result)
597 {
598 uint32 p;
599 int r;
600 if (!((r = ppc_effective_to_physical(addr, PPC_MMU_READ, p)))) {
601 return ppc_read_physical_byte(p, result);
602 }
603 return r;
604 }
605
606 inline int FASTCALL ppc_write_physical_qword(uint32 addr, Vector_t data)
607 {
608 if (addr < gMemorySize) {
609 // big endian
610 *((uint64*)(gMemory+addr)) = ppc_dword_to_BE(VECT_D(data,0));
611 *((uint64*)(gMemory+addr+8)) = ppc_dword_to_BE(VECT_D(data,1));
612 return PPC_MMU_OK;
613 }
614 if (io_mem_write128(addr, (uint128 *)&data) == IO_MEM_ACCESS_OK) {
615 return PPC_MMU_OK;
616 } else {
617 return PPC_MMU_FATAL;
618 }
619 }
620
621 inline int FASTCALL ppc_write_physical_dword(uint32 addr, uint64 data)
622 {
623 if (addr < gMemorySize) {
624 // big endian
625 *((uint64*)(gMemory+addr)) = ppc_dword_to_BE(data);
626 return PPC_MMU_OK;
627 }
628 if (io_mem_write64(addr, ppc_bswap_dword(data)) == IO_MEM_ACCESS_OK) {
629 return PPC_MMU_OK;
630 } else {
631 return PPC_MMU_FATAL;
632 }
633 }
634
635 inline int FASTCALL ppc_write_physical_word(uint32 addr, uint32 data)
636 {
637 if (addr < gMemorySize) {
638 // big endian
639 *((uint32*)(gMemory+addr)) = ppc_word_to_BE(data);
640 return PPC_MMU_OK;
641 }
642 return io_mem_write(addr, ppc_bswap_word(data), 4);
643 }
644
645 inline int FASTCALL ppc_write_physical_half(uint32 addr, uint16 data)
646 {
647 if (addr < gMemorySize) {
648 // big endian
649 *((uint16*)(gMemory+addr)) = ppc_half_to_BE(data);
650 return PPC_MMU_OK;
651 }
652 return io_mem_write(addr, ppc_bswap_half(data), 2);
653 }
654
655 inline int FASTCALL ppc_write_physical_byte(uint32 addr, uint8 data)
656 {
657 if (addr < gMemorySize) {
658 // big endian
659 gMemory[addr] = data;
660 return PPC_MMU_OK;
661 }
662 return io_mem_write(addr, data, 1);
663 }
664
665 inline int FASTCALL ppc_write_effective_qword(uint32 addr, Vector_t data)
666 {
667 uint32 p;
668 int r;
669
670 addr &= ~0x0f;
671
672 if (!((r=ppc_effective_to_physical(addr, PPC_MMU_WRITE, p)))) {
673 return ppc_write_physical_qword(p, data);
674 }
675 return r;
676 }
677
678 inline int FASTCALL ppc_write_effective_dword(uint32 addr, uint64 data)
679 {
680 uint32 p;
681 int r;
682 if (!((r=ppc_effective_to_physical(addr, PPC_MMU_WRITE, p)))) {
683 if (EA_Offset(addr) > 4088) {
684 // write overlaps two pages.. tricky
685 byte *r1, *r2;
686 byte b[14];
687 ppc_effective_to_physical((addr & ~0xfff)+4089, PPC_MMU_WRITE, p);
688 if ((r = ppc_direct_physical_memory_handle(p, r1))) return r;
689 if ((r = ppc_effective_to_physical((addr & ~0xfff)+4096, PPC_MMU_WRITE, p))) return r;
690 if ((r = ppc_direct_physical_memory_handle(p, r2))) return r;
691 data = ppc_dword_to_BE(data);
692 memmove(&b[0], r1, 7);
693 memmove(&b[7], r2, 7);
694 memmove(&b[EA_Offset(addr)-4089], &data, 8);
695 memmove(r1, &b[0], 7);
696 memmove(r2, &b[7], 7);
697 return PPC_MMU_OK;
698 } else {
699 return ppc_write_physical_dword(p, data);
700 }
701 }
702 return r;
703 }
704
705 inline int FASTCALL ppc_write_effective_word(uint32 addr, uint32 data)
706 {
707 uint32 p;
708 int r;
709 if (!((r=ppc_effective_to_physical(addr, PPC_MMU_WRITE, p)))) {
710 if (EA_Offset(addr) > 4092) {
711 // write overlaps two pages.. tricky
712 byte *r1, *r2;
713 byte b[6];
714 ppc_effective_to_physical((addr & ~0xfff)+4093, PPC_MMU_WRITE, p);
715 if ((r = ppc_direct_physical_memory_handle(p, r1))) return r;
716 if ((r = ppc_effective_to_physical((addr & ~0xfff)+4096, PPC_MMU_WRITE, p))) return r;
717 if ((r = ppc_direct_physical_memory_handle(p, r2))) return r;
718 data = ppc_word_to_BE(data);
719 memmove(&b[0], r1, 3);
720 memmove(&b[3], r2, 3);
721 memmove(&b[EA_Offset(addr)-4093], &data, 4);
722 memmove(r1, &b[0], 3);
723 memmove(r2, &b[3], 3);
724 return PPC_MMU_OK;
725 } else {
726 return ppc_write_physical_word(p, data);
727 }
728 }
729 return r;
730 }
731
732 inline int FASTCALL ppc_write_effective_half(uint32 addr, uint16 data)
733 {
734 uint32 p;
735 int r;
736 if (!((r=ppc_effective_to_physical(addr, PPC_MMU_WRITE, p)))) {
737 if (EA_Offset(addr) > 4094) {
738 // write overlaps two pages.. tricky
739 ppc_effective_to_physical((addr & ~0xfff)+4095, PPC_MMU_WRITE, p);
740 if ((r = ppc_write_physical_byte(p, data>>8))) return r;
741 if ((r = ppc_effective_to_physical((addr & ~0xfff)+4096, PPC_MMU_WRITE, p))) return r;
742 if ((r = ppc_write_physical_byte(p, data))) return r;
743 return PPC_MMU_OK;
744 } else {
745 return ppc_write_physical_half(p, data);
746 }
747 }
748 return r;
749 }
750
751 inline int FASTCALL ppc_write_effective_byte(uint32 addr, uint8 data)
752 {
753 uint32 p;
754 int r;
755 if (!((r=ppc_effective_to_physical(addr, PPC_MMU_WRITE, p)))) {
756 return ppc_write_physical_byte(p, data);
757 }
758 return r;
759 }
760
761 bool FASTCALL ppc_init_physical_memory(uint size)
762 {
763 if (size < 64*1024*1024) {
764 PPC_MMU_ERR("Main memory size must be >= 64MB!\n");
765 }
766 gMemory = (byte*)malloc(size);
767 gMemorySize = size;
768 return gMemory != NULL;
769 }
770
771 uint32 ppc_get_memory_size()
772 {
773 return gMemorySize;
774 }
775
776 /***************************************************************************
777 * DMA Interface
778 */
779
780 bool ppc_dma_write(uint32 dest, const void *src, uint32 size)
781 {
782 if (dest > gMemorySize || (dest+size) > gMemorySize) return false;
783
784 byte *ptr;
785 ppc_direct_physical_memory_handle(dest, ptr);
786
787 memcpy(ptr, src, size);
788 return true;
789 }
790
791 bool ppc_dma_read(void *dest, uint32 src, uint32 size)
792 {
793 if (src > gMemorySize || (src+size) > gMemorySize) return false;
794
795 byte *ptr;
796 ppc_direct_physical_memory_handle(src, ptr);
797
798 memcpy(dest, ptr, size);
799 return true;
800 }
801
802 bool ppc_dma_set(uint32 dest, int c, uint32 size)
803 {
804 if (dest > gMemorySize || (dest+size) > gMemorySize) return false;
805
806 byte *ptr;
807 ppc_direct_physical_memory_handle(dest, ptr);
808
809 memset(ptr, c, size);
810 return true;
811 }
812
813
814 /***************************************************************************
815 * DEPRECATED prom interface
816 */
817 bool ppc_prom_set_sdr1(uint32 newval, bool quiesce)
818 {
819 return ppc_mmu_set_sdr1(newval, quiesce);
820 }
821
822 bool ppc_prom_effective_to_physical(uint32 &result, uint32 ea)
823 {
824 return ppc_effective_to_physical(ea, PPC_MMU_READ|PPC_MMU_SV|PPC_MMU_NO_EXC, result) == PPC_MMU_OK;
825 }
826
827 bool ppc_prom_page_create(uint32 ea, uint32 pa)
828 {
829 uint32 sr = gCPU.sr[EA_SR(ea)];
830 uint32 page_index = EA_PageIndex(ea); // 16 bit
831 uint32 VSID = SR_VSID(sr); // 24 bit
832 uint32 api = EA_API(ea); // 6 bit (part of page_index)
833 uint32 hash1 = (VSID ^ page_index);
834 uint32 pte, pte2;
835 uint32 h = 0;
836 for (int j=0; j<2; j++) {
837 uint32 pteg_addr = ((hash1 & gCPU.pagetable_hashmask)<<6) | gCPU.pagetable_base;
838 for (int i=0; i<8; i++) {
839 if (ppc_read_physical_word(pteg_addr, pte)) {
840 PPC_MMU_ERR("read physical in address translate failed\n");
841 return false;
842 }
843 if (!(pte & PTE1_V)) {
844 // free pagetable entry found
845 pte = PTE1_V | (VSID << 7) | h | api;
846 pte2 = (PA_RPN(pa) << 12) | 0;
847 if (ppc_write_physical_word(pteg_addr, pte)
848 || ppc_write_physical_word(pteg_addr+4, pte2)) {
849 return false;
850 } else {
851 // ok
852 return true;
853 }
854 }
855 pteg_addr+=8;
856 }
857 hash1 = ~hash1;
858 h = PTE1_H;
859 }
860 return false;
861 }
862
863 bool ppc_prom_page_free(uint32 ea)
864 {
865 return true;
866 }
867
868 /***************************************************************************
869 * MMU Opcodes
870 */
871
872 #include "ppc_dec.h"
873
874 /*
875 * dcbz Data Cache Block Clear to Zero
876 * .464
877 */
878 void ppc_opc_dcbz()
879 {
880 //PPC_L1_CACHE_LINE_SIZE
881 int rA, rD, rB;
882 PPC_OPC_TEMPL_X(gCPU.current_opc, rD, rA, rB);
883 // assert rD=0
884 uint32 a = (rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB];
885 // BAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
886 ppc_write_effective_dword(a, 0)
887 || ppc_write_effective_dword(a+8, 0)
888 || ppc_write_effective_dword(a+16, 0)
889 || ppc_write_effective_dword(a+24, 0);
890 }
891
892 /*
893 * lbz Load Byte and Zero
894 * .521
895 */
896 void ppc_opc_lbz()
897 {
898 int rA, rD;
899 uint32 imm;
900 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, rD, rA, imm);
901 uint8 r;
902 int ret = ppc_read_effective_byte((rA?gCPU.gpr[rA]:0)+imm, r);
903 if (ret == PPC_MMU_OK) {
904 gCPU.gpr[rD] = r;
905 }
906 }
907 /*
908 * lbzu Load Byte and Zero with Update
909 * .522
910 */
911 void ppc_opc_lbzu()
912 {
913 int rA, rD;
914 uint32 imm;
915 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, rD, rA, imm);
916 // FIXME: check rA!=0 && rA!=rD
917 uint8 r;
918 int ret = ppc_read_effective_byte(gCPU.gpr[rA]+imm, r);
919 if (ret == PPC_MMU_OK) {
920 gCPU.gpr[rA] += imm;
921 gCPU.gpr[rD] = r;
922 }
923 }
924 /*
925 * lbzux Load Byte and Zero with Update Indexed
926 * .523
927 */
928 void ppc_opc_lbzux()
929 {
930 int rA, rD, rB;
931 PPC_OPC_TEMPL_X(gCPU.current_opc, rD, rA, rB);
932 // FIXME: check rA!=0 && rA!=rD
933 uint8 r;
934 int ret = ppc_read_effective_byte(gCPU.gpr[rA]+gCPU.gpr[rB], r);
935 if (ret == PPC_MMU_OK) {
936 gCPU.gpr[rA] += gCPU.gpr[rB];
937 gCPU.gpr[rD] = r;
938 }
939 }
940 /*
941 * lbzx Load Byte and Zero Indexed
942 * .524
943 */
944 void ppc_opc_lbzx()
945 {
946 int rA, rD, rB;
947 PPC_OPC_TEMPL_X(gCPU.current_opc, rD, rA, rB);
948 uint8 r;
949 int ret = ppc_read_effective_byte((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB], r);
950 if (ret == PPC_MMU_OK) {
951 gCPU.gpr[rD] = r;
952 }
953 }
954 /*
955 * lfd Load Floating-Point Double
956 * .530
957 */
958 void ppc_opc_lfd()
959 {
960 if ((gCPU.msr & MSR_FP) == 0) {
961 ppc_exception(PPC_EXC_NO_FPU);
962 return;
963 }
964 int rA, frD;
965 uint32 imm;
966 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, frD, rA, imm);
967 uint64 r;
968 int ret = ppc_read_effective_dword((rA?gCPU.gpr[rA]:0)+imm, r);
969 if (ret == PPC_MMU_OK) {
970 gCPU.fpr[frD] = r;
971 }
972 }
973 /*
974 * lfdu Load Floating-Point Double with Update
975 * .531
976 */
977 void ppc_opc_lfdu()
978 {
979 if ((gCPU.msr & MSR_FP) == 0) {
980 ppc_exception(PPC_EXC_NO_FPU);
981 return;
982 }
983 int rA, frD;
984 uint32 imm;
985 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, frD, rA, imm);
986 // FIXME: check rA!=0
987 uint64 r;
988 int ret = ppc_read_effective_dword(gCPU.gpr[rA]+imm, r);
989 if (ret == PPC_MMU_OK) {
990 gCPU.fpr[frD] = r;
991 gCPU.gpr[rA] += imm;
992 }
993 }
994 /*
995 * lfdux Load Floating-Point Double with Update Indexed
996 * .532
997 */
998 void ppc_opc_lfdux()
999 {
1000 if ((gCPU.msr & MSR_FP) == 0) {
1001 ppc_exception(PPC_EXC_NO_FPU);
1002 return;
1003 }
1004 int rA, frD, rB;
1005 PPC_OPC_TEMPL_X(gCPU.current_opc, frD, rA, rB);
1006 // FIXME: check rA!=0
1007 uint64 r;
1008 int ret = ppc_read_effective_dword(gCPU.gpr[rA]+gCPU.gpr[rB], r);
1009 if (ret == PPC_MMU_OK) {
1010 gCPU.gpr[rA] += gCPU.gpr[rB];
1011 gCPU.fpr[frD] = r;
1012 }
1013 }
1014 /*
1015 * lfdx Load Floating-Point Double Indexed
1016 * .533
1017 */
1018 void ppc_opc_lfdx()
1019 {
1020 if ((gCPU.msr & MSR_FP) == 0) {
1021 ppc_exception(PPC_EXC_NO_FPU);
1022 return;
1023 }
1024 int rA, frD, rB;
1025 PPC_OPC_TEMPL_X(gCPU.current_opc, frD, rA, rB);
1026 uint64 r;
1027 int ret = ppc_read_effective_dword((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB], r);
1028 if (ret == PPC_MMU_OK) {
1029 gCPU.fpr[frD] = r;
1030 }
1031 }
1032 /*
1033 * lfs Load Floating-Point Single
1034 * .534
1035 */
1036 void ppc_opc_lfs()
1037 {
1038 if ((gCPU.msr & MSR_FP) == 0) {
1039 ppc_exception(PPC_EXC_NO_FPU);
1040 return;
1041 }
1042 int rA, frD;
1043 uint32 imm;
1044 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, frD, rA, imm);
1045 uint32 r;
1046 int ret = ppc_read_effective_word((rA?gCPU.gpr[rA]:0)+imm, r);
1047 if (ret == PPC_MMU_OK) {
1048 ppc_single s;
1049 ppc_double d;
1050 ppc_fpu_unpack_single(s, r);
1051 ppc_fpu_single_to_double(s, d);
1052 ppc_fpu_pack_double(d, gCPU.fpr[frD]);
1053 }
1054 }
1055 /*
1056 * lfsu Load Floating-Point Single with Update
1057 * .535
1058 */
1059 void ppc_opc_lfsu()
1060 {
1061 if ((gCPU.msr & MSR_FP) == 0) {
1062 ppc_exception(PPC_EXC_NO_FPU);
1063 return;
1064 }
1065 int rA, frD;
1066 uint32 imm;
1067 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, frD, rA, imm);
1068 // FIXME: check rA!=0
1069 uint32 r;
1070 int ret = ppc_read_effective_word(gCPU.gpr[rA]+imm, r);
1071 if (ret == PPC_MMU_OK) {
1072 ppc_single s;
1073 ppc_double d;
1074 ppc_fpu_unpack_single(s, r);
1075 ppc_fpu_single_to_double(s, d);
1076 ppc_fpu_pack_double(d, gCPU.fpr[frD]);
1077 gCPU.gpr[rA] += imm;
1078 }
1079 }
1080 /*
1081 * lfsux Load Floating-Point Single with Update Indexed
1082 * .536
1083 */
1084 void ppc_opc_lfsux()
1085 {
1086 if ((gCPU.msr & MSR_FP) == 0) {
1087 ppc_exception(PPC_EXC_NO_FPU);
1088 return;
1089 }
1090 int rA, frD, rB;
1091 PPC_OPC_TEMPL_X(gCPU.current_opc, frD, rA, rB);
1092 // FIXME: check rA!=0
1093 uint32 r;
1094 int ret = ppc_read_effective_word(gCPU.gpr[rA]+gCPU.gpr[rB], r);
1095 if (ret == PPC_MMU_OK) {
1096 gCPU.gpr[rA] += gCPU.gpr[rB];
1097 ppc_single s;
1098 ppc_double d;
1099 ppc_fpu_unpack_single(s, r);
1100 ppc_fpu_single_to_double(s, d);
1101 ppc_fpu_pack_double(d, gCPU.fpr[frD]);
1102 }
1103 }
1104 /*
1105 * lfsx Load Floating-Point Single Indexed
1106 * .537
1107 */
1108 void ppc_opc_lfsx()
1109 {
1110 if ((gCPU.msr & MSR_FP) == 0) {
1111 ppc_exception(PPC_EXC_NO_FPU);
1112 return;
1113 }
1114 int rA, frD, rB;
1115 PPC_OPC_TEMPL_X(gCPU.current_opc, frD, rA, rB);
1116 uint32 r;
1117 int ret = ppc_read_effective_word((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB], r);
1118 if (ret == PPC_MMU_OK) {
1119 ppc_single s;
1120 ppc_double d;
1121 ppc_fpu_unpack_single(s, r);
1122 ppc_fpu_single_to_double(s, d);
1123 ppc_fpu_pack_double(d, gCPU.fpr[frD]);
1124 }
1125 }
1126 /*
1127 * lha Load Half Word Algebraic
1128 * .538
1129 */
1130 void ppc_opc_lha()
1131 {
1132 int rA, rD;
1133 uint32 imm;
1134 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, rD, rA, imm);
1135 uint16 r;
1136 int ret = ppc_read_effective_half((rA?gCPU.gpr[rA]:0)+imm, r);
1137 if (ret == PPC_MMU_OK) {
1138 gCPU.gpr[rD] = (r&0x8000)?(r|0xffff0000):r;
1139 }
1140 }
1141 /*
1142 * lhau Load Half Word Algebraic with Update
1143 * .539
1144 */
1145 void ppc_opc_lhau()
1146 {
1147 int rA, rD;
1148 uint32 imm;
1149 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, rD, rA, imm);
1150 uint16 r;
1151 // FIXME: rA != 0
1152 int ret = ppc_read_effective_half(gCPU.gpr[rA]+imm, r);
1153 if (ret == PPC_MMU_OK) {
1154 gCPU.gpr[rA] += imm;
1155 gCPU.gpr[rD] = (r&0x8000)?(r|0xffff0000):r;
1156 }
1157 }
1158 /*
1159 * lhaux Load Half Word Algebraic with Update Indexed
1160 * .540
1161 */
1162 void ppc_opc_lhaux()
1163 {
1164 int rA, rD, rB;
1165 PPC_OPC_TEMPL_X(gCPU.current_opc, rD, rA, rB);
1166 uint16 r;
1167 // FIXME: rA != 0
1168 int ret = ppc_read_effective_half(gCPU.gpr[rA]+gCPU.gpr[rB], r);
1169 if (ret == PPC_MMU_OK) {
1170 gCPU.gpr[rA] += gCPU.gpr[rB];
1171 gCPU.gpr[rD] = (r&0x8000)?(r|0xffff0000):r;
1172 }
1173 }
1174 /*
1175 * lhax Load Half Word Algebraic Indexed
1176 * .541
1177 */
1178 void ppc_opc_lhax()
1179 {
1180 int rA, rD, rB;
1181 PPC_OPC_TEMPL_X(gCPU.current_opc, rD, rA, rB);
1182 uint16 r;
1183 // FIXME: rA != 0
1184 int ret = ppc_read_effective_half((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB], r);
1185 if (ret == PPC_MMU_OK) {
1186 gCPU.gpr[rD] = (r&0x8000) ? (r|0xffff0000):r;
1187 }
1188 }
1189 /*
1190 * lhbrx Load Half Word Byte-Reverse Indexed
1191 * .542
1192 */
1193 void ppc_opc_lhbrx()
1194 {
1195 int rA, rD, rB;
1196 PPC_OPC_TEMPL_X(gCPU.current_opc, rD, rA, rB);
1197 uint16 r;
1198 int ret = ppc_read_effective_half((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB], r);
1199 if (ret == PPC_MMU_OK) {
1200 gCPU.gpr[rD] = ppc_bswap_half(r);
1201 }
1202 }
1203 /*
1204 * lhz Load Half Word and Zero
1205 * .543
1206 */
1207 void ppc_opc_lhz()
1208 {
1209 int rA, rD;
1210 uint32 imm;
1211 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, rD, rA, imm);
1212 uint16 r;
1213 int ret = ppc_read_effective_half((rA?gCPU.gpr[rA]:0)+imm, r);
1214 if (ret == PPC_MMU_OK) {
1215 gCPU.gpr[rD] = r;
1216 }
1217 }
1218 /*
1219 * lhzu Load Half Word and Zero with Update
1220 * .544
1221 */
1222 void ppc_opc_lhzu()
1223 {
1224 int rA, rD;
1225 uint32 imm;
1226 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, rD, rA, imm);
1227 uint16 r;
1228 // FIXME: rA!=0
1229 int ret = ppc_read_effective_half(gCPU.gpr[rA]+imm, r);
1230 if (ret == PPC_MMU_OK) {
1231 gCPU.gpr[rD] = r;
1232 gCPU.gpr[rA] += imm;
1233 }
1234 }
1235 /*
1236 * lhzux Load Half Word and Zero with Update Indexed
1237 * .545
1238 */
1239 void ppc_opc_lhzux()
1240 {
1241 int rA, rD, rB;
1242 PPC_OPC_TEMPL_X(gCPU.current_opc, rD, rA, rB);
1243 uint16 r;
1244 // FIXME: rA != 0
1245 int ret = ppc_read_effective_half(gCPU.gpr[rA]+gCPU.gpr[rB], r);
1246 if (ret == PPC_MMU_OK) {
1247 gCPU.gpr[rA] += gCPU.gpr[rB];
1248 gCPU.gpr[rD] = r;
1249 }
1250 }
1251 /*
1252 * lhzx Load Half Word and Zero Indexed
1253 * .546
1254 */
1255 void ppc_opc_lhzx()
1256 {
1257 int rA, rD, rB;
1258 PPC_OPC_TEMPL_X(gCPU.current_opc, rD, rA, rB);
1259 uint16 r;
1260 int ret = ppc_read_effective_half((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB], r);
1261 if (ret == PPC_MMU_OK) {
1262 gCPU.gpr[rD] = r;
1263 }
1264 }
1265 /*
1266 * lmw Load Multiple Word
1267 * .547
1268 */
1269 void ppc_opc_lmw()
1270 {
1271 int rD, rA;
1272 uint32 imm;
1273 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, rD, rA, imm);
1274 uint32 ea = (rA ? gCPU.gpr[rA] : 0) + imm;
1275 while (rD <= 31) {
1276 if (ppc_read_effective_word(ea, gCPU.gpr[rD])) {
1277 return;
1278 }
1279 rD++;
1280 ea += 4;
1281 }
1282 }
1283 /*
1284 * lswi Load String Word Immediate
1285 * .548
1286 */
1287 void ppc_opc_lswi()
1288 {
1289 int rA, rD, NB;
1290 PPC_OPC_TEMPL_X(gCPU.current_opc, rD, rA, NB);
1291 if (NB==0) NB=32;
1292 uint32 ea = rA ? gCPU.gpr[rA] : 0;
1293 uint32 r = 0;
1294 int i = 4;
1295 uint8 v;
1296 while (NB > 0) {
1297 if (!i) {
1298 i = 4;
1299 gCPU.gpr[rD] = r;
1300 rD++;
1301 rD%=32;
1302 r = 0;
1303 }
1304 if (ppc_read_effective_byte(ea, v)) {
1305 return;
1306 }
1307 r<<=8;
1308 r|=v;
1309 ea++;
1310 i--;
1311 NB--;
1312 }
1313 while (i) { r<<=8; i--; }
1314 gCPU.gpr[rD] = r;
1315 }
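/*
 * Example of the string-load loop above (register numbers are
 * hypothetical): lswi with NB = 5 and rD = 10 reads five bytes
 * b0..b4. The register is filled big-endian, so gpr[10] ends up as
 * b0 b1 b2 b3, and the trailing "while (i)" loop left-justifies the
 * remainder, leaving gpr[11] = b4 00 00 00.
 */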
1316 /*
1317 * lswx Load String Word Indexed
1318 * .550
1319 */
1320 void ppc_opc_lswx()
1321 {
1322 int rA, rD, rB;
1323 PPC_OPC_TEMPL_X(gCPU.current_opc, rD, rA, rB);
1324 int NB = XER_n(gCPU.xer);
1325 uint32 ea = gCPU.gpr[rB] + (rA ? gCPU.gpr[rA] : 0);
1326
1327 uint32 r = 0;
1328 int i = 4;
1329 uint8 v;
1330 while (NB > 0) {
1331 if (!i) {
1332 i = 4;
1333 gCPU.gpr[rD] = r;
1334 rD++;
1335 rD%=32;
1336 r = 0;
1337 }
1338 if (ppc_read_effective_byte(ea, v)) {
1339 return;
1340 }
1341 r<<=8;
1342 r|=v;
1343 ea++;
1344 i--;
1345 NB--;
1346 }
1347 while (i) { r<<=8; i--; }
1348 gCPU.gpr[rD] = r;
1349 }
1350 /*
1351 * lwarx Load Word and Reserve Indexed
1352 * .553
1353 */
1354 void ppc_opc_lwarx()
1355 {
1356 int rA, rD, rB;
1357 PPC_OPC_TEMPL_X(gCPU.current_opc, rD, rA, rB);
1358 uint32 r;
1359 int ret = ppc_read_effective_word((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB], r);
1360 if (ret == PPC_MMU_OK) {
1361 gCPU.gpr[rD] = r;
1362 gCPU.reserve = r;
1363 gCPU.have_reservation = 1;
1364 }
1365 }
1366 /*
1367 * lwbrx Load Word Byte-Reverse Indexed
1368 * .556
1369 */
1370 void ppc_opc_lwbrx()
1371 {
1372 int rA, rD, rB;
1373 PPC_OPC_TEMPL_X(gCPU.current_opc, rD, rA, rB);
1374 uint32 r;
1375 int ret = ppc_read_effective_word((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB], r);
1376 if (ret == PPC_MMU_OK) {
1377 gCPU.gpr[rD] = ppc_bswap_word(r);
1378 }
1379 }
1380 /*
1381 * lwz Load Word and Zero
1382 * .557
1383 */
1384 void ppc_opc_lwz()
1385 {
1386 int rA, rD;
1387 uint32 imm;
1388 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, rD, rA, imm);
1389 uint32 r;
1390 int ret = ppc_read_effective_word((rA?gCPU.gpr[rA]:0)+imm, r);
1391 if (ret == PPC_MMU_OK) {
1392 gCPU.gpr[rD] = r;
1393 }
1394 }
1395 /*
1396 * lwzu Load Word and Zero with Update
1397 * .558
1398 */
1399 void ppc_opc_lwzu()
1400 {
1401 int rA, rD;
1402 uint32 imm;
1403 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, rD, rA, imm);
1404 // FIXME: check rA!=0 && rA!=rD
1405 uint32 r;
1406 int ret = ppc_read_effective_word(gCPU.gpr[rA]+imm, r);
1407 if (ret == PPC_MMU_OK) {
1408 gCPU.gpr[rA] += imm;
1409 gCPU.gpr[rD] = r;
1410 }
1411 }
1412 /*
1413 * lwzux Load Word and Zero with Update Indexed
1414 * .559
1415 */
1416 void ppc_opc_lwzux()
1417 {
1418 int rA, rD, rB;
1419 PPC_OPC_TEMPL_X(gCPU.current_opc, rD, rA, rB);
1420 // FIXME: check rA!=0 && rA!=rD
1421 uint32 r;
1422 int ret = ppc_read_effective_word(gCPU.gpr[rA]+gCPU.gpr[rB], r);
1423 if (ret == PPC_MMU_OK) {
1424 gCPU.gpr[rA] += gCPU.gpr[rB];
1425 gCPU.gpr[rD] = r;
1426 }
1427 }
1428 /*
1429 * lwzx Load Word and Zero Indexed
1430 * .560
1431 */
1432 void ppc_opc_lwzx()
1433 {
1434 int rA, rD, rB;
1435 PPC_OPC_TEMPL_X(gCPU.current_opc, rD, rA, rB);
1436 uint32 r;
1437 int ret = ppc_read_effective_word((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB], r);
1438 if (ret == PPC_MMU_OK) {
1439 gCPU.gpr[rD] = r;
1440 }
1441 }
1442
1443 /* lvx Load Vector Indexed
1444 * v.127
1445 */
1446 void ppc_opc_lvx()
1447 {
1448 #ifndef __VEC_EXC_OFF__
1449 if ((gCPU.msr & MSR_VEC) == 0) {
1450 ppc_exception(PPC_EXC_NO_VEC);
1451 return;
1452 }
1453 #endif
1454 VECTOR_DEBUG;
1455 int rA, vrD, rB;
1456 PPC_OPC_TEMPL_X(gCPU.current_opc, vrD, rA, rB);
1457 Vector_t r;
1458
1459 int ea = ((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB]);
1460
1461 int ret = ppc_read_effective_qword(ea, r);
1462 if (ret == PPC_MMU_OK) {
1463 gCPU.vr[vrD] = r;
1464 }
1465 }
1466
1467 /* lvxl Load Vector Indexed LRU
1468 * v.128
1469 */
1470 void ppc_opc_lvxl()
1471 {
1472 ppc_opc_lvx();
1473 /* This instruction should hint to the cache that the value won't be
1474 * needed again in memory anytime soon. We don't emulate the cache,
1475 * so this is effectively exactly the same as lvx.
1476 */
1477 }
1478
1479 /* lvebx Load Vector Element Byte Indexed
1480 * v.119
1481 */
1482 void ppc_opc_lvebx()
1483 {
1484 #ifndef __VEC_EXC_OFF__
1485 if ((gCPU.msr & MSR_VEC) == 0) {
1486 ppc_exception(PPC_EXC_NO_VEC);
1487 return;
1488 }
1489 #endif
1490 VECTOR_DEBUG;
1491 int rA, vrD, rB;
1492 PPC_OPC_TEMPL_X(gCPU.current_opc, vrD, rA, rB);
1493 uint32 ea;
1494 uint8 r;
1495 ea = (rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB];
1496 int ret = ppc_read_effective_byte(ea, r);
1497 if (ret == PPC_MMU_OK) {
1498 VECT_B(gCPU.vr[vrD], ea & 0xf) = r;
1499 }
1500 }
1501
1502 /* lvehx Load Vector Element Half Word Indexed
1503 * v.121
1504 */
1505 void ppc_opc_lvehx()
1506 {
1507 #ifndef __VEC_EXC_OFF__
1508 if ((gCPU.msr & MSR_VEC) == 0) {
1509 ppc_exception(PPC_EXC_NO_VEC);
1510 return;
1511 }
1512 #endif
1513 VECTOR_DEBUG;
1514 int rA, vrD, rB;
1515 PPC_OPC_TEMPL_X(gCPU.current_opc, vrD, rA, rB);
1516 uint32 ea;
1517 uint16 r;
1518 ea = ((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB]) & ~1;
1519 int ret = ppc_read_effective_half(ea, r);
1520 if (ret == PPC_MMU_OK) {
1521 VECT_H(gCPU.vr[vrD], (ea & 0xf) >> 1) = r;
1522 }
1523 }
1524
1525 /* lvewx Load Vector Element Word Indexed
1526 * v.122
1527 */
1528 void ppc_opc_lvewx()
1529 {
1530 #ifndef __VEC_EXC_OFF__
1531 if ((gCPU.msr & MSR_VEC) == 0) {
1532 ppc_exception(PPC_EXC_NO_VEC);
1533 return;
1534 }
1535 #endif
1536 VECTOR_DEBUG;
1537 int rA, vrD, rB;
1538 PPC_OPC_TEMPL_X(gCPU.current_opc, vrD, rA, rB);
1539 uint32 ea;
1540 uint32 r;
1541 ea = ((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB]) & ~3;
1542 int ret = ppc_read_effective_word(ea, r);
1543 if (ret == PPC_MMU_OK) {
1544 VECT_W(gCPU.vr[vrD], (ea & 0xf) >> 2) = r;
1545 }
1546 }
1547
1548 #if HOST_ENDIANESS == HOST_ENDIANESS_LE
1549 static byte lvsl_helper[] = {
1550 0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18,
1551 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
1552 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08,
1553 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00
1554 };
1555 #elif HOST_ENDIANESS == HOST_ENDIANESS_BE
1556 static byte lvsl_helper[] = {
1557 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
1558 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
1559 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
1560 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F
1561 };
1562 #else
1563 #error Endianess not supported!
1564 #endif
1565
1566 /*
1567 * lvsl Load Vector for Shift Left
1568 * v.123
1569 */
1570 void ppc_opc_lvsl()
1571 {
1572 #ifndef __VEC_EXC_OFF__
1573 if ((gCPU.msr & MSR_VEC) == 0) {
1574 ppc_exception(PPC_EXC_NO_VEC);
1575 return;
1576 }
1577 #endif
1578 VECTOR_DEBUG;
1579 int rA, vrD, rB;
1580 PPC_OPC_TEMPL_X(gCPU.current_opc, vrD, rA, rB);
1581 uint32 ea;
1582 ea = ((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB]);
1583 #if HOST_ENDIANESS == HOST_ENDIANESS_LE
1584 memmove(&gCPU.vr[vrD], lvsl_helper+0x10-(ea & 0xf), 16);
1585 #elif HOST_ENDIANESS == HOST_ENDIANESS_BE
1586 memmove(&gCPU.vr[vrD], lvsl_helper+(ea & 0xf), 16);
1587 #else
1588 #error Endianess not supported!
1589 #endif
1590 }
1591
1592 /*
1593 * lvsr Load Vector for Shift Right
1594 * v.125
1595 */
1596 void ppc_opc_lvsr()
1597 {
1598 #ifndef __VEC_EXC_OFF__
1599 if ((gCPU.msr & MSR_VEC) == 0) {
1600 ppc_exception(PPC_EXC_NO_VEC);
1601 return;
1602 }
1603 #endif
1604 VECTOR_DEBUG;
1605 int rA, vrD, rB;
1606 PPC_OPC_TEMPL_X(gCPU.current_opc, vrD, rA, rB);
1607 uint32 ea;
1608 ea = ((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB]);
1609 #if HOST_ENDIANESS == HOST_ENDIANESS_LE
1610 memmove(&gCPU.vr[vrD], lvsl_helper+(ea & 0xf), 16);
1611 #elif HOST_ENDIANESS == HOST_ENDIANESS_BE
1612 memmove(&gCPU.vr[vrD], lvsl_helper+0x10-(ea & 0xf), 16);
1613 #else
1614 #error Endianess not supported!
1615 #endif
1616 }
1617
1618 /*
1619 * dst Data Stream Touch
1620 * v.115
1621 */
1622 void ppc_opc_dst()
1623 {
1624 VECTOR_DEBUG;
1625 /* Since we are not emulating the cache, this is a nop */
1626 }
1627
1628 /*
1629 * stb Store Byte
1630 * .632
1631 */
1632 void ppc_opc_stb()
1633 {
1634 int rA, rS;
1635 uint32 imm;
1636 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, rS, rA, imm);
1637 ppc_write_effective_byte((rA?gCPU.gpr[rA]:0)+imm, (uint8)gCPU.gpr[rS]) != PPC_MMU_FATAL;
1638 }
1639 /*
1640 * stbu Store Byte with Update
1641 * .633
1642 */
1643 void ppc_opc_stbu()
1644 {
1645 int rA, rS;
1646 uint32 imm;
1647 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, rS, rA, imm);
1648 // FIXME: check rA!=0
1649 int ret = ppc_write_effective_byte(gCPU.gpr[rA]+imm, (uint8)gCPU.gpr[rS]);
1650 if (ret == PPC_MMU_OK) {
1651 gCPU.gpr[rA] += imm;
1652 }
1653 }
1654 /*
1655 * stbux Store Byte with Update Indexed
1656 * .634
1657 */
1658 void ppc_opc_stbux()
1659 {
1660 int rA, rS, rB;
1661 PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
1662 // FIXME: check rA!=0
1663 int ret = ppc_write_effective_byte(gCPU.gpr[rA]+gCPU.gpr[rB], (uint8)gCPU.gpr[rS]);
1664 if (ret == PPC_MMU_OK) {
1665 gCPU.gpr[rA] += gCPU.gpr[rB];
1666 }
1667 }
1668 /*
1669 * stbx Store Byte Indexed
1670 * .635
1671 */
1672 void ppc_opc_stbx()
1673 {
1674 int rA, rS, rB;
1675 PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
1676 ppc_write_effective_byte((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB], (uint8)gCPU.gpr[rS]) != PPC_MMU_FATAL;
1677 }
1678 /*
1679 * stfd Store Floating-Point Double
1680 * .642
1681 */
1682 void ppc_opc_stfd()
1683 {
1684 if ((gCPU.msr & MSR_FP) == 0) {
1685 ppc_exception(PPC_EXC_NO_FPU);
1686 return;
1687 }
1688 int rA, frS;
1689 uint32 imm;
1690 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, frS, rA, imm);
1691 ppc_write_effective_dword((rA?gCPU.gpr[rA]:0)+imm, gCPU.fpr[frS]) != PPC_MMU_FATAL;
1692 }
1693 /*
1694 * stfdu Store Floating-Point Double with Update
1695 * .643
1696 */
1697 void ppc_opc_stfdu()
1698 {
1699 if ((gCPU.msr & MSR_FP) == 0) {
1700 ppc_exception(PPC_EXC_NO_FPU);
1701 return;
1702 }
1703 int rA, frS;
1704 uint32 imm;
1705 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, frS, rA, imm);
1706 // FIXME: check rA!=0
1707 int ret = ppc_write_effective_dword(gCPU.gpr[rA]+imm, gCPU.fpr[frS]);
1708 if (ret == PPC_MMU_OK) {
1709 gCPU.gpr[rA] += imm;
1710 }
1711 }
1712 /*
1713 * stfdux Store Floating-Point Double with Update Indexed
1714 * .644
1715 */
1716 void ppc_opc_stfdux()
1717 {
1718 if ((gCPU.msr & MSR_FP) == 0) {
1719 ppc_exception(PPC_EXC_NO_FPU);
1720 return;
1721 }
1722 int rA, frS, rB;
1723 PPC_OPC_TEMPL_X(gCPU.current_opc, frS, rA, rB);
1724 // FIXME: check rA!=0
1725 int ret = ppc_write_effective_dword(gCPU.gpr[rA]+gCPU.gpr[rB], gCPU.fpr[frS]);
1726 if (ret == PPC_MMU_OK) {
1727 gCPU.gpr[rA] += gCPU.gpr[rB];
1728 }
1729 }
1730 /*
1731 * stfdx Store Floating-Point Double Indexed
1732 * .645
1733 */
1734 void ppc_opc_stfdx()
1735 {
1736 if ((gCPU.msr & MSR_FP) == 0) {
1737 ppc_exception(PPC_EXC_NO_FPU);
1738 return;
1739 }
1740 int rA, frS, rB;
1741 PPC_OPC_TEMPL_X(gCPU.current_opc, frS, rA, rB);
1742 ppc_write_effective_dword((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB], gCPU.fpr[frS]) != PPC_MMU_FATAL;
1743 }
1744 /*
1745 * stfiwx Store Floating-Point as Integer Word Indexed
1746 * .646
1747 */
1748 void ppc_opc_stfiwx()
1749 {
1750 if ((gCPU.msr & MSR_FP) == 0) {
1751 ppc_exception(PPC_EXC_NO_FPU);
1752 return;
1753 }
1754 int rA, frS, rB;
1755 PPC_OPC_TEMPL_X(gCPU.current_opc, frS, rA, rB);
1756 ppc_write_effective_word((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB], (uint32)gCPU.fpr[frS]) != PPC_MMU_FATAL;
1757 }
1758 /*
1759 * stfs Store Floating-Point Single
1760 * .647
1761 */
1762 void ppc_opc_stfs()
1763 {
1764 if ((gCPU.msr & MSR_FP) == 0) {
1765 ppc_exception(PPC_EXC_NO_FPU);
1766 return;
1767 }
1768 int rA, frS;
1769 uint32 imm;
1770 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, frS, rA, imm);
1771 uint32 s;
1772 ppc_double d;
1773 ppc_fpu_unpack_double(d, gCPU.fpr[frS]);
1774 ppc_fpu_pack_single(d, s);
1775 ppc_write_effective_word((rA?gCPU.gpr[rA]:0)+imm, s) != PPC_MMU_FATAL;
1776 }
1777 /*
1778 * stfsu Store Floating-Point Single with Update
1779 * .648
1780 */
1781 void ppc_opc_stfsu()
1782 {
1783 if ((gCPU.msr & MSR_FP) == 0) {
1784 ppc_exception(PPC_EXC_NO_FPU);
1785 return;
1786 }
1787 int rA, frS;
1788 uint32 imm;
1789 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, frS, rA, imm);
1790 // FIXME: check rA!=0
1791 uint32 s;
1792 ppc_double d;
1793 ppc_fpu_unpack_double(d, gCPU.fpr[frS]);
1794 ppc_fpu_pack_single(d, s);
1795 int ret = ppc_write_effective_word(gCPU.gpr[rA]+imm, s);
1796 if (ret == PPC_MMU_OK) {
1797 gCPU.gpr[rA] += imm;
1798 }
1799 }
1800 /*
1801 * stfsux Store Floating-Point Single with Update Indexed
1802 * .649
1803 */
1804 void ppc_opc_stfsux()
1805 {
1806 if ((gCPU.msr & MSR_FP) == 0) {
1807 ppc_exception(PPC_EXC_NO_FPU);
1808 return;
1809 }
1810 int rA, frS, rB;
1811 PPC_OPC_TEMPL_X(gCPU.current_opc, frS, rA, rB);
1812 // FIXME: check rA!=0
1813 uint32 s;
1814 ppc_double d;
1815 ppc_fpu_unpack_double(d, gCPU.fpr[frS]);
1816 ppc_fpu_pack_single(d, s);
1817 int ret = ppc_write_effective_word(gCPU.gpr[rA]+gCPU.gpr[rB], s);
1818 if (ret == PPC_MMU_OK) {
1819 gCPU.gpr[rA] += gCPU.gpr[rB];
1820 }
1821 }
1822 /*
1823 * stfsx Store Floating-Point Single Indexed
1824 * .650
1825 */
1826 void ppc_opc_stfsx()
1827 {
1828 if ((gCPU.msr & MSR_FP) == 0) {
1829 ppc_exception(PPC_EXC_NO_FPU);
1830 return;
1831 }
1832 int rA, frS, rB;
1833 PPC_OPC_TEMPL_X(gCPU.current_opc, frS, rA, rB);
1834 uint32 s;
1835 ppc_double d;
1836 ppc_fpu_unpack_double(d, gCPU.fpr[frS]);
1837 ppc_fpu_pack_single(d, s);
1838 ppc_write_effective_word((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB], s) != PPC_MMU_FATAL;
1839 }
1840 /*
1841 * sth Store Half Word
1842 * .651
1843 */
1844 void ppc_opc_sth()
1845 {
1846 int rA, rS;
1847 uint32 imm;
1848 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, rS, rA, imm);
1849 ppc_write_effective_half((rA?gCPU.gpr[rA]:0)+imm, (uint16)gCPU.gpr[rS]) != PPC_MMU_FATAL;
1850 }
1851 /*
1852 * sthbrx Store Half Word Byte-Reverse Indexed
1853 * .652
1854 */
1855 void ppc_opc_sthbrx()
1856 {
1857 int rA, rS, rB;
1858 PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
1859 ppc_write_effective_half((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB], ppc_bswap_half(gCPU.gpr[rS])) != PPC_MMU_FATAL;
1860 }
1861 /*
1862 * sthu Store Half Word with Update
1863 * .653
1864 */
1865 void ppc_opc_sthu()
1866 {
1867 int rA, rS;
1868 uint32 imm;
1869 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, rS, rA, imm);
1870 // FIXME: check rA!=0
1871 int ret = ppc_write_effective_half(gCPU.gpr[rA]+imm, (uint16)gCPU.gpr[rS]);
1872 if (ret == PPC_MMU_OK) {
1873 gCPU.gpr[rA] += imm;
1874 }
1875 }
1876 /*
1877 * sthux Store Half Word with Update Indexed
1878 * .654
1879 */
1880 void ppc_opc_sthux()
1881 {
1882 int rA, rS, rB;
1883 PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
1884 // FIXME: check rA!=0
1885 int ret = ppc_write_effective_half(gCPU.gpr[rA]+gCPU.gpr[rB], (uint16)gCPU.gpr[rS]);
1886 if (ret == PPC_MMU_OK) {
1887 gCPU.gpr[rA] += gCPU.gpr[rB];
1888 }
1889 }
1890 /*
1891 * sthx Store Half Word Indexed
1892 * .655
1893 */
1894 void ppc_opc_sthx()
1895 {
1896 int rA, rS, rB;
1897 PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
1898 ppc_write_effective_half((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB], (uint16)gCPU.gpr[rS]) != PPC_MMU_FATAL;
1899 }
1900 /*
1901 * stmw Store Multiple Word
1902 * .656
1903 */
1904 void ppc_opc_stmw()
1905 {
1906 int rS, rA;
1907 uint32 imm;
1908 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, rS, rA, imm);
1909 uint32 ea = (rA ? gCPU.gpr[rA] : 0) + imm;
1910 while (rS <= 31) {
1911 if (ppc_write_effective_word(ea, gCPU.gpr[rS])) {
1912 return;
1913 }
1914 rS++;
1915 ea += 4;
1916 }
1917 }
1918 /*
1919 * stswi Store String Word Immediate
1920 * .657
1921 */
1922 void ppc_opc_stswi()
1923 {
1924 int rA, rS, NB;
1925 PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, NB);
1926 if (NB==0) NB=32;
1927 uint32 ea = rA ? gCPU.gpr[rA] : 0;
1928 uint32 r = 0;
1929 int i = 0;
1930
1931 while (NB > 0) {
1932 if (!i) {
1933 r = gCPU.gpr[rS];
1934 rS++;
1935 rS%=32;
1936 i = 4;
1937 }
1938 if (ppc_write_effective_byte(ea, (r>>24))) {
1939 return;
1940 }
1941 r<<=8;
1942 ea++;
1943 i--;
1944 NB--;
1945 }
1946 }
1947 /*
1948 * stswx Store String Word Indexed
1949 * .658
1950 */
1951 void ppc_opc_stswx()
1952 {
1953 int rA, rS, rB;
1954 PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
1955 int NB = XER_n(gCPU.xer);
1956 uint32 ea = gCPU.gpr[rB] + (rA ? gCPU.gpr[rA] : 0);
1957 uint32 r = 0;
1958 int i = 0;
1959
1960 while (NB > 0) {
1961 if (!i) {
1962 r = gCPU.gpr[rS];
1963 rS++;
1964 rS%=32;
1965 i = 4;
1966 }
1967 if (ppc_write_effective_byte(ea, (r>>24))) {
1968 return;
1969 }
1970 r<<=8;
1971 ea++;
1972 i--;
1973 NB--;
1974 }
1975 }
1976 /*
1977 * stw Store Word
1978 * .659
1979 */
1980 void ppc_opc_stw()
1981 {
1982 int rA, rS;
1983 uint32 imm;
1984 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, rS, rA, imm);
1985 ppc_write_effective_word((rA?gCPU.gpr[rA]:0)+imm, gCPU.gpr[rS]) != PPC_MMU_FATAL;
1986 }
1987 /*
1988 * stwbrx Store Word Byte-Reverse Indexed
1989 * .660
1990 */
1991 void ppc_opc_stwbrx()
1992 {
1993 int rA, rS, rB;
1994 PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
1995 // FIXME: redundant (doing the same thing twice)
1996 ppc_write_effective_word((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB], ppc_bswap_word(gCPU.gpr[rS])) != PPC_MMU_FATAL;
1997 }
1998 /*
1999 * stwcx. Store Word Conditional Indexed
2000 * .661
2001 */
2002 void ppc_opc_stwcx_()
2003 {
2004 int rA, rS, rB;
2005 PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
2006 gCPU.cr &= 0x0fffffff;
2007 if (gCPU.have_reservation) {
2008 gCPU.have_reservation = false;
2009 uint32 v;
2010 if (ppc_read_effective_word((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB], v)) {
2011 return;
2012 }
2013 if (v==gCPU.reserve) {
2014 if (ppc_write_effective_word((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB], gCPU.gpr[rS])) {
2015 return;
2016 }
2017 gCPU.cr |= CR_CR0_EQ;
2018 }
2019 if (gCPU.xer & XER_SO) {
2020 gCPU.cr |= CR_CR0_SO;
2021 }
2022 }
2023 }
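/*
 * Note on the reservation emulation above: the reservation is tracked
 * by value rather than by address. stwcx. succeeds if the word
 * currently at the target address still equals the value that lwarx
 * loaded into gCPU.reserve, a simplification of the architectural
 * reservation granule.
 */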
2024 /*
2025 * stwu Store Word with Update
2026 * .663
2027 */
2028 void ppc_opc_stwu()
2029 {
2030 int rA, rS;
2031 uint32 imm;
2032 PPC_OPC_TEMPL_D_SImm(gCPU.current_opc, rS, rA, imm);
2033 // FIXME: check rA!=0
2034 int ret = ppc_write_effective_word(gCPU.gpr[rA]+imm, gCPU.gpr[rS]);
2035 if (ret == PPC_MMU_OK) {
2036 gCPU.gpr[rA] += imm;
2037 }
2038 }
2039 /*
2040 * stwux Store Word with Update Indexed
2041 * .664
2042 */
2043 void ppc_opc_stwux()
2044 {
2045 int rA, rS, rB;
2046 PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
2047 // FIXME: check rA!=0
2048 int ret = ppc_write_effective_word(gCPU.gpr[rA]+gCPU.gpr[rB], gCPU.gpr[rS]);
2049 if (ret == PPC_MMU_OK) {
2050 gCPU.gpr[rA] += gCPU.gpr[rB];
2051 }
2052 }
2053 /*
2054 * stwx Store Word Indexed
2055 * .665
2056 */
2057 void ppc_opc_stwx()
2058 {
2059 int rA, rS, rB;
2060 PPC_OPC_TEMPL_X(gCPU.current_opc, rS, rA, rB);
2061 ppc_write_effective_word((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB], gCPU.gpr[rS]) != PPC_MMU_FATAL;
2062 }
2063
2064 /* stvx Store Vector Indexed
2065 * v.134
2066 */
2067 void ppc_opc_stvx()
2068 {
2069 #ifndef __VEC_EXC_OFF__
2070 if ((gCPU.msr & MSR_VEC) == 0) {
2071 ppc_exception(PPC_EXC_NO_VEC);
2072 return;
2073 }
2074 #endif
2075 VECTOR_DEBUG;
2076 int rA, vrS, rB;
2077 PPC_OPC_TEMPL_X(gCPU.current_opc, vrS, rA, rB);
2078
2079 int ea = ((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB]);
2080
2081 ppc_write_effective_qword(ea, gCPU.vr[vrS]) != PPC_MMU_FATAL;
2082 }
2083
2084 /* stvxl Store Vector Indexed LRU
2085 * v.135
2086 */
2087 void ppc_opc_stvxl()
2088 {
2089 ppc_opc_stvx();
2090 /* This instruction should hint to the cache that the value won't be
2091 * needed again in memory anytime soon. We don't emulate the cache,
2092 * so this is effectively exactly the same as stvx.
2093 */
2094 }
2095
2096 /* stvebx Store Vector Element Byte Indexed
2097 * v.131
2098 */
2099 void ppc_opc_stvebx()
2100 {
2101 #ifndef __VEC_EXC_OFF__
2102 if ((gCPU.msr & MSR_VEC) == 0) {
2103 ppc_exception(PPC_EXC_NO_VEC);
2104 return;
2105 }
2106 #endif
2107 VECTOR_DEBUG;
2108 int rA, vrS, rB;
2109 PPC_OPC_TEMPL_X(gCPU.current_opc, vrS, rA, rB);
2110 uint32 ea;
2111 ea = (rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB];
2112 ppc_write_effective_byte(ea, VECT_B(gCPU.vr[vrS], ea & 0xf));
2113 }
2114
2115 /* stvehx Store Vector Element Half Word Indexed
2116 * v.132
2117 */
2118 void ppc_opc_stvehx()
2119 {
2120 #ifndef __VEC_EXC_OFF__
2121 if ((gCPU.msr & MSR_VEC) == 0) {
2122 ppc_exception(PPC_EXC_NO_VEC);
2123 return;
2124 }
2125 #endif
2126 VECTOR_DEBUG;
2127 int rA, vrS, rB;
2128 PPC_OPC_TEMPL_X(gCPU.current_opc, vrS, rA, rB);
2129 uint32 ea;
2130 ea = ((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB]) & ~1;
2131 ppc_write_effective_half(ea, VECT_H(gCPU.vr[vrS], (ea & 0xf) >> 1));
2132 }
2133
2134 /* stvewx Store Vector Element Word Indexed
2135 * v.133
2136 */
2137 void ppc_opc_stvewx()
2138 {
2139 #ifndef __VEC_EXC_OFF__
2140 if ((gCPU.msr & MSR_VEC) == 0) {
2141 ppc_exception(PPC_EXC_NO_VEC);
2142 return;
2143 }
2144 #endif
2145 VECTOR_DEBUG;
2146 int rA, vrS, rB;
2147 PPC_OPC_TEMPL_X(gCPU.current_opc, vrS, rA, rB);
2148 uint32 ea;
2149 ea = ((rA?gCPU.gpr[rA]:0)+gCPU.gpr[rB]) & ~3;
2150 ppc_write_effective_word(ea, VECT_W(gCPU.vr[vrS], (ea & 0xf) >> 2));
2151 }
2152
2153 /* dstst Data Stream Touch for Store
2154 * v.117
2155 */
2156 void ppc_opc_dstst()
2157 {
2158 VECTOR_DEBUG;
2159 /* Since we are not emulating the cache, this is a nop */
2160 }
2161
2162 /* dss Data Stream Stop
2163 * v.114
2164 */
2165 void ppc_opc_dss()
2166 {
2167 VECTOR_DEBUG;
2168 /* Since we are not emulating the cache, this is a nop */
2169 }
