This is a repository of my old source code, which is no longer updated. Go to git.rot13.org for current projects!

Contents of /trunk/ppc32_mem.c



Revision 8
Sat Oct 6 16:24:54 2007 UTC by dpavlin
Original Path: upstream/dynamips-0.2.7-RC2/ppc32_mem.c
File MIME type: text/plain
File size: 29056 byte(s)
dynamips-0.2.7-RC2

1 /*
2 * Cisco router simulation platform.
3 * Copyright (c) 2006 Christophe Fillot (cf@utc.fr)
4 *
5 * PowerPC MMU.
6 */
7
8 #define _GNU_SOURCE
9 #include <stdio.h>
10 #include <stdlib.h>
11 #include <unistd.h>
12 #include <string.h>
13 #include <sys/types.h>
14 #include <sys/stat.h>
15 #include <sys/mman.h>
16 #include <fcntl.h>
17 #include <assert.h>
18
19 #include "cpu.h"
20 #include "vm.h"
21 #include "dynamips.h"
22 #include "memory.h"
23 #include "device.h"
24 #include "ppc32_jit.h"
25
26 #define DEBUG_ICBI 0
27
28 /* Memory access with special access mask */
29 void ppc32_access_special(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int cid,
30 m_uint32_t mask,u_int op_code,u_int op_type,
31 u_int op_size,m_uint64_t *data,u_int *exc)
32 {
33 switch(mask) {
34 case MTS_ACC_T:
35 if (op_code != PPC_MEMOP_LOOKUP) {
36 #if DEBUG_MTS_ACC_T
37 cpu_log(cpu->gen,
38 "MTS","MMU exception for address 0x%8.8x at ia=0x%8.8x "
39 "(%s access, size=%u)\n",
40 vaddr,cpu->ia,(op_type == MTS_READ) ?
41 "read":"write",op_size);
42 //ppc32_dump_regs(cpu->gen);
43 #if MEMLOG_ENABLE
44 memlog_dump(cpu->gen);
45 #endif
46 #endif
47
48 if (cid == PPC32_MTS_DCACHE) {
49 cpu->dsisr = PPC32_DSISR_NOTRANS;
50
51 if (op_type == MTS_WRITE)
52 cpu->dsisr |= PPC32_DSISR_STORE;
53
54 cpu->dar = vaddr;
55 ppc32_trigger_exception(cpu,PPC32_EXC_DSI);
56 }
57 }
58
59 *exc = 1;
60 break;
61
62 case MTS_ACC_U:
63 #if DEBUG_MTS_ACC_U
64 if (op_type == MTS_READ)
65 cpu_log(cpu->gen,
66 "MTS","read access to undefined address 0x%8.8x at "
67 "ia=0x%8.8x (size=%u)\n",vaddr,cpu->ia,op_size);
68 else
69 cpu_log(cpu->gen,
70 "MTS","write access to undefined address 0x%8.8x at "
71 "ia=0x%8.8x, value=0x%8.8llx (size=%u)\n",
72 vaddr,cpu->ia,*data,op_size);
73 #endif
74 if (op_type == MTS_READ)
75 *data = 0;
76 break;
77 }
78 }
79
80 /* Initialize the MTS subsystem for the specified CPU */
81 int ppc32_mem_init(cpu_ppc_t *cpu)
82 {
83 size_t len;
84
85 /* Initialize the cache entries to 0 (empty) */
86 len = MTS32_HASH_SIZE * sizeof(mts32_entry_t);
87
88 if (!(cpu->mts_cache[PPC32_MTS_ICACHE] = malloc(len)))
89 return(-1);
90
91 if (!(cpu->mts_cache[PPC32_MTS_DCACHE] = malloc(len)))
92 return(-1);
93
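/* Filling the arrays with 0xFF gives every entry a gvpa of 0xFFFFFFFF,
   which can never match a page-aligned virtual address, so the first
   access through each slot falls back to the slow lookup path. */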
94 memset(cpu->mts_cache[PPC32_MTS_ICACHE],0xFF,len);
95 memset(cpu->mts_cache[PPC32_MTS_DCACHE],0xFF,len);
96
97 cpu->mts_lookups = 0;
98 cpu->mts_misses = 0;
99 return(0);
100 }
101
102 /* Free memory used by MTS */
103 void ppc32_mem_shutdown(cpu_ppc_t *cpu)
104 {
105 if (cpu != NULL) {
106 /* Free the caches themselves */
107 free(cpu->mts_cache[PPC32_MTS_ICACHE]);
108 free(cpu->mts_cache[PPC32_MTS_DCACHE]);
109 cpu->mts_cache[PPC32_MTS_ICACHE] = NULL;
110 cpu->mts_cache[PPC32_MTS_DCACHE] = NULL;
111 }
112 }
113
114 /* Show MTS detailed information (debugging only!) */
115 void ppc32_mem_show_stats(cpu_gen_t *gen_cpu)
116 {
117 cpu_ppc_t *cpu = CPU_PPC32(gen_cpu);
118 #if DEBUG_MTS_MAP_VIRT
119 mts32_entry_t *entry;
120 u_int i,count;
121 #endif
122
123 printf("\nCPU%u: MTS statistics:\n",cpu->gen->id);
124
125 #if DEBUG_MTS_MAP_VIRT
126 printf("Instruction cache:\n");
127
128 /* Valid hash entries for Instruction Cache */
129 for(count=0,i=0;i<MTS32_HASH_SIZE;i++) {
130 entry = &cpu->mts_cache[PPC32_MTS_ICACHE][i];
131
132 if (!(entry->gvpa & MTS_INV_ENTRY_MASK)) {
133 printf(" %4u: vaddr=0x%8.8x, paddr=0x%8.8x, hpa=%p\n",
134 i,entry->gvpa,entry->gppa,(void *)entry->hpa);
135 count++;
136 }
137 }
138
139 printf(" %u/%u valid hash entries for icache.\n",count,MTS32_HASH_SIZE);
140
141
142 printf("Data cache:\n");
143
144 /* Valid hash entries for Data Cache */
145 for(count=0,i=0;i<MTS32_HASH_SIZE;i++) {
146 entry = &cpu->mts_cache[PPC32_MTS_DCACHE][i];
147
148 if (!(entry->gvpa & MTS_INV_ENTRY_MASK)) {
149 printf(" %4u: vaddr=0x%8.8x, paddr=0x%8.8x, hpa=%p\n",
150 i,entry->gvpa,entry->gppa,(void *)entry->hpa);
151 count++;
152 }
153 }
154
155 printf(" %u/%u valid hash entries for dcache.\n",count,MTS32_HASH_SIZE);
156 #endif
157
158 printf("\n Total lookups: %llu, misses: %llu, efficiency: %g%%\n",
159 cpu->mts_lookups, cpu->mts_misses,
160 100 - ((double)(cpu->mts_misses*100)/
161 (double)cpu->mts_lookups));
162 }
163
164 /* Invalidate the MTS caches (instruction and data) */
165 void ppc32_mem_invalidate_cache(cpu_ppc_t *cpu)
166 {
167 size_t len;
168
169 len = MTS32_HASH_SIZE * sizeof(mts32_entry_t);
170 memset(cpu->mts_cache[PPC32_MTS_ICACHE],0xFF,len);
171 memset(cpu->mts_cache[PPC32_MTS_DCACHE],0xFF,len);
172 }
173
174 /*
175 * MTS mapping.
176 *
177 * It is NOT inlined since it triggers a GCC bug on my config (x86, GCC 3.3.5)
178 */
179 static no_inline struct mts32_entry *
180 ppc32_mem_map(cpu_ppc_t *cpu,u_int op_type,mts_map_t *map,
181 mts32_entry_t *entry,mts32_entry_t *alt_entry)
182 {
183 ppc32_jit_tcb_t *block;
184 struct vdevice *dev;
185 m_uint32_t offset;
186 m_iptr_t host_ptr;
187 m_uint32_t exec_flag = 0;
188 int cow;
189
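/* Resolve the mapping to one of three MTS entry types: a sparse-device
   page (host page, possibly copy-on-write), a device entry whose hpa
   encodes the device id and offset (for devices without direct host
   mapping), or a plain host pointer into the device memory.  RAM-backed
   entries are additionally flagged executable when JIT code already
   exists for the physical page. */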
190 if (!(dev = dev_lookup(cpu->vm,map->paddr,map->cached)))
191 return NULL;
192
193 if (cpu->exec_phys_map) {
194 block = ppc32_jit_find_by_phys_page(cpu,map->paddr >> VM_PAGE_SHIFT);
195
196 if (block)
197 exec_flag = MTS_FLAG_EXEC;
198 }
199
200 if (dev->flags & VDEVICE_FLAG_SPARSE) {
201 host_ptr = dev_sparse_get_host_addr(cpu->vm,dev,map->paddr,op_type,&cow);
202
203 entry->gvpa = map->vaddr;
204 entry->gppa = map->paddr;
205 entry->hpa = host_ptr;
206 entry->flags = (cow) ? MTS_FLAG_COW : 0;
207 entry->flags |= exec_flag;
208 return entry;
209 }
210
211 if (!dev->host_addr || (dev->flags & VDEVICE_FLAG_NO_MTS_MMAP)) {
212 offset = map->paddr - dev->phys_addr;
213
214 alt_entry->gvpa = map->vaddr;
215 alt_entry->gppa = map->paddr;
216 alt_entry->hpa = (dev->id << MTS_DEVID_SHIFT) + offset;
217 alt_entry->flags = MTS_FLAG_DEV;
218 return alt_entry;
219 }
220
221 entry->gvpa = map->vaddr;
222 entry->gppa = map->paddr;
223 entry->hpa = dev->host_addr + (map->paddr - dev->phys_addr);
224 entry->flags = exec_flag;
225 return entry;
226 }
227
228 /* BAT lookup */
229 static forced_inline int ppc32_bat_lookup(cpu_ppc_t *cpu,m_uint32_t vaddr,
230 u_int cid,mts_map_t *map)
231 {
232 m_uint32_t bepi,mask,bl,pr,ubat;
233 int i;
234
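/* Turn MSR[PR] into a 2-bit selector: supervisor mode yields 0x02 (the
   BAT "Vs" valid bit), problem/user mode yields 0x01 (the "Vp" bit).
   A BAT entry is only considered below if its valid bit for the current
   privilege level is set in the upper BAT register. */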
235 pr = (cpu->msr & PPC32_MSR_PR) >> PPC32_MSR_PR_SHIFT;
236 pr = ((~pr << 1) | pr) & 0x03;
237
238 for(i=0;i<PPC32_BAT_NR;i++) {
239 ubat = cpu->bat[cid][i].reg[0];
240
241 if (!(ubat & pr))
242 continue;
243
244 //bl = (ubat & PPC32_UBAT_BL_MASK) >> PPC32_UBAT_BL_SHIFT;
245 bl = (ubat & PPC32_UBAT_XBL_MASK) >> PPC32_UBAT_XBL_SHIFT;
246
247 mask = ~bl << PPC32_BAT_ADDR_SHIFT;
248 bepi = ubat & PPC32_UBAT_BEPI_MASK;
249
250 if (bepi == (vaddr & mask)) {
251 map->vaddr = vaddr & PPC32_MIN_PAGE_MASK;
252 map->paddr = cpu->bat[cid][i].reg[1] & PPC32_LBAT_BRPN_MASK;
253 map->paddr += map->vaddr - bepi;
254 map->cached = FALSE;
255 return(TRUE);
256 }
257 }
258
259 return(FALSE);
260 }
261
262 /* Memory slow lookup */
263 static mts32_entry_t *ppc32_slow_lookup(cpu_ppc_t *cpu,m_uint32_t vaddr,
264 u_int cid,u_int op_code,u_int op_size,
265 u_int op_type,m_uint64_t *data,
266 u_int *exc,mts32_entry_t *alt_entry)
267 {
268 m_uint32_t hash_bucket,segment,vsid;
269 m_uint32_t hash,tmp,pteg_offset,pte_key,key,pte2;
270 mts32_entry_t *entry;
271 m_uint8_t *pte_haddr;
272 m_uint64_t paddr;
273 mts_map_t map;
274 int i;
275
276 #if DEBUG_MTS_STATS
277 cpu->mts_misses++;
278 #endif
279
280 hash_bucket = MTS32_HASH(vaddr);
281 entry = &cpu->mts_cache[cid][hash_bucket];
282
283 /* No translation - cover the 4GB space */
284 if (((cid == PPC32_MTS_ICACHE) && !(cpu->msr & PPC32_MSR_IR)) ||
285 ((cid == PPC32_MTS_DCACHE) && !(cpu->msr & PPC32_MSR_DR)))
286 {
287 map.vaddr = vaddr & PPC32_MIN_PAGE_MASK;
288 map.paddr = vaddr & PPC32_MIN_PAGE_MASK;
289 map.cached = FALSE;
290
291 if (!(entry = ppc32_mem_map(cpu,op_type,&map,entry,alt_entry)))
292 goto err_undef;
293
294 return entry;
295 }
296
297 /* Walk through the BAT registers */
298 if (ppc32_bat_lookup(cpu,vaddr,cid,&map)) {
299 if (!(entry = ppc32_mem_map(cpu,op_type,&map,entry,alt_entry)))
300 goto err_undef;
301
302 return entry;
303 }
304
305 if (unlikely(!cpu->sdr1))
306 goto no_pte;
307
308 /* Get the virtual segment identifier */
309 segment = vaddr >> 28;
310 vsid = cpu->sr[segment] & PPC32_SD_VSID_MASK;
311
312 /* Compute the first hash value */
313 hash = (vaddr >> PPC32_MIN_PAGE_SHIFT) & 0xFFFF;
314 hash ^= vsid;
315 hash &= 0x7FFFFF;
316
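/* Locate the PTEG selected by the hash: sdr1_hptr points at the start of
   the hashed page table (HTABORG) in host memory, the low 10 hash bits
   pick one of the 1024 base groups of 64 bytes each, and the upper hash
   bits, masked by the size-extension field of SDR1, extend the index in
   64 KB steps for larger tables. */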
317 tmp = (hash >> 10) & (cpu->sdr1 & PPC32_SDR1_HTMEXT_MASK);
318 pteg_offset = (hash & 0x3FF) << 6;
319 pteg_offset |= tmp << 16;
320 pte_haddr = cpu->sdr1_hptr + pteg_offset;
321
322 pte_key = 0x80000000 | (vsid << 7);
323 pte_key |= (vaddr >> 22) & 0x3F;
324
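/* pte_key mirrors the upper PTE word: valid bit (0x80000000), the VSID
   in bits 30:7, H=0 for the primary hash, and the 6-bit abbreviated page
   index taken from vaddr bits 27:22.  The loop below scans the 8 PTEs of
   the group for an exact match; the same search is then repeated on the
   secondary hash with the H bit (0x40) set. */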
325 for(i=0;i<8;i++,pte_haddr+=PPC32_PTE_SIZE) {
326 key = vmtoh32(*(m_uint32_t *)pte_haddr);
327
328 if (key == pte_key)
329 goto pte_lookup_done;
330 }
331
332 /* Secondary hash value */
333 hash = (~hash) & 0x7FFFFF;
334
335 tmp = (hash >> 10) & (cpu->sdr1 & PPC32_SDR1_HTMEXT_MASK);
336 pteg_offset = (hash & 0x3FF) << 6;
337 pteg_offset |= tmp << 16;
338 pte_haddr = cpu->sdr1_hptr + pteg_offset;
339
340 pte_key = 0x80000040 | (vsid << 7);
341 pte_key |= (vaddr >> 22) & 0x3F;
342
343 for(i=0;i<8;i++,pte_haddr+=PPC32_PTE_SIZE) {
344 key = vmtoh32(*(m_uint32_t *)pte_haddr);
345
346 if (key == pte_key)
347 goto pte_lookup_done;
348 }
349
350 no_pte:
351 /* No matching PTE for this virtual address */
352 ppc32_access_special(cpu,vaddr,cid,MTS_ACC_T,op_code,op_type,op_size,
353 data,exc);
354 return NULL;
355
356 pte_lookup_done:
357 pte2 = vmtoh32(*(m_uint32_t *)(pte_haddr + sizeof(m_uint32_t)));
358 paddr = pte2 & PPC32_PTEL_RPN_MASK;
359 paddr |= (pte2 & PPC32_PTEL_XPN_MASK) << (33 - PPC32_PTEL_XPN_SHIFT);
360 paddr |= (pte2 & PPC32_PTEL_X_MASK) << (32 - PPC32_PTEL_X_SHIFT);
361
362 map.vaddr = vaddr & ~PPC32_MIN_PAGE_IMASK;
363 map.paddr = paddr;
364 map.cached = FALSE;
365
366 if ((entry = ppc32_mem_map(cpu,op_type,&map,entry,alt_entry)))
367 return entry;
368
369 err_undef:
370 ppc32_access_special(cpu,vaddr,cid,MTS_ACC_U,op_code,op_type,op_size,
371 data,exc);
372 return NULL;
373 }
374
375 /* Memory access */
376 static inline void *ppc32_mem_access(cpu_ppc_t *cpu,m_uint32_t vaddr,
377 u_int cid,u_int op_code,u_int op_size,
378 u_int op_type,m_uint64_t *data,
379 u_int *exc)
380 {
381 mts32_entry_t *entry,alt_entry;
382 ppc32_jit_tcb_t *block;
383 m_uint32_t hash_bucket;
384 m_uint32_t phys_page;
385 m_uint32_t ia_hash;
386 m_iptr_t haddr;
387 u_int dev_id;
388 int cow;
389
390 #if MEMLOG_ENABLE
391 /* Record the memory access */
392 memlog_rec_access(cpu->gen,vaddr,*data,op_size,op_type);
393 #endif
394
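/* Fast path: the page number of vaddr selects a slot in the direct-mapped
   MTS cache.  On a hit, the host address is simply entry->hpa plus the
   page offset; only misses (and copy-on-write stores) take the slow
   lookup below. */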
395 *exc = 0;
396 hash_bucket = MTS32_HASH(vaddr);
397 entry = &cpu->mts_cache[cid][hash_bucket];
398
399 #if DEBUG_MTS_STATS
400 cpu->mts_lookups++;
401 #endif
402
403 /* Copy-On-Write for sparse device ? */
404 cow = (op_type == MTS_WRITE) && (entry->flags & MTS_FLAG_COW);
405
406 /* Slow lookup if nothing found in cache */
407 if (unlikely(((vaddr & PPC32_MIN_PAGE_MASK) != entry->gvpa) || cow)) {
408 entry = cpu->mts_slow_lookup(cpu,vaddr,cid,op_code,op_size,op_type,
409 data,exc,&alt_entry);
410 if (!entry)
411 return NULL;
412
413 if (entry->flags & MTS_FLAG_DEV) {
414 dev_id = (entry->hpa & MTS_DEVID_MASK) >> MTS_DEVID_SHIFT;
415 haddr = entry->hpa & MTS_DEVOFF_MASK;
416 haddr += vaddr - entry->gvpa;
417 return(dev_access_fast(cpu->gen,dev_id,haddr,op_size,op_type,data));
418 }
419 }
420
421 /* Invalidate JIT code for written pages */
422 if ((op_type == MTS_WRITE) && (entry->flags & MTS_FLAG_EXEC)) {
423 if (cpu->exec_phys_map) {
424 phys_page = entry->gppa >> VM_PAGE_SHIFT;
425
426 if (vaddr >= PPC32_EXC_SYS_RST) {
427 block = ppc32_jit_find_by_phys_page(cpu,phys_page);
428
429 if (block != NULL) {
430 //printf("Invalidation of block 0x%8.8x\n",block->start_ia);
431 ia_hash = ppc32_jit_get_ia_hash(block->start_ia);
432 ppc32_jit_tcb_free(cpu,block,TRUE);
433
434 if (cpu->exec_blk_map[ia_hash] == block)
435 cpu->exec_blk_map[ia_hash] = NULL;
436
437 entry->flags &= ~MTS_FLAG_EXEC;
438 }
439 }
440 }
441 }
442
443 /* Raw memory access */
444 haddr = entry->hpa + (vaddr & PPC32_MIN_PAGE_IMASK);
445 #if MEMLOG_ENABLE
446 memlog_update_read(cpu->gen,haddr);
447 #endif
448 return((void *)haddr);
449 }
450
451 /* Memory data access */
452 #define PPC32_MEM_DACCESS(cpu,vaddr,op_code,op_size,op_type,data,exc) \
453 ppc32_mem_access((cpu),(vaddr),PPC32_MTS_DCACHE,(op_code),(op_size),\
454 (op_type),(data),(exc))
455
456 /* Virtual address to physical page translation */
457 static fastcall int ppc32_translate(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int cid,
458 m_uint32_t *phys_page)
459 {
460 mts32_entry_t *entry,alt_entry;
461 m_uint32_t hash_bucket;
462 m_uint64_t data = 0;
463 u_int exc = 0;
464
465 hash_bucket = MTS32_HASH(vaddr);
466 entry = &cpu->mts_cache[cid][hash_bucket];
467
468 /* Slow lookup if nothing found in cache */
469 if (unlikely(((m_uint32_t)vaddr & PPC32_MIN_PAGE_MASK) != entry->gvpa)) {
470 entry = cpu->mts_slow_lookup(cpu,vaddr,cid,PPC_MEMOP_LOOKUP,4,MTS_READ,
471 &data,&exc,&alt_entry);
472 if (!entry)
473 return(-1);
474 }
475
476 *phys_page = entry->gppa >> PPC32_MIN_PAGE_SHIFT;
477 return(0);
478 }
479
480 /* Virtual address lookup */
481 static void *ppc32_mem_lookup(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int cid)
482 {
483 m_uint64_t data;
484 u_int exc;
485 return(ppc32_mem_access(cpu,vaddr,cid,PPC_MEMOP_LOOKUP,4,MTS_READ,
486 &data,&exc));
487 }
488
489 /* Set a BAT register */
490 int ppc32_set_bat(cpu_ppc_t *cpu,struct ppc32_bat_prog *bp)
491 {
492 struct ppc32_bat_reg *bat;
493
494 if ((bp->type != PPC32_IBAT_IDX) && (bp->type != PPC32_DBAT_IDX))
495 return(-1);
496
497 if (bp->index >= PPC32_BAT_NR)
498 return(-1);
499
500 bat = &cpu->bat[bp->type][bp->index];
501 bat->reg[0] = bp->hi;
502 bat->reg[1] = bp->lo;
503 return(0);
504 }
505
506 /* Load BAT registers from a BAT array */
507 void ppc32_load_bat_array(cpu_ppc_t *cpu,struct ppc32_bat_prog *bp)
508 {
509 while(bp->index != -1) {
510 ppc32_set_bat(cpu,bp);
511 bp++;
512 }
513 }
514
515 /* Get the host address for SDR1 */
516 int ppc32_set_sdr1(cpu_ppc_t *cpu,m_uint32_t sdr1)
517 {
518 struct vdevice *dev;
519 m_uint64_t pt_addr;
520
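/* Rebuild the physical base address of the hashed page table from the
   HTABORG field plus the extension bits of SDR1, then resolve it once to
   a host pointer so later PTE lookups can read guest memory directly. */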
521 cpu->sdr1 = sdr1;
522 pt_addr = sdr1 & PPC32_SDR1_HTABORG_MASK;
523 pt_addr |= ((m_uint64_t)(sdr1 & PPC32_SDR1_HTABEXT_MASK) << 20);
524
525 if (!(dev = dev_lookup(cpu->vm,pt_addr,TRUE))) {
526 fprintf(stderr,"ppc32_set_sdr1: unable to find haddr for SDR1=0x%8.8x\n",
527 sdr1);
528 return(-1);
529 }
530
531 cpu->sdr1_hptr = (char *)dev->host_addr + (pt_addr - dev->phys_addr);
532 return(0);
533 }
534
535 /* Initialize the page table */
536 int ppc32_init_page_table(cpu_ppc_t *cpu)
537 {
538 m_uint32_t pt_size;
539
540 if (!cpu->sdr1_hptr)
541 return(-1);
542
543 pt_size = (1 + (cpu->sdr1 & PPC32_SDR1_HTMEXT_MASK)) << 16;
544 memset(cpu->sdr1_hptr,0,pt_size);
545 return(0);
546 }
547
548 /* Map a page */
549 int ppc32_map_page(cpu_ppc_t *cpu,u_int vsid,m_uint32_t vaddr,m_uint64_t paddr,
550 u_int wimg,u_int pp)
551 {
552 m_uint32_t hash,tmp,pteg_offset,key;
553 m_uint8_t *pte_haddr;
554 int i;
555
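/* Search the primary PTEG for an invalid (free) PTE slot; if the group is
   full, retry with the secondary hash and the H bit set.  Fail only when
   both groups are full. */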
556 /* Compute the first hash value */
557 hash = (vaddr >> PPC32_MIN_PAGE_SHIFT) & 0xFFFF;
558 hash ^= vsid;
559 hash &= 0x7FFFFF;
560
561 tmp = (hash >> 10) & (cpu->sdr1 & PPC32_SDR1_HTMEXT_MASK);
562 pteg_offset = (hash & 0x3FF) << 6;
563 pteg_offset |= tmp << 16;
564 pte_haddr = cpu->sdr1_hptr + pteg_offset;
565
566 for(i=0;i<8;i++,pte_haddr+=PPC32_PTE_SIZE) {
567 key = vmtoh32(*(m_uint32_t *)pte_haddr);
568
569 if (!(key & PPC32_PTEU_V)) {
570 hash = 0;
571 goto free_pte_found;
572 }
573 }
574
575 /* Secondary hash value */
576 hash = (~hash) & 0x7FFFFF;
577
578 tmp = (hash >> 10) & (cpu->sdr1 & PPC32_SDR1_HTMEXT_MASK);
579 pteg_offset = (hash & 0x3FF) << 6;
580 pteg_offset |= tmp << 16;
581 pte_haddr = cpu->sdr1_hptr + pteg_offset;
582
583 for(i=0;i<8;i++,pte_haddr+=PPC32_PTE_SIZE) {
584 key = vmtoh32(*(m_uint32_t *)pte_haddr);
585
586 if (!(key & PPC32_PTEU_V)) {
587 hash = PPC32_PTEU_H;
588 goto free_pte_found;
589 }
590 }
591
592 /* No free PTE found */
593 return(-1);
594
595 free_pte_found:
596 tmp = PPC32_PTEU_V | (vsid << PPC32_PTEU_VSID_SHIFT) | hash;
597 tmp |= (vaddr >> 22) & 0x3F;
598 *(m_uint32_t *)pte_haddr = htovm32(tmp);
599
600 tmp = paddr & PPC32_PTEL_RPN_MASK;   /* PTE lower word: real page number taken from paddr */
601 tmp |= (paddr >> (32 - PPC32_PTEL_X_SHIFT)) & PPC32_PTEL_X_MASK;
602 tmp |= (paddr >> (33 - PPC32_PTEL_XPN_SHIFT)) & PPC32_PTEL_XPN_MASK;
603
604 tmp |= (wimg << PPC32_PTEL_WIMG_SHIFT) + pp;
605 *(m_uint32_t *)(pte_haddr+sizeof(m_uint32_t)) = htovm32(tmp);
606 return(0);
607 }
608
609 /* Map a memory zone */
610 int ppc32_map_zone(cpu_ppc_t *cpu,u_int vsid,m_uint32_t vaddr,m_uint64_t paddr,
611 m_uint32_t size,u_int wimg,u_int pp)
612 {
613 while(size > 0) {
614 if (ppc32_map_page(cpu,vsid,vaddr,paddr,wimg,pp) == -1)
615 return(-1);
616
617 size -= PPC32_MIN_PAGE_SIZE;
618 vaddr += PPC32_MIN_PAGE_SIZE;
619 paddr += PPC32_MIN_PAGE_SIZE;
620 }
621
622 return(0);
623 }
624
625 /* PowerPC 405 TLB masks */
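/* One mask per TLB SIZE encoding, i.e. page sizes of 1 KB, 4 KB, 16 KB,
   64 KB, 256 KB, 1 MB, 4 MB and 16 MB respectively. */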
626 static m_uint32_t ppc405_tlb_masks[8] = {
627 0xFFFFFC00, 0xFFFFF000, 0xFFFFC000, 0xFFFF0000,
628 0xFFFC0000, 0xFFF00000, 0xFFC00000, 0xFF000000,
629 };
630
631 /* PowerPC 405 slow lookup */
632 static mts32_entry_t *ppc405_slow_lookup(cpu_ppc_t *cpu,m_uint32_t vaddr,
633 u_int cid,u_int op_code,u_int op_size,
634 u_int op_type,m_uint64_t *data,
635 u_int *exc,mts32_entry_t *alt_entry)
636 {
637 struct ppc405_tlb_entry *tlb_entry;
638 m_uint32_t hash_bucket,mask;
639 m_uint32_t page_size;
640 mts32_entry_t *entry;
641 mts_map_t map;
642 int i;
643
644 #if DEBUG_MTS_STATS
645 cpu->mts_misses++;
646 #endif
647
648 hash_bucket = MTS32_HASH(vaddr);
649 entry = &cpu->mts_cache[cid][hash_bucket];
650
651 /* No translation - cover the 4GB space */
652 if (((cid == PPC32_MTS_ICACHE) && !(cpu->msr & PPC32_MSR_IR)) ||
653 ((cid == PPC32_MTS_DCACHE) && !(cpu->msr & PPC32_MSR_DR)))
654 {
655 map.vaddr = vaddr & PPC32_MIN_PAGE_MASK;
656 map.paddr = vaddr & PPC32_MIN_PAGE_MASK;
657 map.cached = FALSE;
658
659 if (!(entry = ppc32_mem_map(cpu,op_type,&map,entry,alt_entry)))
660 goto err_undef;
661
662 return entry;
663 }
664
665 /* Walk through the unified TLB */
666 for(i=0;i<PPC405_TLB_ENTRIES;i++)
667 {
668 tlb_entry = &cpu->ppc405_tlb[i];
669
670 /* We want a valid entry with TID = PID */
671 if (!(tlb_entry->tlb_hi & PPC405_TLBHI_V) ||
672 (tlb_entry->tid != cpu->ppc405_pid))
673 continue;
674
675 /* Get the address mask corresponding to this entry */
676 page_size = tlb_entry->tlb_hi & PPC405_TLBHI_SIZE_MASK;
677 page_size >>= PPC405_TLBHI_SIZE_SHIFT;
678 mask = ppc405_tlb_masks[page_size];
679
680 /* Matching entry ? */
681 if ((vaddr & mask) == (tlb_entry->tlb_hi & mask)) {
682 map.vaddr = vaddr & mask;
683 map.paddr = tlb_entry->tlb_lo & mask;
684 map.cached = FALSE;
685
686 if (!(entry = ppc32_mem_map(cpu,op_type,&map,entry,alt_entry)))
687 goto err_undef;
688
689 return entry;
690 }
691 }
692
693 /* No matching TLB entry for this virtual address */
694 ppc32_access_special(cpu,vaddr,cid,MTS_ACC_T,op_code,op_type,op_size,
695 data,exc);
696 return NULL;
697
698 err_undef:
699 ppc32_access_special(cpu,vaddr,cid,MTS_ACC_U,op_code,op_type,op_size,
700 data,exc);
701 return NULL;
702 }
703
704 /* Dump a PowerPC 405 TLB entry */
705 static void ppc405_dump_tlb_entry(cpu_ppc_t *cpu,u_int index)
706 {
707 struct ppc405_tlb_entry *entry;
708
709 entry = &cpu->ppc405_tlb[index];
710
711 printf(" %2d: hi=0x%8.8x lo=0x%8.8x tid=0x%2.2x\n",
712 index,entry->tlb_hi,entry->tlb_lo,entry->tid);
713 }
714
715 /* Dump the PowerPC 405 TLB */
716 static void ppc405_dump_tlb(cpu_gen_t *cpu)
717 {
718 cpu_ppc_t *pcpu = CPU_PPC32(cpu);
719 u_int i;
720
721 for(i=0;i<PPC405_TLB_ENTRIES;i++)
722 ppc405_dump_tlb_entry(pcpu,i);
723 }
724
725 /* === PPC Memory Operations ============================================= */
726
727 /* LBZ: Load Byte Zero */
728 fastcall u_int ppc32_lbz(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
729 {
730 m_uint64_t data;
731 void *haddr;
732 u_int exc;
733
734 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_LBZ,1,MTS_READ,&data,&exc);
735 if (likely(haddr != NULL)) data = *(m_uint8_t *)haddr;
736 if (likely(!exc)) cpu->gpr[reg] = data & 0xFF;
737 return(exc);
738 }
739
740 /* LHZ: Load Half-Word Zero */
741 fastcall u_int ppc32_lhz(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
742 {
743 m_uint64_t data;
744 void *haddr;
745 u_int exc;
746
747 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_LHZ,2,MTS_READ,&data,&exc);
748 if (likely(haddr != NULL)) data = vmtoh16(*(m_uint16_t *)haddr);
749 if (likely(!exc)) cpu->gpr[reg] = data & 0xFFFF;
750 return(exc);
751 }
752
753 /* LWZ: Load Word Zero */
754 fastcall u_int ppc32_lwz(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
755 {
756 m_uint64_t data;
757 void *haddr;
758 u_int exc;
759
760 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_LWZ,4,MTS_READ,&data,&exc);
761 if (likely(haddr != NULL)) data = vmtoh32(*(m_uint32_t *)haddr);
762 if (likely(!exc)) cpu->gpr[reg] = data;
763 return(exc);
764 }
765
766 /* LWBR: Load Word Byte Reverse */
767 fastcall u_int ppc32_lwbr(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
768 {
769 m_uint64_t data;
770 void *haddr;
771 u_int exc;
772
773 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_LWBR,4,MTS_READ,&data,&exc);
774 if (likely(haddr != NULL)) data = vmtoh32(*(m_uint32_t *)haddr);
775 if (likely(!exc)) cpu->gpr[reg] = swap32(data);
776 return(exc);
777 }
778
779 /* LHA: Load Half-Word Algebraic */
780 fastcall u_int ppc32_lha(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
781 {
782 m_uint64_t data;
783 void *haddr;
784 u_int exc;
785
786 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_LHZ,2,MTS_READ,&data,&exc);
787 if (likely(haddr != NULL)) data = vmtoh16(*(m_uint16_t *)haddr);
788 if (likely(!exc)) cpu->gpr[reg] = sign_extend_32(data,16);
789 return(exc);
790 }
791
792 /* STB: Store Byte */
793 fastcall u_int ppc32_stb(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
794 {
795 m_uint64_t data;
796 void *haddr;
797 u_int exc;
798
799 data = cpu->gpr[reg] & 0xff;
800 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_STB,1,MTS_WRITE,&data,&exc);
801 if (likely(haddr != NULL)) *(m_uint8_t *)haddr = data;
802 return(exc);
803 }
804
805 /* STH: Store Half-Word */
806 fastcall u_int ppc32_sth(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
807 {
808 m_uint64_t data;
809 void *haddr;
810 u_int exc;
811
812 data = cpu->gpr[reg] & 0xffff;
813 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_STH,2,MTS_WRITE,&data,&exc);
814 if (likely(haddr != NULL)) *(m_uint16_t *)haddr = htovm16(data);
815 return(exc);
816 }
817
818 /* STW: Store Word */
819 fastcall u_int ppc32_stw(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
820 {
821 m_uint64_t data;
822 void *haddr;
823 u_int exc;
824
825 data = cpu->gpr[reg];
826 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_STW,4,MTS_WRITE,&data,&exc);
827 if (likely(haddr != NULL)) *(m_uint32_t *)haddr = htovm32(data);
828 return(exc);
829 }
830
831 /* STWBR: Store Word Byte Reversed */
832 fastcall u_int ppc32_stwbr(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
833 {
834 m_uint64_t data;
835 void *haddr;
836 u_int exc;
837
838 data = swap32(cpu->gpr[reg]);
839 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_STWBR,4,MTS_WRITE,&data,&exc);
840 if (likely(haddr != NULL)) *(m_uint32_t *)haddr = htovm32(data);
841 return(exc);
842 }
843
844 /* LSW: Load String Word */
845 fastcall u_int ppc32_lsw(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
846 {
847 m_uint64_t data;
848 void *haddr;
849 u_int exc;
850
851 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_LSW,1,MTS_READ,&data,&exc);
852 if (likely(haddr != NULL)) data = *(m_uint8_t *)haddr;
853 if (likely(!exc)) cpu->gpr[reg] |= (data & 0xFF) << (24 - cpu->sw_pos);
854 return(exc);
855 }
856
857 /* STSW: Store String Word */
858 fastcall u_int ppc32_stsw(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
859 {
860 m_uint64_t data;
861 void *haddr;
862 u_int exc;
863
864 data = (cpu->gpr[reg] >> (24 - cpu->sw_pos)) & 0xFF;
865 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_STSW,1,MTS_WRITE,&data,&exc);
866 if (likely(haddr != NULL)) *(m_uint8_t *)haddr = data;
867 return(exc);
868 }
869
870 /* LFD: Load Floating-Point Double */
871 fastcall u_int ppc32_lfd(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
872 {
873 m_uint64_t data;
874 void *haddr;
875 u_int exc;
876
877 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_LWZ,8,MTS_READ,&data,&exc);
878 if (likely(haddr != NULL)) data = vmtoh64(*(m_uint64_t *)haddr);
879 if (likely(!exc)) cpu->fpu.reg[reg] = data;
880 return(exc);
881 }
882
883 /* STFD: Store Floating-Point Double */
884 fastcall u_int ppc32_stfd(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int reg)
885 {
886 m_uint64_t data;
887 void *haddr;
888 u_int exc;
889
890 data = cpu->fpu.reg[reg];
891 haddr = PPC32_MEM_DACCESS(cpu,vaddr,PPC_MEMOP_STW,8,MTS_WRITE,&data,&exc);
892 if (likely(haddr != NULL)) *(m_uint64_t *)haddr = htovm64(data);
893 return(exc);
894 }
895
896 /* ICBI: Instruction Cache Block Invalidate */
897 fastcall u_int ppc32_icbi(cpu_ppc_t *cpu,m_uint32_t vaddr,u_int op)
898 {
899 ppc32_jit_tcb_t *block;
900 m_uint32_t phys_page;
901
902 #if DEBUG_ICBI
903 cpu_log(cpu->gen,"MTS","ICBI: ia=0x%8.8x, vaddr=0x%8.8x\n",cpu->ia,vaddr);
904 #endif
905
906 if (!cpu->translate(cpu,vaddr,PPC32_MTS_ICACHE,&phys_page)) {
907 if (cpu->exec_phys_map) {
908 block = ppc32_jit_find_by_phys_page(cpu,phys_page);
909
910 if (block && ppc32_jit_tcb_match(cpu,block)) {
911 #if DEBUG_ICBI
912 cpu_log(cpu->gen,"MTS",
913 "ICBI: removing compiled page at 0x%8.8x, pc=0x%8.8x\n",
914 block->start_ia,cpu->ia);
915 #endif
916 ppc32_jit_tcb_free(cpu,block,TRUE);
917 cpu->exec_blk_map[ppc32_jit_get_ia_hash(vaddr)] = NULL;
918 }
919 else
920 {
921 #if DEBUG_ICBI
922 cpu_log(cpu->gen,"MTS",
923 "ICBI: trying to remove page 0x%llx with pc=0x%llx\n",
924 block->start_ia,cpu->ia);
925 #endif
926 }
927 }
928 }
929
930 return(0);
931 }
932
933 /* ======================================================================== */
934
935 /* Get a BAT register pointer given a SPR index */
936 static inline m_uint32_t *ppc32_get_bat_spr_ptr(cpu_ppc_t *cpu,u_int spr)
937 {
938 m_uint32_t spr_cat,cid,index;
939
940 spr_cat = spr >> 5;
941 if ((spr_cat != 0x10) && (spr_cat != 0x11))
942 return NULL;
943
944 cid = (spr >> 3) & 0x1;
945 index = (spr >> 1) & 0x3;
946
947 if (spr & 0x20)
948 index += 4;
949
950 //printf("GET_BAT_SPR: SPR=%u => cid=%u, index=%u\n",spr,cid,index);
951
952 return(&cpu->bat[cid][index].reg[spr & 0x1]);
953 }
954
955 /* Get a BAT SPR */
956 m_uint32_t ppc32_get_bat_spr(cpu_ppc_t *cpu,u_int spr)
957 {
958 m_uint32_t *p;
959
960 if (!(p = ppc32_get_bat_spr_ptr(cpu,spr)))
961 return(0);
962
963 return(*p);
964 }
965
966 /* Set a BAT SPR */
967 void ppc32_set_bat_spr(cpu_ppc_t *cpu,u_int spr,m_uint32_t val)
968 {
969 m_uint32_t *p;
970
971 if ((p = ppc32_get_bat_spr_ptr(cpu,spr))) {
972 *p = val;
973 ppc32_mem_invalidate_cache(cpu);
974 }
975 }
976
977 /* ======================================================================== */
978
979 /* Rebuild MTS data structures */
980 static void ppc32_mem_rebuild_mts(cpu_gen_t *gen_cpu)
981 {
982 ppc32_mem_invalidate_cache(CPU_PPC32(gen_cpu));
983 }
984
985 /* Initialize memory access vectors */
986 void ppc32_init_memop_vectors(cpu_ppc_t *cpu)
987 {
988 /* MTS slow lookup */
989 cpu->mts_slow_lookup = ppc32_slow_lookup;
990
991 /* MTS rebuild */
992 cpu->gen->mts_rebuild = ppc32_mem_rebuild_mts;
993
994 /* MTS statistics */
995 cpu->gen->mts_show_stats = ppc32_mem_show_stats;
996
997 /* Memory lookup operation */
998 cpu->mem_op_lookup = ppc32_mem_lookup;
999
1000 /* Translation operation */
1001 cpu->translate = ppc32_translate;
1002
1003 /* Load Operations */
1004 cpu->mem_op_fn[PPC_MEMOP_LBZ] = ppc32_lbz;
1005 cpu->mem_op_fn[PPC_MEMOP_LHZ] = ppc32_lhz;
1006 cpu->mem_op_fn[PPC_MEMOP_LWZ] = ppc32_lwz;
1007
1008 /* Load Operation with sign-extension */
1009 cpu->mem_op_fn[PPC_MEMOP_LHA] = ppc32_lha;
1010
1011 /* Store Operations */
1012 cpu->mem_op_fn[PPC_MEMOP_STB] = ppc32_stb;
1013 cpu->mem_op_fn[PPC_MEMOP_STH] = ppc32_sth;
1014 cpu->mem_op_fn[PPC_MEMOP_STW] = ppc32_stw;
1015
1016 /* Byte-Reversed operations */
1017 cpu->mem_op_fn[PPC_MEMOP_LWBR] = ppc32_lwbr;
1018 cpu->mem_op_fn[PPC_MEMOP_STWBR] = ppc32_stwbr;
1019
1020 /* String operations */
1021 cpu->mem_op_fn[PPC_MEMOP_LSW] = ppc32_lsw;
1022 cpu->mem_op_fn[PPC_MEMOP_STSW] = ppc32_stsw;
1023
1024 /* FPU operations */
1025 cpu->mem_op_fn[PPC_MEMOP_LFD] = ppc32_lfd;
1026 cpu->mem_op_fn[PPC_MEMOP_STFD] = ppc32_stfd;
1027
1028 /* ICBI - Instruction Cache Block Invalidate */
1029 cpu->mem_op_fn[PPC_MEMOP_ICBI] = ppc32_icbi;
1030 }
1031
1032 /* Restart the memory subsystem */
1033 int ppc32_mem_restart(cpu_ppc_t *cpu)
1034 {
1035 m_uint32_t family;
1036
1037 ppc32_mem_shutdown(cpu);
1038 ppc32_mem_init(cpu);
1039 ppc32_init_memop_vectors(cpu);
1040
1041 /* Override the MTS lookup vector depending on the cpu type */
1042 family = cpu->pvr & 0xFFFF0000;
1043
1044 if (family == PPC32_PVR_405) {
1045 cpu->mts_slow_lookup = ppc405_slow_lookup;
1046 cpu->gen->mmu_dump = ppc405_dump_tlb;
1047 cpu->gen->mmu_raw_dump = ppc405_dump_tlb;
1048 }
1049
1050 return(0);
1051 }
