/[dynamips]/trunk/mips64_mem.c
This is repository of my old source code which isn't updated any more. Go to git.rot13.org for current projects!
ViewVC logotype

Annotation of /trunk/mips64_mem.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 12 - (hide annotations)
Sat Oct 6 16:45:40 2007 UTC (16 years, 5 months ago) by dpavlin
File MIME type: text/plain
File size: 14717 byte(s)
make working copy

1 dpavlin 7 /*
2     * Cisco router simulation platform.
3     * Copyright (c) 2005,2006 Christophe Fillot (cf@utc.fr)
4     */
5    
6     #define _GNU_SOURCE
7     #include <stdio.h>
8     #include <stdlib.h>
9     #include <unistd.h>
10     #include <string.h>
11     #include <sys/types.h>
12     #include <sys/stat.h>
13     #include <sys/mman.h>
14     #include <fcntl.h>
15     #include <assert.h>
16    
17     #include "cpu.h"
18     #include "mips64_jit.h"
19     #include "vm.h"
20     #include "dynamips.h"
21     #include "memory.h"
22     #include "device.h"
23    
/* MTS access with special access mask */
/*
 * Called when an MTS lookup resolved to a special access mask instead of a
 * host mapping.  Depending on 'mask' this either services an access to
 * undefined memory (MTS_ACC_U), or raises a TLB (MTS_ACC_T) or Address
 * Error (MTS_ACC_AE) exception on the virtual CPU.
 *
 *  cpu     - target MIPS CPU
 *  vaddr   - faulting virtual address
 *  mask    - special access type (MTS_ACC_U / MTS_ACC_T / MTS_ACC_AE)
 *  op_code - memory opcode; MIPS_MEMOP_LOOKUP suppresses exceptions
 *  op_type - MTS_READ or MTS_WRITE
 *  op_size - access size in bytes (passed to the undef handler / logs)
 *  data    - value read or written (reads of undefined memory yield 0)
 */
void mips64_access_special(cpu_mips_t *cpu,m_uint64_t vaddr,m_uint32_t mask,
                           u_int op_code,u_int op_type,u_int op_size,
                           m_uint64_t *data)
{
   switch(mask) {
      case MTS_ACC_U:
         /* undefined memory reads as zero by default */
         if (op_type == MTS_READ)
            *data = 0;

         /* an optional hook may fully service the access; a non-zero
            return means it did, so we are done */
         if (cpu->gen->undef_mem_handler != NULL) {
            if (cpu->gen->undef_mem_handler(cpu->gen,vaddr,op_size,op_type,
                                            data))
               return;
         }

#if DEBUG_MTS_ACC_U
         if (op_type == MTS_READ)
            cpu_log(cpu->gen,
                    "MTS","read access to undefined address 0x%llx at "
                    "pc=0x%llx (size=%u)\n",vaddr,cpu->pc,op_size);
         else
            cpu_log(cpu->gen,
                    "MTS","write access to undefined address 0x%llx at "
                    "pc=0x%llx, value=0x%8.8llx (size=%u)\n",
                    vaddr,cpu->pc,*data,op_size);
#endif
         break;

      case MTS_ACC_T:
         /* TLB exception - not raised for pure translation lookups */
         if (op_code != MIPS_MEMOP_LOOKUP) {
#if DEBUG_MTS_ACC_T
            cpu_log(cpu->gen,
                    "MTS","TLB exception for address 0x%llx at pc=0x%llx "
                    "(%s access, size=%u)\n",
                    vaddr,cpu->pc,(op_type == MTS_READ) ?
                    "read":"write",op_size);
            mips64_dump_regs(cpu->gen);
#if MEMLOG_ENABLE
            memlog_dump(cpu->gen);
#endif
#endif
            /* CP0 BadVAddr must hold the faulting address before the
               exception is triggered */
            cpu->cp0.reg[MIPS_CP0_BADVADDR] = vaddr;

            if (op_type == MTS_READ)
               mips64_trigger_exception(cpu,MIPS_CP0_CAUSE_TLB_LOAD,0);
            else
               mips64_trigger_exception(cpu,MIPS_CP0_CAUSE_TLB_SAVE,0);

            /* resume from the main exec loop (cpu_exec_loop_enter) */
            cpu_exec_loop_enter(cpu->gen);
         }
         break;

      case MTS_ACC_AE:
         /* Address Error exception - not raised for pure lookups */
         if (op_code != MIPS_MEMOP_LOOKUP) {
#if DEBUG_MTS_ACC_AE
            cpu_log(cpu->gen,
                    "MTS","AE exception for address 0x%llx at pc=0x%llx "
                    "(%s access)\n",
                    vaddr,cpu->pc,(op_type == MTS_READ) ? "read":"write");
#endif
            cpu->cp0.reg[MIPS_CP0_BADVADDR] = vaddr;

            if (op_type == MTS_READ)
               mips64_trigger_exception(cpu,MIPS_CP0_CAUSE_ADDR_LOAD,0);
            else
               mips64_trigger_exception(cpu,MIPS_CP0_CAUSE_ADDR_SAVE,0);

            cpu_exec_loop_enter(cpu->gen);
         }
         break;
   }
}
98    
/* === MTS for 64-bit address space ======================================= */
/* The generic MTS (memory translation) code lives in mips_mts.c and is
 * parameterized by the macros below; including it twice with two macro
 * sets produces the mts64_* and mts32_* variants.
 * NOTE(review): this relies on mips_mts.c #undef-ing these macros after
 * use, otherwise the second set of #defines would redefine them -- verify
 * in mips_mts.c. */
#define MTS_ADDR_SIZE      64
#define MTS_NAME(name)     mts64_##name
#define MTS_NAME_UP(name)  MTS64_##name
#define MTS_PROTO(name)    mips64_mts64_##name
#define MTS_PROTO_UP(name) MIPS64_MTS64_##name

#include "mips_mts.c"

/* === MTS for 32-bit address space ======================================= */
#define MTS_ADDR_SIZE      32
#define MTS_NAME(name)     mts32_##name
#define MTS_NAME_UP(name)  MTS32_##name
#define MTS_PROTO(name)    mips64_mts32_##name
#define MTS_PROTO_UP(name) MIPS64_MTS32_##name

#include "mips_mts.c"
116    
117     /* === Specific operations for MTS64 ====================================== */
118    
/* MTS64 slow lookup */
/*
 * Slow-path lookup for the 64-bit MTS: decode the virtual address region,
 * translate it (statically or through the TLB) and install the result in
 * the MTS cache through mips64_mts64_map().
 *
 * Returns the cache entry to use, or NULL after the failure has been
 * reported via mips64_access_special() (TLB / AE exception or undefined
 * access).
 */
static mts64_entry_t *
mips64_mts64_slow_lookup(cpu_mips_t *cpu,m_uint64_t vaddr,
                         u_int op_code,u_int op_size,
                         u_int op_type,m_uint64_t *data,
                         mts64_entry_t *alt_entry)
{
   m_uint32_t hash_bucket,zone,sub_zone,cca;
   mts64_entry_t *entry;
   mts_map_t map;

   map.tlb_index = -1;
   hash_bucket = MTS64_HASH(vaddr);
   entry = &cpu->mts_u.mts64_cache[hash_bucket];
   zone = vaddr >> 40;   /* top 24 bits select the address region */

#if DEBUG_MTS_STATS
   cpu->mts_misses++;
#endif

   switch(zone) {
      case 0x000000:   /* xkuseg */
      case 0x400000:   /* xksseg */
      case 0xc00000:   /* xkseg */
         /* TLB-mapped segments: trigger TLB exception if no matching
            entry found */
         if (!mips64_cp0_tlb_lookup(cpu,vaddr,&map))
            goto err_tlb;

         if (!(entry = mips64_mts64_map(cpu,op_type,&map,entry,alt_entry)))
            goto err_undef;

         return(entry);

      case 0xffffff:
         /* 32-bit compatibility area: bits 29-39 select the sub-segment */
         sub_zone = (vaddr >> 29) & 0x7FF;

         switch(sub_zone) {
            case 0x7fc:   /* ckseg0: unmapped, cached */
               map.vaddr = vaddr & MIPS_MIN_PAGE_MASK;
               map.paddr = map.vaddr - 0xFFFFFFFF80000000ULL;
               map.offset = vaddr & MIPS_MIN_PAGE_IMASK;
               map.cached = TRUE;

               if (!(entry = mips64_mts64_map(cpu,op_type,&map,
                                              entry,alt_entry)))
                  goto err_undef;

               return(entry);

            case 0x7fd:   /* ckseg1: unmapped, uncached */
               map.vaddr = vaddr & MIPS_MIN_PAGE_MASK;
               map.paddr = map.vaddr - 0xFFFFFFFFA0000000ULL;
               map.offset = vaddr & MIPS_MIN_PAGE_IMASK;
               map.cached = FALSE;

               if (!(entry = mips64_mts64_map(cpu,op_type,&map,
                                              entry,alt_entry)))
                  goto err_undef;

               return(entry);

            case 0x7fe:   /* cksseg */
            case 0x7ff:   /* ckseg3 */
               /* TLB-mapped: trigger TLB exception if no matching entry
                  found */
               if (!mips64_cp0_tlb_lookup(cpu,vaddr,&map))
                  goto err_tlb;

               if (!(entry = mips64_mts64_map(cpu,op_type,
                                              &map,entry,alt_entry)))
                  goto err_undef;

               return(entry);

            default:
               /* Invalid zone: generate Address Error (AE) exception */
               goto err_address;
         }
         break;

      /* xkphys: direct physical access, cacheability chosen by the CCA
         bits embedded in the virtual address */
      case 0x800000:
      case 0x880000:
      case 0x900000:
      case 0x980000:
      case 0xa00000:
      case 0xa80000:
      case 0xb00000:
      case 0xb80000:
         cca = (vaddr >> MIPS64_XKPHYS_CCA_SHIFT) & 0x03;
         map.cached = mips64_cca_cached(cca);
         map.vaddr = vaddr & MIPS_MIN_PAGE_MASK;
         map.paddr = (vaddr & MIPS64_XKPHYS_PHYS_MASK);
         map.paddr &= MIPS_MIN_PAGE_MASK;
         map.offset = vaddr & MIPS_MIN_PAGE_IMASK;

         if (!(entry = mips64_mts64_map(cpu,op_type,&map,entry,alt_entry)))
            goto err_undef;

         return(entry);

      default:
         /* Invalid zone: generate Address Error (AE) exception */
         goto err_address;
   }

   /* error exits: report the condition, then tell the caller we failed */
 err_undef:
   mips64_access_special(cpu,vaddr,MTS_ACC_U,op_code,op_type,op_size,data);
   return NULL;
 err_address:
   mips64_access_special(cpu,vaddr,MTS_ACC_AE,op_code,op_type,op_size,data);
   return NULL;
 err_tlb:
   mips64_access_special(cpu,vaddr,MTS_ACC_T,op_code,op_type,op_size,data);
   return NULL;
}
234    
235     /* MTS64 access */
236     static forced_inline
237     void *mips64_mts64_access(cpu_mips_t *cpu,m_uint64_t vaddr,
238     u_int op_code,u_int op_size,
239 dpavlin 11 u_int op_type,m_uint64_t *data)
240 dpavlin 7 {
241     mts64_entry_t *entry,alt_entry;
242     m_uint32_t hash_bucket;
243     m_iptr_t haddr;
244     u_int dev_id;
245     int cow;
246    
247     #if MEMLOG_ENABLE
248     /* Record the memory access */
249     memlog_rec_access(cpu->gen,vaddr,*data,op_size,op_type);
250     #endif
251    
252     hash_bucket = MTS64_HASH(vaddr);
253     entry = &cpu->mts_u.mts64_cache[hash_bucket];
254    
255     #if DEBUG_MTS_STATS
256     cpu->mts_lookups++;
257     #endif
258    
259     /* Copy-On-Write for sparse device ? */
260     cow = (op_type == MTS_WRITE) && (entry->flags & MTS_FLAG_COW);
261    
262     /* Slow lookup if nothing found in cache */
263     if (unlikely(((vaddr & MIPS_MIN_PAGE_MASK) != entry->gvpa) || cow)) {
264     entry = mips64_mts64_slow_lookup(cpu,vaddr,op_code,op_size,op_type,
265 dpavlin 11 data,&alt_entry);
266 dpavlin 7 if (!entry)
267     return NULL;
268    
269     if (entry->flags & MTS_FLAG_DEV) {
270     dev_id = (entry->hpa & MTS_DEVID_MASK) >> MTS_DEVID_SHIFT;
271     haddr = entry->hpa & MTS_DEVOFF_MASK;
272     return(dev_access_fast(cpu->gen,dev_id,haddr,op_size,op_type,data));
273     }
274     }
275    
276     /* Raw memory access */
277     haddr = entry->hpa + (vaddr & MIPS_MIN_PAGE_IMASK);
278     #if MEMLOG_ENABLE
279     memlog_update_read(cpu->gen,haddr);
280     #endif
281     return((void *)haddr);
282     }
283    
284     /* MTS64 virtual address to physical page translation */
285     static fastcall int mips64_mts64_translate(cpu_mips_t *cpu,m_uint64_t vaddr,
286     m_uint32_t *phys_page)
287     {
288     mts64_entry_t *entry,alt_entry;
289     m_uint32_t hash_bucket;
290     m_uint64_t data = 0;
291    
292     hash_bucket = MTS64_HASH(vaddr);
293     entry = &cpu->mts_u.mts64_cache[hash_bucket];
294    
295     /* Slow lookup if nothing found in cache */
296     if (unlikely((vaddr & MIPS_MIN_PAGE_MASK) != entry->gvpa)) {
297     entry = mips64_mts64_slow_lookup(cpu,vaddr,MIPS_MEMOP_LOOKUP,4,MTS_READ,
298 dpavlin 11 &data,&alt_entry);
299 dpavlin 7 if (!entry)
300     return(-1);
301     }
302    
303     *phys_page = entry->gppa >> MIPS_MIN_PAGE_SHIFT;
304     return(0);
305     }
306    
307     /* === Specific operations for MTS32 ====================================== */
308    
309     /* MTS32 slow lookup */
310     static mts32_entry_t *
311     mips64_mts32_slow_lookup(cpu_mips_t *cpu,m_uint64_t vaddr,
312     u_int op_code,u_int op_size,
313     u_int op_type,m_uint64_t *data,
314 dpavlin 11 mts32_entry_t *alt_entry)
315 dpavlin 7 {
316     m_uint32_t hash_bucket,zone;
317     mts32_entry_t *entry;
318     mts_map_t map;
319    
320     map.tlb_index = -1;
321     hash_bucket = MTS32_HASH(vaddr);
322     entry = &cpu->mts_u.mts32_cache[hash_bucket];
323     zone = (vaddr >> 29) & 0x7;
324    
325     #if DEBUG_MTS_STATS
326     cpu->mts_misses++;
327     #endif
328    
329     switch(zone) {
330     case 0x00 ... 0x03: /* kuseg */
331     /* trigger TLB exception if no matching entry found */
332     if (!mips64_cp0_tlb_lookup(cpu,vaddr,&map))
333     goto err_tlb;
334    
335     if (!(entry = mips64_mts32_map(cpu,op_type,&map,entry,alt_entry)))
336     goto err_undef;
337    
338     return(entry);
339    
340     case 0x04: /* kseg0 */
341     map.vaddr = vaddr & MIPS_MIN_PAGE_MASK;
342     map.paddr = map.vaddr - 0xFFFFFFFF80000000ULL;
343 dpavlin 11 map.offset = vaddr & MIPS_MIN_PAGE_IMASK;
344 dpavlin 7 map.cached = TRUE;
345    
346     if (!(entry = mips64_mts32_map(cpu,op_type,&map,entry,alt_entry)))
347     goto err_undef;
348    
349     return(entry);
350    
351     case 0x05: /* kseg1 */
352     map.vaddr = vaddr & MIPS_MIN_PAGE_MASK;
353     map.paddr = map.vaddr - 0xFFFFFFFFA0000000ULL;
354 dpavlin 11 map.offset = vaddr & MIPS_MIN_PAGE_IMASK;
355 dpavlin 7 map.cached = FALSE;
356    
357     if (!(entry = mips64_mts32_map(cpu,op_type,&map,entry,alt_entry)))
358     goto err_undef;
359    
360     return(entry);
361    
362     case 0x06: /* ksseg */
363     case 0x07: /* kseg3 */
364     /* trigger TLB exception if no matching entry found */
365     if (!mips64_cp0_tlb_lookup(cpu,vaddr,&map))
366     goto err_tlb;
367    
368     if (!(entry = mips64_mts32_map(cpu,op_type,&map,entry,alt_entry)))
369     goto err_undef;
370    
371     return(entry);
372     }
373    
374     err_undef:
375 dpavlin 11 mips64_access_special(cpu,vaddr,MTS_ACC_U,op_code,op_type,op_size,data);
376 dpavlin 7 return NULL;
377     err_address:
378 dpavlin 11 mips64_access_special(cpu,vaddr,MTS_ACC_AE,op_code,op_type,op_size,data);
379 dpavlin 7 return NULL;
380     err_tlb:
381 dpavlin 11 mips64_access_special(cpu,vaddr,MTS_ACC_T,op_code,op_type,op_size,data);
382 dpavlin 7 return NULL;
383     }
384    
385     /* MTS32 access */
386     static forced_inline
387     void *mips64_mts32_access(cpu_mips_t *cpu,m_uint64_t vaddr,
388     u_int op_code,u_int op_size,
389 dpavlin 11 u_int op_type,m_uint64_t *data)
390 dpavlin 7 {
391     mts32_entry_t *entry,alt_entry;
392     m_uint32_t hash_bucket;
393     m_iptr_t haddr;
394     u_int dev_id;
395     int cow;
396    
397     #if MEMLOG_ENABLE
398     /* Record the memory access */
399     memlog_rec_access(cpu->gen,vaddr,*data,op_size,op_type);
400     #endif
401    
402     hash_bucket = MTS32_HASH(vaddr);
403     entry = &cpu->mts_u.mts32_cache[hash_bucket];
404    
405     #if DEBUG_MTS_STATS
406     cpu->mts_lookups++;
407     #endif
408    
409     /* Copy-On-Write for sparse device ? */
410     cow = (op_type == MTS_WRITE) && (entry->flags & MTS_FLAG_COW);
411    
412     /* Slow lookup if nothing found in cache */
413     if (unlikely((((m_uint32_t)vaddr & MIPS_MIN_PAGE_MASK) != entry->gvpa) ||
414     cow))
415     {
416     entry = mips64_mts32_slow_lookup(cpu,vaddr,op_code,op_size,op_type,
417 dpavlin 11 data,&alt_entry);
418 dpavlin 7 if (!entry)
419     return NULL;
420    
421     if (entry->flags & MTS_FLAG_DEV) {
422     dev_id = (entry->hpa & MTS_DEVID_MASK) >> MTS_DEVID_SHIFT;
423     haddr = entry->hpa & MTS_DEVOFF_MASK;
424     return(dev_access_fast(cpu->gen,dev_id,haddr,op_size,op_type,data));
425     }
426     }
427    
428     /* Raw memory access */
429     haddr = entry->hpa + (vaddr & MIPS_MIN_PAGE_IMASK);
430     #if MEMLOG_ENABLE
431     memlog_update_read(cpu->gen,haddr);
432     #endif
433     return((void *)haddr);
434     }
435    
436     /* MTS32 virtual address to physical page translation */
437     static fastcall int mips64_mts32_translate(cpu_mips_t *cpu,m_uint64_t vaddr,
438     m_uint32_t *phys_page)
439     {
440     mts32_entry_t *entry,alt_entry;
441     m_uint32_t hash_bucket;
442     m_uint64_t data = 0;
443    
444     hash_bucket = MTS32_HASH(vaddr);
445     entry = &cpu->mts_u.mts32_cache[hash_bucket];
446    
447     /* Slow lookup if nothing found in cache */
448     if (unlikely(((m_uint32_t)vaddr & MIPS_MIN_PAGE_MASK) != entry->gvpa)) {
449     entry = mips64_mts32_slow_lookup(cpu,vaddr,MIPS_MEMOP_LOOKUP,4,MTS_READ,
450 dpavlin 11 &data,&alt_entry);
451 dpavlin 7 if (!entry)
452     return(-1);
453     }
454    
455     *phys_page = entry->gppa >> MIPS_MIN_PAGE_SHIFT;
456     return(0);
457     }
458    
459     /* ======================================================================== */
460    
461     /* Shutdown MTS subsystem */
462     void mips64_mem_shutdown(cpu_mips_t *cpu)
463     {
464     if (cpu->mts_shutdown != NULL)
465     cpu->mts_shutdown(cpu);
466     }
467    
468     /* Set the address mode */
469     int mips64_set_addr_mode(cpu_mips_t *cpu,u_int addr_mode)
470     {
471     if (cpu->addr_mode != addr_mode) {
472     mips64_mem_shutdown(cpu);
473    
474     switch(addr_mode) {
475     case 32:
476     mips64_mts32_init(cpu);
477     mips64_mts32_init_memop_vectors(cpu);
478     break;
479     case 64:
480     mips64_mts64_init(cpu);
481     mips64_mts64_init_memop_vectors(cpu);
482     break;
483     default:
484     fprintf(stderr,
485     "mts_set_addr_mode: internal error (addr_mode=%u)\n",
486     addr_mode);
487     exit(EXIT_FAILURE);
488     }
489     }
490    
491     return(0);
492     }

  ViewVC Help
Powered by ViewVC 1.1.26