/*
 * Cisco 7200 (Predator) simulation platform.
 * Copyright (c) 2005,2006 Christophe Fillot (cf@utc.fr)
 */

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <assert.h>

#include "mips64.h"
#include "dynamips.h"
#include "memory.h"
#include "device.h"
#include "cpu.h"
#include "cp0.h"
#include "vm.h"

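/*
 * Memory access logging: the last MEMLOG_COUNT accesses are kept in a
 * fixed-size ring buffer attached to each CPU.  MEMLOG_COUNT is assumed
 * to be a power of two, so "index & (MEMLOG_COUNT - 1)" implements the
 * wrap-around; e.g. with MEMLOG_COUNT == 16, (15 + 1) & 15 == 0.
 */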
/* Record a memory access */
void memlog_rec_access(cpu_mips_t *cpu,m_uint64_t vaddr,m_uint64_t data,
                       m_uint32_t op_size,m_uint32_t op_type)
{
   memlog_access_t *acc;

   acc = &cpu->memlog_array[cpu->memlog_pos];
   acc->pc      = cpu->pc;
   acc->vaddr   = vaddr;
   acc->data    = data;
   acc->op_size = op_size;
   acc->op_type = op_type;
   acc->data_valid = (op_type == MTS_WRITE);

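   /* Advance the ring position; the bitwise AND wraps it modulo
      MEMLOG_COUNT (assumed to be a power of two). */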
   cpu->memlog_pos = (cpu->memlog_pos + 1) & (MEMLOG_COUNT - 1);
}

/* Show the latest memory accesses */
void memlog_dump(cpu_mips_t *cpu)
{
   memlog_access_t *acc;
   char s_data[64];
   u_int i,pos;

   for(i=0;i<MEMLOG_COUNT;i++) {
      pos = cpu->memlog_pos + i;
      pos &= (MEMLOG_COUNT-1);
      acc = &cpu->memlog_array[pos];

      /* only dump slots that actually recorded an access */
      if (acc->pc) {
         if (acc->data_valid)
            snprintf(s_data,sizeof(s_data),"0x%llx",acc->data);
         else
            snprintf(s_data,sizeof(s_data),"XXXXXXXX");

         printf("CPU%u: pc=0x%8.8llx, vaddr=0x%8.8llx, "
                "size=%u, type=%s, data=%s\n",
                cpu->id,acc->pc,acc->vaddr,acc->op_size,
                (acc->op_type == MTS_READ) ? "read " : "write",
                s_data);
      }
   }
}

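/*
 * Read accesses are logged in two steps: memlog_rec_access() stores the
 * access with data_valid == FALSE, then once the host address is known
 * the function below patches in the value that was actually read.
 */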
/* Update the data obtained by a read access */
void memlog_update_read(cpu_mips_t *cpu,m_iptr_t raddr)
{
   memlog_access_t *acc;

   acc = &cpu->memlog_array[(cpu->memlog_pos-1) & (MEMLOG_COUNT-1)];

   if (acc->op_type == MTS_READ)
   {
      switch(acc->op_size) {
         case 1:
            acc->data = *(m_uint8_t *)raddr;
            break;
         case 2:
            acc->data = vmtoh16(*(m_uint16_t *)raddr);
            break;
         case 4:
            acc->data = vmtoh32(*(m_uint32_t *)raddr);
            break;
         case 8:
            acc->data = vmtoh64(*(m_uint64_t *)raddr);
            break;
      }

      acc->data_valid = TRUE;
   }
}

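/*
 * Special access types, as flagged by the lookup code:
 *   MTS_ACC_U  - undefined address (no device mapped there);
 *   MTS_ACC_T  - TLB miss, raises a TLB load/store exception;
 *   MTS_ACC_AE - address error, raises an address-error exception.
 * For MIPS_MEMOP_LOOKUP (pure translation), no exception is raised.
 */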
/* MTS access with special access mask */
void mts_access_special(cpu_mips_t *cpu,m_uint64_t vaddr,m_uint32_t mask,
                        u_int op_code,u_int op_type,u_int op_size,
                        m_uint64_t *data,u_int *exc)
{
   switch(mask) {
      case MTS_ACC_U:
#if DEBUG_MTS_ACC_U
         if (op_type == MTS_READ)
            cpu_log(cpu,"MTS","read access to undefined address 0x%llx at "
                    "pc=0x%llx (size=%u)\n",vaddr,cpu->pc,op_size);
         else
            cpu_log(cpu,"MTS","write access to undefined address 0x%llx at "
                    "pc=0x%llx, value=0x%8.8llx (size=%u)\n",
                    vaddr,cpu->pc,*data,op_size);
#endif
         if (op_type == MTS_READ)
            *data = 0;
         break;

      case MTS_ACC_T:
         if (op_code != MIPS_MEMOP_LOOKUP) {
#if DEBUG_MTS_ACC_T
            cpu_log(cpu,"MTS","TLB exception for address 0x%llx at pc=0x%llx "
                    "(%s access, size=%u)\n",
                    vaddr,cpu->pc,(op_type == MTS_READ) ?
                    "read":"write",op_size);
            mips64_dump_regs(cpu);
#if MEMLOG_ENABLE
            memlog_dump(cpu);
#endif
#endif
            cpu->cp0.reg[MIPS_CP0_BADVADDR] = vaddr;

            if (op_type == MTS_READ)
               mips64_trigger_exception(cpu,MIPS_CP0_CAUSE_TLB_LOAD,0);
            else
               mips64_trigger_exception(cpu,MIPS_CP0_CAUSE_TLB_SAVE,0);
         }

         *exc = 1;
         break;

      case MTS_ACC_AE:
         if (op_code != MIPS_MEMOP_LOOKUP) {
#if DEBUG_MTS_ACC_AE
            cpu_log(cpu,"MTS","AE exception for address 0x%llx at pc=0x%llx "
                    "(%s access)\n",
                    vaddr,cpu->pc,(op_type == MTS_READ) ? "read":"write");
#endif
            cpu->cp0.reg[MIPS_CP0_BADVADDR] = vaddr;

            if (op_type == MTS_READ)
               mips64_trigger_exception(cpu,MIPS_CP0_CAUSE_ADDR_LOAD,0);
            else
               mips64_trigger_exception(cpu,MIPS_CP0_CAUSE_ADDR_SAVE,0);
         }

         *exc = 1;
         break;
   }
}

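/*
 * The generic MTS code in mips_mts.c is written against the MTS_PROTO()
 * macros and included twice below, once per address-space width: a
 * "poor man's template" in C.  mips_mts.c is expected to #undef these
 * macros at its end so that it can be re-included.
 */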
/* === MTS for 64-bit address space ======================================= */
#define MTS_ADDR_SIZE      64
#define MTS_PROTO(name)    mts64_##name
#define MTS_PROTO_UP(name) MTS64_##name

#include "mips_mts.c"

/* === MTS for 32-bit address space ======================================= */
#define MTS_ADDR_SIZE      32
#define MTS_PROTO(name)    mts32_##name
#define MTS_PROTO_UP(name) MTS32_##name

#include "mips_mts.c"

/* === Specific operations for MTS64 ====================================== */

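/*
 * 64-bit virtual address zones, decoded from vaddr >> 40 below:
 *   0x000000  xkuseg  (TLB-mapped)
 *   0x400000  xksseg  (TLB-mapped)
 *   0x8000xx  xkphys  (unmapped windows onto physical memory; the cache
 *                      coherency attribute comes from high address bits)
 *   0xc00000  xkseg   (TLB-mapped)
 *   0xffffff  compatibility segments ckseg0/ckseg1/cksseg/ckseg3,
 *             selected by bits 31:29 of the sign-extended address.
 */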
/* MTS64 slow lookup */
static forced_inline
mts64_entry_t *mts64_slow_lookup(cpu_mips_t *cpu,m_uint64_t vaddr,
                                 u_int op_code,u_int op_size,
                                 u_int op_type,m_uint64_t *data,
                                 u_int *exc)
{
   m_uint32_t hash_bucket,zone,sub_zone,cca;
   mts64_entry_t *entry,new_entry;
   mts_map_t map;

   map.tlb_index = -1;
   hash_bucket = MTS64_HASH(vaddr);
   entry = cpu->mts_cache[hash_bucket];
   zone = vaddr >> 40;

#if DEBUG_MTS_STATS
   cpu->mts_misses++;
#endif

   switch(zone) {
      case 0x000000:   /* xkuseg */
      case 0x400000:   /* xksseg */
      case 0xc00000:   /* xkseg */
         /* trigger TLB exception if no matching entry found */
         if (!cp0_tlb_lookup(cpu,vaddr,&map))
            goto err_tlb;

         if (!mts64_map(cpu,vaddr,&map,&new_entry))
            goto err_undef;
         break;

      case 0xffffff:
         sub_zone = (vaddr >> 29) & 0x7FF;

         switch(sub_zone) {
            case 0x7fc:   /* ckseg0 */
               map.vaddr  = sign_extend(MIPS_KSEG0_BASE,32);
               map.paddr  = 0;
               map.len    = MIPS_KSEG0_SIZE;
               map.cached = TRUE;
               if (!mts64_map(cpu,vaddr,&map,&new_entry))
                  goto err_undef;
               break;

            case 0x7fd:   /* ckseg1 */
               map.vaddr  = sign_extend(MIPS_KSEG1_BASE,32);
               map.paddr  = 0;
               map.len    = MIPS_KSEG1_SIZE;
               map.cached = FALSE;
               if (!mts64_map(cpu,vaddr,&map,&new_entry))
                  goto err_undef;
               break;

            case 0x7fe:   /* cksseg */
            case 0x7ff:   /* ckseg3 */
               /* trigger TLB exception if no matching entry found */
               if (!cp0_tlb_lookup(cpu,vaddr,&map))
                  goto err_tlb;

               if (!mts64_map(cpu,vaddr,&map,&new_entry))
                  goto err_undef;
               break;

            default:
               /* Invalid zone: generate Address Error (AE) exception */
               goto err_address;
         }
         break;

      /* xkphys */
      case 0x800000:
      case 0x880000:
      case 0x900000:
      case 0x980000:
      case 0xa00000:
      case 0xa80000:
      case 0xb00000:
      case 0xb80000:
         cca = (vaddr >> MIPS64_XKPHYS_CCA_SHIFT) & 0x03;
         map.cached = mips64_cca_cached(cca);
         map.vaddr  = vaddr & MIPS64_XKPHYS_ZONE_MASK;
         map.paddr  = 0;
         map.len    = MIPS64_XKPHYS_PHYS_SIZE;
         if (!mts64_map(cpu,vaddr,&map,&new_entry))
            goto err_undef;
         break;

      default:
         /* Invalid zone: generate Address Error (AE) exception */
         goto err_address;
   }

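   /*
    * Cache maintenance: a hash bucket holds a single entry here, so a
    * colliding entry is simply overwritten.  Entries backed by a TLB
    * entry are also chained on a per-TLB-index reverse map (mts_rmap),
    * which lets the CP0 code invalidate every cached translation derived
    * from a TLB entry when it is rewritten; pprev points to the previous
    * link's "next" pointer so unlinking is O(1).
    */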
   /* Get a new entry if necessary */
   if (!entry) {
      entry = mts64_alloc_entry(cpu);
      entry->pself = entry->pprev = NULL;
      entry->next = NULL;

      /* Store the entry in hash table for future use */
      cpu->mts_cache[hash_bucket] = entry;
   } else {
      /* Remove the entry from the reverse map list */
      if (entry->pprev) {
         if (entry->next)
            entry->next->pprev = entry->pprev;

         *(entry->pprev) = entry->next;
      }
   }

   /* Add this entry to the reverse map list */
   if (map.tlb_index != -1) {
      entry->pself = (mts64_entry_t **)&cpu->mts_cache[hash_bucket];
      entry->next  = cpu->mts_rmap[map.tlb_index];
      entry->pprev = (mts64_entry_t **)&cpu->mts_rmap[map.tlb_index];
      if (entry->next)
         entry->next->pprev = &entry->next;
      cpu->mts_rmap[map.tlb_index] = entry;
   }

   /* Fill the new entry or replace the previous */
   entry->phys_page = new_entry.phys_page;
   entry->start     = new_entry.start;
   entry->mask      = new_entry.mask;
   entry->action    = new_entry.action;
   return entry;

 err_undef:
   mts_access_special(cpu,vaddr,MTS_ACC_U,op_code,op_type,op_size,data,exc);
   return NULL;
 err_address:
   mts_access_special(cpu,vaddr,MTS_ACC_AE,op_code,op_type,op_size,data,exc);
   return NULL;
 err_tlb:
   mts_access_special(cpu,vaddr,MTS_ACC_T,op_code,op_type,op_size,data,exc);
   return NULL;
}

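/*
 * Fast path: one hash probe, one mask compare.  The entry's "action"
 * word encodes either a host pointer for raw memory (MTS_ADDR_MASK) or
 * a device id plus offset (MTS_DEVID_MASK/MTS_DEVOFF_MASK) for I/O that
 * must go through a device handler.
 */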
/* MTS64 access */
static forced_inline void *mts64_access(cpu_mips_t *cpu,m_uint64_t vaddr,
                                        u_int op_code,u_int op_size,
                                        u_int op_type,m_uint64_t *data,
                                        u_int *exc)
{
   m_uint32_t hash_bucket;
   mts64_entry_t *entry;
   m_iptr_t haddr;
   u_int dev_id;

#if MEMLOG_ENABLE
   /* Record the memory access */
   memlog_rec_access(cpu,vaddr,*data,op_size,op_type);
#endif

   *exc = 0;
   hash_bucket = MTS64_HASH(vaddr);
   entry = cpu->mts_cache[hash_bucket];

#if DEBUG_MTS_STATS
   cpu->mts_lookups++;
#endif

   /* Slow lookup if nothing found in cache */
   if (unlikely((!entry) ||
       unlikely((vaddr & sign_extend(entry->mask,32)) != entry->start)))
   {
      entry = mts64_slow_lookup(cpu,vaddr,op_code,op_size,op_type,data,exc);
      if (!entry) return NULL;
   }

   /* Device access */
   if (unlikely(entry->action & MTS_DEV_MASK)) {
      dev_id = (entry->action & MTS_DEVID_MASK) >> MTS_DEVID_SHIFT;
      haddr = entry->action & MTS_DEVOFF_MASK;
      haddr += vaddr - entry->start;

#if DEBUG_MTS_DEV
      cpu_log(cpu,"MTS64",
              "device access: vaddr=0x%llx, pc=0x%llx, dev_offset=0x%x\n",
              vaddr,cpu->pc,haddr);
#endif
      return(dev_access_fast(cpu,dev_id,haddr,op_size,op_type,data));
   }

   /* Raw memory access */
   haddr = entry->action & MTS_ADDR_MASK;
   haddr += vaddr - entry->start;
#if MEMLOG_ENABLE
   memlog_update_read(cpu,haddr);
#endif
   return((void *)haddr);
}

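/*
 * Pure translation uses op_code MIPS_MEMOP_LOOKUP, so a failed lookup
 * returns -1 without raising a guest exception (see mts_access_special).
 */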
/* MTS64 virtual address to physical page translation */
static fastcall int mts64_translate(cpu_mips_t *cpu,m_uint64_t vaddr,
                                    m_uint32_t *phys_page)
{
   m_uint32_t hash_bucket,offset;
   mts64_entry_t *entry;
   m_uint64_t data = 0;
   u_int exc = 0;

   hash_bucket = MTS64_HASH(vaddr);
   entry = cpu->mts_cache[hash_bucket];

   /* Slow lookup if nothing found in cache */
   if (unlikely((!entry) ||
       unlikely((vaddr & sign_extend(entry->mask,32)) != entry->start)))
   {
      entry = mts64_slow_lookup(cpu,vaddr,MIPS_MEMOP_LOOKUP,4,MTS_READ,
                                &data,&exc);
      if (!entry)
         return(-1);
   }

   offset = vaddr - entry->start;
   *phys_page = entry->phys_page + (offset >> MIPS_MIN_PAGE_SHIFT);
   return(0);
}

/* === Specific operations for MTS32 ====================================== */

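/*
 * 32-bit address zones, decoded from bits 31:29 of the virtual address:
 *   0x0-0x3  kuseg (TLB-mapped), 0x4 kseg0 (cached, unmapped),
 *   0x5 kseg1 (uncached, unmapped), 0x6 ksseg and 0x7 kseg3 (TLB-mapped).
 * All eight values are handled, so no address-error case can arise here.
 */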
/* MTS32 slow lookup */
static forced_inline
mts32_entry_t *mts32_slow_lookup(cpu_mips_t *cpu,m_uint64_t vaddr,
                                 u_int op_code,u_int op_size,
                                 u_int op_type,m_uint64_t *data,
                                 u_int *exc)
{
   m_uint32_t hash_bucket,zone;
   mts32_entry_t *entry,new_entry;
   mts_map_t map;

   map.tlb_index = -1;
   hash_bucket = MTS32_HASH(vaddr);
   entry = cpu->mts_cache[hash_bucket];
   zone = (vaddr >> 29) & 0x7;

#if DEBUG_MTS_STATS
   cpu->mts_misses++;
#endif

   switch(zone) {
      case 0x00 ... 0x03:   /* kuseg */
         /* trigger TLB exception if no matching entry found */
         if (!cp0_tlb_lookup(cpu,vaddr,&map))
            goto err_tlb;

         if (!mts32_map(cpu,vaddr,&map,&new_entry))
            goto err_undef;
         break;

      case 0x04:   /* kseg0 */
         map.vaddr  = sign_extend(MIPS_KSEG0_BASE,32);
         map.paddr  = 0;
         map.len    = MIPS_KSEG0_SIZE;
         map.cached = TRUE;
         if (!mts32_map(cpu,vaddr,&map,&new_entry))
            goto err_undef;
         break;

      case 0x05:   /* kseg1 */
         map.vaddr  = sign_extend(MIPS_KSEG1_BASE,32);
         map.paddr  = 0;
         map.len    = MIPS_KSEG1_SIZE;
         map.cached = FALSE;
         if (!mts32_map(cpu,vaddr,&map,&new_entry))
            goto err_undef;
         break;

      case 0x06:   /* ksseg */
      case 0x07:   /* kseg3 */
         /* trigger TLB exception if no matching entry found */
         if (!cp0_tlb_lookup(cpu,vaddr,&map))
            goto err_tlb;

         if (!mts32_map(cpu,vaddr,&map,&new_entry))
            goto err_undef;
         break;
   }

   /* Get a new entry if necessary */
   if (!entry) {
      entry = mts32_alloc_entry(cpu);
      entry->pself = entry->pprev = NULL;
      entry->next = NULL;

      /* Store the entry in hash table for future use */
      cpu->mts_cache[hash_bucket] = entry;
   } else {
      /* Remove the entry from the reverse map list */
      if (entry->pprev) {
         if (entry->next)
            entry->next->pprev = entry->pprev;

         *(entry->pprev) = entry->next;
      }
   }

   /* Add this entry to the reverse map list */
   if (map.tlb_index != -1) {
      entry->pself = (mts32_entry_t **)&cpu->mts_cache[hash_bucket];
      entry->next  = cpu->mts_rmap[map.tlb_index];
      entry->pprev = (mts32_entry_t **)&cpu->mts_rmap[map.tlb_index];
      if (entry->next)
         entry->next->pprev = &entry->next;
      cpu->mts_rmap[map.tlb_index] = entry;
   }

   /* Fill the new entry or replace the previous */
   entry->phys_page = new_entry.phys_page;
   entry->start     = new_entry.start;
   entry->mask      = new_entry.mask;
   entry->action    = new_entry.action;
   return entry;

 err_undef:
   mts_access_special(cpu,vaddr,MTS_ACC_U,op_code,op_type,op_size,data,exc);
   return NULL;
 err_tlb:
   mts_access_special(cpu,vaddr,MTS_ACC_T,op_code,op_type,op_size,data,exc);
   return NULL;
}

/* MTS32 access */
static forced_inline void *mts32_access(cpu_mips_t *cpu,m_uint64_t vaddr,
                                        u_int op_code,u_int op_size,
                                        u_int op_type,m_uint64_t *data,
                                        u_int *exc)
{
   m_uint32_t hash_bucket;
   mts32_entry_t *entry;
   m_iptr_t haddr;
   u_int dev_id;

#if MEMLOG_ENABLE
   /* Record the memory access */
   memlog_rec_access(cpu,vaddr,*data,op_size,op_type);
#endif

   *exc = 0;
   hash_bucket = MTS32_HASH(vaddr);
   entry = cpu->mts_cache[hash_bucket];

#if DEBUG_MTS_STATS
   cpu->mts_lookups++;
#endif

   /* Slow lookup if nothing found in cache */
   if (unlikely((!entry) || unlikely((vaddr & entry->mask) != entry->start))) {
      entry = mts32_slow_lookup(cpu,vaddr,op_code,op_size,op_type,data,exc);
      if (!entry) return NULL;
   }

   /* Device access */
   if (unlikely(entry->action & MTS_DEV_MASK)) {
      dev_id = (entry->action & MTS_DEVID_MASK) >> MTS_DEVID_SHIFT;
      haddr = entry->action & MTS_DEVOFF_MASK;
      haddr += (m_uint32_t)vaddr - entry->start;

#if DEBUG_MTS_DEV
      cpu_log(cpu,"MTS32",
              "device access: vaddr=0x%llx, pc=0x%llx, dev_offset=0x%x\n",
              vaddr,cpu->pc,haddr);
#endif
      return(dev_access_fast(cpu,dev_id,haddr,op_size,op_type,data));
   }

   /* Raw memory access */
   haddr = entry->action & MTS_ADDR_MASK;
   haddr += (m_uint32_t)vaddr - entry->start;
#if MEMLOG_ENABLE
   memlog_update_read(cpu,haddr);
#endif
   return((void *)haddr);
}

/* MTS32 virtual address to physical page translation */
static fastcall int mts32_translate(cpu_mips_t *cpu,m_uint64_t vaddr,
                                    m_uint32_t *phys_page)
{
   m_uint32_t hash_bucket,offset;
   mts32_entry_t *entry;
   m_uint64_t data = 0;
   u_int exc = 0;

   hash_bucket = MTS32_HASH(vaddr);
   entry = cpu->mts_cache[hash_bucket];

   /* Slow lookup if nothing found in cache */
   if (unlikely((!entry) || unlikely((vaddr & entry->mask) != entry->start))) {
      entry = mts32_slow_lookup(cpu,vaddr,MIPS_MEMOP_LOOKUP,4,MTS_READ,
                                &data,&exc);
      if (!entry)
         return(-1);
   }

   offset = vaddr - entry->start;
   *phys_page = entry->phys_page + (offset >> MIPS_MIN_PAGE_SHIFT);
   return(0);
}

/* ======================================================================== */

/* Shutdown MTS subsystem */
void mts_shutdown(cpu_mips_t *cpu)
{
   if (cpu->mts_shutdown != NULL)
      cpu->mts_shutdown(cpu);
}

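/*
 * Switching modes tears down the current MTS state and rebuilds the
 * memop vectors for the new address width.  A hypothetical caller
 * (illustration only, not taken from this file):
 *
 *    // guest enabled 64-bit kernel addressing via CP0 status:
 *    mts_set_addr_mode(cpu, 64);
 */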
/* Set the address mode */
int mts_set_addr_mode(cpu_mips_t *cpu,u_int addr_mode)
{
   if (cpu->addr_mode != addr_mode) {
      mts_shutdown(cpu);

      switch(addr_mode) {
         case 32:
            mts32_init(cpu);
            mts32_init_memop_vectors(cpu);
            break;
         case 64:
            mts64_init(cpu);
            mts64_init_memop_vectors(cpu);
            break;
         default:
            fprintf(stderr,
                    "mts_set_addr_mode: internal error (addr_mode=%u)\n",
                    addr_mode);
            exit(EXIT_FAILURE);
      }
   }

   return(0);
}

/* === Operations on physical memory ====================================== */

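/*
 * These helpers resolve a guest physical address with dev_lookup() and
 * then operate directly on the device's host mapping.  The vmtoh*() and
 * htovm*() macros are assumed to convert between guest byte order
 * (big-endian MIPS) and host byte order.
 */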
/* Copy a memory block from VM physical RAM to real host */
void physmem_copy_from_vm(vm_instance_t *vm,void *real_buffer,
                          m_uint64_t paddr,size_t len)
{
   struct vdevice *vm_ram;
   u_char *ptr;

   if ((vm_ram = dev_lookup(vm,paddr,FALSE)) != NULL) {
      assert(vm_ram->host_addr != 0);
      ptr = (u_char *)vm_ram->host_addr + (paddr - vm_ram->phys_addr);
      memcpy(real_buffer,ptr,len);
   }
}

/* Copy a memory block to VM physical RAM from real host */
void physmem_copy_to_vm(vm_instance_t *vm,void *real_buffer,
                        m_uint64_t paddr,size_t len)
{
   struct vdevice *vm_ram;
   u_char *ptr;

   if ((vm_ram = dev_lookup(vm,paddr,FALSE)) != NULL) {
      assert(vm_ram->host_addr != 0);
      ptr = (u_char *)vm_ram->host_addr + (paddr - vm_ram->phys_addr);
      memcpy(ptr,real_buffer,len);
   }
}

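/*
 * Word accessors take one of two paths: devices backed by host memory
 * (and not flagged VDEVICE_FLAG_NO_MTS_MMAP) are read/written through a
 * direct pointer; otherwise the access goes through the device handler,
 * which either completes the operation itself (returning NULL, with the
 * read value in tmp) or returns a pointer for the caller to dereference.
 * Illustrative use (the address is arbitrary):
 *
 *    m_uint32_t w = physmem_copy_u32_from_vm(vm, 0x1000);
 *    physmem_copy_u32_to_vm(vm, 0x1000, w + 1);
 */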
/* Copy a 32-bit word from the VM physical RAM to real host */
m_uint32_t physmem_copy_u32_from_vm(vm_instance_t *vm,m_uint64_t paddr)
{
   struct vdevice *dev;
   m_uint32_t offset;
   m_uint64_t tmp;
   void *ptr;

   if (unlikely((dev = dev_lookup(vm,paddr,FALSE)) == NULL))
      return(0);

   offset = paddr - dev->phys_addr;

   if ((dev->host_addr != 0) && !(dev->flags & VDEVICE_FLAG_NO_MTS_MMAP))
      ptr = (u_char *)dev->host_addr + offset;
   else {
      ptr = dev->handler(vm->boot_cpu,dev,offset,4,MTS_READ,&tmp);
      if (!ptr) return(tmp);
   }

   return(vmtoh32(*(m_uint32_t *)ptr));
}

/* Copy a 32-bit word to the VM physical RAM from real host */
void physmem_copy_u32_to_vm(vm_instance_t *vm,m_uint64_t paddr,m_uint32_t val)
{
   struct vdevice *dev;
   m_uint32_t offset;
   m_uint64_t tmp;
   void *ptr;

   if (unlikely((dev = dev_lookup(vm,paddr,FALSE)) == NULL))
      return;

   offset = paddr - dev->phys_addr;

   if ((dev->host_addr != 0) && !(dev->flags & VDEVICE_FLAG_NO_MTS_MMAP))
      ptr = (u_char *)dev->host_addr + offset;
   else {
      tmp = val;
      ptr = dev->handler(vm->boot_cpu,dev,offset,4,MTS_WRITE,&tmp);
      if (!ptr) return;
   }

   *(m_uint32_t *)ptr = htovm32(val);
}

/* Copy a 16-bit word from the VM physical RAM to real host */
m_uint16_t physmem_copy_u16_from_vm(vm_instance_t *vm,m_uint64_t paddr)
{
   struct vdevice *dev;
   m_uint32_t offset;
   m_uint64_t tmp;
   void *ptr;

   if (unlikely((dev = dev_lookup(vm,paddr,FALSE)) == NULL))
      return(0);

   offset = paddr - dev->phys_addr;

   if ((dev->host_addr != 0) && !(dev->flags & VDEVICE_FLAG_NO_MTS_MMAP))
      ptr = (u_char *)dev->host_addr + offset;
   else {
      ptr = dev->handler(vm->boot_cpu,dev,offset,2,MTS_READ,&tmp);
      if (!ptr) return(tmp);
   }

   return(vmtoh16(*(m_uint16_t *)ptr));
}

/* Copy a 16-bit word to the VM physical RAM from real host */
void physmem_copy_u16_to_vm(vm_instance_t *vm,m_uint64_t paddr,m_uint16_t val)
{
   struct vdevice *dev;
   m_uint32_t offset;
   m_uint64_t tmp;
   void *ptr;

   if (unlikely((dev = dev_lookup(vm,paddr,FALSE)) == NULL))
      return;

   offset = paddr - dev->phys_addr;

   if ((dev->host_addr != 0) && !(dev->flags & VDEVICE_FLAG_NO_MTS_MMAP))
      ptr = (u_char *)dev->host_addr + offset;
   else {
      tmp = val;
      ptr = dev->handler(vm->boot_cpu,dev,offset,2,MTS_WRITE,&tmp);
      if (!ptr) return;
   }

   *(m_uint16_t *)ptr = htovm16(val);
}

/* DMA transfer operation */
void physmem_dma_transfer(vm_instance_t *vm,m_uint64_t src,m_uint64_t dst,
                          size_t len)
{
   struct vdevice *src_dev,*dst_dev;
   u_char *sptr,*dptr;

   src_dev = dev_lookup(vm,src,FALSE);
   dst_dev = dev_lookup(vm,dst,FALSE);

   if ((src_dev != NULL) && (dst_dev != NULL)) {
      assert(src_dev->host_addr != 0);
      assert(dst_dev->host_addr != 0);

      sptr = (u_char *)src_dev->host_addr + (src - src_dev->phys_addr);
      dptr = (u_char *)dst_dev->host_addr + (dst - dst_dev->phys_addr);
      memcpy(dptr,sptr,len);
   } else {
      vm_log(vm,"DMA","unable to transfer from 0x%llx to 0x%llx (len=%lu)\n",
             src,dst,(u_long)len);
   }
}

/* strlen in VM physical memory */
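/*
 * Note: this runs strlen() directly on the host mapping, so it assumes
 * the guest string is NUL-terminated within the looked-up device's
 * backing memory; a string crossing the device boundary would overrun.
 */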
size_t physmem_strlen(vm_instance_t *vm,m_uint64_t paddr)
{
   struct vdevice *vm_ram;
   size_t len = 0;
   char *ptr;

   if ((vm_ram = dev_lookup(vm,paddr,TRUE)) != NULL) {
      ptr = (char *)vm_ram->host_addr + (paddr - vm_ram->phys_addr);
      len = strlen(ptr);
   }

   return(len);
}

/* Physical memory dump (32-bit words) */
void physmem_dump_vm(vm_instance_t *vm,m_uint64_t paddr,m_uint32_t u32_count)
{
   m_uint32_t i;

   for(i=0;i<u32_count;i++) {
      vm_log(vm,"physmem_dump","0x%8.8llx: 0x%8.8x\n",
             paddr+(i<<2),physmem_copy_u32_from_vm(vm,paddr+(i<<2)));
   }
}