1 |
dpavlin |
7 |
/* |
2 |
|
|
* Cisco router simulation platform. |
3 |
|
|
* Copyright (c) 2005,2006 Christophe Fillot (cf@utc.fr) |
4 |
|
|
*/ |
5 |
|
|
|
6 |
|
|
#define _GNU_SOURCE |
7 |
|
|
#include <stdio.h> |
8 |
|
|
#include <stdlib.h> |
9 |
|
|
#include <unistd.h> |
10 |
|
|
#include <string.h> |
11 |
|
|
#include <sys/types.h> |
12 |
|
|
#include <sys/stat.h> |
13 |
|
|
#include <sys/mman.h> |
14 |
|
|
#include <fcntl.h> |
15 |
|
|
#include <assert.h> |
16 |
|
|
|
17 |
|
|
#include "cpu.h" |
18 |
|
|
#include "mips64_jit.h" |
19 |
|
|
#include "vm.h" |
20 |
|
|
#include "dynamips.h" |
21 |
|
|
#include "memory.h" |
22 |
|
|
#include "device.h" |
23 |
|
|
|
24 |
|
|
/* MTS access with special access mask.
 *
 * Handles a memory access that resolved to a special access mask
 * instead of a normal host mapping.  Behavior depends on 'mask':
 *   - MTS_ACC_U : undefined address; reads return 0, writes are
 *                 silently dropped, and no exception is raised.
 *   - MTS_ACC_T : TLB miss; CP0 BadVAddr is set and a TLB load/store
 *                 exception is triggered, then *exc is flagged.
 *   - MTS_ACC_AE: address error; CP0 BadVAddr is set and an address
 *                 error load/store exception is triggered, then *exc
 *                 is flagged.
 * For MTS_ACC_T / MTS_ACC_AE, a pure lookup (op_code ==
 * MIPS_MEMOP_LOOKUP) skips the exception but still sets *exc.
 * NOTE(review): any other mask value falls through the switch and
 * leaves *data / *exc untouched — presumably callers only pass the
 * three masks above; confirm at call sites.
 */
void mips64_access_special(cpu_mips_t *cpu,m_uint64_t vaddr,m_uint32_t mask,
                           u_int op_code,u_int op_type,u_int op_size,
                           m_uint64_t *data,u_int *exc)
{
   switch(mask) {
      case MTS_ACC_U:
#if DEBUG_MTS_ACC_U
         if (op_type == MTS_READ)
            cpu_log(cpu->gen,
                    "MTS","read access to undefined address 0x%llx at "
                    "pc=0x%llx (size=%u)\n",vaddr,cpu->pc,op_size);
         else
            cpu_log(cpu->gen,
                    "MTS","write access to undefined address 0x%llx at "
                    "pc=0x%llx, value=0x%8.8llx (size=%u)\n",
                    vaddr,cpu->pc,*data,op_size);
#endif
         /* Undefined access: reads return zero, writes are ignored */
         if (op_type == MTS_READ)
            *data = 0;
         break;

      case MTS_ACC_T:
         if (op_code != MIPS_MEMOP_LOOKUP) {
#if DEBUG_MTS_ACC_T
            cpu_log(cpu->gen,
                    "MTS","TLB exception for address 0x%llx at pc=0x%llx "
                    "(%s access, size=%u)\n",
                    vaddr,cpu->pc,(op_type == MTS_READ) ?
                    "read":"write",op_size);
            mips64_dump_regs(cpu->gen);
#if MEMLOG_ENABLE
            memlog_dump(cpu->gen);
#endif
#endif
            /* record the faulting virtual address for the OS handler */
            cpu->cp0.reg[MIPS_CP0_BADVADDR] = vaddr;

            if (op_type == MTS_READ)
               mips64_trigger_exception(cpu,MIPS_CP0_CAUSE_TLB_LOAD,0);
            else
               mips64_trigger_exception(cpu,MIPS_CP0_CAUSE_TLB_SAVE,0);
         }

         *exc = 1;
         break;

      case MTS_ACC_AE:
         if (op_code != MIPS_MEMOP_LOOKUP) {
#if DEBUG_MTS_ACC_AE
            cpu_log(cpu->gen,
                    "MTS","AE exception for address 0x%llx at pc=0x%llx "
                    "(%s access)\n",
                    vaddr,cpu->pc,(op_type == MTS_READ) ? "read":"write");
#endif
            /* record the faulting virtual address for the OS handler */
            cpu->cp0.reg[MIPS_CP0_BADVADDR] = vaddr;

            if (op_type == MTS_READ)
               mips64_trigger_exception(cpu,MIPS_CP0_CAUSE_ADDR_LOAD,0);
            else
               mips64_trigger_exception(cpu,MIPS_CP0_CAUSE_ADDR_SAVE,0);
         }

         *exc = 1;
         break;
   }
}
90 |
|
|
|
91 |
|
|
/* === MTS for 64-bit address space ======================================= */
/* Instantiate the generic MTS template (mips_mts.c) for the 64-bit
 * address mode: names are expanded as mts64_* / mips64_mts64_*.
 * NOTE(review): the 32-bit instantiation below redefines these macros
 * without a visible #undef — presumably mips_mts.c #undef's them at its
 * end; confirm in mips_mts.c. */
#define MTS_ADDR_SIZE 64
#define MTS_NAME(name) mts64_##name
#define MTS_NAME_UP(name) MTS64_##name
#define MTS_PROTO(name) mips64_mts64_##name
#define MTS_PROTO_UP(name) MIPS64_MTS64_##name

#include "mips_mts.c"
99 |
|
|
|
100 |
|
|
/* === MTS for 32-bit address space ======================================= */
/* Second instantiation of the generic MTS template (mips_mts.c), this
 * time for the 32-bit address mode: names expand as mts32_* /
 * mips64_mts32_*.
 * NOTE(review): these #defines assume the macros are no longer defined
 * here — presumably mips_mts.c #undef's them at its end; confirm. */
#define MTS_ADDR_SIZE 32
#define MTS_NAME(name) mts32_##name
#define MTS_NAME_UP(name) MTS32_##name
#define MTS_PROTO(name) mips64_mts32_##name
#define MTS_PROTO_UP(name) MIPS64_MTS32_##name

#include "mips_mts.c"
108 |
|
|
|
109 |
|
|
/* === Specific operations for MTS64 ====================================== */ |
110 |
|
|
|
111 |
|
|
/* MTS64 slow lookup.
 *
 * Resolves a 64-bit virtual address after the MTS hash cache missed.
 * The top 24 bits of the address ('zone') select the address-space
 * segment: TLB-mapped segments go through the CP0 TLB, the direct
 * ckseg0/ckseg1 and xkphys windows are mapped arithmetically.
 *
 * Returns the cache entry describing the page on success.  On failure
 * it routes through mips64_access_special() (which may raise a CPU
 * exception and set *exc) and returns NULL.
 */
static mts64_entry_t *
mips64_mts64_slow_lookup(cpu_mips_t *cpu,m_uint64_t vaddr,
                         u_int op_code,u_int op_size,
                         u_int op_type,m_uint64_t *data,
                         u_int *exc,mts64_entry_t *alt_entry)
{
   m_uint32_t hash_bucket,zone,sub_zone,cca;
   mts64_entry_t *entry;
   mts_map_t map;

   map.tlb_index = -1;
   hash_bucket = MTS64_HASH(vaddr);
   entry = &cpu->mts_u.mts64_cache[hash_bucket];
   zone = vaddr >> 40;   /* top 24 bits select the address-space zone */

#if DEBUG_MTS_STATS
   cpu->mts_misses++;
#endif

   switch(zone) {
      case 0x000000:    /* xkuseg */
      case 0x400000:    /* xksseg */
      case 0xc00000:    /* xkseg */
         /* trigger TLB exception if no matching entry found */
         if (!mips64_cp0_tlb_lookup(cpu,vaddr,&map))
            goto err_tlb;

         if (!(entry = mips64_mts64_map(cpu,op_type,&map,entry,alt_entry)))
            goto err_undef;

         return(entry);

      case 0xffffff:
         /* 32-bit compatibility segments: select by address bits 31:29 */
         sub_zone = (vaddr >> 29) & 0x7FF;

         switch(sub_zone) {
            case 0x7fc:    /* ckseg0 */
               /* unmapped, cached: fixed offset to physical memory */
               map.vaddr  = vaddr & MIPS_MIN_PAGE_MASK;
               map.paddr  = map.vaddr - 0xFFFFFFFF80000000ULL;
               map.cached = TRUE;

               if (!(entry = mips64_mts64_map(cpu,op_type,&map,
                                              entry,alt_entry)))
                  goto err_undef;

               return(entry);

            case 0x7fd:    /* ckseg1 */
               /* unmapped, uncached: fixed offset to physical memory */
               map.vaddr  = vaddr & MIPS_MIN_PAGE_MASK;
               map.paddr  = map.vaddr - 0xFFFFFFFFA0000000ULL;
               map.cached = FALSE;

               if (!(entry = mips64_mts64_map(cpu,op_type,&map,
                                              entry,alt_entry)))
                  goto err_undef;

               return(entry);

            case 0x7fe:    /* cksseg */
            case 0x7ff:    /* ckseg3 */
               /* trigger TLB exception if no matching entry found */
               if (!mips64_cp0_tlb_lookup(cpu,vaddr,&map))
                  goto err_tlb;

               if (!(entry = mips64_mts64_map(cpu,op_type,
                                              &map,entry,alt_entry)))
                  goto err_undef;

               return(entry);

            default:
               /* Invalid zone: generate Address Error (AE) exception */
               goto err_address;
         }
         break;

      /* xkphys: direct window to physical memory */
      case 0x800000:
      case 0x880000:
      case 0x900000:
      case 0x980000:
      case 0xa00000:
      case 0xa80000:
      case 0xb00000:
      case 0xb80000:
         /* cache coherency attribute bits select cached/uncached */
         cca = (vaddr >> MIPS64_XKPHYS_CCA_SHIFT) & 0x03;
         map.cached = mips64_cca_cached(cca);
         map.vaddr  = vaddr & MIPS_MIN_PAGE_MASK;
         map.paddr  = (vaddr & MIPS64_XKPHYS_PHYS_MASK);
         map.paddr &= MIPS_MIN_PAGE_MASK;

         if (!(entry = mips64_mts64_map(cpu,op_type,&map,entry,alt_entry)))
            goto err_undef;

         return(entry);

      default:
         /* Invalid zone: generate Address Error (AE) exception */
         goto err_address;
   }

   /* error exits: report the condition, possibly raise an exception */
err_undef:
   mips64_access_special(cpu,vaddr,MTS_ACC_U,op_code,op_type,op_size,data,exc);
   return NULL;
err_address:
   mips64_access_special(cpu,vaddr,MTS_ACC_AE,op_code,op_type,op_size,
                         data,exc);
   return NULL;
err_tlb:
   mips64_access_special(cpu,vaddr,MTS_ACC_T,op_code,op_type,op_size,data,exc);
   return NULL;
}
224 |
|
|
|
225 |
|
|
/* MTS64 access.
 *
 * Fast-path 64-bit memory access: translate 'vaddr' and return a host
 * pointer where the raw data lives.  Returns NULL when the access was
 * completed some other way — device handler (dev_access_fast may have
 * consumed/produced *data itself) or an exception (*exc is set by the
 * slow lookup path).
 * A write hitting a copy-on-write page (sparse device) is forced down
 * the slow path so the COW can be resolved.
 */
static forced_inline
void *mips64_mts64_access(cpu_mips_t *cpu,m_uint64_t vaddr,
                          u_int op_code,u_int op_size,
                          u_int op_type,m_uint64_t *data,
                          u_int *exc)
{
   mts64_entry_t *entry,alt_entry;
   m_uint32_t hash_bucket;
   m_iptr_t haddr;
   u_int dev_id;
   int cow;

#if MEMLOG_ENABLE
   /* Record the memory access */
   memlog_rec_access(cpu->gen,vaddr,*data,op_size,op_type);
#endif

   *exc = 0;
   hash_bucket = MTS64_HASH(vaddr);
   entry = &cpu->mts_u.mts64_cache[hash_bucket];

#if DEBUG_MTS_STATS
   cpu->mts_lookups++;
#endif

   /* Copy-On-Write for sparse device ? */
   cow = (op_type == MTS_WRITE) && (entry->flags & MTS_FLAG_COW);

   /* Slow lookup if nothing found in cache */
   if (unlikely(((vaddr & MIPS_MIN_PAGE_MASK) != entry->gvpa) || cow)) {
      entry = mips64_mts64_slow_lookup(cpu,vaddr,op_code,op_size,op_type,
                                       data,exc,&alt_entry);
      if (!entry)
         return NULL;

      /* Device page: dispatch to the device access handler */
      if (entry->flags & MTS_FLAG_DEV) {
         dev_id = (entry->hpa & MTS_DEVID_MASK) >> MTS_DEVID_SHIFT;
         haddr = entry->hpa & MTS_DEVOFF_MASK;
         haddr += vaddr - entry->gvpa;
         return(dev_access_fast(cpu->gen,dev_id,haddr,op_size,op_type,data));
      }
   }

   /* Raw memory access: host page base + offset within the page */
   haddr = entry->hpa + (vaddr & MIPS_MIN_PAGE_IMASK);
#if MEMLOG_ENABLE
   memlog_update_read(cpu->gen,haddr);
#endif
   return((void *)haddr);
}
276 |
|
|
|
277 |
|
|
/* MTS64 virtual address to physical page translation */ |
278 |
|
|
static fastcall int mips64_mts64_translate(cpu_mips_t *cpu,m_uint64_t vaddr, |
279 |
|
|
m_uint32_t *phys_page) |
280 |
|
|
{ |
281 |
|
|
mts64_entry_t *entry,alt_entry; |
282 |
|
|
m_uint32_t hash_bucket; |
283 |
|
|
m_uint64_t data = 0; |
284 |
|
|
u_int exc = 0; |
285 |
|
|
|
286 |
|
|
hash_bucket = MTS64_HASH(vaddr); |
287 |
|
|
entry = &cpu->mts_u.mts64_cache[hash_bucket]; |
288 |
|
|
|
289 |
|
|
/* Slow lookup if nothing found in cache */ |
290 |
|
|
if (unlikely((vaddr & MIPS_MIN_PAGE_MASK) != entry->gvpa)) { |
291 |
|
|
entry = mips64_mts64_slow_lookup(cpu,vaddr,MIPS_MEMOP_LOOKUP,4,MTS_READ, |
292 |
|
|
&data,&exc,&alt_entry); |
293 |
|
|
if (!entry) |
294 |
|
|
return(-1); |
295 |
|
|
} |
296 |
|
|
|
297 |
|
|
*phys_page = entry->gppa >> MIPS_MIN_PAGE_SHIFT; |
298 |
|
|
return(0); |
299 |
|
|
} |
300 |
|
|
|
301 |
|
|
/* === Specific operations for MTS32 ====================================== */ |
302 |
|
|
|
303 |
|
|
/* MTS32 slow lookup */ |
304 |
|
|
static mts32_entry_t * |
305 |
|
|
mips64_mts32_slow_lookup(cpu_mips_t *cpu,m_uint64_t vaddr, |
306 |
|
|
u_int op_code,u_int op_size, |
307 |
|
|
u_int op_type,m_uint64_t *data, |
308 |
|
|
u_int *exc,mts32_entry_t *alt_entry) |
309 |
|
|
{ |
310 |
|
|
m_uint32_t hash_bucket,zone; |
311 |
|
|
mts32_entry_t *entry; |
312 |
|
|
mts_map_t map; |
313 |
|
|
|
314 |
|
|
map.tlb_index = -1; |
315 |
|
|
hash_bucket = MTS32_HASH(vaddr); |
316 |
|
|
entry = &cpu->mts_u.mts32_cache[hash_bucket]; |
317 |
|
|
zone = (vaddr >> 29) & 0x7; |
318 |
|
|
|
319 |
|
|
#if DEBUG_MTS_STATS |
320 |
|
|
cpu->mts_misses++; |
321 |
|
|
#endif |
322 |
|
|
|
323 |
|
|
switch(zone) { |
324 |
|
|
case 0x00 ... 0x03: /* kuseg */ |
325 |
|
|
/* trigger TLB exception if no matching entry found */ |
326 |
|
|
if (!mips64_cp0_tlb_lookup(cpu,vaddr,&map)) |
327 |
|
|
goto err_tlb; |
328 |
|
|
|
329 |
|
|
if (!(entry = mips64_mts32_map(cpu,op_type,&map,entry,alt_entry))) |
330 |
|
|
goto err_undef; |
331 |
|
|
|
332 |
|
|
return(entry); |
333 |
|
|
|
334 |
|
|
case 0x04: /* kseg0 */ |
335 |
|
|
map.vaddr = vaddr & MIPS_MIN_PAGE_MASK; |
336 |
|
|
map.paddr = map.vaddr - 0xFFFFFFFF80000000ULL; |
337 |
|
|
map.cached = TRUE; |
338 |
|
|
|
339 |
|
|
if (!(entry = mips64_mts32_map(cpu,op_type,&map,entry,alt_entry))) |
340 |
|
|
goto err_undef; |
341 |
|
|
|
342 |
|
|
return(entry); |
343 |
|
|
|
344 |
|
|
case 0x05: /* kseg1 */ |
345 |
|
|
map.vaddr = vaddr & MIPS_MIN_PAGE_MASK; |
346 |
|
|
map.paddr = map.vaddr - 0xFFFFFFFFA0000000ULL; |
347 |
|
|
map.cached = FALSE; |
348 |
|
|
|
349 |
|
|
if (!(entry = mips64_mts32_map(cpu,op_type,&map,entry,alt_entry))) |
350 |
|
|
goto err_undef; |
351 |
|
|
|
352 |
|
|
return(entry); |
353 |
|
|
|
354 |
|
|
case 0x06: /* ksseg */ |
355 |
|
|
case 0x07: /* kseg3 */ |
356 |
|
|
/* trigger TLB exception if no matching entry found */ |
357 |
|
|
if (!mips64_cp0_tlb_lookup(cpu,vaddr,&map)) |
358 |
|
|
goto err_tlb; |
359 |
|
|
|
360 |
|
|
if (!(entry = mips64_mts32_map(cpu,op_type,&map,entry,alt_entry))) |
361 |
|
|
goto err_undef; |
362 |
|
|
|
363 |
|
|
return(entry); |
364 |
|
|
} |
365 |
|
|
|
366 |
|
|
err_undef: |
367 |
|
|
mips64_access_special(cpu,vaddr,MTS_ACC_U,op_code,op_type,op_size,data,exc); |
368 |
|
|
return NULL; |
369 |
|
|
err_address: |
370 |
|
|
mips64_access_special(cpu,vaddr,MTS_ACC_AE,op_code,op_type,op_size, |
371 |
|
|
data,exc); |
372 |
|
|
return NULL; |
373 |
|
|
err_tlb: |
374 |
|
|
mips64_access_special(cpu,vaddr,MTS_ACC_T,op_code,op_type,op_size,data,exc); |
375 |
|
|
return NULL; |
376 |
|
|
} |
377 |
|
|
|
378 |
|
|
/* MTS32 access.
 *
 * Fast-path 32-bit-mode memory access: translate 'vaddr' and return a
 * host pointer where the raw data lives.  Returns NULL when the access
 * was completed some other way — device handler (dev_access_fast may
 * have consumed/produced *data itself) or an exception (*exc is set by
 * the slow lookup path).
 * A write hitting a copy-on-write page (sparse device) is forced down
 * the slow path so the COW can be resolved.
 */
static forced_inline
void *mips64_mts32_access(cpu_mips_t *cpu,m_uint64_t vaddr,
                          u_int op_code,u_int op_size,
                          u_int op_type,m_uint64_t *data,
                          u_int *exc)
{
   mts32_entry_t *entry,alt_entry;
   m_uint32_t hash_bucket;
   m_iptr_t haddr;
   u_int dev_id;
   int cow;

#if MEMLOG_ENABLE
   /* Record the memory access */
   memlog_rec_access(cpu->gen,vaddr,*data,op_size,op_type);
#endif

   *exc = 0;
   hash_bucket = MTS32_HASH(vaddr);
   entry = &cpu->mts_u.mts32_cache[hash_bucket];

#if DEBUG_MTS_STATS
   cpu->mts_lookups++;
#endif

   /* Copy-On-Write for sparse device ? */
   cow = (op_type == MTS_WRITE) && (entry->flags & MTS_FLAG_COW);

   /* Slow lookup if nothing found in cache.
    * Note: only the low 32 bits of vaddr are compared here, since
    * cache entries are tagged with 32-bit page addresses. */
   if (unlikely((((m_uint32_t)vaddr & MIPS_MIN_PAGE_MASK) != entry->gvpa) ||
                cow))
   {
      entry = mips64_mts32_slow_lookup(cpu,vaddr,op_code,op_size,op_type,
                                       data,exc,&alt_entry);
      if (!entry)
         return NULL;

      /* Device page: dispatch to the device access handler */
      if (entry->flags & MTS_FLAG_DEV) {
         dev_id = (entry->hpa & MTS_DEVID_MASK) >> MTS_DEVID_SHIFT;
         haddr = entry->hpa & MTS_DEVOFF_MASK;
         haddr += vaddr - entry->gvpa;
         return(dev_access_fast(cpu->gen,dev_id,haddr,op_size,op_type,data));
      }
   }

   /* Raw memory access: host page base + offset within the page */
   haddr = entry->hpa + (vaddr & MIPS_MIN_PAGE_IMASK);
#if MEMLOG_ENABLE
   memlog_update_read(cpu->gen,haddr);
#endif
   return((void *)haddr);
}
431 |
|
|
|
432 |
|
|
/* MTS32 virtual address to physical page translation */ |
433 |
|
|
static fastcall int mips64_mts32_translate(cpu_mips_t *cpu,m_uint64_t vaddr, |
434 |
|
|
m_uint32_t *phys_page) |
435 |
|
|
{ |
436 |
|
|
mts32_entry_t *entry,alt_entry; |
437 |
|
|
m_uint32_t hash_bucket; |
438 |
|
|
m_uint64_t data = 0; |
439 |
|
|
u_int exc = 0; |
440 |
|
|
|
441 |
|
|
hash_bucket = MTS32_HASH(vaddr); |
442 |
|
|
entry = &cpu->mts_u.mts32_cache[hash_bucket]; |
443 |
|
|
|
444 |
|
|
/* Slow lookup if nothing found in cache */ |
445 |
|
|
if (unlikely(((m_uint32_t)vaddr & MIPS_MIN_PAGE_MASK) != entry->gvpa)) { |
446 |
|
|
entry = mips64_mts32_slow_lookup(cpu,vaddr,MIPS_MEMOP_LOOKUP,4,MTS_READ, |
447 |
|
|
&data,&exc,&alt_entry); |
448 |
|
|
if (!entry) |
449 |
|
|
return(-1); |
450 |
|
|
} |
451 |
|
|
|
452 |
|
|
*phys_page = entry->gppa >> MIPS_MIN_PAGE_SHIFT; |
453 |
|
|
return(0); |
454 |
|
|
} |
455 |
|
|
|
456 |
|
|
/* ======================================================================== */ |
457 |
|
|
|
458 |
|
|
/* Shutdown MTS subsystem */ |
459 |
|
|
void mips64_mem_shutdown(cpu_mips_t *cpu) |
460 |
|
|
{ |
461 |
|
|
if (cpu->mts_shutdown != NULL) |
462 |
|
|
cpu->mts_shutdown(cpu); |
463 |
|
|
} |
464 |
|
|
|
465 |
|
|
/* Set the address mode */ |
466 |
|
|
int mips64_set_addr_mode(cpu_mips_t *cpu,u_int addr_mode) |
467 |
|
|
{ |
468 |
|
|
if (cpu->addr_mode != addr_mode) { |
469 |
|
|
mips64_mem_shutdown(cpu); |
470 |
|
|
|
471 |
|
|
switch(addr_mode) { |
472 |
|
|
case 32: |
473 |
|
|
mips64_mts32_init(cpu); |
474 |
|
|
mips64_mts32_init_memop_vectors(cpu); |
475 |
|
|
break; |
476 |
|
|
case 64: |
477 |
|
|
mips64_mts64_init(cpu); |
478 |
|
|
mips64_mts64_init_memop_vectors(cpu); |
479 |
|
|
break; |
480 |
|
|
default: |
481 |
|
|
fprintf(stderr, |
482 |
|
|
"mts_set_addr_mode: internal error (addr_mode=%u)\n", |
483 |
|
|
addr_mode); |
484 |
|
|
exit(EXIT_FAILURE); |
485 |
|
|
} |
486 |
|
|
} |
487 |
|
|
|
488 |
|
|
return(0); |
489 |
|
|
} |