/*
 *  Copyright (C) 2003-2005  Anders Gavare.  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. The name of the author may not be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 *  ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 *  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 *  OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 *  HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 *  OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 *  SUCH DAMAGE.
 *
 *
 *  $Id: memory_mips_v2p.c,v 1.3 2005/12/26 12:32:10 debug Exp $
 *
 *  Included from memory.c.
 */

/*
 *  translate_address():
 *
 *  Don't call this function if userland_emul is non-NULL, or if cpu is NULL.
 *
 *  TODO: vpn2 is a bad name for R2K/R3K, as it is the actual framenumber.
 *
 *  Return values:
 *	0  Failure
 *	1  Success, the page is readable only
 *	2  Success, the page is read/write
 */
int TRANSLATE_ADDRESS(struct cpu *cpu, uint64_t vaddr,
	uint64_t *return_addr, int flags)
{
	int writeflag = flags & FLAG_WRITEFLAG? MEM_WRITE : MEM_READ;
	int no_exceptions = flags & FLAG_NOEXCEPTIONS;
	int instr = flags & FLAG_INSTR;
	int ksu, use_tlb, status, i;
	uint64_t vaddr_vpn2=0, vaddr_asid=0;
	/*  exccode == -1 means "not yet decided"; tlb_refill defaults to 1
	    (a TLB refill) and is cleared on the non-refill exception paths
	    below (TLB mod/invalid, address errors):  */
	int exccode, tlb_refill;
	struct mips_coproc *cp0;
	int bintrans_cached = cpu->machine->bintrans_enable;

#ifdef V2P_MMU3K
	/*  R2000/R3000: fixed 4 KB pages, 64 TLB entries, 32-bit only:  */
	const int x_64 = 0;
	const int n_tlbs = 64;
	const int pmask = 0xfff;
#else
#ifdef V2P_MMU10K
	const uint64_t vpn2_mask = ENTRYHI_VPN2_MASK_R10K;
#else
#ifdef V2P_MMU4100
	/*  This is ugly  */
	const uint64_t vpn2_mask = ENTRYHI_VPN2_MASK | 0x1800;
#else
	const uint64_t vpn2_mask = ENTRYHI_VPN2_MASK;
#endif
#endif
	int x_64;	/*  non-zero for 64-bit address space accesses  */
	int pageshift, n_tlbs;
	int pmask;
#ifdef V2P_MMU4100
	/*  VR4100 supports 1 KB base pages, hence the different shifts:  */
	const int pagemask_mask = PAGEMASK_MASK_R4100;
	const int pagemask_shift = PAGEMASK_SHIFT_R4100;
	const int pfn_shift = 10;
#else
	const int pagemask_mask = PAGEMASK_MASK;
	const int pagemask_shift = PAGEMASK_SHIFT;
	const int pfn_shift = 12;
#endif
#endif	/*  !V2P_MMU3K  */


#ifdef USE_TINY_CACHE
	/*
	 *  Check the tiny translation cache first:
	 *
	 *  Only userland addresses are checked, because other addresses
	 *  are probably better off being statically translated, or through
	 *  the TLB.  (Note: When running with 64-bit addresses, this
	 *  will still produce the correct result. At worst, we check the
	 *  cache in vain, but the result should still be correct.)
	 */
	if (!bintrans_cached &&
	    (vaddr & 0xc0000000ULL) != 0x80000000ULL) {
		/*  wf is 1 for reads, 2 for writes; a cached entry is only
		    usable if its wf is at least as permissive as needed:  */
		int i, wf = 1 + (writeflag == MEM_WRITE);
		uint64_t vaddr_shift_12 = vaddr >> 12;

		if (instr) {
			/*  Code:  */
			for (i=0; i<N_TRANSLATION_CACHE_INSTR; i++) {
				if (cpu->cd.mips.translation_cache_instr[i].wf
				    >= wf && vaddr_shift_12 == (cpu->cd.mips.
				    translation_cache_instr[i].vaddr_pfn)) {
					*return_addr = cpu->cd.mips.
					    translation_cache_instr[i].paddr
					    | (vaddr & 0xfff);
					return cpu->cd.mips.
					    translation_cache_instr[i].wf;
				}
			}
		} else {
			/*  Data:  */
			for (i=0; i<N_TRANSLATION_CACHE_DATA; i++) {
				if (cpu->cd.mips.translation_cache_data[i].wf
				    >= wf && vaddr_shift_12 == (cpu->cd.mips.
				    translation_cache_data[i].vaddr_pfn)) {
					*return_addr = cpu->cd.mips.
					    translation_cache_data[i].paddr
					    | (vaddr & 0xfff);
					return cpu->cd.mips.
					    translation_cache_data[i].wf;
				}
			}
		}
	}
#endif

	exccode = -1;
	tlb_refill = 1;

	/*  Cached values:  */
	cp0 = cpu->cd.mips.coproc[0];
	status = cp0->reg[COP0_STATUS];

	/*
	 *  R4000 Address Translation:
	 *
	 *  An address may be in one of the kernel segments, that
	 *  are directly mapped, or the address can go through the
	 *  TLBs to be turned into a physical address.
	 *
	 *  KSU: EXL: ERL: X:  Name:   Range:
	 *  ---- ---- ---- --  -----   ------
	 *
	 *   10   0    0   0   useg    0 - 0x7fffffff    (2GB)  (via TLB)
	 *   10   0    0   1   xuseg   0 - 0xffffffffff  (1TB)  (via TLB)
	 *
	 *   01   0    0   0   suseg   0 - 0x7fffffff                  (2GB via TLB)
	 *   01   0    0   0   ssseg   0xc0000000 - 0xdfffffff  (0.5 GB via TLB)
	 *   01   0    0   1   xsuseg  0 - 0xffffffffff  (1TB)  (via TLB)
	 *   01   0    0   1   xsseg   0x4000000000000000 - 0x400000ffffffffff
	 *                                                (1TB)  (via TLB)
	 *   01   0    0   1   csseg   0xffffffffc0000000 - 0xffffffffdfffffff
	 *                                                (0.5TB)  (via TLB)
	 *
	 *   00   x    x   0   kuseg   0 - 0x7fffffff  (2GB)  (via TLB)  (*)
	 *   00   x    x   0   kseg0   0x80000000 - 0x9fffffff (0.5GB)
	 *                             unmapped, cached
	 *   00   x    x   0   kseg1   0xa0000000 - 0xbfffffff (0.5GB)
	 *                             unmapped, uncached
	 *   00   x    x   0   ksseg   0xc0000000 - 0xdfffffff (0.5GB)
	 *                             (via TLB)
	 *   00   x    x   0   kseg3   0xe0000000 - 0xffffffff (0.5GB)
	 *                             (via TLB)
	 *   00   x    x   1   xksuseg 0 - 0xffffffffff  (1TB) (via TLB) (*)
	 *   00   x    x   1   xksseg  0x4000000000000000 - 0x400000ffffffffff
	 *                                                (1TB)  (via TLB)
	 *   00   x    x   1   xkphys  0x8000000000000000 - 0xbfffffffffffffff
	 *                             todo
	 *   00   x    x   1   xkseg   0xc000000000000000 - 0xc00000ff7fffffff
	 *                             todo
	 *   00   x    x   1   ckseg0  0xffffffff80000000 - 0xffffffff9fffffff
	 *                             like kseg0
	 *   00   x    x   1   ckseg1  0xffffffffa0000000 - 0xffffffffbfffffff
	 *                             like kseg1
	 *   00   x    x   1   cksseg  0xffffffffc0000000 - 0xffffffffdfffffff
	 *                             like ksseg
	 *   00   x    x   1   ckseg3  0xffffffffe0000000 - 0xffffffffffffffff
	 *                             like kseg2
	 *
	 *  (*) = if ERL=1 then kuseg is not via TLB, but unmapped,
	 *  uncached physical memory.
	 *
	 *  (KSU==0 or EXL=1 or ERL=1 is enough to use k*seg*.)
	 *
	 *  An invalid address causes an Address Error.
	 *
	 *  See chapter 4, page 96, in the R4000 manual for more info!
	 */

#ifdef V2P_MMU3K
	/*  R3000 has only a single user/kernel mode bit in the status reg:  */
	if (status & MIPS1_SR_KU_CUR)
		ksu = KSU_USER;
	else
		ksu = KSU_KERNEL;

	/*  These are needed later:  */
	vaddr_asid = cp0->reg[COP0_ENTRYHI] & R2K3K_ENTRYHI_ASID_MASK;
	vaddr_vpn2 = vaddr & R2K3K_ENTRYHI_VPN_MASK;
#else
	/*
	 *  R4000 and others:
	 *
	 *  kx,sx,ux = 0 for 32-bit addressing,
	 *  1 for 64-bit addressing.
	 */
	n_tlbs = cpu->cd.mips.cpu_type.nr_of_tlb_entries;

	/*  EXL or ERL force kernel mode regardless of the KSU field:  */
	ksu = (status & STATUS_KSU_MASK) >> STATUS_KSU_SHIFT;
	if (status & (STATUS_EXL | STATUS_ERL))
		ksu = KSU_KERNEL;

	/*  Assume KSU_USER.  */
	x_64 = status & STATUS_UX;

	if (ksu == KSU_KERNEL)
		x_64 = status & STATUS_KX;
	else if (ksu == KSU_SUPERVISOR)
		x_64 = status & STATUS_SX;

	/*  This suppresses a compiler warning:  */
	pageshift = 12;

	/*
	 *  Physical addressing on R10000 etc:
	 *
	 *  TODO: Probably only accessible in kernel mode.
	 *
	 *  0x9000000080000000 = disable L2 cache (?)
	 *  TODO: Make this correct.
	 */
	if ((vaddr >> 62) == 0x2) {
		/*
		 *  On IP30, addresses such as 0x900000001f600050 are used,
		 *  but also things like 0x90000000a0000000. (TODO)
		 *
		 *  On IP27 (and probably others), addresses such as
		 *  0x92... and 0x96... have to do with NUMA stuff.
		 */
		/*  xkphys-style direct mapping: keep the low 44 bits as the
		    physical address, always read/write:  */
		*return_addr = vaddr & (((uint64_t)1 << 44) - 1);
		return 2;
	}

	/*  This is needed later:  */
	vaddr_asid = cp0->reg[COP0_ENTRYHI] & ENTRYHI_ASID;
	/*  vpn2 depends on pagemask, which is not fixed on R4000;
	    it is computed per TLB entry inside the loop below.  */
#endif


	if (vaddr <= 0x7fffffff)
		use_tlb = 1;
	else {
#if 1
		/*  TODO: This should be removed, but it seems that other
		    bugs are triggered.  */
		/*  Sign-extend vaddr, if necessary:  */
		if ((vaddr >> 32) == 0 && vaddr & (uint32_t)0x80000000ULL)
			vaddr |= 0xffffffff00000000ULL;
#endif
		if (ksu == KSU_KERNEL) {
			/*  kseg0, kseg1: unmapped, physical = low 29 bits:  */
			if (vaddr >= (uint64_t)0xffffffff80000000ULL &&
			    vaddr <= (uint64_t)0xffffffffbfffffffULL) {
				*return_addr = vaddr & 0x1fffffff;
				return 2;
			}

			/*  TODO: supervisor stuff  */

			/*  other segments:  */
			use_tlb = 1;
		} else
			use_tlb = 0;
	}

	if (use_tlb) {
#ifndef V2P_MMU3K
		int odd = 0;
		uint64_t cached_lo1 = 0;
#endif
		int g_bit, v_bit, d_bit;
		uint64_t cached_hi, cached_lo0;
		uint64_t entry_vpn2 = 0, entry_asid, pfn;

		/*  Linear scan over all TLB entries:  */
		for (i=0; i<n_tlbs; i++) {
#ifdef V2P_MMU3K
			/*  R3000 or similar:  */
			cached_hi = cp0->tlbs[i].hi;
			cached_lo0 = cp0->tlbs[i].lo0;

			entry_vpn2 = cached_hi & R2K3K_ENTRYHI_VPN_MASK;
			entry_asid = cached_hi & R2K3K_ENTRYHI_ASID_MASK;
			g_bit = cached_lo0 & R2K3K_ENTRYLO_G;
			v_bit = cached_lo0 & R2K3K_ENTRYLO_V;
			d_bit = cached_lo0 & R2K3K_ENTRYLO_D;
#else
			/*  R4000 or similar: page size varies per entry, so
			    both entry_vpn2 and vaddr_vpn2 are recomputed for
			    each entry:  */
			pmask = cp0->tlbs[i].mask & pagemask_mask;
			cached_hi = cp0->tlbs[i].hi;
			cached_lo0 = cp0->tlbs[i].lo0;
			cached_lo1 = cp0->tlbs[i].lo1;

			/*  Optimized for minimum page size:  */
			if (pmask == 0) {
				pageshift = pagemask_shift - 1;
				entry_vpn2 = (cached_hi & vpn2_mask)
				    >> pagemask_shift;
				vaddr_vpn2 = (vaddr & vpn2_mask)
				    >> pagemask_shift;
				pmask = (1 << (pagemask_shift-1)) - 1;
				odd = (vaddr >> (pagemask_shift-1)) & 1;
			} else {
				/*  Non-standard page mask:  */
				switch (pmask | ((1 << pagemask_shift) - 1)) {
				case 0x00007ff:	pageshift = 10; break;
				case 0x0001fff:	pageshift = 12; break;
				case 0x0007fff:	pageshift = 14; break;
				case 0x001ffff:	pageshift = 16; break;
				case 0x007ffff:	pageshift = 18; break;
				case 0x01fffff:	pageshift = 20; break;
				case 0x07fffff:	pageshift = 22; break;
				case 0x1ffffff:	pageshift = 24; break;
				case 0x7ffffff:	pageshift = 26; break;
				default:fatal("pmask=%08x\n", pmask);
					exit(1);
				}

				entry_vpn2 = (cached_hi &
				    vpn2_mask) >> (pageshift + 1);
				vaddr_vpn2 = (vaddr & vpn2_mask) >>
				    (pageshift + 1);
				pmask = (1 << pageshift) - 1;
				odd = (vaddr >> pageshift) & 1;
			}

			/*  Assume even virtual page...  */
			v_bit = cached_lo0 & ENTRYLO_V;
			d_bit = cached_lo0 & ENTRYLO_D;

#ifdef V2P_MMU8K
			/*
			 *  TODO:  I don't really know anything about the R8000.
			 *  http://futuretech.mirror.vuurwerk.net/i2sec7.html
			 *  says that it has a three-way associative TLB with
			 *  384 entries, 16KB page size, and some other things.
			 *
			 *  It feels like things like the valid bit (ala R4000)
			 *  and dirty bit are not implemented the same on R8000.
			 *
			 *  http://sgistuff.tastensuppe.de/documents/
			 *		R8000_chipset.html
			 *  also has some info, but no details.
			 */
			v_bit = 1;	/*  Big TODO  */
			d_bit = 1;
#endif

			entry_asid = cached_hi & ENTRYHI_ASID;

			/*  ... reload pfn, v_bit, d_bit if
			    it was the odd virtual page:  */
			if (odd) {
				v_bit = cached_lo1 & ENTRYLO_V;
				d_bit = cached_lo1 & ENTRYLO_D;
			}
#ifdef V2P_MMU4100
			/*  VR4100: the page is global only if both halves of
			    the entry have the G bit set:  */
			g_bit = cached_lo1 & cached_lo0 & ENTRYLO_G;
#else
			g_bit = cached_hi & TLB_G;
#endif

#endif

			/*  Is there a VPN and ASID match?  */
			if (entry_vpn2 == vaddr_vpn2 &&
			    (entry_asid == vaddr_asid || g_bit)) {
				/*  debug("OK MAP 1, i=%i { vaddr=%016llx "
				    "==> paddr %016llx v=%i d=%i "
				    "asid=0x%02x }\n", i, (long long)vaddr,
				    (long long) *return_addr, v_bit?1:0,
				    d_bit?1:0, vaddr_asid);  */
				if (v_bit) {
					/*  A write to a clean (d_bit==0) page
					    is a TLB modification exception:  */
					if (d_bit || (!d_bit &&
					    writeflag == MEM_READ)) {
						uint64_t paddr;
						/*  debug("OK MAP 2!!! { w=%i "
						    "vaddr=%016llx ==> d=%i v="
						    "%i paddr %016llx ",
						    writeflag, (long long)vaddr,
						    d_bit?1:0, v_bit?1:0,
						    (long long) *return_addr);
						    debug(", tlb entry %2i: ma"
						    "sk=%016llx hi=%016llx lo0"
						    "=%016llx lo1=%016llx\n",
						    i, cp0->tlbs[i].mask, cp0->
						    tlbs[i].hi, cp0->tlbs[i].
						    lo0, cp0->tlbs[i].lo1);
						*/
#ifdef V2P_MMU3K
						pfn = cached_lo0 &
						    R2K3K_ENTRYLO_PFN_MASK;
						paddr = pfn | (vaddr & pmask);
#else
						pfn = ((odd? cached_lo1 :
						    cached_lo0)
						    & ENTRYLO_PFN_MASK)
						    >> ENTRYLO_PFN_SHIFT;
						paddr = (pfn << pfn_shift) |
						    (vaddr & pmask);
#endif

						/*
						 *  Enter into the tiny trans-
						 *  lation cache (if enabled)
						 *  and return:
						 */
						if (!bintrans_cached)
							insert_into_tiny_cache(
							    cpu, instr, d_bit?
							    MEM_WRITE :
							    MEM_READ,
							    vaddr, paddr);

						*return_addr = paddr;
						return d_bit? 2 : 1;
					} else {
						/*  TLB modif. exception  */
						tlb_refill = 0;
						exccode = EXCEPTION_MOD;
						goto exception;
					}
				} else {
					/*  TLB invalid exception  */
					tlb_refill = 0;
					goto exception;
				}
			}
		}
	}

	/*
	 *  We are here if for example userland code tried to access
	 *  kernel memory, OR if there was a TLB refill.
	 */

	if (!use_tlb) {
		/*  Address error (not a TLB miss): e.g. user-mode access to
		    a kernel segment:  */
		tlb_refill = 0;
		if (writeflag == MEM_WRITE)
			exccode = EXCEPTION_ADES;
		else
			exccode = EXCEPTION_ADEL;
	}

exception:
	if (no_exceptions)
		return 0;

	/*  TLB Load or Store exception (exccode not yet decided above):  */
	if (exccode == -1) {
		if (writeflag == MEM_WRITE)
			exccode = EXCEPTION_TLBS;
		else
			exccode = EXCEPTION_TLBL;
	}

#ifdef V2P_MMU3K
	/*  mips_cpu_exception() expects unshifted asid/vpn values:  */
	vaddr_asid >>= R2K3K_ENTRYHI_ASID_SHIFT;
	vaddr_vpn2 >>= 12;
#endif

	mips_cpu_exception(cpu, exccode, tlb_refill, vaddr,
	    0, vaddr_vpn2, vaddr_asid, x_64);

	/*  Return failure:  */
	return 0;
}
482 |
|