 * SUCH DAMAGE.
 *
 *
 * $Id: memory_sh.c,v 1.12 2006/10/28 04:00:32 debug Exp $
 */


#include <stdio.h>

#include "memory.h"
#include "misc.h"

#include "sh4_exception.h"
#include "sh4_mmu.h"

/*
 *  translate_via_mmu():
 *
 *  Scan the UTLB for a matching virtual address. If a match was found, then
 *  check permission bits etc. If everything was ok, then return the physical
 *  page address, otherwise cause an exception.
 *
 *  The implementation should (hopefully) be quite complete, except for lack
 *  of "Multiple matching entries" detection. (On a real CPU, these would
 *  cause exceptions.)
 *
 *  cpu:            CPU whose MMU state (pteh, mmucr, sr, itlb/utlb arrays)
 *                  is consulted, and possibly updated (URC counter, ITLB
 *                  refill).
 *  vaddr:          32-bit virtual address to translate.
 *  return_paddr:   output; receives the physical address on success.
 *  flags:          combination of FLAG_WRITEFLAG, FLAG_INSTR and
 *                  FLAG_NOEXCEPTIONS.
 *
 *  Same return values as sh_translate_v2p().
 */
static int translate_via_mmu(struct cpu *cpu, uint32_t vaddr,
	uint64_t *return_paddr, int flags)
{
	int wf = flags & FLAG_WRITEFLAG;
	int i, urb, urc, require_asid_match, cur_asid, expevt = 0;
	uint32_t hi, lo = 0, mask = 0;
	int sh;				/*  Shared  */
	int d;				/*  Dirty bit  */
	int v;				/*  Valid bit  */
	int pr;				/*  Protection  */
	int i_start;

	/*  The current ASID comes from the PTEH register. An ASID match is
	    required unless MMUCR.SV is set while running in privileged
	    (SR.MD) mode.  */
	cur_asid = cpu->cd.sh.pteh & SH4_PTEH_ASID_MASK;
	require_asid_match = !(cpu->cd.sh.mmucr & SH4_MMUCR_SV)
	    || !(cpu->cd.sh.sr & SH_SR_MD);

	if (!(flags & FLAG_NOEXCEPTIONS)) {
		/*
		 *  Increase URC every time the UTLB is accessed. (Note:
		 *  According to the SH4 manual, the URC should not be
		 *  increased when running the ldtlb instruction. Perhaps this
		 *  is a good place? Perhaps it is better to just set it to a
		 *  random value? TODO: Find out.
		 *
		 *  (Skipped entirely for FLAG_NOEXCEPTIONS probes, so that
		 *  pure lookups do not perturb the replacement counter.)
		 */
		urb = (cpu->cd.sh.mmucr & SH4_MMUCR_URB_MASK) >>
		    SH4_MMUCR_URB_SHIFT;
		urc = (cpu->cd.sh.mmucr & SH4_MMUCR_URC_MASK) >>
		    SH4_MMUCR_URC_SHIFT;
		/*  fatal("urc = %i ==> ", urc);  */
		urc ++;
		/*  Wrap at the table size, or at URB when URB is nonzero:  */
		if (urc == SH_N_UTLB_ENTRIES || (urb > 0 && urc == urb))
			urc = 0;
		/*  fatal("%i\n", urc);  */

		cpu->cd.sh.mmucr &= ~SH4_MMUCR_URC_MASK;
		cpu->cd.sh.mmucr |= (urc << SH4_MMUCR_URC_SHIFT);
	}

	/*
	 *  When doing Instruction lookups, the ITLB should be scanned first.
	 *  This is done by using negative i. (Ugly hack, but works.)
	 */
	if (flags & FLAG_INSTR)
		i_start = -SH_N_ITLB_ENTRIES;
	else
		i_start = 0;

	for (i=i_start; i<SH_N_UTLB_ENTRIES; i++) {
		/*  Negative i indexes the ITLB, non-negative the UTLB:  */
		if (i<0) {
			hi = cpu->cd.sh.itlb_hi[i + SH_N_ITLB_ENTRIES];
			lo = cpu->cd.sh.itlb_lo[i + SH_N_ITLB_ENTRIES];
		} else {
			hi = cpu->cd.sh.utlb_hi[i];
			lo = cpu->cd.sh.utlb_lo[i];
		}
		mask = 0xfff00000;

		v = lo & SH4_PTEL_V;

		/*  Page-size field selects the VPN comparison mask
		    (default above covers the 1 MB case):  */
		switch (lo & SH4_PTEL_SZ_MASK) {
		case SH4_PTEL_SZ_1K:  mask = 0xfffffc00; break;
		case SH4_PTEL_SZ_4K:  mask = 0xfffff000; break;
		case SH4_PTEL_SZ_64K: mask = 0xffff0000; break;
		/*  case SH4_PTEL_SZ_1M: mask = 0xfff00000; break;  */
		}

		/*  Skip invalid entries and non-matching page numbers:  */
		if (!v || (hi & mask) != (vaddr & mask))
			continue;

		sh = lo & SH4_PTEL_SH;

		/*  Non-shared pages must also match on ASID (when an ASID
		    match is required at all):  */
		if (!sh && require_asid_match) {
			int asid = hi & SH4_PTEH_ASID_MASK;
			if (asid != cur_asid)
				continue;
		}

		/*  Note/TODO: Check for multiple matches is not implemented. */

		break;
	}

	/*  Virtual address not found? Then it's a TLB miss. (An ITLB hit
	    leaves i negative; a UTLB hit leaves i < SH_N_UTLB_ENTRIES.)  */
	if (i == SH_N_UTLB_ENTRIES)
		goto tlb_miss;

	/*  Matching address found! Let's see it is readable/writable, etc:  */
	d = lo & SH4_PTEL_D;
	pr = (lo & SH4_PTEL_PR_MASK) >> SH4_PTEL_PR_SHIFT;

	/*  Physical page frame from PTEL, page offset from vaddr:  */
	*return_paddr = (vaddr & ~mask) | (lo & mask & 0x1fffffff);

	if (flags & FLAG_INSTR) {
		/*
		 *  Instruction access:
		 *
		 *  If a matching entry wasn't found in the ITLB, but in the
		 *  UTLB, then copy it to a random place in the ITLB.
		 */
		if (i >= 0) {
			int r = random() % SH_N_ITLB_ENTRIES;
			cpu->cd.sh.itlb_hi[r] = cpu->cd.sh.utlb_hi[i];
			cpu->cd.sh.itlb_lo[r] = cpu->cd.sh.utlb_lo[i];
		}

		/*  Permission checks: privileged mode may always execute;
		    user mode needs PR bit 1 set:  */
		if (cpu->cd.sh.sr & SH_SR_MD)
			return 1;
		if (!(pr & 2))
			goto protection_violation;

		return 1;
	}

	/*  Data access:  */
	if (cpu->cd.sh.sr & SH_SR_MD) {
		/*  Kernel access: PR 0/2 are read-only (write causes a
		    protection violation); PR 1/3 are writable, but a first
		    write to a clean page raises the initial-write (TLB
		    modification) exception:  */
		switch (pr) {
		case 0:
		case 2:	if (wf)
				goto protection_violation;
			return 1;
		case 1:
		case 3:	if (wf && !d)
				goto initial_write_exception;
			return 1;
		}
	}

	/*  User access: PR 0/1 are kernel-only pages; PR 2 is user
	    read-only; PR 3 is user read/write (with initial-write check):  */
	switch (pr) {
	case 0:
	case 1:	goto protection_violation;
	case 2:	if (wf)
			goto protection_violation;
		return 1;
	case 3:	if (wf && !d)
			goto initial_write_exception;
		return 1;
	}

tlb_miss:
	expevt = wf? EXPEVT_TLB_MISS_ST : EXPEVT_TLB_MISS_LD;
	goto exception;

protection_violation:
	expevt = wf? EXPEVT_TLB_PROT_ST : EXPEVT_TLB_PROT_LD;
	goto exception;

initial_write_exception:
	expevt = EXPEVT_TLB_MOD;

exception:
	if (flags & FLAG_NOEXCEPTIONS) {
		/*  NOTE(review): a failed FLAG_NOEXCEPTIONS probe returns 2
		    with *return_paddr forced to 0 — confirm that callers
		    using this flag check the paddr rather than the return
		    value.  */
		*return_paddr = 0;
		return 2;
	}

	sh_exception(cpu, expevt, 0, vaddr);

	return 0;
}
/* |
225 |
|
* sh_translate_v2p(): |
226 |
|
* |
227 |
|
* Return values: |
228 |
|
* |
229 |
|
* 0 No access to the virtual address. |
230 |
|
* 1 return_paddr contains the physical address, the page is |
231 |
|
* available as read-only. |
232 |
|
* 2 Same as 1, but the page is available as read/write. |
233 |
|
*/ |
234 |
|
int sh_translate_v2p(struct cpu *cpu, uint64_t vaddr, uint64_t *return_paddr, |
235 |
|
int flags) |
236 |
|
{ |
237 |
|
int user = cpu->cd.sh.sr & SH_SR_MD? 0 : 1; |
238 |
|
|
239 |
|
vaddr = (uint32_t)vaddr; |
240 |
|
|
241 |
|
/* U0/P0: Userspace addresses, or P3: Kernel virtual memory. */ |
242 |
|
if (!(vaddr & 0x80000000) || |
243 |
|
(vaddr >= 0xc0000000 && vaddr < 0xe0000000)) { |
244 |
|
/* Address translation turned off? */ |
245 |
|
if (!(cpu->cd.sh.mmucr & SH4_MMUCR_AT)) { |
246 |
|
/* Then return raw physical address: */ |
247 |
|
*return_paddr = vaddr & 0x1fffffff; |
248 |
|
return 2; |
249 |
|
} |
250 |
|
|
251 |
|
/* Perform translation via the MMU: */ |
252 |
|
return translate_via_mmu(cpu, vaddr, return_paddr, flags); |
253 |
|
} |
254 |
|
|
255 |
|
/* Store queue region: */ |
256 |
|
if (vaddr >= 0xe0000000 && vaddr < 0xe4000000) { |
257 |
|
/* Note/TODO: Take SH4_MMUCR_SQMD into account. */ |
258 |
|
*return_paddr = vaddr; |
259 |
|
return 2; |
260 |
|
} |
261 |
|
|
262 |
|
if (user) { |
263 |
|
if (flags & FLAG_NOEXCEPTIONS) { |
264 |
|
*return_paddr = 0; |
265 |
|
return 2; |
266 |
|
} |
267 |
|
|
268 |
|
fatal("Userspace tried to access non-user space memory." |
269 |
|
" TODO: cause exception! (vaddr=0x%08"PRIx32"\n", |
270 |
|
(uint32_t) vaddr); |
271 |
|
exit(1); |
272 |
|
} |
273 |
|
|
274 |
|
/* P1,P2: Direct-mapped physical memory. */ |
275 |
|
if (vaddr >= 0x80000000 && vaddr < 0xc0000000) { |
276 |
|
*return_paddr = vaddr & 0x1fffffff; |
277 |
|
return 2; |
278 |
|
} |
279 |
|
|
280 |
|
if (flags & FLAG_INSTR) { |
281 |
|
fatal("TODO: instr at 0x%08"PRIx32"\n", (uint32_t)vaddr); |
282 |
|
exit(1); |
283 |
|
} |
284 |
|
|
285 |
|
/* P4: Special registers mapped at 0xf0000000 .. 0xffffffff: */ |
286 |
|
if ((vaddr & 0xf0000000) == 0xf0000000) { |
287 |
|
*return_paddr = vaddr; |
288 |
|
return 2; |
289 |
|
} |
290 |
|
|
291 |
|
if (flags & FLAG_NOEXCEPTIONS) { |
292 |
|
*return_paddr = 0; |
293 |
|
return 2; |
294 |
|
} |
295 |
|
|
296 |
|
/* TODO */ |
297 |
|
fatal("Unimplemented SH vaddr 0x%08"PRIx32"\n", (uint32_t)vaddr); |
298 |
|
exit(1); |
299 |
} |
} |
300 |
|
|