/[gxemul]/upstream/0.4.5/src/cpus/memory_sh.c

Revision 39 - Mon Oct 8 16:22:02 2007 UTC by dpavlin
File MIME type: text/plain
File size: 8353 byte(s)
0.4.5
/*
 * Copyright (C) 2006-2007 Anders Gavare. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * $Id: memory_sh.c,v 1.18 2007/04/13 07:06:31 debug Exp $
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "cpu.h"
#include "machine.h"
#include "memory.h"
#include "misc.h"

#include "sh4_exception.h"
#include "sh4_mmu.h"


/*
 * translate_via_mmu():
 *
 * Scan the UTLB for a matching virtual address. If a match is found, check
 * permission bits etc. If everything is OK, return the physical page
 * address; otherwise cause an exception.
 *
 * The implementation should (hopefully) be quite complete, except for the
 * lack of "Multiple matching entries" detection. (On a real CPU, these
 * would cause exceptions.)
 *
 * Same return values as sh_translate_v2p().
 */
static int translate_via_mmu(struct cpu *cpu, uint32_t vaddr,
        uint64_t *return_paddr, int flags)
{
        int wf = flags & FLAG_WRITEFLAG;
        int i, urb, urc, require_asid_match, cur_asid, expevt = 0;
        uint32_t hi, lo = 0, mask = 0;
        int sh;         /* Shared */
        int d;          /* Dirty bit */
        int v;          /* Valid bit */
        int pr;         /* Protection */
        int i_start;

        cur_asid = cpu->cd.sh.pteh & SH4_PTEH_ASID_MASK;
        require_asid_match = !(cpu->cd.sh.mmucr & SH4_MMUCR_SV)
            || !(cpu->cd.sh.sr & SH_SR_MD);
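        /*
         * ASIDs only need to match when the MMU is not in single virtual
         * memory mode (MMUCR.SV cleared) or when the CPU runs in user mode
         * (SR.MD cleared); that is what the expression above encodes.
         */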

        if (!(flags & FLAG_NOEXCEPTIONS)) {
                /*
                 * Increase URC every time the UTLB is accessed. (Note:
                 * According to the SH4 manual, the URC should not be
                 * increased when running the ldtlb instruction. Perhaps this
                 * is a good place? Perhaps it is better to just set it to a
                 * random value? TODO: Find out.)
                 */
                urb = (cpu->cd.sh.mmucr & SH4_MMUCR_URB_MASK) >>
                    SH4_MMUCR_URB_SHIFT;
                urc = (cpu->cd.sh.mmucr & SH4_MMUCR_URC_MASK) >>
                    SH4_MMUCR_URC_SHIFT;

                /* fatal("urc = %i ==> ", urc); */
                urc ++;
                if (urc >= SH_N_UTLB_ENTRIES || (urb > 0 && urc == urb))
                        urc = 0;
                /* fatal("%i\n", urc); */

                cpu->cd.sh.mmucr &= ~SH4_MMUCR_URC_MASK;
                cpu->cd.sh.mmucr |= (urc << SH4_MMUCR_URC_SHIFT);
        }

        /*
         * When doing Instruction lookups, the ITLB should be scanned first.
         * This is done by using negative i. (Ugly hack, but works.)
         */
        if (flags & FLAG_INSTR)
                i_start = -SH_N_ITLB_ENTRIES;
        else
                i_start = 0;

        for (i=i_start; i<SH_N_UTLB_ENTRIES; i++) {
                if (i<0) {
                        hi = cpu->cd.sh.itlb_hi[i + SH_N_ITLB_ENTRIES];
                        lo = cpu->cd.sh.itlb_lo[i + SH_N_ITLB_ENTRIES];
                } else {
                        hi = cpu->cd.sh.utlb_hi[i];
                        lo = cpu->cd.sh.utlb_lo[i];
                }
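                /*
                 * The default mask (0xfff00000) corresponds to 1 MB pages,
                 * i.e. the SZ_1M case in the switch below; the other page
                 * sizes select more virtual-page-number bits.
                 */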
                mask = 0xfff00000;

                v = lo & SH4_PTEL_V;
                if (!v)
                        continue;

                switch (lo & SH4_PTEL_SZ_MASK) {
                case SH4_PTEL_SZ_1K:  mask = 0xfffffc00; break;
                case SH4_PTEL_SZ_4K:  mask = 0xfffff000; break;
                case SH4_PTEL_SZ_64K: mask = 0xffff0000; break;
                /* case SH4_PTEL_SZ_1M: mask = 0xfff00000; break; */
                }

                if ((hi & mask) != (vaddr & mask))
                        continue;

                sh = lo & SH4_PTEL_SH;

                if (!sh && require_asid_match) {
                        int asid = hi & SH4_PTEH_ASID_MASK;
                        if (asid != cur_asid)
                                continue;
                }

                /* Note/TODO: Checking for multiple matches is not implemented. */

                break;
        }

        /* Virtual address not found? Then it's a TLB miss. */
        if (i == SH_N_UTLB_ENTRIES)
                goto tlb_miss;

        /* Matching address found! Let's see whether it is
           readable/writable, etc.: */
        d = lo & SH4_PTEL_D? 1 : 0;
        pr = (lo & SH4_PTEL_PR_MASK) >> SH4_PTEL_PR_SHIFT;
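        /*
         * PR encoding (per the SH-4 manual): 0 = privileged read-only,
         * 1 = privileged read/write, 2 = read-only in both privileged and
         * user mode, 3 = read/write in both modes. The checks further down
         * follow this encoding.
         */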

        *return_paddr = (vaddr & ~mask) | (lo & mask & 0x1fffffff);
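        /*
         * Example with made-up values: for a 4 KB page (mask 0xfffff000),
         * vaddr 0x00401234 and a physical page number of 0x0c002000 in lo
         * would give paddr 0x0c002234; the & 0x1fffffff keeps the result
         * within the 29-bit physical address space.
         */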

        if (flags & FLAG_INSTR) {
                /*
                 * Instruction access:
                 *
                 * If the matching entry was found in the UTLB rather than
                 * in the ITLB, copy it to a random place in the ITLB.
                 */
                if (i >= 0) {
                        int r = random() % SH_N_ITLB_ENTRIES;

                        /* NOTE: Make sure that the old mapping for
                           that itlb entry is invalidated: */
                        cpu->invalidate_translation_caches(cpu,
                            cpu->cd.sh.itlb_hi[r] & ~0xfff, INVALIDATE_VADDR);

                        cpu->invalidate_code_translation(cpu,
                            cpu->cd.sh.utlb_lo[i] & ~0xfff, INVALIDATE_PADDR);

                        cpu->cd.sh.itlb_hi[r] = cpu->cd.sh.utlb_hi[i];
                        cpu->cd.sh.itlb_lo[r] = cpu->cd.sh.utlb_lo[i];
                }

                /* Permission checks: */
                if (cpu->cd.sh.sr & SH_SR_MD)
                        return 1;
                if (!(pr & 2))
                        goto protection_violation;

                return 1;
        }

        /* Data access: */
        if (cpu->cd.sh.sr & SH_SR_MD) {
                /* Kernel access: */
                switch (pr) {
                case 0:
                case 2:
                        if (wf)
                                goto protection_violation;
                        return 1;
                case 1:
                case 3:
                        if (wf && !d)
                                goto initial_write_exception;
                        return 1 + d;
                }
        }
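        /*
         * Note on "1 + d": a dirty page (D=1) is returned as read/write (2)
         * and a clean page as read-only (1), both here and in the user case
         * below, so the first store to a clean page re-enters the MMU and
         * takes the initial page write exception.
         */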

        /* User access: */
        switch (pr) {
        case 0:
        case 1:
                goto protection_violation;
        case 2:
                if (wf)
                        goto protection_violation;
                return 1;
        case 3:
                if (wf && !d)
                        goto initial_write_exception;
                return 1 + d;
        }


tlb_miss:
        expevt = wf? EXPEVT_TLB_MISS_ST : EXPEVT_TLB_MISS_LD;
        goto exception;

protection_violation:
        expevt = wf? EXPEVT_TLB_PROT_ST : EXPEVT_TLB_PROT_LD;
        goto exception;

initial_write_exception:
        expevt = EXPEVT_TLB_MOD;
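        /* Fall through to the common exception code below. */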


exception:
        if (flags & FLAG_NOEXCEPTIONS) {
                *return_paddr = 0;
                return 2;
        }

        sh_exception(cpu, expevt, 0, vaddr);

        return 0;
}


/*
 * sh_translate_v2p():
 *
 * Return values:
 *
 *      0  No access to the virtual address.
 *      1  return_paddr contains the physical address; the page is
 *         available as read-only.
 *      2  Same as 1, but the page is available as read/write.
 */
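/*
 * Rough SH-4 address map, as assumed by the region checks below:
 * U0/P0 (0x00000000-0x7fffffff) and P3 (0xc0000000-0xdfffffff) go through
 * the MMU when address translation is enabled (MMUCR.AT); P1
 * (0x80000000-0x9fffffff) and P2 (0xa0000000-0xbfffffff) are direct-mapped
 * to physical memory; 0xe0000000-0xe3ffffff is the store queue region; the
 * rest of P4 holds on-chip control registers.
 */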
int sh_translate_v2p(struct cpu *cpu, uint64_t vaddr, uint64_t *return_paddr,
        int flags)
{
        int user = cpu->cd.sh.sr & SH_SR_MD? 0 : 1;

        vaddr = (uint32_t)vaddr;

        /* U0/P0: Userspace addresses, or P3: Kernel virtual memory. */
        if (!(vaddr & 0x80000000) ||
            (vaddr >= 0xc0000000 && vaddr < 0xe0000000)) {
                /* Address translation turned off? */
                if (!(cpu->cd.sh.mmucr & SH4_MMUCR_AT)) {
                        /* Then return raw physical address: */
                        *return_paddr = vaddr & 0x1fffffff;
                        return 2;
                }

                /* Perform translation via the MMU: */
                return translate_via_mmu(cpu, vaddr, return_paddr, flags);
        }

        /* Store queue region: */
        if (vaddr >= 0xe0000000 && vaddr < 0xe4000000) {
                /* Note/TODO: Take SH4_MMUCR_SQMD into account. */
                *return_paddr = vaddr;
                return 2;
        }

        if (user) {
                if (flags & FLAG_NOEXCEPTIONS) {
                        *return_paddr = 0;
                        return 2;
                }

                fatal("Userspace tried to access non-user space memory."
                    " TODO: cause exception! (vaddr=0x%08"PRIx32")\n",
                    (uint32_t) vaddr);
                exit(1);
        }

        /* P1,P2: Direct-mapped physical memory. */
        if (vaddr >= 0x80000000 && vaddr < 0xc0000000) {
                *return_paddr = vaddr & 0x1fffffff;
                return 2;
        }

        if (flags & FLAG_INSTR) {
                fatal("TODO: instr at 0x%08"PRIx32"\n", (uint32_t)vaddr);
                exit(1);
        }

        /* P4: Special registers mapped at 0xf0000000 .. 0xffffffff: */
        if ((vaddr & 0xf0000000) == 0xf0000000) {
                *return_paddr = vaddr;
                return 2;
        }

        if (flags & FLAG_NOEXCEPTIONS) {
                *return_paddr = 0;
                return 2;
        }

        /* TODO */
        fatal("Unimplemented SH vaddr 0x%08"PRIx32"\n", (uint32_t)vaddr);
        exit(1);

        return 0;
}

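/*
 * Usage sketch (illustrative only, not part of the emulator): a caller
 * would typically probe a translation roughly like this, using the flag
 * values tested above:
 *
 *      uint64_t paddr;
 *      int res = sh_translate_v2p(cpu, vaddr, &paddr,
 *          writing? FLAG_WRITEFLAG : 0);
 *
 * res == 0 means an exception has already been raised, res == 1 means the
 * page is mapped read-only, and res == 2 means it is mapped read/write,
 * with paddr holding the translated address. Adding FLAG_NOEXCEPTIONS
 * turns failures into "return 2 with paddr = 0" instead of raising
 * exceptions.
 */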
