/*
 *  Copyright (C) 2003-2006  Anders Gavare.  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *  [...]
 *
 *  SUCH DAMAGE.
 *
 *
 *  $Id: memory_rw.c,v 1.97 2006/09/07 11:44:01 debug Exp $
 *
 *  Generic memory_rw(), with special hacks for specific CPU families.
 *
 *  [...]
 *			a placeholder for data when reading from memory
 *	len		the length of the 'data' buffer
 *	writeflag	set to MEM_READ or MEM_WRITE
 *	misc_flags	CACHE_{NONE,DATA,INSTRUCTION} | other flags
 *
 *  If the address indicates access to a memory mapped device, that device's
 *  read/write access function is called.
 *
 *  This function should not be called with cpu == NULL.
 *
 *  Returns one of the following:
 *  [...]
 *  (MEMORY_ACCESS_FAILED is 0.)
 */
int MEMORY_RW(struct cpu *cpu, struct memory *mem, uint64_t vaddr,
	unsigned char *data, size_t len, int writeflag, int misc_flags)
{
#ifdef MEM_ALPHA
	const int offset_mask = 0x1fff;
	[...]
	uint64_t paddr;
	int cache, no_exceptions, offset;
	unsigned char *memblock;
	int dyntrans_device_danger = 0;

	no_exceptions = misc_flags & NO_EXCEPTIONS;
	cache = misc_flags & CACHE_FLAGS_MASK;
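
	/*
	 *  Note: misc_flags packs several things into one int: the low bits
	 *  select which cache (if any) the access goes through, and bits
	 *  such as NO_EXCEPTIONS and PHYSICAL modify how the access is
	 *  performed.  (This summary assumes the CACHE_* and flag
	 *  definitions in memory.h.)
	 */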

#ifdef MEM_X86
	/*  Real-mode wrap-around:  */
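	/*
	 *  (For instance, a 2-byte access at offset 0xffff of a real-mode
	 *  segment should touch offsets 0xffff and 0x0000 of that segment.
	 *  Splitting the access into single bytes lets each recursive call
	 *  translate its own, possibly wrapped, address.)
	 */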
	if (REAL_MODE && !(misc_flags & PHYSICAL)) {
		if ((vaddr & 0xffff) + len > 0x10000) {
			/*  Do one byte at a time:  */
			int res = 0;
			size_t i;
			for (i=0; i<len; i++)
				res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
				    writeflag, misc_flags);
			return res;
		}
	}

	/*  Crossing a page boundary? Then do one byte at a time:  */
	if ((vaddr & 0xfff) + len > 0x1000 && !(misc_flags & PHYSICAL)
	    && cpu->cd.x86.cr[0] & X86_CR0_PG) {
		/*
		 *  For WRITES: Read ALL BYTES FIRST and write them back!!!
		 *  Then do a write of all the new bytes. This is to make sure
		 *  that both pages around the boundary are writable so that
		 *  there is no "partial write" performed.
		 */
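		/*
		 *  (In other words, the new data is only written once both
		 *  pages have proven writable, so a fault on the second page
		 *  cannot leave a half-completed write behind.)
		 */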
		int res = 0;
		size_t i;
		if (writeflag == MEM_WRITE) {
			unsigned char tmp;
			for (i=0; i<len; i++) {
				res = MEMORY_RW(cpu, mem, vaddr+i, &tmp, 1,
				    MEM_READ, misc_flags);
				if (!res)
					return 0;
				res = MEMORY_RW(cpu, mem, vaddr+i, &tmp, 1,
				    MEM_WRITE, misc_flags);
				if (!res)
					return 0;
			}
			for (i=0; i<len; i++) {
				res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
				    MEM_WRITE, misc_flags);
				if (!res)
					return 0;
			}
		} else {
			for (i=0; i<len; i++) {
				/*  Do one byte at a time:  */
				res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
				    writeflag, misc_flags);
				if (!res) {
					if (cache == CACHE_INSTRUCTION) {
						fatal("FAILED instruction "
						[...]
	}
#endif	/*  X86  */


#ifdef MEM_USERLAND
#ifdef MEM_ALPHA
	[...]
#else
	paddr = vaddr & 0x7fffffff;
#endif
#else	/*  !MEM_USERLAND  */
	if (misc_flags & PHYSICAL || cpu->translate_v2p == NULL) {
		paddr = vaddr;
	} else {
		ok = cpu->translate_v2p(cpu, vaddr, &paddr,
		    (writeflag? FLAG_WRITEFLAG : 0) +
		    (no_exceptions? FLAG_NOEXCEPTIONS : 0)
#ifdef MEM_X86
		    + (misc_flags & NO_SEGMENTATION)
#endif
#ifdef MEM_ARM
		    + (misc_flags & MEMORY_USER_ACCESS)
#endif
		    + (cache==CACHE_INSTRUCTION? FLAG_INSTR : 0));

		/*
		 *  If the translation caused an exception, or was invalid in
		 *  some way, then simply return without doing the memory
		 *  access:
		 */
		if (!ok)
			return MEMORY_ACCESS_FAILED;
	}
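
	/*
	 *  (On failure, translate_v2p() is assumed to have raised the
	 *  appropriate exception itself, unless FLAG_NOEXCEPTIONS was
	 *  given; here the access is simply aborted.)
	 */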

#ifdef MEM_X86
	/*  DOS debugging :-)  */
	if (!quiet_mode && !(misc_flags & PHYSICAL)) {
		if (paddr >= 0x400 && paddr <= 0x4ff)
			debug("{ PC BIOS DATA AREA: %s 0x%x }\n", writeflag ==
			    MEM_WRITE? "writing to" : "reading from",
			[...]
#endif
	}
#endif
#endif	/*  !MEM_USERLAND  */
|

#ifndef MEM_USERLAND
	/*
	 *  Memory mapped device?
	 *
	 *  TODO: if paddr < base, but len enough, then the device should
	 *  still be written to!
	 */
	if (paddr >= mem->mmap_dev_minaddr && paddr < mem->mmap_dev_maxaddr) {
		uint64_t orig_paddr = paddr;
		int i, start, end, res;

		/*
		 *  Really really slow, but unfortunately necessary. This is
		 *  [...]
		 *     b) offsets 0x124..0x777 are a device
		 *
		 *	1) a read is done from offset 0x100. the page is
		 *	   added to the dyntrans system as a "RAM" page
		 *	2) a dyntranslated read is done from offset 0x200,
		 *	   which should access the device, but since the
		 *	   entire page is added, it will access non-existent
		 *	   RAM instead, without warning.
		 *
		 *  Setting dyntrans_device_danger = 1 on accesses which are
		 *  on _any_ offset on pages that are device mapped avoids
		 *  this problem, but it is probably not very fast.
		 *
		 *  TODO: Convert this into a quick (multi-level, 64-bit)
		 *  address space lookup, to find dangerous pages.
		 */
#if 1
		for (i=0; i<mem->n_mmapped_devices; i++)
			if (paddr >= (mem->devices[i].baseaddr & ~offset_mask)&&
			    paddr <= ((mem->devices[i].endaddr-1)|offset_mask)){
				dyntrans_device_danger = 1;
				break;
			}
#endif
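
		/*
		 *  (Example of the page-granular test above: assuming 4 KB
		 *  dyntrans pages, i.e. offset_mask = 0xfff, a device at
		 *  0x10000100..0x100001ff makes every access in the range
		 *  0x10000000..0x10000fff "dangerous", since dyntrans pages
		 *  are always inserted whole.)
		 */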

		start = 0; end = mem->n_mmapped_devices - 1;
		i = mem->last_accessed_device;

		/*  Scan through all devices:  */
		do {
			if (paddr >= mem->devices[i].baseaddr &&
			    paddr < mem->devices[i].endaddr) {
				/*  Found a device, let's access it:  */
				mem->last_accessed_device = i;

				paddr -= mem->devices[i].baseaddr;
				if (paddr + len > mem->devices[i].length)
					len = mem->devices[i].length - paddr;

				if (cpu->update_translation_table != NULL &&
				    !(ok & MEMORY_NOT_FULL_PAGE) &&
				    mem->devices[i].flags & DM_DYNTRANS_OK) {
					int wf = writeflag == MEM_WRITE? 1 : 0;
					unsigned char *host_addr;

					if (!(mem->devices[i].flags &
					    DM_DYNTRANS_WRITE_OK))
						wf = 0;

					if (writeflag && wf) {
						if (paddr < mem->devices[i].
						    dyntrans_write_low)
							mem->devices[i].
							    dyntrans_write_low =
							    paddr &~offset_mask;
						if (paddr >= mem->devices[i].
						    dyntrans_write_high)
							mem->devices[i].
							    dyntrans_write_high =
							    paddr | offset_mask;
					}
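
					/*
					 *  (dyntrans_write_low/_high track
					 *  the low/high bounds of pages that
					 *  have been written, so that e.g. a
					 *  framebuffer device only needs to
					 *  redraw the parts that may have
					 *  actually changed.)
					 */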

					if (mem->devices[i].flags &
					    DM_EMULATED_RAM) {
						/*  MEM_WRITE to force the page
						    to be allocated, if it
						    wasn't already  */
						uint64_t *pp = (uint64_t *)mem->
						    devices[i].dyntrans_data;
						uint64_t p = orig_paddr - *pp;
						host_addr =
						    memory_paddr_to_hostaddr(
						    mem, p & ~offset_mask,
						    MEM_WRITE);
					} else {
						host_addr = mem->devices[i].
						    dyntrans_data +
						    (paddr & ~offset_mask);
					}

					cpu->update_translation_table(cpu,
					    vaddr & ~offset_mask, host_addr,
					    wf, orig_paddr & ~offset_mask);
				}
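
				/*
				 *  (Once this translation is installed,
				 *  subsequent accesses to the page can be
				 *  done directly on host memory by the
				 *  dyntrans core, without coming back
				 *  through memory_rw().)
				 */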

				res = 0;
				if (!no_exceptions || (mem->devices[i].flags &
				    DM_READS_HAVE_NO_SIDE_EFFECTS))
					res = mem->devices[i].f(cpu, mem, paddr,
					    data, len, writeflag,
					    mem->devices[i].extra);

				if (res == 0)
					res = -1;

#ifndef MEM_X86
				/*
				 *  If accessing the memory mapped device
				[...]
					debug("%s device '%s' addr %08lx "
					    "failed\n", writeflag?
					    "writing to" : "reading from",
					    mem->devices[i].name, (long)paddr);
#ifdef MEM_MIPS
					mips_cpu_exception(cpu, EXCEPTION_DBE,
					    0, vaddr, 0, 0, 0, 0);
				[...]
				goto do_return_ok;
			}

			if (paddr < mem->devices[i].baseaddr)
				end = i - 1;
			if (paddr >= mem->devices[i].endaddr)
				start = i + 1;
			i = (start + end) >> 1;
		} while (start <= end);
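
		/*
		 *  (The loop above is a binary search, which assumes that
		 *  the devices array is sorted by base address; the first
		 *  probe simply starts at last_accessed_device instead of
		 *  the middle, so repeated accesses to the same device hit
		 *  immediately.)
		 */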
	}

	[...]
	switch (cpu->cd.mips.cpu_type.mmu_model) {
	case MMU3K:
		/*  if not uncached address  (TODO: generalize this)  */
		if (!(misc_flags & PHYSICAL) && cache != CACHE_NONE &&
		    !((vaddr & 0xffffffffULL) >= 0xa0000000ULL &&
		    (vaddr & 0xffffffffULL) <= 0xbfffffffULL)) {
			if (memory_cache_R3000(cpu, cache, paddr,
			[...]
#endif	/*  MIPS  */
	{
		if (paddr >= mem->physical_max) {
			uint64_t offset, old_pc = cpu->pc;
			char *symbol;

			/*  This allows for example OS kernels to probe
			    memory a few KBs past the end of memory,
			[...]
				fatal(" <%s> ]\n",
				    symbol? symbol : " no symbol ");
			}
		}

		if (writeflag == MEM_READ) {
		[...]

	/*
	 *  Uncached access:
	 *
	 *  1)  Translate the physical address to a host address.
	 *
	 *  2)  Insert this virtual->physical->host translation into the
	 *      fast translation arrays (using update_translation_table()).
	 *
	 *  3)  If this was a Write, then invalidate any code translations
	 *      in that page.
	 */
	memblock = memory_paddr_to_hostaddr(mem, paddr & ~offset_mask,
	    writeflag);
	if (memblock == NULL) {
		if (writeflag == MEM_READ)
			memset(data, 0, len);
		goto do_return_ok;
	}
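
	/*
	 *  (A NULL memblock means that nothing at all is mapped at this
	 *  physical address: reads return zero-filled data, and writes are
	 *  silently dropped.)
	 */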

	offset = paddr & offset_mask;

	if (cpu->update_translation_table != NULL && !dyntrans_device_danger
#ifdef MEM_MIPS
	    /*  Ugly hack for R2000/R3000 caches:  */
	    && (cpu->cd.mips.cpu_type.mmu_model != MMU3K ||
	    !(cpu->cd.mips.coproc[0]->reg[COP0_STATUS] & MIPS1_ISOL_CACHES))
#endif
#ifndef MEM_MIPS
	    /*  && !(misc_flags & MEMORY_USER_ACCESS)  */
#ifndef MEM_USERLAND
	    && !(ok & MEMORY_NOT_FULL_PAGE)
#endif
#endif
	    && !no_exceptions)
		cpu->update_translation_table(cpu, vaddr & ~offset_mask,
		    memblock, (misc_flags & MEMORY_USER_ACCESS) |
#if !defined(MEM_MIPS) && !defined(MEM_USERLAND)
		    (cache == CACHE_INSTRUCTION?
		    (writeflag == MEM_WRITE? 1 : 0) : ok - 1),
#else
		    (writeflag == MEM_WRITE? 1 : 0),
#endif
		    paddr & ~offset_mask);
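
	/*
	 *  (The flag argument built above marks the translation as writable
	 *  when this access itself is a write, or, on non-MIPS non-userland
	 *  targets, when the MMU said the page is writable: translate_v2p()
	 *  is assumed to return 1 for read-only pages and 2 for writable
	 *  ones, hence "ok - 1".)
	 */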

	/*
	 *  If writing, then invalidate code translations for the (physical)
	 *  page address:
	 */
	if (writeflag == MEM_WRITE && cpu->invalidate_code_translation != NULL)
		cpu->invalidate_code_translation(cpu, paddr, INVALIDATE_PADDR);

	if ((paddr&((1<<BITS_PER_MEMBLOCK)-1)) + len > (1<<BITS_PER_MEMBLOCK)) {
		printf("Write over memblock boundary?\n");
		exit(1);
	}
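
	/*
	 *  (Sanity check: the direct loads/stores below must stay within
	 *  the single host memory block backing this physical address;
	 *  anything else indicates a bug, hence the hard exit.)
	 */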

	if (writeflag == MEM_WRITE) {
		/*  Ugly optimization, but it works:  */
		if (len == sizeof(uint32_t) && (offset & 3)==0
		[...]
			*(uint8_t *)data = *(uint8_t *)(memblock + offset);
		else
			memcpy(data, memblock + offset, len);
	}
