
Contents of /upstream/0.3.5/src/memory_rw.c



Revision 13 - Mon Oct 8 16:18:43 2007 UTC by dpavlin
File MIME type: text/plain
File size: 16503 byte(s)
0.3.5
/*
 *  Copyright (C) 2003-2005  Anders Gavare.  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. The name of the author may not be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 *  ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 *  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 *  OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 *  HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 *  OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 *  SUCH DAMAGE.
 *
 *
 *  $Id: memory_rw.c,v 1.57 2005/08/12 21:57:02 debug Exp $
 *
 *  Generic memory_rw(), with special hacks for specific CPU families.
 *
 *  Example for inclusion from memory_mips.c:
 *
 *      MEMORY_RW should be mips_memory_rw
 *      MEM_MIPS should be defined
 */

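/*
 *  A minimal sketch (assumed for illustration, not taken from the actual
 *  wrapper file) of how such an inclusion would look; memory_mips.c in this
 *  tree may differ in detail:
 *
 *      #define MEMORY_RW mips_memory_rw
 *      #define MEM_MIPS
 *      #include "memory_rw.c"
 *      #undef MEM_MIPS
 *      #undef MEMORY_RW
 */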

/*
 *  memory_rw():
 *
 *  Read or write data from/to memory.
 *
 *      cpu          the cpu doing the read/write
 *      mem          the memory object to use
 *      vaddr        the virtual address
 *      data         a pointer to the data to be written to memory, or
 *                   a placeholder for data when reading from memory
 *      len          the length of the 'data' buffer
 *      writeflag    set to MEM_READ or MEM_WRITE
 *      cache_flags  CACHE_{NONE,DATA,INSTRUCTION} | other flags
 *
 *  If the address indicates access to a memory mapped device, that device's
 *  read/write access function is called.
 *
 *  If instruction latency/delay support is enabled, then
 *  cpu->instruction_delay is increased by the number of instructions to
 *  delay execution.
 *
 *  This function should not be called with cpu == NULL.
 *
 *  Returns one of the following:
 *      MEMORY_ACCESS_FAILED
 *      MEMORY_ACCESS_OK
 *
 *  (MEMORY_ACCESS_FAILED is 0.)
 */
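/*
 *  A hedged usage sketch (caller code assumed for illustration, not part of
 *  the original file): reading one aligned 32-bit word through the MIPS
 *  instantiation. 'cpu', 'mem' and 'vaddr' are placeholders.
 *
 *      unsigned char buf[sizeof(uint32_t)];
 *      if (mips_memory_rw(cpu, mem, vaddr, buf, sizeof(buf),
 *          MEM_READ, CACHE_DATA) == MEMORY_ACCESS_FAILED)
 *          fatal("read at vaddr 0x%llx failed\n", (long long)vaddr);
 */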
int MEMORY_RW(struct cpu *cpu, struct memory *mem, uint64_t vaddr,
    unsigned char *data, size_t len, int writeflag, int cache_flags)
{
#ifdef MEM_ALPHA
    const int offset_mask = 0x1fff;
#else
    const int offset_mask = 0xfff;
#endif
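    /*  (Added note: the offset mask covers one emulated page; Alpha uses
        8 KB pages, while the other modes here assume 4 KB pages.)  */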

#ifndef MEM_USERLAND
    int ok = 1;
#endif
    uint64_t paddr;
    int cache, no_exceptions, offset;
    unsigned char *memblock;
#ifdef MEM_MIPS
    int bintrans_cached = cpu->machine->bintrans_enable;
#endif
    int bintrans_device_danger = 0;

    no_exceptions = cache_flags & NO_EXCEPTIONS;
    cache = cache_flags & CACHE_FLAGS_MASK;

#ifdef MEM_X86
    /*  Real-mode wrap-around:  */
    if (REAL_MODE && !(cache_flags & PHYSICAL)) {
        if ((vaddr & 0xffff) + len > 0x10000) {
            /*  Do one byte at a time:  */
            int res = 0, i;
            for (i=0; i<len; i++)
                res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
                    writeflag, cache_flags);
            return res;
        }
    }

    /*  Crossing a page boundary? Then do one byte at a time:  */
    if ((vaddr & 0xfff) + len > 0x1000 && !(cache_flags & PHYSICAL)
        && cpu->cd.x86.cr[0] & X86_CR0_PG) {
        /*  For WRITES: Read ALL BYTES FIRST and write them back!!!
            Then do a write of all the new bytes. This is to make sure
            that both pages around the boundary are writable, so we
            don't do a partial write.  */
        int res = 0, i;
        if (writeflag == MEM_WRITE) {
            unsigned char tmp;
            for (i=0; i<len; i++) {
                res = MEMORY_RW(cpu, mem, vaddr+i, &tmp, 1,
                    MEM_READ, cache_flags);
                if (!res)
                    return 0;
                res = MEMORY_RW(cpu, mem, vaddr+i, &tmp, 1,
                    MEM_WRITE, cache_flags);
                if (!res)
                    return 0;
            }
            for (i=0; i<len; i++) {
                res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
                    MEM_WRITE, cache_flags);
                if (!res)
                    return 0;
            }
        } else {
            for (i=0; i<len; i++) {
                /*  Do one byte at a time:  */
                res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
                    writeflag, cache_flags);
                if (!res) {
                    if (cache == CACHE_INSTRUCTION) {
                        fatal("FAILED instruction fetch across "
                            "page boundary: todo. vaddr=0x%08x\n",
                            (int)vaddr);
                        cpu->running = 0;
                    }
                    return 0;
                }
            }
        }
        return res;
    }
#endif  /*  X86  */
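    /*
     *  Worked example for the wrap-around check above (illustrative
     *  numbers, not original commentary): with vaddr = 0xffff and len = 2
     *  in real mode, (vaddr & 0xffff) + len = 0x10001 > 0x10000, so the
     *  access is split into single-byte recursive calls, and each byte's
     *  effective address can then be translated (and wrap within the
     *  64 KB segment) separately.
     */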

#ifdef MEM_MIPS
    if (bintrans_cached) {
        if (cache == CACHE_INSTRUCTION) {
            cpu->cd.mips.pc_bintrans_host_4kpage = NULL;
            cpu->cd.mips.pc_bintrans_paddr_valid = 0;
        }
    }
#endif  /*  MEM_MIPS  */

#ifdef MEM_USERLAND
#ifdef MEM_ALPHA
    paddr = vaddr;
#else
    paddr = vaddr & 0x7fffffff;
#endif
    goto have_paddr;
#endif

#ifndef MEM_USERLAND
#ifdef MEM_MIPS
    /*
     *  For instruction fetch, are we on the same page as the last
     *  instruction we fetched?
     *
     *  NOTE: There's no need to check this stuff here if this address
     *  is known to be in host ram, as it's done at instruction fetch
     *  time in cpu.c!  Only check if _host_4k_page == NULL.
     */
    if (cache == CACHE_INSTRUCTION &&
        cpu->cd.mips.pc_last_host_4k_page == NULL &&
        (vaddr & ~0xfff) == cpu->cd.mips.pc_last_virtual_page) {
        paddr = cpu->cd.mips.pc_last_physical_page | (vaddr & 0xfff);
        goto have_paddr;
    }
#endif  /*  MEM_MIPS  */

    if (cache_flags & PHYSICAL || cpu->translate_address == NULL) {
        paddr = vaddr;

#ifdef MEM_ALPHA
        /*  paddr &= 0x1fffffff;  For testalpha  */
        paddr &= 0x000003ffffffffffULL;
#endif

#ifdef MEM_ARM
        paddr &= 0x3fffffff;
#endif

#ifdef MEM_IA64
        /*  For testia64  */
        paddr &= 0x3fffffff;
#endif

#ifdef MEM_PPC
        if (cpu->cd.ppc.bits == 32)
            paddr &= 0xffffffff;
#endif

    } else {
        ok = cpu->translate_address(cpu, vaddr, &paddr,
            (writeflag? FLAG_WRITEFLAG : 0) +
            (no_exceptions? FLAG_NOEXCEPTIONS : 0)
#ifdef MEM_X86
            + (cache_flags & NO_SEGMENTATION)
#endif
            + (cache==CACHE_INSTRUCTION? FLAG_INSTR : 0));
        /*  If the translation caused an exception, or was invalid in
            some way, we simply return without doing the memory
            access:  */
        if (!ok)
            return MEMORY_ACCESS_FAILED;
    }


#ifdef MEM_X86
    /*  DOS debugging :-)  */
    if (!quiet_mode && !(cache_flags & PHYSICAL)) {
        if (paddr >= 0x400 && paddr <= 0x4ff)
            debug("{ PC BIOS DATA AREA: %s 0x%x }\n", writeflag ==
                MEM_WRITE? "writing to" : "reading from",
                (int)paddr);
#if 0
        if (paddr >= 0xf0000 && paddr <= 0xfffff)
            debug("{ BIOS ACCESS: %s 0x%x }\n",
                writeflag == MEM_WRITE? "writing to" :
                "reading from", (int)paddr);
#endif
    }
#endif

#ifdef MEM_MIPS
    /*
     *  If correct cache emulation is enabled, and we need to simulate
     *  cache misses even from the instruction cache, we can't run directly
     *  from a host page. :-/
     */
#if defined(ENABLE_CACHE_EMULATION) && defined(ENABLE_INSTRUCTION_DELAYS)
#else
    if (cache == CACHE_INSTRUCTION) {
        cpu->cd.mips.pc_last_virtual_page = vaddr & ~0xfff;
        cpu->cd.mips.pc_last_physical_page = paddr & ~0xfff;
        cpu->cd.mips.pc_last_host_4k_page = NULL;

        /*  _last_host_4k_page will be set to 1 further down,
            if the page is actually in host ram  */
    }
#endif
#endif  /*  MEM_MIPS  */
#endif  /*  ifndef MEM_USERLAND  */


#if defined(MEM_MIPS) || defined(MEM_USERLAND)
have_paddr:
#endif


#ifdef MEM_MIPS
    /*  TODO: How about bintrans vs cache emulation?  */
    if (bintrans_cached) {
        if (cache == CACHE_INSTRUCTION) {
            cpu->cd.mips.pc_bintrans_paddr_valid = 1;
            cpu->cd.mips.pc_bintrans_paddr = paddr;
        }
    }
#endif  /*  MEM_MIPS  */



#ifndef MEM_USERLAND
    /*
     *  Memory mapped device?
     *
     *  TODO: this is utterly slow.
     *  TODO2: if paddr<base, but len enough, then we should write
     *  to a device to
     */
    if (paddr >= mem->mmap_dev_minaddr && paddr < mem->mmap_dev_maxaddr) {
        uint64_t orig_paddr = paddr;
        int i, start, res;

        /*
         *  Really really slow, but unfortunately necessary. This is
         *  to avoid the following scenario:
         *
         *      a) offsets 0x000..0x123 are normal memory
         *      b) offsets 0x124..0x777 are a device
         *
         *      1) a read is done from offset 0x100. the page is
         *         added to the bintrans system as a "RAM" page
         *      2) a bintranslated read is done from offset 0x200,
         *         which should access the device, but since the
         *         entire page is added, it will access non-existent
         *         RAM instead, without warning.
         *
         *  Setting bintrans_device_danger = 1 on accesses which are
         *  on _any_ offset on pages that are device mapped avoids
         *  this problem, but it is probably not very fast.
         */
        for (i=0; i<mem->n_mmapped_devices; i++)
            if (paddr >= (mem->dev_baseaddr[i] & ~offset_mask) &&
                paddr <= ((mem->dev_baseaddr[i] +
                mem->dev_length[i] - 1) | offset_mask)) {
                bintrans_device_danger = 1;
                break;
            }
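        /*
         *  Worked example of the page rounding above, using the scenario
         *  from the comment (illustrative): a device with dev_baseaddr =
         *  0x124 and dev_length = 0x654 gives a danger range from
         *  (0x124 & ~0xfff) = 0x000 through (0x777 | 0xfff) = 0xfff, so
         *  every access to that whole 4 KB page sets
         *  bintrans_device_danger.
         */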

        i = start = mem->last_accessed_device;

        /*  Scan through all devices:  */
        do {
            if (paddr >= mem->dev_baseaddr[i] &&
                paddr < mem->dev_baseaddr[i] + mem->dev_length[i]) {
                /*  Found a device, let's access it:  */
                mem->last_accessed_device = i;

                paddr -= mem->dev_baseaddr[i];
                if (paddr + len > mem->dev_length[i])
                    len = mem->dev_length[i] - paddr;

                if (cpu->update_translation_table != NULL &&
                    mem->dev_flags[i] & MEM_DYNTRANS_OK) {
                    int wf = writeflag == MEM_WRITE? 1 : 0;

                    if (writeflag) {
                        if (paddr < mem->dev_dyntrans_write_low[i])
                            mem->dev_dyntrans_write_low[i] =
                                paddr & ~offset_mask;
                        if (paddr >= mem->dev_dyntrans_write_high[i])
                            mem->dev_dyntrans_write_high[i] =
                                paddr | offset_mask;
                    }

                    if (!(mem->dev_flags[i] & MEM_DYNTRANS_WRITE_OK))
                        wf = 0;

                    cpu->update_translation_table(cpu,
                        vaddr & ~offset_mask,
                        mem->dev_dyntrans_data[i] +
                        (paddr & ~offset_mask),
                        wf, orig_paddr & ~offset_mask);
                }

                res = 0;
                if (!no_exceptions || (mem->dev_flags[i] &
                    MEM_READING_HAS_NO_SIDE_EFFECTS))
                    res = mem->dev_f[i](cpu, mem, paddr,
                        data, len, writeflag, mem->dev_extra[i]);

#ifdef ENABLE_INSTRUCTION_DELAYS
                if (res == 0)
                    res = -1;

                cpu->cd.mips.instruction_delay +=
                    ( (abs(res) - 1) *
                    cpu->cd.mips.cpu_type.instrs_per_cycle );
#endif

#ifndef MEM_X86
                /*
                 *  If accessing the memory mapped device
                 *  failed, then return with a DBE exception.
                 */
                if (res <= 0 && !no_exceptions) {
                    debug("%s device '%s' addr %08lx failed\n",
                        writeflag? "writing to" : "reading from",
                        mem->dev_name[i], (long)paddr);
#ifdef MEM_MIPS
                    mips_cpu_exception(cpu, EXCEPTION_DBE,
                        0, vaddr, 0, 0, 0, 0);
#endif
                    return MEMORY_ACCESS_FAILED;
                }
#endif
                goto do_return_ok;
            }

            i ++;
            if (i == mem->n_mmapped_devices)
                i = 0;
        } while (i != start);
    }
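    /*
     *  Sketch of the device access-function shape implied by the dev_f[]
     *  call above (prototype inferred from the call site; the name and
     *  body are invented for illustration):
     *
     *      int example_dev_access(struct cpu *cpu, struct memory *mem,
     *          uint64_t relative_addr, unsigned char *data, size_t len,
     *          int writeflag, void *extra)
     *      {
     *          if (writeflag == MEM_WRITE) {
     *              ... consume data[0 .. len-1] ...
     *          } else {
     *              memset(data, 0, len);
     *          }
     *          return 1;   (a value > 0 means success; with instruction
     *                       delays enabled, larger values add latency)
     *      }
     */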


#ifdef MEM_MIPS
    /*
     *  Data and instruction cache emulation:
     */

    switch (cpu->cd.mips.cpu_type.mmu_model) {
    case MMU3K:
        /*  if not uncached address  (TODO: generalize this)  */
        if (!(cache_flags & PHYSICAL) && cache != CACHE_NONE &&
            !((vaddr & 0xffffffffULL) >= 0xa0000000ULL &&
              (vaddr & 0xffffffffULL) <= 0xbfffffffULL)) {
            if (memory_cache_R3000(cpu, cache, paddr,
                writeflag, len, data))
                goto do_return_ok;
        }
        break;
    default:
        /*  R4000 etc  */
        /*  TODO  */
        ;
    }
#endif  /*  MEM_MIPS  */


    /*  Outside of physical RAM?  */
    if (paddr >= mem->physical_max) {
#ifdef MEM_MIPS
        if ((paddr & 0xffffc00000ULL) == 0x1fc00000) {
            /*  Ok, this is PROM stuff  */
        } else if ((paddr & 0xfffff00000ULL) == 0x1ff00000) {
            /*  Sprite reads from this area of memory...  */
            /*  TODO: is this still correct?  */
            if (writeflag == MEM_READ)
                memset(data, 0, len);
            goto do_return_ok;
        } else
#endif  /*  MIPS  */
        {
            if (paddr >= mem->physical_max) {
                char *symbol;
                uint64_t old_pc;
                uint64_t offset;

#ifdef MEM_MIPS
                old_pc = cpu->cd.mips.pc_last;
#else
                /*  Default instruction size on most
                    RISC archs is 32 bits:  */
                old_pc = cpu->pc - sizeof(uint32_t);
#endif

                /*  This allows for example OS kernels to probe
                    memory a few KBs past the end of memory,
                    without giving too many warnings.  */
                if (!quiet_mode && !no_exceptions && paddr >=
                    mem->physical_max + 0x40000) {
                    fatal("[ memory_rw(): writeflag=%i ",
                        writeflag);
                    if (writeflag) {
                        unsigned int i;
                        debug("data={");
                        if (len > 16) {
                            int start2 = len-16;
                            for (i=0; i<16; i++)
                                debug("%s%02x", i?",":"",
                                    data[i]);
                            debug(" .. ");
                            if (start2 < 16)
                                start2 = 16;
                            for (i=start2; i<len; i++)
                                debug("%s%02x", i?",":"",
                                    data[i]);
                        } else
                            for (i=0; i<len; i++)
                                debug("%s%02x", i?",":"",
                                    data[i]);
                        debug("}");
                    }

                    fatal(" paddr=0x%llx >= physical_max"
                        "; pc=", (long long)paddr);
                    if (cpu->is_32bit)
                        fatal("0x%08x", (int)old_pc);
                    else
                        fatal("0x%016llx",
                            (long long)old_pc);
                    symbol = get_symbol_name(
                        &cpu->machine->symbol_context,
                        old_pc, &offset);
                    fatal(" <%s> ]\n",
                        symbol? symbol : " no symbol ");
                }

                if (cpu->machine->single_step_on_bad_addr) {
                    fatal("[ unimplemented access to "
                        "0x%llx, pc=0x", (long long)paddr);
                    if (cpu->is_32bit)
                        fatal("%08x ]\n", (int)old_pc);
                    else
                        fatal("%016llx ]\n",
                            (long long)old_pc);
                    single_step = 1;
                }
            }

            if (writeflag == MEM_READ) {
#ifdef MEM_X86
                /*  Reading non-existent memory on x86:  */
                memset(data, 0xff, len);
#else
                /*  Return all zeroes? (Or 0xff? TODO)  */
                memset(data, 0, len);
#endif

#ifdef MEM_MIPS
                /*
                 *  For real data/instruction accesses, cause
                 *  an exception on an illegal read:
                 */
                if (cache != CACHE_NONE && cpu->machine->
                    dbe_on_nonexistant_memaccess &&
                    !no_exceptions) {
                    if (paddr >= mem->physical_max &&
                        paddr < mem->physical_max+1048576)
                        mips_cpu_exception(cpu,
                            EXCEPTION_DBE, 0, vaddr, 0,
                            0, 0, 0);
                }
#endif  /*  MEM_MIPS  */
            }

            /*  Hm? Shouldn't there be a DBE exception for
                invalid writes as well? TODO  */

            goto do_return_ok;
        }
    }

#endif  /*  ifndef MEM_USERLAND  */


    /*
     *  Uncached access:
     */
    memblock = memory_paddr_to_hostaddr(mem, paddr, writeflag);
    if (memblock == NULL) {
        if (writeflag == MEM_READ)
            memset(data, 0, len);
        goto do_return_ok;
    }

    offset = paddr & ((1 << BITS_PER_MEMBLOCK) - 1);

    if (cpu->update_translation_table != NULL && !bintrans_device_danger)
        cpu->update_translation_table(cpu, vaddr & ~offset_mask,
            memblock + (offset & ~offset_mask),
#if 0
            cache == CACHE_INSTRUCTION?
            (writeflag == MEM_WRITE? 1 : 0)
            : ok - 1,
#else
            writeflag == MEM_WRITE? 1 : 0,
#endif
            paddr & ~offset_mask);

    if (writeflag == MEM_WRITE) {
        /*  Ugly optimization, but it works:  */
        if (len == sizeof(uint32_t) && (offset & 3)==0
            && ((size_t)data&3)==0)
            *(uint32_t *)(memblock + offset) = *(uint32_t *)data;
        else if (len == sizeof(uint8_t))
            *(uint8_t *)(memblock + offset) = *(uint8_t *)data;
        else
            memcpy(memblock + offset, data, len);
    } else {
        /*  Ugly optimization, but it works:  */
        if (len == sizeof(uint32_t) && (offset & 3)==0
            && ((size_t)data&3)==0)
            *(uint32_t *)data = *(uint32_t *)(memblock + offset);
        else if (len == sizeof(uint8_t))
            *(uint8_t *)data = *(uint8_t *)(memblock + offset);
        else
            memcpy(data, memblock + offset, len);

#ifdef MEM_MIPS
        if (cache == CACHE_INSTRUCTION) {
            cpu->cd.mips.pc_last_host_4k_page = memblock
                + (offset & ~offset_mask);
            if (bintrans_cached) {
                cpu->cd.mips.pc_bintrans_host_4kpage =
                    cpu->cd.mips.pc_last_host_4k_page;
            }
        }
#endif  /*  MIPS  */
    }


do_return_ok:
    return MEMORY_ACCESS_OK;
}

