/[gxemul]/trunk/src/memory_rw.c
This is a repository of my old source code which isn't updated any more. Go to git.rot13.org for current projects!
ViewVC logotype

Contents of /trunk/src/memory_rw.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 18 - (show annotations)
Mon Oct 8 16:19:11 2007 UTC (16 years, 7 months ago) by dpavlin
File MIME type: text/plain
File size: 17862 byte(s)
++ trunk/HISTORY	(local)
$Id: HISTORY,v 1.1004 2005/10/27 14:01:10 debug Exp $
20051011        Passing -A as the default boot arg for CATS (works fine with
                OpenBSD/cats).
20051012	Fixing the VGA cursor offset bug, and speeding up framebuffer
		redraws if character cells contain the same thing as during
		the last redraw.
20051013	Adding a slow strd ARM instruction hack.
20051017	Minor updates: Adding a dummy i80321 Verde controller (for
		XScale emulation), fixing the disassembly of the ARM "ldrd"
		instruction, adding "support" for less-than-4KB pages for ARM
		(by not adding them to translation tables).
20051020	Continuing on some HPCarm stuff. A NetBSD/hpcarm kernel prints
		some boot messages on an emulated Jornada 720.
		Making dev_ram work better with dyntrans (speeds up some things
		quite a bit).
20051021	Automatically generating some of the most common ARM load/store
		multiple instructions.
20051022	Better statistics gathering for the ARM load/store multiple.
		Various other dyntrans and device updates.
20051023	Various minor updates.
20051024	Continuing; minor device and dyntrans fine-tuning. Adding the
		first "reasonable" instruction combination hacks for ARM (the
		cores of NetBSD/cats' memset and memcpy).
20051025	Fixing a dyntrans-related bug in dev_vga. Also changing the
		dyntrans low/high access notification to only be updated on
		writes, not reads. Hopefully it will be enough. (dev_vga in
		charcell mode now seems to work correctly with both reads and
		writes.)
		Experimenting with gathering dyntrans statistics (which parts
		of emulated RAM that are actually executed), and adding
		instruction combination hacks for cache cleaning and a part of
		NetBSD's scanc() function.
20051026	Adding a bitmap for ARM emulation which indicates if a page is
		(specifically) user accessible; loads and stores with the t-
		flag set can now use the translation arrays, which results in
		a measurable speedup.
20051027	Dyntrans updates; adding an extra bitmap array for 32-bit
		emulation modes, speeding up the check whether a physical page
		has any code translations or not (O(n) -> O(1)). Doing a
		similar reduction of O(n) to O(1) by avoiding the scan through
		the translation entries on a translation update (32-bit mode
		only).
		Various other minor hacks.
20051029	Quick release, without any testing at all.

==============  RELEASE 0.3.6.2  ==============


1 /*
2 * Copyright (C) 2003-2005 Anders Gavare. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 *
28 * $Id: memory_rw.c,v 1.75 2005/10/27 14:01:12 debug Exp $
29 *
30 * Generic memory_rw(), with special hacks for specific CPU families.
31 *
32 * Example for inclusion from memory_mips.c:
33 *
34 * MEMORY_RW should be mips_memory_rw
35 * MEM_MIPS should be defined
36 */
37
38
39 /*
40 * memory_rw():
41 *
42 * Read or write data from/to memory.
43 *
44 * cpu the cpu doing the read/write
45 * mem the memory object to use
46 * vaddr the virtual address
47 * data a pointer to the data to be written to memory, or
48 * a placeholder for data when reading from memory
49 * len the length of the 'data' buffer
50 * writeflag set to MEM_READ or MEM_WRITE
51 * cache_flags CACHE_{NONE,DATA,INSTRUCTION} | other flags
52 *
53 * If the address indicates access to a memory mapped device, that device's
54 * read/write access function is called.
55 *
56 * If instruction latency/delay support is enabled, then
57 * cpu->instruction_delay is increased by the number of instructions to
58 * delay execution.
59 *
60 * This function should not be called with cpu == NULL.
61 *
62 * Returns one of the following:
63 * MEMORY_ACCESS_FAILED
64 * MEMORY_ACCESS_OK
65 *
66 * (MEMORY_ACCESS_FAILED is 0.)
67 */
68 int MEMORY_RW(struct cpu *cpu, struct memory *mem, uint64_t vaddr,
69 unsigned char *data, size_t len, int writeflag, int cache_flags)
70 {
71 #ifdef MEM_ALPHA
72 const int offset_mask = 0x1fff;
73 #else
74 const int offset_mask = 0xfff;
75 #endif
76
77 #ifndef MEM_USERLAND
78 int ok = 1;
79 #endif
80 uint64_t paddr;
81 int cache, no_exceptions, offset;
82 unsigned char *memblock;
83 #ifdef MEM_MIPS
84 int bintrans_cached = cpu->machine->bintrans_enable;
85 #endif
86 int bintrans_device_danger = 0;
87
/* Split the cache_flags argument into its two components: */
88 no_exceptions = cache_flags & NO_EXCEPTIONS;
89 cache = cache_flags & CACHE_FLAGS_MASK;
90
91 #ifdef MEM_X86
92 /* Real-mode wrap-around: */
93 if (REAL_MODE && !(cache_flags & PHYSICAL)) {
94 if ((vaddr & 0xffff) + len > 0x10000) {
95 /* Do one byte at a time: */
96 int res = 0, i;
97 for (i=0; i<len; i++)
98 res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
99 writeflag, cache_flags);
100 return res;
101 }
102 }
103
104 /* Crossing a page boundary? Then do one byte at a time: */
105 if ((vaddr & 0xfff) + len > 0x1000 && !(cache_flags & PHYSICAL)
106 && cpu->cd.x86.cr[0] & X86_CR0_PG) {
107 /* For WRITES: Read ALL BYTES FIRST and write them back!!!
108 Then do a write of all the new bytes. This is to make sure
109 that both pages around the boundary are writable so we don't
110 do a partial write. */
111 int res = 0, i;
112 if (writeflag == MEM_WRITE) {
113 unsigned char tmp;
114 for (i=0; i<len; i++) {
115 res = MEMORY_RW(cpu, mem, vaddr+i, &tmp, 1,
116 MEM_READ, cache_flags);
117 if (!res)
118 return 0;
119 res = MEMORY_RW(cpu, mem, vaddr+i, &tmp, 1,
120 MEM_WRITE, cache_flags);
121 if (!res)
122 return 0;
123 }
124 for (i=0; i<len; i++) {
125 res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
126 MEM_WRITE, cache_flags);
127 if (!res)
128 return 0;
129 }
130 } else {
131 for (i=0; i<len; i++) {
132 /* Do one byte at a time: */
133 res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
134 writeflag, cache_flags);
135 if (!res) {
136 if (cache == CACHE_INSTRUCTION) {
137 fatal("FAILED instruction "
138 "fetch across page boundar"
139 "y: todo. vaddr=0x%08x\n",
140 (int)vaddr);
141 cpu->running = 0;
142 }
143 return 0;
144 }
145 }
146 }
147 return res;
148 }
149 #endif /* X86 */
150
151 #ifdef MEM_MIPS
/* Invalidate the bintrans instruction-fetch cache before translating: */
152 if (bintrans_cached) {
153 if (cache == CACHE_INSTRUCTION) {
154 cpu->cd.mips.pc_bintrans_host_4kpage = NULL;
155 cpu->cd.mips.pc_bintrans_paddr_valid = 0;
156 }
157 }
158 #endif /* MEM_MIPS */
159
160 #ifdef MEM_USERLAND
/* Userland emulation: no MMU; the virtual address is (mostly) the
   physical address. */
161 #ifdef MEM_ALPHA
162 paddr = vaddr;
163 #else
164 paddr = vaddr & 0x7fffffff;
165 #endif
166 goto have_paddr;
167 #endif
168
169 #ifndef MEM_USERLAND
170 #ifdef MEM_MIPS
171 /*
172 * For instruction fetch, are we on the same page as the last
173 * instruction we fetched?
174 *
175 * NOTE: There's no need to check this stuff here if this address
176 * is known to be in host ram, as it's done at instruction fetch
177 * time in cpu.c! Only check if _host_4k_page == NULL.
178 */
179 if (cache == CACHE_INSTRUCTION &&
180 cpu->cd.mips.pc_last_host_4k_page == NULL &&
181 (vaddr & ~0xfff) == cpu->cd.mips.pc_last_virtual_page) {
182 paddr = cpu->cd.mips.pc_last_physical_page | (vaddr & 0xfff);
183 goto have_paddr;
184 }
185 #endif /* MEM_MIPS */
186
/* Translate vaddr to paddr, either trivially (PHYSICAL flag, or no MMU
   callback installed) or via the CPU's translate_address function: */
187 if (cache_flags & PHYSICAL || cpu->translate_address == NULL) {
188 paddr = vaddr;
189
190 #ifdef MEM_ALPHA
191 /* paddr &= 0x1fffffff; For testalpha */
192 paddr &= 0x000003ffffffffffULL;
193 #endif
194
195 #ifdef MEM_IA64
196 /* For testia64 */
197 paddr &= 0x3fffffff;
198 #endif
199
200 #ifdef MEM_PPC
201 if (cpu->cd.ppc.bits == 32)
202 paddr &= 0xffffffff;
203 #endif
204
205 #ifdef MEM_SH
206 paddr &= 0xffffffff;
207 #endif
208 } else {
209 ok = cpu->translate_address(cpu, vaddr, &paddr,
210 (writeflag? FLAG_WRITEFLAG : 0) +
211 (no_exceptions? FLAG_NOEXCEPTIONS : 0)
212 #ifdef MEM_X86
213 + (cache_flags & NO_SEGMENTATION)
214 #endif
215 #ifdef MEM_ARM
216 + (cache_flags & MEMORY_USER_ACCESS)
217 #endif
218 + (cache==CACHE_INSTRUCTION? FLAG_INSTR : 0));
219 /* If the translation caused an exception, or was invalid in
220 some way, we simply return without doing the memory
221 access: */
222 if (!ok)
223 return MEMORY_ACCESS_FAILED;
224 }
225
226
227 #ifdef MEM_X86
228 /* DOS debugging :-) */
229 if (!quiet_mode && !(cache_flags & PHYSICAL)) {
230 if (paddr >= 0x400 && paddr <= 0x4ff)
231 debug("{ PC BIOS DATA AREA: %s 0x%x }\n", writeflag ==
232 MEM_WRITE? "writing to" : "reading from",
233 (int)paddr);
234 #if 0
235 if (paddr >= 0xf0000 && paddr <= 0xfffff)
236 debug("{ BIOS ACCESS: %s 0x%x }\n",
237 writeflag == MEM_WRITE? "writing to" :
238 "reading from", (int)paddr);
239 #endif
240 }
241 #endif
242
243 #ifdef MEM_MIPS
244 /*
245 * If correct cache emulation is enabled, and we need to simulate
246 * cache misses even from the instruction cache, we can't run directly
247 * from a host page. :-/
248 */
249 #if defined(ENABLE_CACHE_EMULATION) && defined(ENABLE_INSTRUCTION_DELAYS)
250 #else
251 if (cache == CACHE_INSTRUCTION) {
252 cpu->cd.mips.pc_last_virtual_page = vaddr & ~0xfff;
253 cpu->cd.mips.pc_last_physical_page = paddr & ~0xfff;
254 cpu->cd.mips.pc_last_host_4k_page = NULL;
255
256 /* _last_host_4k_page will be set to 1 further down,
257 if the page is actually in host ram */
258 }
259 #endif
260 #endif /* MEM_MIPS */
261 #endif /* ifndef MEM_USERLAND */
262
263
264 #if defined(MEM_MIPS) || defined(MEM_USERLAND)
265 have_paddr:
266 #endif
267
268
269 #ifdef MEM_MIPS
270 /* TODO: How about bintrans vs cache emulation? */
271 if (bintrans_cached) {
272 if (cache == CACHE_INSTRUCTION) {
273 cpu->cd.mips.pc_bintrans_paddr_valid = 1;
274 cpu->cd.mips.pc_bintrans_paddr = paddr;
275 }
276 }
277 #endif /* MEM_MIPS */
278
279
280
281 #ifndef MEM_USERLAND
282 /*
283 * Memory mapped device?
284 *
285 * TODO: this is utterly slow.
286 * TODO2: if paddr<base, but len enough, then we should write
287 * to a device to
288 */
289 if (paddr >= mem->mmap_dev_minaddr && paddr < mem->mmap_dev_maxaddr) {
290 uint64_t orig_paddr = paddr;
291 int i, start, res;
292
293 /*
294 * Really really slow, but unfortunately necessary. This is
295 * to avoid the following scenario:
296 *
297 * a) offsets 0x000..0x123 are normal memory
298 * b) offsets 0x124..0x777 are a device
299 *
300 * 1) a read is done from offset 0x100. the page is
301 * added to the bintrans system as a "RAM" page
302 * 2) a bintranslated read is done from offset 0x200,
303 * which should access the device, but since the
304 * entire page is added, it will access non-existent
305 * RAM instead, without warning.
306 *
307 * Setting bintrans_device_danger = 1 on accesses which are
308 * on _any_ offset on pages that are device mapped avoids
309 * this problem, but it is probably not very fast.
310 */
311 for (i=0; i<mem->n_mmapped_devices; i++)
312 if (paddr >= (mem->dev_baseaddr[i] & ~offset_mask) &&
313 paddr <= ((mem->dev_endaddr[i]-1) | offset_mask)) {
314 bintrans_device_danger = 1;
315 break;
316 }
317
/* Start scanning at the most recently hit device, since consecutive
   accesses tend to go to the same device: */
318 i = start = mem->last_accessed_device;
319
320 /* Scan through all devices: */
321 do {
322 if (paddr >= mem->dev_baseaddr[i] &&
323 paddr < mem->dev_endaddr[i]) {
324 /* Found a device, let's access it: */
325 mem->last_accessed_device = i;
326
/* From here on, paddr is the offset WITHIN the device. Clamp len so
   the access does not run past the end of the device: */
327 paddr -= mem->dev_baseaddr[i];
328 if (paddr + len > mem->dev_length[i])
329 len = mem->dev_length[i] - paddr;
330
331 if (cpu->update_translation_table != NULL &&
332 mem->dev_flags[i] & MEM_DYNTRANS_OK) {
333 int wf = writeflag == MEM_WRITE? 1 : 0;
334 unsigned char *host_addr;
335
336 if (!(mem->dev_flags[i] &
337 MEM_DYNTRANS_WRITE_OK))
338 wf = 0;
339
/* Track the lowest/highest page written to via dyntrans, so the
   device can later redraw/react only to the touched range: */
340 if (writeflag && wf) {
341 if (paddr < mem->
342 dev_dyntrans_write_low[i])
343 mem->
344 dev_dyntrans_write_low
345 [i] = paddr &
346 ~offset_mask;
347 if (paddr >= mem->
348 dev_dyntrans_write_high[i])
349 mem->
350 dev_dyntrans_write_high
351 [i] = paddr |
352 offset_mask;
353 }
354
355 if (mem->dev_flags[i] &
356 MEM_EMULATED_RAM) {
357 /* MEM_WRITE to force the page
358 to be allocated, if it
359 wasn't already */
360 uint64_t *pp = (uint64_t *)
361 mem->dev_dyntrans_data[i];
362 uint64_t p = orig_paddr - *pp;
363 host_addr =
364 memory_paddr_to_hostaddr(
365 mem, p, MEM_WRITE)
366 + (p & ~offset_mask
367 & ((1 <<
368 BITS_PER_MEMBLOCK) - 1));
369 } else {
370 host_addr =
371 mem->dev_dyntrans_data[i] +
372 (paddr & ~offset_mask);
373 }
374 cpu->update_translation_table(cpu,
375 vaddr & ~offset_mask, host_addr,
376 wf, orig_paddr & ~offset_mask);
377 }
378
/* Call the device's access handler. It is skipped when exceptions
   are suppressed, unless the device declares reads side-effect free: */
379 res = 0;
380 if (!no_exceptions || (mem->dev_flags[i] &
381 MEM_READING_HAS_NO_SIDE_EFFECTS))
382 res = mem->dev_f[i](cpu, mem, paddr,
383 data, len, writeflag,
384 mem->dev_extra[i]);
385
386 #ifdef ENABLE_INSTRUCTION_DELAYS
387 if (res == 0)
388 res = -1;
389
390 #ifdef MEM_MIPS
391 cpu->cd.mips.instruction_delay +=
392 ( (abs(res) - 1) *
393 cpu->cd.mips.cpu_type.instrs_per_cycle );
394 #endif
395 #endif
396
397 #ifndef MEM_X86
398 /*
399 * If accessing the memory mapped device
400 * failed, then return with a DBE exception.
401 */
402 if (res <= 0 && !no_exceptions) {
403 debug("%s device '%s' addr %08lx "
404 "failed\n", writeflag?
405 "writing to" : "reading from",
406 mem->dev_name[i], (long)paddr);
407 #ifdef MEM_MIPS
408 mips_cpu_exception(cpu, EXCEPTION_DBE,
409 0, vaddr, 0, 0, 0, 0);
410 #endif
411 return MEMORY_ACCESS_FAILED;
412 }
413 #endif
414 goto do_return_ok;
415 }
416
417 i ++;
418 if (i == mem->n_mmapped_devices)
419 i = 0;
420 } while (i != start);
421 }
422
423
424 #ifdef MEM_MIPS
425 /*
426 * Data and instruction cache emulation:
427 */
428
429 switch (cpu->cd.mips.cpu_type.mmu_model) {
430 case MMU3K:
431 /* if not uncached address (TODO: generalize this) */
432 if (!(cache_flags & PHYSICAL) && cache != CACHE_NONE &&
433 !((vaddr & 0xffffffffULL) >= 0xa0000000ULL &&
434 (vaddr & 0xffffffffULL) <= 0xbfffffffULL)) {
435 if (memory_cache_R3000(cpu, cache, paddr,
436 writeflag, len, data))
437 goto do_return_ok;
438 }
439 break;
440 default:
441 /* R4000 etc */
442 /* TODO */
443 ;
444 }
445 #endif /* MEM_MIPS */
446
447
448 /* Outside of physical RAM? */
449 if (paddr >= mem->physical_max) {
450 #ifdef MEM_MIPS
451 if ((paddr & 0xffffc00000ULL) == 0x1fc00000) {
452 /* Ok, this is PROM stuff */
453 } else if ((paddr & 0xfffff00000ULL) == 0x1ff00000) {
454 /* Sprite reads from this area of memory... */
455 /* TODO: is this still correct? */
456 if (writeflag == MEM_READ)
457 memset(data, 0, len);
458 goto do_return_ok;
459 } else
460 #endif /* MIPS */
461 {
/* NOTE(review): this inner condition is always true here; paddr >=
   physical_max was already established by the outer if above. */
462 if (paddr >= mem->physical_max) {
463 char *symbol;
464 uint64_t old_pc;
465 uint64_t offset;
466
467 #ifdef MEM_MIPS
468 old_pc = cpu->cd.mips.pc_last;
469 #else
470 /* Default instruction size on most
471 RISC archs is 32 bits: */
472 old_pc = cpu->pc - sizeof(uint32_t);
473 #endif
474
475 /* This allows for example OS kernels to probe
476 memory a few KBs past the end of memory,
477 without giving too many warnings. */
478 if (!quiet_mode && !no_exceptions && paddr >=
479 mem->physical_max + 0x40000) {
480 fatal("[ memory_rw(): writeflag=%i ",
481 writeflag);
482 if (writeflag) {
483 unsigned int i;
/* NOTE(review): the writeflag argument below is unused by the format
   string "data={" — harmless extra vararg. */
484 debug("data={", writeflag);
485 if (len > 16) {
486 int start2 = len-16;
487 for (i=0; i<16; i++)
488 debug("%s%02x",
489 i?",":"",
490 data[i]);
491 debug(" .. ");
492 if (start2 < 16)
493 start2 = 16;
494 for (i=start2; i<len;
495 i++)
496 debug("%s%02x",
497 i?",":"",
498 data[i]);
499 } else
500 for (i=0; i<len; i++)
501 debug("%s%02x",
502 i?",":"",
503 data[i]);
504 debug("}");
505 }
506
507 fatal(" paddr=0x%llx >= physical_max"
508 "; pc=", (long long)paddr);
509 if (cpu->is_32bit)
510 fatal("0x%08x",(int)old_pc);
511 else
512 fatal("0x%016llx",
513 (long long)old_pc);
514 symbol = get_symbol_name(
515 &cpu->machine->symbol_context,
516 old_pc, &offset);
517 fatal(" <%s> ]\n",
518 symbol? symbol : " no symbol ");
519 }
520
521 if (cpu->machine->single_step_on_bad_addr) {
522 fatal("[ unimplemented access to "
523 "0x%llx, pc=0x",(long long)paddr);
524 if (cpu->is_32bit)
525 fatal("%08x ]\n",
526 (int)old_pc);
527 else
528 fatal("%016llx ]\n",
529 (long long)old_pc);
530 single_step = 1;
531 }
532 }
533
534 if (writeflag == MEM_READ) {
535 #ifdef MEM_X86
536 /* Reading non-existent memory on x86: */
537 memset(data, 0xff, len);
538 #else
539 /* Return all zeroes? (Or 0xff? TODO) */
540 memset(data, 0, len);
541 #endif
542
543 #ifdef MEM_MIPS
544 /*
545 * For real data/instruction accesses, cause
546 * an exception on an illegal read:
547 */
548 if (cache != CACHE_NONE && cpu->machine->
549 dbe_on_nonexistant_memaccess &&
550 !no_exceptions) {
551 if (paddr >= mem->physical_max &&
552 paddr < mem->physical_max+1048576)
553 mips_cpu_exception(cpu,
554 EXCEPTION_DBE, 0, vaddr, 0,
555 0, 0, 0);
556 }
557 #endif /* MEM_MIPS */
558 }
559
560 /* Hm? Shouldn't there be a DBE exception for
561 invalid writes as well? TODO */
562
563 goto do_return_ok;
564 }
565 }
566
567 #endif /* ifndef MEM_USERLAND */
568
569
570 /*
571 * Uncached access:
572 *
573 * 1) Translate the physical address to a host address.
574 *
575 * 2) Insert this virtual->physical->host translation into the
576 * fast translation arrays (using update_translation_table()).
577 *
578 * 3) If this was a Write, then invalidate any code translations
579 * in that page.
580 */
581 memblock = memory_paddr_to_hostaddr(mem, paddr, writeflag);
582 if (memblock == NULL) {
583 if (writeflag == MEM_READ)
584 memset(data, 0, len);
585 goto do_return_ok;
586 }
587
/* Byte offset of this access within the host memory block: */
588 offset = paddr & ((1 << BITS_PER_MEMBLOCK) - 1);
589
590 if (cpu->update_translation_table != NULL && !bintrans_device_danger
591 #ifndef MEM_MIPS
592 /* && !(cache_flags & MEMORY_USER_ACCESS) */
593 #ifndef MEM_USERLAND
594 && !(ok & MEMORY_NOT_FULL_PAGE)
595 #endif
596 #endif
597 && !no_exceptions)
598 cpu->update_translation_table(cpu, vaddr & ~offset_mask,
599 memblock + (offset & ~offset_mask),
600 (cache_flags & MEMORY_USER_ACCESS) |
601 #ifndef MEM_MIPS
602 (cache == CACHE_INSTRUCTION? TLB_CODE : 0) |
603 #endif
604 #if 0
605 (cache == CACHE_INSTRUCTION?
606 (writeflag == MEM_WRITE? 1 : 0)
607 : ok - 1),
608 #else
609 (writeflag == MEM_WRITE? 1 : 0),
610 #endif
611 paddr & ~offset_mask);
612
613 /* Invalidate code translations for the page we are writing to. */
614 if (writeflag == MEM_WRITE &&
615 cpu->invalidate_code_translation != NULL)
616 cpu->invalidate_code_translation(cpu, paddr, INVALIDATE_PADDR);
617
618 if (writeflag == MEM_WRITE) {
619 /* Ugly optimization, but it works: */
620 if (len == sizeof(uint32_t) && (offset & 3)==0
621 && ((size_t)data&3)==0)
622 *(uint32_t *)(memblock + offset) = *(uint32_t *)data;
623 else if (len == sizeof(uint8_t))
624 *(uint8_t *)(memblock + offset) = *(uint8_t *)data;
625 else
626 memcpy(memblock + offset, data, len);
627 } else {
628 /* Ugly optimization, but it works: */
629 if (len == sizeof(uint32_t) && (offset & 3)==0
630 && ((size_t)data&3)==0)
631 *(uint32_t *)data = *(uint32_t *)(memblock + offset);
632 else if (len == sizeof(uint8_t))
633 *(uint8_t *)data = *(uint8_t *)(memblock + offset);
634 else
635 memcpy(data, memblock + offset, len);
636
637 #ifdef MEM_MIPS
638 if (cache == CACHE_INSTRUCTION) {
639 cpu->cd.mips.pc_last_host_4k_page = memblock
640 + (offset & ~offset_mask);
641 if (bintrans_cached) {
642 cpu->cd.mips.pc_bintrans_host_4kpage =
643 cpu->cd.mips.pc_last_host_4k_page;
644 }
645 }
646 #endif /* MIPS */
647 }
648
649
650 do_return_ok:
651 return MEMORY_ACCESS_OK;
652 }
653
653

  ViewVC Help
Powered by ViewVC 1.1.26