/[gxemul]/trunk/src/memory_rw.c
This is a repository of my old source code, which isn't updated any more. Go to git.rot13.org for current projects!
ViewVC logotype

Annotation of /trunk/src/memory_rw.c

Parent Directory Parent Directory | Revision Log Revision Log


Revision 26 - (hide annotations)
Mon Oct 8 16:20:10 2007 UTC (16 years, 7 months ago) by dpavlin
File MIME type: text/plain
File size: 15097 byte(s)
++ trunk/HISTORY	(local)
$Id: HISTORY,v 1.1264 2006/06/25 11:08:04 debug Exp $
20060624	Replacing the error-prone machine type initialization stuff
		with something more reasonable.
		Finally removing the old "cpu_run" kludge; moving around stuff
		in machine.c and emul.c to better suit the dyntrans system.
		Various minor dyntrans cleanups (renaming translate_address to
		translate_v2p, and experimenting with template physpages).
20060625	Removing the speed hack which separated the vph entries into
		two halves (code vs data); things seem a lot more stable now.
		Minor performance hack: R2000/R3000 cache isolation now only
		clears address translations when going into isolation, not
		when going out of it.
		Fixing the MIPS interrupt problems by letting mtc0 immediately
		cause interrupts.

==============  RELEASE 0.4.0.1  ==============


1 dpavlin 2 /*
2 dpavlin 22 * Copyright (C) 2003-2006 Anders Gavare. All rights reserved.
3 dpavlin 2 *
4     * Redistribution and use in source and binary forms, with or without
5     * modification, are permitted provided that the following conditions are met:
6     *
7     * 1. Redistributions of source code must retain the above copyright
8     * notice, this list of conditions and the following disclaimer.
9     * 2. Redistributions in binary form must reproduce the above copyright
10     * notice, this list of conditions and the following disclaimer in the
11     * documentation and/or other materials provided with the distribution.
12     * 3. The name of the author may not be used to endorse or promote products
13     * derived from this software without specific prior written permission.
14     *
15     * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16     * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17     * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18     * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19     * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20     * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21     * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22     * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23     * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24     * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25     * SUCH DAMAGE.
26     *
27     *
28 dpavlin 26 * $Id: memory_rw.c,v 1.90 2006/06/25 00:15:44 debug Exp $
29 dpavlin 2 *
30     * Generic memory_rw(), with special hacks for specific CPU families.
31     *
32     * Example for inclusion from memory_mips.c:
33     *
34     * MEMORY_RW should be mips_memory_rw
35     * MEM_MIPS should be defined
36     */
37    
38    
/*
 *  memory_rw():
 *
 *  Read or write data from/to memory.
 *
 *	cpu		the cpu doing the read/write
 *	mem		the memory object to use
 *	vaddr		the virtual address
 *	data		a pointer to the data to be written to memory, or
 *			a placeholder for data when reading from memory
 *	len		the length of the 'data' buffer
 *	writeflag	set to MEM_READ or MEM_WRITE
 *	misc_flags	CACHE_{NONE,DATA,INSTRUCTION} | other flags
 *
 *  If the address indicates access to a memory mapped device, that device's
 *  read/write access function is called.
 *
 *  This function should not be called with cpu == NULL.
 *
 *  Returns one of the following:
 *	MEMORY_ACCESS_FAILED
 *	MEMORY_ACCESS_OK
 *
 *  (MEMORY_ACCESS_FAILED is 0.)
 *
 *  NOTE: This file is a template. It is included from per-CPU-family
 *  memory_*.c files with MEMORY_RW defined to the family-specific name
 *  (e.g. mips_memory_rw) and MEM_MIPS / MEM_X86 / MEM_USERLAND etc.
 *  selecting the relevant code paths.
 */
int MEMORY_RW(struct cpu *cpu, struct memory *mem, uint64_t vaddr,
	unsigned char *data, size_t len, int writeflag, int misc_flags)
{
	/*  Mask selecting the offset-within-page bits; Alpha uses 8 KB
	    pages, everything else here uses 4 KB pages:  */
#ifdef MEM_ALPHA
	const int offset_mask = 0x1fff;
#else
	const int offset_mask = 0xfff;
#endif

#ifndef MEM_USERLAND
	/*  Result of the virtual-to-physical translation. Besides being
	    nonzero for success, translate_v2p may OR in flag bits such as
	    MEMORY_NOT_FULL_PAGE (tested further down).  */
	int ok = 1;
#endif
	uint64_t paddr;
	int cache, no_exceptions, offset;
	unsigned char *memblock;

	/*  Set to 1 if this physical page overlaps a memory-mapped device,
	    in which case it must NOT be entered into the dyntrans fast
	    translation tables as plain RAM:  */
	int dyntrans_device_danger = 0;

	no_exceptions = misc_flags & NO_EXCEPTIONS;
	cache = misc_flags & CACHE_FLAGS_MASK;

#ifdef MEM_X86
	/*  Real-mode wrap-around: an access crossing the 64 KB segment
	    limit wraps, so fall back to one byte at a time (each byte is
	    then translated/wrapped individually by the recursive call):  */
	if (REAL_MODE && !(misc_flags & PHYSICAL)) {
		if ((vaddr & 0xffff) + len > 0x10000) {
			/*  Do one byte at a time:  */
			int res = 0;
			size_t i;
			for (i=0; i<len; i++)
				res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
				    writeflag, misc_flags);
			return res;
		}
	}

	/*  Crossing a page boundary? Then do one byte at a time:  */
	if ((vaddr & 0xfff) + len > 0x1000 && !(misc_flags & PHYSICAL)
	    && cpu->cd.x86.cr[0] & X86_CR0_PG) {
		/*  For WRITES: Read ALL BYTES FIRST and write them back!!!
		    Then do a write of all the new bytes. This is to make sure
		    that both pages around the boundary are writable so we
		    don't do a partial write.  */
		int res = 0;
		size_t i;
		if (writeflag == MEM_WRITE) {
			unsigned char tmp;
			/*  Pass 1: read-and-write-back each byte, so that any
			    page fault happens before data is modified:  */
			for (i=0; i<len; i++) {
				res = MEMORY_RW(cpu, mem, vaddr+i, &tmp, 1,
				    MEM_READ, misc_flags);
				if (!res)
					return 0;
				res = MEMORY_RW(cpu, mem, vaddr+i, &tmp, 1,
				    MEM_WRITE, misc_flags);
				if (!res)
					return 0;
			}
			/*  Pass 2: both pages proven writable; now write the
			    actual new data:  */
			for (i=0; i<len; i++) {
				res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
				    MEM_WRITE, misc_flags);
				if (!res)
					return 0;
			}
		} else {
			for (i=0; i<len; i++) {
				/*  Do one byte at a time:  */
				res = MEMORY_RW(cpu, mem, vaddr+i, &data[i], 1,
				    writeflag, misc_flags);
				if (!res) {
					if (cache == CACHE_INSTRUCTION) {
						fatal("FAILED instruction "
						    "fetch across page boundar"
						    "y: todo. vaddr=0x%08x\n",
						    (int)vaddr);
						cpu->running = 0;
					}
					return 0;
				}
			}
		}
		return res;
	}
#endif	/*  X86  */


#ifdef MEM_USERLAND
	/*  Userland emulation: no MMU; addresses map directly (Alpha) or
	    with the top bit stripped:  */
#ifdef MEM_ALPHA
	paddr = vaddr;
#else
	paddr = vaddr & 0x7fffffff;
#endif
#else	/*  !MEM_USERLAND  */
	if (misc_flags & PHYSICAL || cpu->translate_v2p == NULL) {
		/*  Caller already supplied a physical address, or this CPU
		    has no address translation at all:  */
		paddr = vaddr;
	} else {
		ok = cpu->translate_v2p(cpu, vaddr, &paddr,
		    (writeflag? FLAG_WRITEFLAG : 0) +
		    (no_exceptions? FLAG_NOEXCEPTIONS : 0)
#ifdef MEM_X86
		    + (misc_flags & NO_SEGMENTATION)
#endif
#ifdef MEM_ARM
		    + (misc_flags & MEMORY_USER_ACCESS)
#endif
		    + (cache==CACHE_INSTRUCTION? FLAG_INSTR : 0));
		/*  If the translation caused an exception, or was invalid in
		    some way, we simply return without doing the memory
		    access:  */
		if (!ok)
			return MEMORY_ACCESS_FAILED;
	}


#ifdef MEM_X86
	/*  DOS debugging :-)  */
	if (!quiet_mode && !(misc_flags & PHYSICAL)) {
		if (paddr >= 0x400 && paddr <= 0x4ff)
			debug("{ PC BIOS DATA AREA: %s 0x%x }\n", writeflag ==
			    MEM_WRITE? "writing to" : "reading from",
			    (int)paddr);
#if 0
		if (paddr >= 0xf0000 && paddr <= 0xfffff)
			debug("{ BIOS ACCESS: %s 0x%x }\n",
			    writeflag == MEM_WRITE? "writing to" :
			    "reading from", (int)paddr);
#endif
	}
#endif
#endif	/*  !MEM_USERLAND  */


#ifndef MEM_USERLAND
	/*
	 *  Memory mapped device?
	 *
	 *  TODO: if paddr < base, but len enough, then the device should
	 *  still be written to!
	 */
	if (paddr >= mem->mmap_dev_minaddr && paddr < mem->mmap_dev_maxaddr) {
		uint64_t orig_paddr = paddr;
		int i, start, end, res;

		/*
		 *  Really really slow, but unfortunately necessary. This is
		 *  to avoid the following scenario:
		 *
		 *	a) offsets 0x000..0x123 are normal memory
		 *	b) offsets 0x124..0x777 are a device
		 *
		 *	1) a read is done from offset 0x100. the page is
		 *	   added to the dyntrans system as a "RAM" page
		 *	2) a dyntranslated read is done from offset 0x200,
		 *	   which should access the device, but since the
		 *	   entire page is added, it will access non-existant
		 *	   RAM instead, without warning.
		 *
		 *  Setting dyntrans_device_danger = 1 on accesses which are
		 *  on _any_ offset on pages that are device mapped avoids
		 *  this problem, but it is probably not very fast.
		 *
		 *  TODO: Convert this into a quick (multi-level, 64-bit)
		 *  address space lookup, to find dangerous pages.
		 */
#if 1
		/*  Linear scan: does ANY device share a page with paddr?
		    (Device base/end are rounded out to page boundaries.)  */
		for (i=0; i<mem->n_mmapped_devices; i++)
			if (paddr >= (mem->dev_baseaddr[i] & ~offset_mask) &&
			    paddr <= ((mem->dev_endaddr[i]-1) | offset_mask)) {
				dyntrans_device_danger = 1;
				break;
			}
#endif

		/*  Binary search for the device actually containing paddr,
		    starting from the most recently accessed device (devices
		    are assumed sorted by base address — TODO confirm from
		    the device registration code, which is not in view):  */
		start = 0; end = mem->n_mmapped_devices - 1;
		i = mem->last_accessed_device;

		/*  Scan through all devices:  */
		do {
			if (paddr >= mem->dev_baseaddr[i] &&
			    paddr < mem->dev_endaddr[i]) {
				/*  Found a device, let's access it:  */
				mem->last_accessed_device = i;

				/*  paddr becomes the offset into the device;
				    clamp len so the access stays inside it:  */
				paddr -= mem->dev_baseaddr[i];
				if (paddr + len > mem->dev_length[i])
					len = mem->dev_length[i] - paddr;

				/*  If the device allows dyntrans access to
				    its memory, insert a fast translation for
				    this page:  */
				if (cpu->update_translation_table != NULL &&
				    !(ok & MEMORY_NOT_FULL_PAGE) &&
				    mem->dev_flags[i] & DM_DYNTRANS_OK) {
					int wf = writeflag == MEM_WRITE? 1 : 0;
					unsigned char *host_addr;

					if (!(mem->dev_flags[i] &
					    DM_DYNTRANS_WRITE_OK))
						wf = 0;

					/*  Track the dirty range written via
					    dyntrans, page-granular:  */
					if (writeflag && wf) {
						if (paddr < mem->
						    dev_dyntrans_write_low[i])
							mem->
							dev_dyntrans_write_low
							    [i] = paddr &
							    ~offset_mask;
						if (paddr >= mem->
						    dev_dyntrans_write_high[i])
							mem->
							dev_dyntrans_write_high
							    [i] = paddr |
							    offset_mask;
					}

					if (mem->dev_flags[i] &
					    DM_EMULATED_RAM) {
						/*  MEM_WRITE to force the page
						    to be allocated, if it
						    wasn't already  */
						uint64_t *pp = (uint64_t *)
						    mem->dev_dyntrans_data[i];
						uint64_t p = orig_paddr - *pp;
						host_addr =
						    memory_paddr_to_hostaddr(
						    mem, p, MEM_WRITE)
						    + (p & ~offset_mask
						    & ((1 <<
						    BITS_PER_MEMBLOCK) - 1));
					} else {
						host_addr =
						    mem->dev_dyntrans_data[i] +
						    (paddr & ~offset_mask);
					}
					cpu->update_translation_table(cpu,
					    vaddr & ~offset_mask, host_addr,
					    wf, orig_paddr & ~offset_mask);
				}

				/*  Call the device's access function. Skipped
				    for no-exception (peek-style) reads unless
				    the device declares reads side-effect
				    free:  */
				res = 0;
				if (!no_exceptions || (mem->dev_flags[i] &
				    DM_READS_HAVE_NO_SIDE_EFFECTS))
					res = mem->dev_f[i](cpu, mem, paddr,
					    data, len, writeflag,
					    mem->dev_extra[i]);

				/*  res == 0 from a skipped/legacy handler is
				    treated as success (-1 is "OK" here;
				    see the <= 0 failure test below):  */
				if (res == 0)
					res = -1;

#ifndef MEM_X86
				/*
				 *  If accessing the memory mapped device
				 *  failed, then return with a DBE exception.
				 */
				if (res <= 0 && !no_exceptions) {
					debug("%s device '%s' addr %08lx "
					    "failed\n", writeflag?
					    "writing to" : "reading from",
					    mem->dev_name[i], (long)paddr);
#ifdef MEM_MIPS
					mips_cpu_exception(cpu, EXCEPTION_DBE,
					    0, vaddr, 0, 0, 0, 0);
#endif
					return MEMORY_ACCESS_FAILED;
				}
#endif
				goto do_return_ok;
			}

			/*  Not this device: narrow the binary search:  */
			if (paddr < mem->dev_baseaddr[i])
				end = i - 1;
			if (paddr >= mem->dev_endaddr[i])
				start = i + 1;
			i = (start + end) >> 1;
		} while (start <= end);
	}


#ifdef MEM_MIPS
	/*
	 *  Data and instruction cache emulation:
	 */

	switch (cpu->cd.mips.cpu_type.mmu_model) {
	case MMU3K:
		/*  if not uncached address (TODO: generalize this)
		    (kseg1, 0xa0000000..0xbfffffff, bypasses the caches)  */
		if (!(misc_flags & PHYSICAL) && cache != CACHE_NONE &&
		    !((vaddr & 0xffffffffULL) >= 0xa0000000ULL &&
		      (vaddr & 0xffffffffULL) <= 0xbfffffffULL)) {
			if (memory_cache_R3000(cpu, cache, paddr,
			    writeflag, len, data))
				goto do_return_ok;
		}
		break;
	default:
		/*  R4000 etc  */
		/*  TODO  */
		;
	}
#endif	/*  MEM_MIPS  */


	/*  Outside of physical RAM?  */
	if (paddr >= mem->physical_max) {
#ifdef MEM_MIPS
		if ((paddr & 0xffffc00000ULL) == 0x1fc00000) {
			/*  Ok, this is PROM stuff  */
		} else if ((paddr & 0xfffff00000ULL) == 0x1ff00000) {
			/*  Sprite reads from this area of memory...  */
			/*  TODO: is this still correct?  */
			if (writeflag == MEM_READ)
				memset(data, 0, len);
			goto do_return_ok;
		} else
#endif	/*  MIPS  */
		{
			if (paddr >= mem->physical_max) {
				uint64_t offset, old_pc = cpu->pc;
				char *symbol;

				/*  This allows for example OS kernels to probe
				    memory a few KBs past the end of memory,
				    without giving too many warnings.  */
				if (!quiet_mode && !no_exceptions && paddr >=
				    mem->physical_max + 0x40000) {
					fatal("[ memory_rw(): writeflag=%i ",
					    writeflag);
					if (writeflag) {
						unsigned int i;
						/*  NOTE(review): stray
						    'writeflag' argument; the
						    format string has no
						    conversion for it.  */
						debug("data={", writeflag);
						if (len > 16) {
							/*  Dump first 16 and
							    last 16 bytes:  */
							int start2 = len-16;
							for (i=0; i<16; i++)
								debug("%s%02x",
								    i?",":"",
								    data[i]);
							debug(" .. ");
							/*  NOTE(review): this
							    clamp prevents re-
							    printing bytes 0-15
							    when len < 32.  */
							if (start2 < 16)
								start2 = 16;
							for (i=start2; i<len;
							    i++)
								debug("%s%02x",
								    i?",":"",
								    data[i]);
						} else
							for (i=0; i<len; i++)
								debug("%s%02x",
								    i?",":"",
								    data[i]);
						debug("}");
					}

					fatal(" paddr=0x%llx >= physical_max"
					    "; pc=", (long long)paddr);
					if (cpu->is_32bit)
						fatal("0x%08x",(int)old_pc);
					else
						fatal("0x%016llx",
						    (long long)old_pc);
					symbol = get_symbol_name(
					    &cpu->machine->symbol_context,
					    old_pc, &offset);
					fatal(" <%s> ]\n",
					    symbol? symbol : " no symbol ");
				}
			}

			if (writeflag == MEM_READ) {
#ifdef MEM_X86
				/*  Reading non-existant memory on x86:  */
				memset(data, 0xff, len);
#else
				/*  Return all zeroes? (Or 0xff? TODO)  */
				memset(data, 0, len);
#endif

#ifdef MEM_MIPS
				/*
				 *  For real data/instruction accesses, cause
				 *  an exceptions on an illegal read:
				 */
				if (cache != CACHE_NONE && cpu->machine->
				    dbe_on_nonexistant_memaccess &&
				    !no_exceptions) {
					if (paddr >= mem->physical_max &&
					    paddr < mem->physical_max+1048576)
						mips_cpu_exception(cpu,
						    EXCEPTION_DBE, 0, vaddr, 0,
						    0, 0, 0);
				}
#endif	/*  MEM_MIPS  */
			}

			/*  Hm? Shouldn't there be a DBE exception for
			    invalid writes as well? TODO  */

			goto do_return_ok;
		}
	}

#endif	/*  ifndef MEM_USERLAND  */


	/*
	 *  Uncached access:
	 *
	 *  1)  Translate the physical address to a host address.
	 *
	 *  2)  Insert this virtual->physical->host translation into the
	 *      fast translation arrays (using update_translation_table()).
	 *
	 *  3)  If this was a Write, then invalidate any code translations
	 *      in that page.
	 */
	memblock = memory_paddr_to_hostaddr(mem, paddr, writeflag);
	if (memblock == NULL) {
		/*  No host memory backing this page (and, for reads, none
		    was allocated): reads return zeroes, writes are dropped:  */
		if (writeflag == MEM_READ)
			memset(data, 0, len);
		goto do_return_ok;
	}

	offset = paddr & ((1 << BITS_PER_MEMBLOCK) - 1);

	/*  Insert a fast translation, unless the page is shared with a
	    device (dyntrans_device_danger), caches are isolated (R3000),
	    the translation didn't cover a full page, or this is a
	    no-exceptions peek:  */
	if (cpu->update_translation_table != NULL && !dyntrans_device_danger
#ifdef MEM_MIPS
	    /*  Ugly hack for R2000/R3000 caches:  */
	    && (cpu->cd.mips.cpu_type.mmu_model != MMU3K ||
	    !(cpu->cd.mips.coproc[0]->reg[COP0_STATUS] & MIPS1_ISOL_CACHES))
#endif
#ifndef MEM_MIPS
/*	    && !(misc_flags & MEMORY_USER_ACCESS)  */
#ifndef MEM_USERLAND
	    && !(ok & MEMORY_NOT_FULL_PAGE)
#endif
#endif
	    && !no_exceptions)
		cpu->update_translation_table(cpu, vaddr & ~offset_mask,
		    memblock + (offset & ~offset_mask),
		    (misc_flags & MEMORY_USER_ACCESS) |
#if !defined(MEM_MIPS) && !defined(MEM_USERLAND)
		    (cache == CACHE_INSTRUCTION?
		    (writeflag == MEM_WRITE? 1 : 0) : ok - 1),
#else
		    (writeflag == MEM_WRITE? 1 : 0),
#endif
		    paddr & ~offset_mask);

	/*  Invalidate code translations for the page we are writing to.  */
	if (writeflag == MEM_WRITE && cpu->invalidate_code_translation != NULL)
		cpu->invalidate_code_translation(cpu, paddr, INVALIDATE_PADDR);

	if (writeflag == MEM_WRITE) {
		/*  Ugly optimization, but it works: fast paths for aligned
		    32-bit and single-byte accesses, memcpy otherwise:  */
		if (len == sizeof(uint32_t) && (offset & 3)==0
		    && ((size_t)data&3)==0)
			*(uint32_t *)(memblock + offset) = *(uint32_t *)data;
		else if (len == sizeof(uint8_t))
			*(uint8_t *)(memblock + offset) = *(uint8_t *)data;
		else
			memcpy(memblock + offset, data, len);
	} else {
		/*  Ugly optimization, but it works:  */
		if (len == sizeof(uint32_t) && (offset & 3)==0
		    && ((size_t)data&3)==0)
			*(uint32_t *)data = *(uint32_t *)(memblock + offset);
		else if (len == sizeof(uint8_t))
			*(uint8_t *)data = *(uint8_t *)(memblock + offset);
		else
			memcpy(data, memblock + offset, len);
	}


do_return_ok:
	return MEMORY_ACCESS_OK;
}
533    

  ViewVC Help
Powered by ViewVC 1.1.26