/[dynamips]/trunk/memory.c
This is a repository of my old source code, which is no longer updated. Go to git.rot13.org for current projects!

Diff of /trunk/memory.c


upstream/dynamips-0.2.6-RC5/memory.c, revision 6 by dpavlin, Sat Oct 6 16:09:07 2007 UTC
upstream/dynamips-0.2.7-RC1/memory.c, revision 7 by dpavlin, Sat Oct 6 16:23:47 2007 UTC
# Line 1  Line 1
 /*
- * Cisco 7200 (Predator) simulation platform.
+ * Cisco router simulation platform.
  * Copyright (c) 2005,2006 Christophe Fillot (cf@utc.fr)
  */
 
# Line 14  Line 14
 #include <fcntl.h>
 #include <assert.h>
 
-#include "mips64.h"
+#include "cpu.h"
+#include "vm.h"
 #include "dynamips.h"
 #include "memory.h"
 #include "device.h"
-#include "cpu.h"
-#include "cp0.h"
-#include "vm.h"
 
 /* Record a memory access */
-void memlog_rec_access(cpu_mips_t *cpu,m_uint64_t vaddr,m_uint64_t data,
+void memlog_rec_access(cpu_gen_t *cpu,m_uint64_t vaddr,m_uint64_t data,
                        m_uint32_t op_size,m_uint32_t op_type)
 {
    memlog_access_t *acc;
 
    acc = &cpu->memlog_array[cpu->memlog_pos];
-   acc->pc      = cpu->pc;
+   acc->iaddr   = cpu_get_pc(cpu);
    acc->vaddr   = vaddr;
    acc->data    = data;
    acc->op_size = op_size;
# Line 40  Line 38   (in memlog_rec_access)
 }
 
 /* Show the latest memory accesses */
-void memlog_dump(cpu_mips_t *cpu)
+void memlog_dump(cpu_gen_t *cpu)
 {
    memlog_access_t *acc;
    char s_data[64];
# Line 51  Line 49   (in memlog_dump)
       pos &= (MEMLOG_COUNT-1);
       acc = &cpu->memlog_array[pos];
 
-      if (cpu->pc) {
+      if (cpu_get_pc(cpu)) {
          if (acc->data_valid)
             snprintf(s_data,sizeof(s_data),"0x%llx",acc->data);
          else
# Line 59  Line 57   (in memlog_dump)
 
          printf("CPU%u: pc=0x%8.8llx, vaddr=0x%8.8llx, "
                 "size=%u, type=%s, data=%s\n",
-                cpu->id,acc->pc,acc->vaddr,acc->op_size,
+                cpu->id,acc->iaddr,acc->vaddr,acc->op_size,
                 (acc->op_type == MTS_READ) ? "read " : "write",
                 s_data);
       }
# Line 67  Line 65   (in memlog_dump)
 }
 
 /* Update the data obtained by a read access */
-void memlog_update_read(cpu_mips_t *cpu,m_iptr_t raddr)
+void memlog_update_read(cpu_gen_t *cpu,m_iptr_t raddr)
 {
    memlog_access_t *acc;
 
# Line 94  Line 92   (in memlog_update_read)
    }
 }
 
 /* MTS access with special access mask */  
 void mts_access_special(cpu_mips_t *cpu,m_uint64_t vaddr,m_uint32_t mask,  
                         u_int op_code,u_int op_type,u_int op_size,  
                         m_uint64_t *data,u_int *exc)  
 {  
    switch(mask) {  
       case MTS_ACC_U:  
 #if DEBUG_MTS_ACC_U  
          if (op_type == MTS_READ)  
             cpu_log(cpu,"MTS","read  access to undefined address 0x%llx at "  
                     "pc=0x%llx (size=%u)\n",vaddr,cpu->pc,op_size);  
          else  
             cpu_log(cpu,"MTS","write access to undefined address 0x%llx at "  
                     "pc=0x%llx, value=0x%8.8llx (size=%u)\n",  
                     vaddr,cpu->pc,*data,op_size);  
 #endif  
          if (op_type == MTS_READ)  
             *data = 0;  
          break;  
   
       case MTS_ACC_T:  
          if (op_code != MIPS_MEMOP_LOOKUP) {  
 #if DEBUG_MTS_ACC_T  
             cpu_log(cpu,"MTS","TLB exception for address 0x%llx at pc=0x%llx "  
                     "(%s access, size=%u)\n",  
                     vaddr,cpu->pc,(op_type == MTS_READ) ?  
                     "read":"write",op_size);  
             mips64_dump_regs(cpu);  
 #if MEMLOG_ENABLE  
             memlog_dump(cpu);  
 #endif  
 #endif  
             cpu->cp0.reg[MIPS_CP0_BADVADDR] = vaddr;  
   
             if (op_type == MTS_READ)  
                mips64_trigger_exception(cpu,MIPS_CP0_CAUSE_TLB_LOAD,0);  
             else  
                mips64_trigger_exception(cpu,MIPS_CP0_CAUSE_TLB_SAVE,0);  
          }  
           
          *exc = 1;  
          break;  
   
       case MTS_ACC_AE:  
          if (op_code != MIPS_MEMOP_LOOKUP) {  
 #if DEBUG_MTS_ACC_AE  
             cpu_log(cpu,"MTS","AE exception for address 0x%llx at pc=0x%llx "  
                     "(%s access)\n",  
                     vaddr,cpu->pc,(op_type == MTS_READ) ? "read":"write");  
 #endif  
             cpu->cp0.reg[MIPS_CP0_BADVADDR] = vaddr;  
   
             if (op_type == MTS_READ)  
                mips64_trigger_exception(cpu,MIPS_CP0_CAUSE_ADDR_LOAD,0);  
             else  
                mips64_trigger_exception(cpu,MIPS_CP0_CAUSE_ADDR_SAVE,0);  
          }  
   
          *exc = 1;  
          break;  
    }  
 }  
   
 /* === MTS for 64-bit address space ======================================= */  
 #define MTS_ADDR_SIZE      64  
 #define MTS_PROTO(name)    mts64_##name  
 #define MTS_PROTO_UP(name) MTS64_##name  
   
 #include "mips_mts.c"  
   
 /* === MTS for 32-bit address space ======================================= */  
 #define MTS_ADDR_SIZE      32  
 #define MTS_PROTO(name)    mts32_##name  
 #define MTS_PROTO_UP(name) MTS32_##name  
   
 #include "mips_mts.c"  
   
 /* === Specific operations for MTS64 ====================================== */  
   
 /* MTS64 slow lookup */  
 static forced_inline  
 mts64_entry_t *mts64_slow_lookup(cpu_mips_t *cpu,m_uint64_t vaddr,  
                                  u_int op_code,u_int op_size,  
                                  u_int op_type,m_uint64_t *data,  
                                  u_int *exc)  
 {  
    m_uint32_t hash_bucket,zone,sub_zone,cca;  
    mts64_entry_t *entry,new_entry;  
    mts_map_t map;  
   
    map.tlb_index = -1;  
    hash_bucket = MTS64_HASH(vaddr);  
    entry = cpu->mts_cache[hash_bucket];  
    zone = vaddr >> 40;  
   
 #if DEBUG_MTS_STATS  
    cpu->mts_misses++;  
 #endif  
   
    switch(zone) {  
       case 0x000000:   /* xkuseg */  
       case 0x400000:   /* xksseg */  
       case 0xc00000:   /* xkseg */  
          /* trigger TLB exception if no matching entry found */  
          if (!cp0_tlb_lookup(cpu,vaddr,&map))  
             goto err_tlb;  
   
          if (!mts64_map(cpu,vaddr,&map,&new_entry))  
             goto err_undef;  
          break;  
   
       case 0xffffff:  
          sub_zone  = (vaddr >> 29) & 0x7FF;  
   
          switch(sub_zone) {  
             case 0x7fc:   /* ckseg0 */  
                map.vaddr  = sign_extend(MIPS_KSEG0_BASE,32);  
                map.paddr  = 0;  
                map.len    = MIPS_KSEG0_SIZE;  
                map.cached = TRUE;  
                if (!mts64_map(cpu,vaddr,&map,&new_entry))  
                   goto err_undef;  
                break;  
   
             case 0x7fd:   /* ckseg1 */  
                map.vaddr  = sign_extend(MIPS_KSEG1_BASE,32);  
                map.paddr  = 0;  
                map.len    = MIPS_KSEG1_SIZE;  
                map.cached = FALSE;  
                if (!mts64_map(cpu,vaddr,&map,&new_entry))  
                   goto err_undef;  
                break;  
   
             case 0x7fe:   /* cksseg */  
             case 0x7ff:   /* ckseg3 */  
                /* trigger TLB exception if no matching entry found */  
                if (!cp0_tlb_lookup(cpu,vaddr,&map))  
                   goto err_tlb;  
   
                if (!mts64_map(cpu,vaddr,&map,&new_entry))  
                   goto err_undef;  
                break;  
   
             default:  
                /* Invalid zone: generate Address Error (AE) exception */  
                goto err_address;  
          }  
          break;  
     
          /* xkphys */  
       case 0x800000:  
       case 0x880000:  
       case 0x900000:  
       case 0x980000:  
       case 0xa00000:  
       case 0xa80000:  
       case 0xb00000:  
       case 0xb80000:  
          cca = (vaddr >> MIPS64_XKPHYS_CCA_SHIFT) & 0x03;  
          map.cached = mips64_cca_cached(cca);  
          map.vaddr  = vaddr & MIPS64_XKPHYS_ZONE_MASK;  
          map.paddr  = 0;  
          map.len    = MIPS64_XKPHYS_PHYS_SIZE;  
          if (!mts64_map(cpu,vaddr,&map,&new_entry))  
             goto err_undef;  
          break;  
   
       default:  
          /* Invalid zone: generate Address Error (AE) exception */  
          goto err_address;  
    }  
   
    /* Get a new entry if necessary */  
    if (!entry) {  
       entry = mts64_alloc_entry(cpu);  
       entry->pself = entry->pprev = NULL;  
       entry->next = NULL;  
   
       /* Store the entry in hash table for future use */  
       cpu->mts_cache[hash_bucket] = entry;  
    } else {  
       /* Remove the entry from the reverse map list */  
       if (entry->pprev) {  
          if (entry->next)  
             entry->next->pprev = entry->pprev;  
   
          *(entry->pprev) = entry->next;  
       }  
    }  
   
    /* Add this entry to the reverse map list */  
    if (map.tlb_index != -1) {  
       entry->pself = (mts64_entry_t **)&cpu->mts_cache[hash_bucket];  
       entry->next  = cpu->mts_rmap[map.tlb_index];  
       entry->pprev = (mts64_entry_t **)&cpu->mts_rmap[map.tlb_index];  
       if (entry->next)  
          entry->next->pprev = &entry->next;  
       cpu->mts_rmap[map.tlb_index] = entry;  
    }  
   
    /* Fill the new entry or replace the previous */  
    entry->phys_page = new_entry.phys_page;  
    entry->start  = new_entry.start;  
    entry->mask   = new_entry.mask;  
    entry->action = new_entry.action;  
    return entry;  
   
  err_undef:  
    mts_access_special(cpu,vaddr,MTS_ACC_U,op_code,op_type,op_size,data,exc);  
    return NULL;  
  err_address:  
    mts_access_special(cpu,vaddr,MTS_ACC_AE,op_code,op_type,op_size,data,exc);  
    return NULL;  
  err_tlb:  
    mts_access_special(cpu,vaddr,MTS_ACC_T,op_code,op_type,op_size,data,exc);  
    return NULL;  
 }  
   
 /* MTS64 access */  
 static forced_inline void *mts64_access(cpu_mips_t *cpu,m_uint64_t vaddr,  
                                         u_int op_code,u_int op_size,  
                                         u_int op_type,m_uint64_t *data,  
                                         u_int *exc)  
 {  
    m_uint32_t hash_bucket;  
    mts64_entry_t *entry;  
    m_iptr_t haddr;  
    u_int dev_id;  
   
 #if MEMLOG_ENABLE  
    /* Record the memory access */  
    memlog_rec_access(cpu,vaddr,*data,op_size,op_type);  
 #endif  
   
    *exc = 0;  
    hash_bucket = MTS64_HASH(vaddr);  
    entry = cpu->mts_cache[hash_bucket];  
   
 #if DEBUG_MTS_STATS  
    cpu->mts_lookups++;  
 #endif  
   
    /* Slow lookup if nothing found in cache */  
    if (unlikely((!entry) ||  
        unlikely((vaddr & sign_extend(entry->mask,32)) != entry->start)))  
    {  
       entry = mts64_slow_lookup(cpu,vaddr,op_code,op_size,op_type,data,exc);  
       if (!entry) return NULL;  
    }  
   
    /* Device access */  
    if (unlikely(entry->action & MTS_DEV_MASK)) {  
       dev_id = (entry->action & MTS_DEVID_MASK) >> MTS_DEVID_SHIFT;  
       haddr = entry->action & MTS_DEVOFF_MASK;  
       haddr += vaddr - entry->start;  
   
 #if DEBUG_MTS_DEV  
       cpu_log(cpu,"MTS64",  
               "device access: vaddr=0x%llx, pc=0x%llx, dev_offset=0x%x\n",  
               vaddr,cpu->pc,haddr);  
 #endif  
       return(dev_access_fast(cpu,dev_id,haddr,op_size,op_type,data));  
    }  
   
    /* Raw memory access */  
    haddr = entry->action & MTS_ADDR_MASK;  
    haddr += vaddr - entry->start;  
 #if MEMLOG_ENABLE  
    memlog_update_read(cpu,haddr);  
 #endif  
    return((void *)haddr);  
 }  
   
 /* MTS64 virtual address to physical page translation */  
 static fastcall int mts64_translate(cpu_mips_t *cpu,m_uint64_t vaddr,  
                                     m_uint32_t *phys_page)  
 {    
    m_uint32_t hash_bucket,offset;  
    mts64_entry_t *entry;  
    m_uint64_t data = 0;  
    u_int exc = 0;  
     
    hash_bucket = MTS64_HASH(vaddr);  
    entry = cpu->mts_cache[hash_bucket];  
   
    /* Slow lookup if nothing found in cache */  
    if (unlikely((!entry) ||  
        unlikely((vaddr & sign_extend(entry->mask,32)) != entry->start)))  
    {  
       entry = mts64_slow_lookup(cpu,vaddr,MIPS_MEMOP_LOOKUP,4,MTS_READ,  
                                 &data,&exc);  
       if (!entry)  
          return(-1);  
    }  
   
    offset = vaddr - entry->start;  
    *phys_page = entry->phys_page + (offset >> MIPS_MIN_PAGE_SHIFT);  
    return(0);  
 }  
   
 /* === Specific operations for MTS32 ====================================== */  
   
 /* MTS32 slow lookup */  
 static forced_inline  
 mts32_entry_t *mts32_slow_lookup(cpu_mips_t *cpu,m_uint64_t vaddr,  
                                  u_int op_code,u_int op_size,  
                                  u_int op_type,m_uint64_t *data,  
                                  u_int *exc)  
 {  
    m_uint32_t hash_bucket,zone;  
    mts32_entry_t *entry,new_entry;  
    mts_map_t map;  
   
    map.tlb_index = -1;  
    hash_bucket = MTS32_HASH(vaddr);  
    entry = cpu->mts_cache[hash_bucket];  
    zone = (vaddr >> 29) & 0x7;  
   
 #if DEBUG_MTS_STATS  
    cpu->mts_misses++;  
 #endif  
   
    switch(zone) {  
       case 0x00 ... 0x03:   /* kuseg */  
          /* trigger TLB exception if no matching entry found */  
          if (!cp0_tlb_lookup(cpu,vaddr,&map))  
             goto err_tlb;  
   
          if (!mts32_map(cpu,vaddr,&map,&new_entry))  
             goto err_undef;  
          break;  
   
       case 0x04:   /* kseg0 */  
          map.vaddr  = sign_extend(MIPS_KSEG0_BASE,32);  
          map.paddr  = 0;  
          map.len    = MIPS_KSEG0_SIZE;  
          map.cached = TRUE;  
          if (!mts32_map(cpu,vaddr,&map,&new_entry))  
             goto err_undef;  
          break;  
   
       case 0x05:   /* kseg1 */  
          map.vaddr  = sign_extend(MIPS_KSEG1_BASE,32);  
          map.paddr  = 0;  
          map.len    = MIPS_KSEG1_SIZE;  
          map.cached = FALSE;  
          if (!mts32_map(cpu,vaddr,&map,&new_entry))  
             goto err_undef;  
          break;  
   
       case 0x06:   /* ksseg */  
       case 0x07:   /* kseg3 */  
          /* trigger TLB exception if no matching entry found */  
          if (!cp0_tlb_lookup(cpu,vaddr,&map))  
             goto err_tlb;  
   
          if (!mts32_map(cpu,vaddr,&map,&new_entry))  
             goto err_undef;  
          break;  
    }  
95    
96     /* Get a new entry if necessary */  /* === Operations on physical memory ====================================== */
    if (!entry) {  
       entry = mts32_alloc_entry(cpu);  
       entry->pself = entry->pprev = NULL;  
       entry->next = NULL;  
   
       /* Store the entry in hash table for future use */  
       cpu->mts_cache[hash_bucket] = entry;  
    } else {  
       /* Remove the entry from the reverse map list */  
       if (entry->pprev) {  
          if (entry->next)  
             entry->next->pprev = entry->pprev;  
   
          *(entry->pprev) = entry->next;  
       }  
    }  
   
    /* Add this entry to the reverse map list */  
    if (map.tlb_index != -1) {  
       entry->pself = (mts32_entry_t **)&cpu->mts_cache[hash_bucket];  
       entry->next  = cpu->mts_rmap[map.tlb_index];  
       entry->pprev = (mts32_entry_t **)&cpu->mts_rmap[map.tlb_index];  
       if (entry->next)  
          entry->next->pprev = &entry->next;  
       cpu->mts_rmap[map.tlb_index] = entry;  
    }  
97    
98     /* Fill the new entry or replace the previous */  /* Get host pointer for the physical address */
99     entry->phys_page = new_entry.phys_page;  static inline void *physmem_get_hptr(vm_instance_t *vm,m_uint64_t paddr,
100     entry->start  = new_entry.start;                                       u_int op_size,u_int op_type,
101     entry->mask   = new_entry.mask;                                       m_uint64_t *data)
102     entry->action = new_entry.action;  {
103     return entry;     struct vdevice *dev;
104       m_uint32_t offset;
105   err_undef:     void *ptr;
106     mts_access_special(cpu,vaddr,MTS_ACC_U,op_code,op_type,op_size,data,exc);     int cow;
    return NULL;  
  err_address:  
    mts_access_special(cpu,vaddr,MTS_ACC_AE,op_code,op_type,op_size,data,exc);  
    return NULL;  
  err_tlb:  
    mts_access_special(cpu,vaddr,MTS_ACC_T,op_code,op_type,op_size,data,exc);  
    return NULL;  
 }  
   
 /* MTS32 access */  
 static forced_inline void *mts32_access(cpu_mips_t *cpu,m_uint64_t vaddr,  
                                         u_int op_code,u_int op_size,  
                                         u_int op_type,m_uint64_t *data,  
                                         u_int *exc)  
 {  
    m_uint32_t hash_bucket;  
    mts32_entry_t *entry;  
    m_iptr_t haddr;  
    u_int dev_id;  
   
 #if MEMLOG_ENABLE  
    /* Record the memory access */  
    memlog_rec_access(cpu,vaddr,*data,op_size,op_type);  
 #endif  
   
    *exc = 0;  
    hash_bucket = MTS32_HASH(vaddr);  
    entry = cpu->mts_cache[hash_bucket];  
   
 #if DEBUG_MTS_STATS  
    cpu->mts_lookups++;  
 #endif  
   
    /* Slow lookup if nothing found in cache */  
    if (unlikely((!entry) || unlikely((vaddr & entry->mask) != entry->start))) {  
       entry = mts32_slow_lookup(cpu,vaddr,op_code,op_size,op_type,data,exc);  
       if (!entry) return NULL;  
    }  
107    
108     /* Device access */     if (!(dev = dev_lookup(vm,paddr,FALSE)))
109     if (unlikely(entry->action & MTS_DEV_MASK)) {        return NULL;
       dev_id = (entry->action & MTS_DEVID_MASK) >> MTS_DEVID_SHIFT;  
       haddr = entry->action & MTS_DEVOFF_MASK;  
       haddr += (m_uint32_t)vaddr - entry->start;  
   
 #if DEBUG_MTS_DEV  
       cpu_log(cpu,"MTS32",  
               "device access: vaddr=0x%llx, pc=0x%llx, dev_offset=0x%x\n",  
               vaddr,cpu->pc,haddr);  
 #endif  
       return(dev_access_fast(cpu,dev_id,haddr,op_size,op_type,data));  
    }  
110    
111     /* Raw memory access */     if (dev->flags & VDEVICE_FLAG_SPARSE) {
112     haddr = entry->action & MTS_ADDR_MASK;        ptr = (void *)dev_sparse_get_host_addr(vm,dev,paddr,op_type,&cow);
113     haddr += (m_uint32_t)vaddr - entry->start;        if (!ptr) return NULL;
 #if MEMLOG_ENABLE  
    memlog_update_read(cpu,haddr);  
 #endif  
    return((void *)haddr);  
 }  
   
 /* MTS32 virtual address to physical page translation */  
 static fastcall int mts32_translate(cpu_mips_t *cpu,m_uint64_t vaddr,  
                                     m_uint32_t *phys_page)  
 {    
    m_uint32_t hash_bucket,offset;  
    mts32_entry_t *entry;  
    m_uint64_t data = 0;  
    u_int exc = 0;  
     
    hash_bucket = MTS32_HASH(vaddr);  
    entry = cpu->mts_cache[hash_bucket];  
114    
115     /* Slow lookup if nothing found in cache */        return(ptr + (paddr & VM_PAGE_IMASK));
    if (unlikely((!entry) || unlikely((vaddr & entry->mask) != entry->start))) {  
       entry = mts32_slow_lookup(cpu,vaddr,MIPS_MEMOP_LOOKUP,4,MTS_READ,  
                                 &data,&exc);  
       if (!entry)  
          return(-1);  
116     }     }
117    
118     offset = vaddr - entry->start;     if ((dev->host_addr != 0) && !(dev->flags & VDEVICE_FLAG_NO_MTS_MMAP))
119     *phys_page = entry->phys_page + (offset >> MIPS_MIN_PAGE_SHIFT);        return((void *)dev->host_addr + (paddr - dev->phys_addr));
    return(0);  
 }  
120    
121  /* ======================================================================== */     if (op_size == 0)
122          return NULL;
123    
124  /* Shutdown MTS subsystem */     offset = paddr - dev->phys_addr;
125  void mts_shutdown(cpu_mips_t *cpu)     return(dev->handler(vm->boot_cpu,dev,offset,op_size,op_type,data));
 {  
    if (cpu->mts_shutdown != NULL)  
       cpu->mts_shutdown(cpu);  
 }  
   
 /* Set the address mode */  
 int mts_set_addr_mode(cpu_mips_t *cpu,u_int addr_mode)  
 {  
    if (cpu->addr_mode != addr_mode) {  
       mts_shutdown(cpu);  
         
       switch(addr_mode) {  
          case 32:  
             mts32_init(cpu);  
             mts32_init_memop_vectors(cpu);  
             break;  
          case 64:  
             mts64_init(cpu);  
             mts64_init_memop_vectors(cpu);  
             break;  
          default:  
             fprintf(stderr,  
                     "mts_set_addr_mode: internal error (addr_mode=%u)\n",  
                     addr_mode);  
             exit(EXIT_FAILURE);  
       }  
    }  
   
    return(0);  
126  }  }
127    
 /* === Operations on physical memory ====================================== */  
   
128  /* Copy a memory block from VM physical RAM to real host */  /* Copy a memory block from VM physical RAM to real host */
129  void physmem_copy_from_vm(vm_instance_t *vm,void *real_buffer,  void physmem_copy_from_vm(vm_instance_t *vm,void *real_buffer,
130                            m_uint64_t paddr,size_t len)                            m_uint64_t paddr,size_t len)
131  {  {
132     struct vdevice *vm_ram;     m_uint64_t dummy;
133       m_uint32_t r;
134     u_char *ptr;     u_char *ptr;
135    
136     if ((vm_ram = dev_lookup(vm,paddr,FALSE)) != NULL) {     while(len > 0) {
137        assert(vm_ram->host_addr != 0);        r = m_min(VM_PAGE_SIZE - (paddr & VM_PAGE_IMASK), len);
138        ptr = (u_char *)vm_ram->host_addr + (paddr - vm_ram->phys_addr);        ptr = physmem_get_hptr(vm,paddr,0,MTS_READ,&dummy);
139        memcpy(real_buffer,ptr,len);        
140          if (likely(ptr != NULL)) {
141             memcpy(real_buffer,ptr,r);
142          } else {
143             r = m_min(len,4);
144             switch(r) {
145                case 4:
146                   *(m_uint32_t *)real_buffer =
147                      htovm32(physmem_copy_u32_from_vm(vm,paddr));
148                   break;
149                case 2:
150                   *(m_uint16_t *)real_buffer =
151                      htovm16(physmem_copy_u16_from_vm(vm,paddr));
152                   break;
153                case 1:
154                   *(m_uint8_t *)real_buffer = physmem_copy_u8_from_vm(vm,paddr);
155                   break;
156             }
157          }
158    
159          real_buffer += r;
160          paddr += r;
161          len -= r;
162     }     }
163  }  }
164    
# Line 634  void physmem_copy_from_vm(vm_instance_t Line 166  void physmem_copy_from_vm(vm_instance_t
166  void physmem_copy_to_vm(vm_instance_t *vm,void *real_buffer,  void physmem_copy_to_vm(vm_instance_t *vm,void *real_buffer,
167                          m_uint64_t paddr,size_t len)                          m_uint64_t paddr,size_t len)
168  {  {
169     struct vdevice *vm_ram;     m_uint64_t dummy;
170       m_uint32_t r;
171     u_char *ptr;     u_char *ptr;
172    
173     if ((vm_ram = dev_lookup(vm,paddr,FALSE)) != NULL) {     while(len > 0) {
174        assert(vm_ram->host_addr != 0);        r = m_min(VM_PAGE_SIZE - (paddr & VM_PAGE_IMASK), len);
175        ptr = (u_char *)vm_ram->host_addr + (paddr - vm_ram->phys_addr);        ptr = physmem_get_hptr(vm,paddr,0,MTS_WRITE,&dummy);
176        memcpy(ptr,real_buffer,len);        
177          if (likely(ptr != NULL)) {
178             memcpy(ptr,real_buffer,r);
179          } else {
180             r = m_min(len,4);
181             switch(r) {
182                case 4:
183                   physmem_copy_u32_to_vm(vm,paddr,
184                                          htovm32(*(m_uint32_t *)real_buffer));
185                   break;
186                case 2:
187                   physmem_copy_u16_to_vm(vm,paddr,
188                                          htovm16(*(m_uint16_t *)real_buffer));
189                   break;
190                case 1:
191                   physmem_copy_u8_to_vm(vm,paddr,*(m_uint8_t *)real_buffer);
192                   break;
193             }
194          }
195    
196          real_buffer += r;
197          paddr += r;
198          len -= r;
199     }     }
200  }  }
201    
202  /* Copy a 32-bit word from the VM physical RAM to real host */  /* Copy a 32-bit word from the VM physical RAM to real host */
203  m_uint32_t physmem_copy_u32_from_vm(vm_instance_t *vm,m_uint64_t paddr)  m_uint32_t physmem_copy_u32_from_vm(vm_instance_t *vm,m_uint64_t paddr)
204  {  {
205     struct vdevice *dev;     m_uint64_t tmp = 0;
206     m_uint32_t offset;     m_uint32_t *ptr;
    m_uint64_t tmp;  
    void *ptr;  
207    
208     if (unlikely((dev = dev_lookup(vm,paddr,FALSE)) == NULL))     if ((ptr = physmem_get_hptr(vm,paddr,4,MTS_READ,&tmp)) != NULL)
209        return(0);        return(vmtoh32(*ptr));
   
    offset = paddr - dev->phys_addr;  
210    
211     if ((dev->host_addr != 0) && !(dev->flags & VDEVICE_FLAG_NO_MTS_MMAP))     return(tmp);
       ptr = (u_char *)dev->host_addr + offset;  
    else {  
       ptr = dev->handler(vm->boot_cpu,dev,offset,4,MTS_READ,&tmp);  
       if (!ptr) return(tmp);  
    }  
     
    return(vmtoh32(*(m_uint32_t *)ptr));  
212  }  }
213    
214  /* Copy a 32-bit word to the VM physical RAM from real host */  /* Copy a 32-bit word to the VM physical RAM from real host */
215  void physmem_copy_u32_to_vm(vm_instance_t *vm,m_uint64_t paddr,m_uint32_t val)  void physmem_copy_u32_to_vm(vm_instance_t *vm,m_uint64_t paddr,m_uint32_t val)
216  {  {
217     struct vdevice *dev;     m_uint64_t tmp = val;
218     m_uint32_t offset;     m_uint32_t *ptr;
    m_uint64_t tmp;  
    void *ptr;  
   
    if (unlikely((dev = dev_lookup(vm,paddr,FALSE)) == NULL))  
       return;  
219    
220     offset = paddr - dev->phys_addr;     if ((ptr = physmem_get_hptr(vm,paddr,4,MTS_WRITE,&tmp)) != NULL)  
221          *ptr = htovm32(val);
    if ((dev->host_addr != 0) && !(dev->flags & VDEVICE_FLAG_NO_MTS_MMAP))  
       ptr = (u_char *)dev->host_addr + offset;  
    else {  
       tmp = val;  
       ptr = dev->handler(vm->boot_cpu,dev,offset,4,MTS_WRITE,&tmp);  
       if (!ptr) return;  
    }  
     
    *(m_uint32_t *)ptr = htovm32(val);  
222  }  }
223    
224  /* Copy a 16-bit word from the VM physical RAM to real host */  /* Copy a 16-bit word from the VM physical RAM to real host */
225  m_uint16_t physmem_copy_u16_from_vm(vm_instance_t *vm,m_uint64_t paddr)  m_uint16_t physmem_copy_u16_from_vm(vm_instance_t *vm,m_uint64_t paddr)
226  {  {
227     struct vdevice *dev;     m_uint64_t tmp = 0;
228     m_uint32_t offset;     m_uint16_t *ptr;
    m_uint64_t tmp;  
    void *ptr;  
229    
230     if (unlikely((dev = dev_lookup(vm,paddr,FALSE)) == NULL))     if ((ptr = physmem_get_hptr(vm,paddr,2,MTS_READ,&tmp)) != NULL)
231        return(0);        return(vmtoh16(*ptr));
232    
233     offset = paddr - dev->phys_addr;     return(tmp);
   
    if ((dev->host_addr != 0) && !(dev->flags & VDEVICE_FLAG_NO_MTS_MMAP))  
       ptr = (u_char *)dev->host_addr + offset;  
    else {  
       ptr = dev->handler(vm->boot_cpu,dev,offset,2,MTS_READ,&tmp);  
       if (!ptr) return(tmp);  
    }  
     
    return(vmtoh16(*(m_uint16_t *)ptr));  
234  }  }
235    
236  /* Copy a 16-bit word to the VM physical RAM from real host */  /* Copy a 16-bit word to the VM physical RAM from real host */
237  void physmem_copy_u16_to_vm(vm_instance_t *vm,m_uint64_t paddr,m_uint16_t val)  void physmem_copy_u16_to_vm(vm_instance_t *vm,m_uint64_t paddr,m_uint16_t val)
238  {  {
239     struct vdevice *dev;     m_uint64_t tmp = val;
240     m_uint32_t offset;     m_uint16_t *ptr;
    m_uint64_t tmp;  
    void *ptr;  
241    
242     if (unlikely((dev = dev_lookup(vm,paddr,FALSE)) == NULL))     if ((ptr = physmem_get_hptr(vm,paddr,2,MTS_WRITE,&tmp)) != NULL)  
243        return;        *ptr = htovm16(val);
244    }
245    
246     offset = paddr - dev->phys_addr;  /* Copy a byte from the VM physical RAM to real host */
247    m_uint8_t physmem_copy_u8_from_vm(vm_instance_t *vm,m_uint64_t paddr)
248    {
249       m_uint64_t tmp = 0;
250       m_uint8_t *ptr;
251    
252     if ((dev->host_addr != 0) && !(dev->flags & VDEVICE_FLAG_NO_MTS_MMAP))     if ((ptr = physmem_get_hptr(vm,paddr,1,MTS_READ,&tmp)) != NULL)
253        ptr = (u_char *)dev->host_addr + offset;        return(*ptr);
254     else {  
255        tmp = val;     return(tmp);
256        ptr = dev->handler(vm->boot_cpu,dev,offset,2,MTS_WRITE,&tmp);  }
257        if (!ptr) return;  
258     }  /* Copy a 16-bit word to the VM physical RAM from real host */
259      void physmem_copy_u8_to_vm(vm_instance_t *vm,m_uint64_t paddr,m_uint8_t val)
260     *(m_uint16_t *)ptr = htovm16(val);  {
261       m_uint64_t tmp = val;
262       m_uint8_t *ptr;
263    
264       if ((ptr = physmem_get_hptr(vm,paddr,1,MTS_WRITE,&tmp)) != NULL)  
265          *ptr = val;
266  }  }
267    
268  /* DMA transfer operation */  /* DMA transfer operation */
269  void physmem_dma_transfer(vm_instance_t *vm,m_uint64_t src,m_uint64_t dst,  void physmem_dma_transfer(vm_instance_t *vm,m_uint64_t src,m_uint64_t dst,
270                            size_t len)                            size_t len)
271  {  {
272     struct vdevice *src_dev,*dst_dev;     m_uint64_t dummy;
273     u_char *sptr,*dptr;     u_char *sptr,*dptr;
274       size_t clen,sl,dl;
275    
276     src_dev = dev_lookup(vm,src,FALSE);     while(len > 0) {
277     dst_dev = dev_lookup(vm,dst,FALSE);        sptr = physmem_get_hptr(vm,src,0,MTS_READ,&dummy);
278          dptr = physmem_get_hptr(vm,dst,0,MTS_WRITE,&dummy);
279    
280          if (!sptr || !dptr) {
281             vm_log(vm,"DMA","unable to transfer from 0x%llx to 0x%llx\n",src,dst);
282             return;
283          }
284    
285     if ((src_dev != NULL) && (dst_dev != NULL)) {        sl = VM_PAGE_SIZE - (src & VM_PAGE_IMASK);
286        assert(src_dev->host_addr != 0);        dl = VM_PAGE_SIZE - (dst & VM_PAGE_IMASK);
287        assert(dst_dev->host_addr != 0);        clen = m_min(sl,dl);
288                clen = m_min(clen,len);
289        sptr = (u_char *)src_dev->host_addr + (src - src_dev->phys_addr);  
290        dptr = (u_char *)dst_dev->host_addr + (dst - dst_dev->phys_addr);        memcpy(dptr,sptr,clen);
291        memcpy(dptr,sptr,len);  
292     } else {        src += clen;
293        vm_log(vm,"DMA","unable to transfer from 0x%llx to 0x%llx (len=%lu)\n",        dst += clen;
294               src,dst,(u_long)len);        len -= clen;
295     }     }
296  }  }
297    
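
The rewritten right-hand side above routes every physmem_* helper through the new per-page physmem_get_hptr() lookup while keeping the public API unchanged. The following is a minimal usage sketch only, not code from the repository: the function, buffer, and guest physical address are hypothetical, and the declarations are assumed to come from vm.h and memory.h as included at the top of memory.c.

#include <string.h>

#include "vm.h"      /* vm_instance_t and the m_uintXX_t types (assumed) */
#include "memory.h"  /* physmem_* prototypes, as declared for this file */

/* Hypothetical example: stage a buffer in VM RAM, patch one word, read it back. */
static void example_physmem_usage(vm_instance_t *vm)
{
   u_char buf[64];
   m_uint64_t paddr = 0x1000;   /* made-up guest physical address */
   m_uint32_t word;

   memset(buf,0,sizeof(buf));
   physmem_copy_to_vm(vm,buf,paddr,sizeof(buf));      /* host buffer -> VM RAM */
   physmem_copy_u32_to_vm(vm,paddr + 8,0x12345678);   /* single 32-bit store */

   word = physmem_copy_u32_from_vm(vm,paddr + 8);     /* single 32-bit load */
   physmem_copy_from_vm(vm,buf,paddr,sizeof(buf));    /* VM RAM -> host buffer */

   /* Byte order is handled inside the helpers (htovm32/vmtoh32),
      so 'word' is in host byte order here. */
   (void)word;
}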

