diff --git a/cpu-all.h b/cpu-all.h index 387030e..10258c3 100644 --- a/cpu-all.h +++ b/cpu-all.h @@ -829,8 +829,9 @@ extern ram_addr_t ram_size; /* physical memory access */ #define TLB_INVALID_MASK (1 << 3) +#define TLB_IO_MASK (1 << 4) #define IO_MEM_SHIFT 4 -#define IO_MEM_NB_ENTRIES (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT)) +#define IO_MEM_NB_ENTRIES 1024 #define IO_MEM_RAM (0 << IO_MEM_SHIFT) /* hardcoded offset */ #define IO_MEM_ROM (1 << IO_MEM_SHIFT) /* hardcoded offset */ @@ -840,8 +841,8 @@ extern ram_addr_t ram_size; exception, the write memory callback gets the ram offset instead of the physical address */ #define IO_MEM_ROMD (1) -#define IO_MEM_SUBPAGE (2) -#define IO_MEM_SUBWIDTH (4) +#define IO_MEM_SUBPAGE (2) /* used internally */ +#define IO_MEM_SUBWIDTH (4) /* used internally */ typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value); typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr); diff --git a/cpu-defs.h b/cpu-defs.h index c4389ed..5f3def8 100644 --- a/cpu-defs.h +++ b/cpu-defs.h @@ -99,16 +99,16 @@ typedef uint64_t target_phys_addr_t; #define CPU_TLB_BITS 8 #define CPU_TLB_SIZE (1 << CPU_TLB_BITS) -#if TARGET_PHYS_ADDR_BITS == 32 && TARGET_LONG_BITS == 32 -#define CPU_TLB_ENTRY_BITS 4 -#else +#if TARGET_LONG_BITS == 32 #define CPU_TLB_ENTRY_BITS 5 +#else +#define CPU_TLB_ENTRY_BITS 6 #endif typedef struct CPUTLBEntry { /* bit 31 to TARGET_PAGE_BITS : virtual address - bit TARGET_PAGE_BITS-1..IO_MEM_SHIFT : if non zero, memory io - zone number + bit TARGET_PAGE_BITS-1..IO_MEM_SHIFT+1 : zero + bit IO_MEM_SHIFT : indicates an io region bit 3 : indicates that the entry is invalid bit 2..0 : zero */ @@ -122,11 +122,14 @@ typedef struct CPUTLBEntry { #else target_phys_addr_t addend; #endif + /* if non-zero, memory io zone number */ + int io_index; /* padding to get a power of two size */ uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) - (sizeof(target_ulong) * 3 + ((-sizeof(target_ulong) * 3) & 
(sizeof(target_phys_addr_t) - 1)) + - sizeof(target_phys_addr_t))]; + sizeof(target_phys_addr_t) + + sizeof(int))]; } CPUTLBEntry; #define CPU_TEMP_BUF_NLONGS 128 diff --git a/exec.c b/exec.c index 2fd0078..935c4ba 100644 --- a/exec.c +++ b/exec.c @@ -117,8 +117,10 @@ typedef struct PageDesc { } PageDesc; typedef struct PhysPageDesc { - /* offset in host memory of the page + io_index in the low 12 bits */ + /* offset in host memory of the page */ ram_addr_t phys_offset; + /* memory io region id */ + int io_index; } PhysPageDesc; #define L2_BITS 10 @@ -310,8 +312,10 @@ static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc) return NULL; pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE); *lp = pd; - for (i = 0; i < L2_SIZE; i++) - pd[i].phys_offset = IO_MEM_UNASSIGNED; + for (i = 0; i < L2_SIZE; i++) { + pd[i].phys_offset = 0; + pd[i].io_index = IO_MEM_UNASSIGNED >> IO_MEM_SHIFT; + } } return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1)); } @@ -1092,7 +1096,7 @@ static void breakpoint_invalidate(CPUState *env, target_ulong pc) addr = cpu_get_phys_page_debug(env, pc); p = phys_page_find(addr >> TARGET_PAGE_BITS); if (!p) { - pd = IO_MEM_UNASSIGNED; + pd = 0; } else { pd = p->phys_offset; } @@ -1508,10 +1512,11 @@ static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, unsigned long start, unsigned long length) { unsigned long addr; - if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) { + if (!(tlb_entry->addr_write & ~TARGET_PAGE_MASK)) { addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend; if ((addr - start) < length) { - tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY; + tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_IO_MASK; + tlb_entry->io_index = IO_MEM_NOTDIRTY >> IO_MEM_SHIFT; } } } @@ -1599,11 +1604,12 @@ static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry) { ram_addr_t ram_addr; - if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) 
== IO_MEM_RAM) { + if (!(tlb_entry->addr_write & ~TARGET_PAGE_MASK)) { ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend - (unsigned long)phys_ram_base; if (!cpu_physical_memory_is_dirty(ram_addr)) { - tlb_entry->addr_write |= IO_MEM_NOTDIRTY; + tlb_entry->addr_write |= TLB_IO_MASK; + tlb_entry->io_index = IO_MEM_NOTDIRTY >> IO_MEM_SHIFT; } } } @@ -1630,10 +1636,11 @@ static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, unsigned long start) { unsigned long addr; - if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) { + if (tlb_entry->io_index == IO_MEM_NOTDIRTY >> IO_MEM_SHIFT) { addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend; if (addr == start) { - tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM; + tlb_entry->addr_write = tlb_entry->addr_write & TARGET_PAGE_MASK; + tlb_entry->io_index = IO_MEM_RAM >> IO_MEM_SHIFT; } } } @@ -1673,12 +1680,15 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr, int ret; CPUTLBEntry *te; int i; + int io_index; p = phys_page_find(paddr >> TARGET_PAGE_BITS); if (!p) { - pd = IO_MEM_UNASSIGNED; + pd = 0; + io_index = IO_MEM_UNASSIGNED >> IO_MEM_SHIFT; } else { pd = p->phys_offset; + io_index = p->io_index; } #if defined(DEBUG_TLB) printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n", @@ -1690,9 +1700,9 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr, if (is_softmmu) #endif { - if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) { + if (io_index > (IO_MEM_ROM >> IO_MEM_SHIFT) && !(pd & IO_MEM_ROMD)) { /* IO memory case */ - address = vaddr | pd; + address = vaddr | TLB_IO_MASK; addend = paddr; } else { /* standard memory */ @@ -1704,16 +1714,16 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr, watchpoint trap routines. 
*/ for (i = 0; i < env->nb_watchpoints; i++) { if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) { - if (address & ~TARGET_PAGE_MASK) { + if (io_index) { env->watchpoint[i].addend = 0; - address = vaddr | io_mem_watch; } else { env->watchpoint[i].addend = pd - paddr + (unsigned long) phys_ram_base; /* TODO: Figure out how to make read watchpoints coexist with code. */ - pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD; + pd = (pd & TARGET_PAGE_MASK) | IO_MEM_ROMD; } + io_index = io_mem_watch; } } @@ -1721,6 +1731,7 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr, addend -= vaddr; te = &env->tlb_table[mmu_idx][index]; te->addend = addend; + te->io_index = io_index; if (prot & PAGE_READ) { te->addr_read = address; } else { @@ -1736,14 +1747,14 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr, te->addr_code = -1; } if (prot & PAGE_WRITE) { - if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || + if (io_index == (IO_MEM_ROM >> IO_MEM_SHIFT) || (pd & IO_MEM_ROMD)) { /* write access calls the I/O callback */ - te->addr_write = vaddr | - (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD)); - } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && + te->addr_write = vaddr | TLB_IO_MASK; + } else if (io_index == (IO_MEM_RAM >> IO_MEM_SHIFT) && !cpu_physical_memory_is_dirty(pd)) { - te->addr_write = vaddr | IO_MEM_NOTDIRTY; + te->addr_write = vaddr | TLB_IO_MASK; + te->io_index = IO_MEM_NOTDIRTY >> IO_MEM_SHIFT; } else { te->addr_write = address; } @@ -1753,7 +1764,7 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr, } #if !defined(CONFIG_SOFTMMU) else { - if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) { + if (io_index > (IO_MEM_ROM >> IO_MEM_SHIFT)) { /* IO access: no mapping is done as it will be handled by the soft MMU */ if (!(env->hflags & HF_SOFTMMU_MASK)) @@ -1765,11 +1776,11 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr, ret = 2; } else { if (prot & PROT_WRITE) { - if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || + if (io_index == 
(IO_MEM_ROM >> IO_MEM_SHIFT) || #if defined(TARGET_HAS_SMC) || 1 first_tb || #endif - ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM && + (io_index == (IO_MEM_RAM >> IO_MEM_SHIFT) && !cpu_physical_memory_is_dirty(pd))) { /* ROM: we do as if code was inside */ /* if code is present, we only map as read only and save the @@ -2017,9 +2028,9 @@ static inline void tlb_set_dirty(CPUState *env, #endif /* defined(CONFIG_USER_ONLY) */ static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, - ram_addr_t memory); -static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys, - ram_addr_t orig_memory); + int memory); +static void *subpage_init (target_phys_addr_t base, PhysPageDesc *p, + int orig_memory); #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \ need_subpage) \ do { \ @@ -2057,31 +2068,49 @@ void cpu_register_physical_memory(target_phys_addr_t start_addr, end_addr = start_addr + (target_phys_addr_t)size; for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) { p = phys_page_find(addr >> TARGET_PAGE_BITS); - if (p && p->phys_offset != IO_MEM_UNASSIGNED) { + if (p && p->io_index != (IO_MEM_UNASSIGNED >> IO_MEM_SHIFT)) { ram_addr_t orig_memory = p->phys_offset; target_phys_addr_t start_addr2, end_addr2; int need_subpage = 0; CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, need_subpage); - if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) { + if (need_subpage || (phys_offset & IO_MEM_SUBWIDTH)) { if (!(orig_memory & IO_MEM_SUBPAGE)) { subpage = subpage_init((addr & TARGET_PAGE_MASK), - &p->phys_offset, orig_memory); + p, p->io_index); } else { - subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK) - >> IO_MEM_SHIFT]; + subpage = io_mem_opaque[p->io_index]; } - subpage_register(subpage, start_addr2, end_addr2, phys_offset); + subpage_register(subpage, start_addr2, end_addr2, + phys_offset >> IO_MEM_SHIFT); } else { - p->phys_offset = phys_offset; + if ((phys_offset & IO_MEM_ROMD) || + 
(phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_UNASSIGNED) { + p->phys_offset = phys_offset & + (TARGET_PAGE_MASK | ((1 << IO_MEM_SHIFT) - 1)); + p->io_index = (phys_offset & ~TARGET_PAGE_MASK) >> + IO_MEM_SHIFT; + } else { + p->phys_offset = phys_offset & ((1 << IO_MEM_SHIFT) - 1); + p->io_index = phys_offset >> IO_MEM_SHIFT; + } if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM || (phys_offset & IO_MEM_ROMD)) phys_offset += TARGET_PAGE_SIZE; } } else { p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1); - p->phys_offset = phys_offset; + if ((phys_offset & IO_MEM_ROMD) || + (phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_UNASSIGNED) { + p->phys_offset = phys_offset & + (TARGET_PAGE_MASK | ((1 << IO_MEM_SHIFT) - 1)); + p->io_index = (phys_offset & ~TARGET_PAGE_MASK) >> + IO_MEM_SHIFT; + } else { + p->phys_offset = phys_offset & ((1 << IO_MEM_SHIFT) - 1); + p->io_index = phys_offset >> IO_MEM_SHIFT; + } if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM || (phys_offset & IO_MEM_ROMD)) phys_offset += TARGET_PAGE_SIZE; @@ -2093,10 +2122,10 @@ void cpu_register_physical_memory(target_phys_addr_t start_addr, end_addr2, need_subpage); if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) { - subpage = subpage_init((addr & TARGET_PAGE_MASK), - &p->phys_offset, IO_MEM_UNASSIGNED); + subpage = subpage_init((addr & TARGET_PAGE_MASK), p, + IO_MEM_UNASSIGNED >> IO_MEM_SHIFT); subpage_register(subpage, start_addr2, end_addr2, - phys_offset); + phys_offset >> IO_MEM_SHIFT); } } } @@ -2118,7 +2147,7 @@ ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr) p = phys_page_find(addr >> TARGET_PAGE_BITS); if (!p) return IO_MEM_UNASSIGNED; - return p->phys_offset; + return p->phys_offset | (p->io_index << IO_MEM_SHIFT); } /* XXX: better than nothing */ @@ -2440,7 +2469,7 @@ static CPUWriteMemoryFunc *subpage_write[] = { }; static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, - ram_addr_t memory) + int memory) { int idx, eidx; unsigned int i; @@ -2453,7 
+2482,6 @@ static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__, mmio, start, end, idx, eidx, memory); #endif - memory >>= IO_MEM_SHIFT; for (; idx <= eidx; idx++) { for (i = 0; i < 4; i++) { if (io_mem_read[memory][i]) { @@ -2470,8 +2498,8 @@ static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, return 0; } -static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys, - ram_addr_t orig_memory) +static void *subpage_init (target_phys_addr_t base, PhysPageDesc *p, + int orig_memory) { subpage_t *mmio; int subpage_memory; @@ -2484,7 +2512,8 @@ static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys, printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__, mmio, base, TARGET_PAGE_SIZE, subpage_memory); #endif - *phys = subpage_memory | IO_MEM_SUBPAGE; + p->phys_offset = IO_MEM_SUBPAGE; + p->io_index = subpage_memory >> IO_MEM_SHIFT; subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory); } @@ -2525,7 +2554,10 @@ int cpu_register_io_memory(int io_index, if (io_index <= 0) { if (io_mem_nb >= IO_MEM_NB_ENTRIES) return -1; - io_index = io_mem_nb++; + do + io_index = io_mem_nb++; + while (((io_index << IO_MEM_SHIFT) & ~TARGET_PAGE_MASK) <= + IO_MEM_UNASSIGNED); } else { if (io_index >= IO_MEM_NB_ENTRIES) return -1; @@ -2611,14 +2643,15 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, l = len; p = phys_page_find(page >> TARGET_PAGE_BITS); if (!p) { - pd = IO_MEM_UNASSIGNED; + pd = 0; + io_index = IO_MEM_UNASSIGNED >> IO_MEM_SHIFT; } else { pd = p->phys_offset; + io_index = p->io_index; } if (is_write) { - if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { - io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); + if (io_index != (IO_MEM_RAM >> IO_MEM_SHIFT)) { /* XXX: could force cpu_single_env to NULL to avoid potential bugs */ if (l >= 4 && ((addr & 3) == 0)) { @@ -2652,10 +2685,9 @@ void 
cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, } } } else { - if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && + if (io_index > (IO_MEM_ROM >> IO_MEM_SHIFT) && !(pd & IO_MEM_ROMD)) { /* I/O case */ - io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); if (l >= 4 && ((addr & 3) == 0)) { /* 32 bit read access */ val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); @@ -2689,7 +2721,7 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, void cpu_physical_memory_write_rom(target_phys_addr_t addr, const uint8_t *buf, int len) { - int l; + int l, io_index; uint8_t *ptr; target_phys_addr_t page; unsigned long pd; @@ -2702,13 +2734,15 @@ void cpu_physical_memory_write_rom(target_phys_addr_t addr, l = len; p = phys_page_find(page >> TARGET_PAGE_BITS); if (!p) { - pd = IO_MEM_UNASSIGNED; + pd = 0; + io_index = IO_MEM_UNASSIGNED >> IO_MEM_SHIFT; } else { pd = p->phys_offset; + io_index = p->io_index; } - if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM && - (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM && + if (io_index != (IO_MEM_RAM >> IO_MEM_SHIFT) && + io_index != (IO_MEM_ROM >> IO_MEM_SHIFT) && !(pd & IO_MEM_ROMD)) { /* do nothing */ } else { @@ -2736,15 +2770,16 @@ uint32_t ldl_phys(target_phys_addr_t addr) p = phys_page_find(addr >> TARGET_PAGE_BITS); if (!p) { - pd = IO_MEM_UNASSIGNED; + pd = 0; + io_index = IO_MEM_UNASSIGNED >> IO_MEM_SHIFT; } else { pd = p->phys_offset; + io_index = p->io_index; } - if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && + if (io_index > (IO_MEM_ROM >> IO_MEM_SHIFT) && !(pd & IO_MEM_ROMD)) { /* I/O case */ - io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); } else { /* RAM case */ @@ -2766,15 +2801,16 @@ uint64_t ldq_phys(target_phys_addr_t addr) p = phys_page_find(addr >> TARGET_PAGE_BITS); if (!p) { - pd = IO_MEM_UNASSIGNED; + pd = 0; + io_index = IO_MEM_UNASSIGNED >> IO_MEM_SHIFT; } else { pd = p->phys_offset; + io_index = p->io_index; } 
- if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && + if (io_index > (IO_MEM_ROM >> IO_MEM_SHIFT) && !(pd & IO_MEM_ROMD)) { /* I/O case */ - io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); #ifdef TARGET_WORDS_BIGENDIAN val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32; val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4); @@ -2819,13 +2855,14 @@ void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val) p = phys_page_find(addr >> TARGET_PAGE_BITS); if (!p) { - pd = IO_MEM_UNASSIGNED; + pd = 0; + io_index = IO_MEM_UNASSIGNED >> IO_MEM_SHIFT; } else { pd = p->phys_offset; + io_index = p->io_index; } - if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { - io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); + if (io_index != (IO_MEM_RAM >> IO_MEM_SHIFT)) { io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); } else { ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + @@ -2843,13 +2880,14 @@ void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val) p = phys_page_find(addr >> TARGET_PAGE_BITS); if (!p) { - pd = IO_MEM_UNASSIGNED; + pd = 0; + io_index = IO_MEM_UNASSIGNED >> IO_MEM_SHIFT; } else { pd = p->phys_offset; + io_index = p->io_index; } - if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { - io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); + if (io_index != (IO_MEM_RAM >> IO_MEM_SHIFT)) { #ifdef TARGET_WORDS_BIGENDIAN io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32); io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val); @@ -2874,13 +2912,14 @@ void stl_phys(target_phys_addr_t addr, uint32_t val) p = phys_page_find(addr >> TARGET_PAGE_BITS); if (!p) { - pd = IO_MEM_UNASSIGNED; + pd = 0; + io_index = IO_MEM_UNASSIGNED >> IO_MEM_SHIFT; } else { pd = p->phys_offset; + io_index = p->io_index; } - if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { - io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); + if (io_index != (IO_MEM_RAM >> IO_MEM_SHIFT)) { 
io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); } else { unsigned long addr1; diff --git a/softmmu_template.h b/softmmu_template.h index 0a4bc7e..b108d73 100644 --- a/softmmu_template.h +++ b/softmmu_template.h @@ -51,12 +51,10 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr, int mmu_idx, void *retaddr); static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr, - target_ulong tlb_addr) + int index) { DATA_TYPE res; - int index; - index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); #if SHIFT <= 2 res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr); #else @@ -95,7 +93,8 @@ DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr, /* IO access */ if ((addr & (DATA_SIZE - 1)) != 0) goto do_unaligned_access; - res = glue(io_read, SUFFIX)(physaddr, tlb_addr); + res = glue(io_read, SUFFIX)(physaddr, + env->tlb_table[mmu_idx][index].io_index); } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { /* slow unaligned access (it spans two pages or IO) */ do_unaligned_access: @@ -147,7 +146,8 @@ static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr, /* IO access */ if ((addr & (DATA_SIZE - 1)) != 0) goto do_unaligned_access; - res = glue(io_read, SUFFIX)(physaddr, tlb_addr); + res = glue(io_read, SUFFIX)(physaddr, + env->tlb_table[mmu_idx][index].io_index); } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { do_unaligned_access: /* slow unaligned access (it spans two pages) */ @@ -186,11 +186,9 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr, static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr, DATA_TYPE val, target_ulong tlb_addr, + int index, void *retaddr) { - int index; - - index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); env->mem_write_vaddr = tlb_addr; env->mem_write_pc = (unsigned long)retaddr; #if SHIFT <= 2 @@ -228,7 +226,9 @@ void REGPARM 
glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr, if ((addr & (DATA_SIZE - 1)) != 0) goto do_unaligned_access; retaddr = GETPC(); - glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr); + glue(io_write, SUFFIX)(physaddr, val, tlb_addr, + env->tlb_table[mmu_idx][index].io_index, + retaddr); } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { do_unaligned_access: retaddr = GETPC(); @@ -278,7 +278,9 @@ static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr, /* IO access */ if ((addr & (DATA_SIZE - 1)) != 0) goto do_unaligned_access; - glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr); + glue(io_write, SUFFIX)(physaddr, val, tlb_addr, + env->tlb_table[mmu_idx][index].io_index, + retaddr); } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { do_unaligned_access: /* XXX: not efficient, but simple */