/*
 *  Software MMU support
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
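
/* The SUFFIX/USUFFIX tokens are pasted onto the generic helper names with
   glue(), so each inclusion of this template emits one set of helpers per
   access size.  For example, with SHIFT == 2 the public load helper below is
   glue(glue(__ld, l), MMUSUFFIX); assuming the including file defines
   MMUSUFFIX as _mmu, that yields __ldl_mmu. */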

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif
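
/* READ_ACCESS_TYPE is the access-type code handed to tlb_fill() and
   do_unaligned_access(): 0 for data loads, 2 for code fetches; the store
   helpers below pass 1.  Code fetches also look up the addr_code TLB field
   instead of addr_read. */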

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int is_user,
                                                        void *retaddr);
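
/* Read DATA_SIZE bytes from an I/O page: the low bits of tlb_addr select the
   io_mem_read handler.  The handler table only provides 1, 2 and 4 byte
   callbacks, so a 64-bit access (SHIFT > 2) is split into two 32-bit reads
   that are recombined according to the target byte order. */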
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                              target_ulong tlb_addr)
{
    DATA_TYPE res;
    int index;

    index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
    res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
#else
    res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
    res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}
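
/* TLB hit test used by all helpers below: the virtual page number must match
   the cached entry, and folding TLB_INVALID_MASK into the comparison mask
   makes an invalidated entry fail the same compare.  Any other low bit set in
   tlb_addr marks the page as I/O rather than RAM. */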

/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                         int is_user)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t physaddr;
    void *retaddr;

    /* test if there is a match for unaligned or IO access */
    /* XXX: more could be done in the memory access macro in a non portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[is_user][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_table[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, is_user, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         is_user, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, READ_ACCESS_TYPE, is_user, retaddr);
            }
#endif
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)physaddr);
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, is_user, retaddr);
#endif
        tlb_fill(addr, READ_ACCESS_TYPE, is_user, retaddr);
        goto redo;
    }
    return res;
}
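
/* Worked example of the recombination done by the slow path below, assuming a
   little-endian target, DATA_SIZE == 4 and a load two bytes before a page
   boundary: res1 holds the 4 aligned bytes ending at the boundary, res2 the
   4 bytes that follow, and shift == 16, so
   res = (res1 >> 16) | (res2 << 16): the low half of the result comes from
   the last two bytes of the first page and the high half from the first two
   bytes of the next page. */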

/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int is_user,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t physaddr;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[is_user][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_table[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          is_user, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          is_user, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)physaddr);
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, READ_ACCESS_TYPE, is_user, retaddr);
        goto redo;
    }
    return res;
}
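
/* Store helpers are only emitted for data-access instantiations; the
   SOFTMMU_CODE_ACCESS variant only needs the code-fetch load path above. */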
#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int is_user,
                                                   void *retaddr);

static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong tlb_addr,
                                          void *retaddr)
{
    int index;

    index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    env->mem_write_vaddr = tlb_addr;
    env->mem_write_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
#else
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
}
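
/* The GETPC() calls below capture the host return address inside the
   generated code; together with mem_write_vaddr/mem_write_pc recorded in
   io_write above, this presumably lets tlb_fill() and the I/O write handlers
   restore precise guest state for the faulting store.  GETPC() itself is
   expected to be defined by the file that includes this template. */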
void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                    DATA_TYPE val,
                                                    int is_user)
{
    target_phys_addr_t physaddr;
    target_ulong tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[is_user][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_table[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, 1, is_user, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   is_user, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, 1, is_user, retaddr);
            }
#endif
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)physaddr, val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, 1, is_user, retaddr);
#endif
        tlb_fill(addr, 1, is_user, retaddr);
        goto redo;
    }
}
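
/* The slow store path below handles a store that spans two pages by issuing
   DATA_SIZE single-byte stores through slow_stb: on a big-endian target the
   most significant byte goes to the lowest address, on a little-endian target
   the least significant byte does. */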

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int is_user,
                                                   void *retaddr)
{
    target_phys_addr_t physaddr;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[is_user][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_table[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            for(i = 0; i < DATA_SIZE; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          is_user, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          is_user, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)physaddr, val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, 1, is_user, retaddr);
        goto redo;
    }
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ
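
/*
 * Illustrative sketch of typical usage, not part of the template itself: the
 * including source file is expected to define MMUSUFFIX, GETPC() and the CPU
 * env, then include this header once per access size.  Assuming the header is
 * named softmmu_template.h, that looks roughly like:
 *
 *   #define SHIFT 0
 *   #include "softmmu_template.h"
 *   #define SHIFT 1
 *   #include "softmmu_template.h"
 *   #define SHIFT 2
 *   #include "softmmu_template.h"
 *   #define SHIFT 3
 *   #include "softmmu_template.h"
 */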