if (code_gen_buffer_size > (512 * 1024 * 1024))
code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
- /* Keep the buffer no bigger than 16GB to branch between blocks */
+ /* Keep the buffer no bigger than 16MB to branch between blocks */
if (code_gen_buffer_size > 16 * 1024 * 1024)
code_gen_buffer_size = 16 * 1024 * 1024;
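[Editorial note: the 16MB figure comes from the ARM branch encoding: a B/BL instruction carries a 24-bit signed word offset, scaled by 4, for a reach of roughly +/-32MB, so a buffer capped at 16MB keeps every block comfortably within direct-branch range of every other. A standalone illustration of that arithmetic, not part of the patch:]

/* Illustration only: reach of an ARM B/BL instruction.
 * 24-bit signed immediate, scaled by 4 (word-aligned targets). */
#include <stdio.h>

int main(void)
{
    long reach = (1L << 23) * 4;                /* +/- 32MB */
    printf("B/BL reach: +/-%ldMB\n", reach >> 20);
    printf("16MB buffer within reach: %s\n",
           (16L << 20) <= reach ? "yes" : "no");
    return 0;
}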
#elif defined(__s390x__)
.addend = -1,
};
-/* NOTE: if flush_global is true, also flush global entries (not
- implemented yet) */
+/* NOTE:
+ * If flush_global is true (the usual case), flush all tlb entries.
+ * If flush_global is false, flush (at least) all tlb entries not
+ * marked global.
+ *
+ * Since QEMU doesn't currently implement a global/not-global flag
+ * for tlb entries, at the moment tlb_flush() will also flush all
+ * tlb entries in the flush_global == false case. This is OK because
+ * CPU architectures generally permit an implementation to drop
+ * entries from the TLB at any time, so flushing more entries than
+ * required is only an efficiency issue, not a correctness issue.
+ */
void tlb_flush(CPUState *env, int flush_global)
{
int i;
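[Editorial note: to make the contract in the new comment concrete, here is a minimal caller sketch. The function and flag names are hypothetical, not from the patch; the point is that because tlb_flush() is allowed to over-flush, both branches are correct today and only the flush_global == false path could ever get faster.]

/* Hypothetical arch hook that switches MMU context. */
static void arch_mmu_context_switch(CPUState *env, int globals_changed)
{
    if (globals_changed) {
        tlb_flush(env, 1);      /* must drop global entries too */
    } else {
        tlb_flush(env, 0);      /* global entries may survive; dropping
                                   them anyway costs speed, not
                                   correctness */
    }
}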
MemoryRegion *mr;
pd &= ~TARGET_PAGE_MASK;
- mr = io_mem_region[pd >> IO_MEM_SHIFT];
+ mr = io_mem_region[pd];
return mr->rom_device && mr->readable;
}
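[Editorial note: the recurring change in the hunks below is mechanical. Previously the low bits of phys_offset held the io table index pre-shifted by IO_MEM_SHIFT, and every consumer shifted it back down; with this patch the index is stored directly, so decoding reduces to a mask. A side-by-side illustration, assuming the IO_MEM_SHIFT / IO_MEM_NB_ENTRIES definitions from exec.c:]

static int io_index_old(ram_addr_t pd)      /* before this patch */
{
    return (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
}

static int io_index_new(ram_addr_t pd)      /* after this patch */
{
    return pd & (IO_MEM_NB_ENTRIES - 1);
}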
ram_addr_t orig_memory = p->phys_offset;
target_phys_addr_t start_addr2, end_addr2;
int need_subpage = 0;
- MemoryRegion *mr = io_mem_region[(orig_memory & ~TARGET_PAGE_MASK)
- >> IO_MEM_SHIFT];
+ MemoryRegion *mr = io_mem_region[orig_memory & ~TARGET_PAGE_MASK];
CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
need_subpage);
if ((memory & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
memory = io_mem_subpage_ram.ram_addr;
}
- memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ memory &= IO_MEM_NB_ENTRIES - 1;
for (; idx <= eidx; idx++) {
mmio->sub_io_index[idx] = memory;
mmio->region_offset[idx] = region_offset;
if (io_index == -1)
return io_index;
} else {
- io_index >>= IO_MEM_SHIFT;
if (io_index >= IO_MEM_NB_ENTRIES)
return -1;
}
io_mem_region[io_index] = mr;
- return (io_index << IO_MEM_SHIFT);
+ return io_index;
}
int cpu_register_io_memory(MemoryRegion *mr)
{
return cpu_register_io_memory_fixed(0, mr);
}
-void cpu_unregister_io_memory(int io_table_address)
+void cpu_unregister_io_memory(int io_index)
{
- int io_index = io_table_address >> IO_MEM_SHIFT;
-
io_mem_region[io_index] = NULL;
io_mem_used[io_index] = 0;
}
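[Editorial note: with the shift gone, the value returned by cpu_register_io_memory() is the bare table index and round-trips straight into cpu_unregister_io_memory(). A hedged usage sketch; the device code is hypothetical:]

static int mydev_io_index;

static void mydev_init(MemoryRegion *mr)
{
    mydev_io_index = cpu_register_io_memory(mr);
    /* -1 on failure, otherwise an index < IO_MEM_NB_ENTRIES */
}

static void mydev_exit(void)
{
    if (mydev_io_index != -1) {
        cpu_unregister_io_memory(mydev_io_index);
    }
}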
if (is_write) {
if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
target_phys_addr_t addr1;
- io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ io_index = pd & (IO_MEM_NB_ENTRIES - 1);
addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
/* XXX: could force cpu_single_env to NULL to avoid
potential bugs */
if (!is_ram_rom_romd(pd)) {
target_phys_addr_t addr1;
/* I/O case */
- io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ io_index = pd & (IO_MEM_NB_ENTRIES - 1);
addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
if (l >= 4 && ((addr1 & 3) == 0)) {
/* 32 bit read access */
if (!is_ram_rom_romd(pd)) {
/* I/O case */
- io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ io_index = pd & (IO_MEM_NB_ENTRIES - 1);
addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
val = io_mem_read(io_index, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
if (!is_ram_rom_romd(pd)) {
/* I/O case */
- io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ io_index = pd & (IO_MEM_NB_ENTRIES - 1);
addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
/* XXX This is broken when device endian != cpu endian.
if (!is_ram_rom_romd(pd)) {
/* I/O case */
- io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ io_index = pd & (IO_MEM_NB_ENTRIES - 1);
addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
val = io_mem_read(io_index, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
pd = p.phys_offset;
if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
- io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ io_index = pd & (IO_MEM_NB_ENTRIES - 1);
addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
io_mem_write(io_index, addr, val, 4);
} else {
pd = p.phys_offset;
if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
- io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ io_index = pd & (IO_MEM_NB_ENTRIES - 1);
addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
io_mem_write(io_index, addr, val >> 32, 4);
pd = p.phys_offset;
if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
- io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ io_index = pd & (IO_MEM_NB_ENTRIES - 1);
addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
pd = p.phys_offset;
if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
- io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ io_index = pd & (IO_MEM_NB_ENTRIES - 1);
addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
return qemu_ram_addr_from_host_nofail(p);
}
+/*
+ * A helper function for the _utterly broken_ virtio device model to find out if
+ * it's running on a big endian machine. Don't do this at home, kids!
+ */
+bool virtio_is_big_endian(void);
+bool virtio_is_big_endian(void)
+{
+#if defined(TARGET_WORDS_BIGENDIAN)
+ return true;
+#else
+ return false;
+#endif
+}
+
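[Editorial note: the helper exposes the compile-time TARGET_WORDS_BIGENDIAN setting as a runtime predicate, so virtio code built without target headers can still branch on it. A hedged caller sketch; the function name is hypothetical and bswap16() is assumed from QEMU's bswap.h:]

/* Hypothetical caller: convert a little-endian wire value to guest
 * byte order only when the target is big endian. */
static inline uint16_t wire_to_guest16(uint16_t v)
{
    return virtio_is_big_endian() ? bswap16(v) : v;
}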
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL