/*
 * Declarations for obsolete exec.c functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY. Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef MEMORY_INTERNAL_H
#define MEMORY_INTERNAL_H

#ifndef CONFIG_USER_ONLY
#include "hw/xen.h"

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};
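
/*
 * Note (added for illustration, not part of the original header): because
 * ptr is a 15-bit field, a PhysPageEntry can reference at most 1 << 15
 * entries in phys_sections or phys_map_nodes; the code that builds the map
 * is assumed to stay below that limit.
 */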

typedef struct AddressSpaceDispatch AddressSpaceDispatch;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    MemoryListener listener;
};

void address_space_init_dispatch(AddressSpace *as);
void address_space_destroy_dispatch(AddressSpace *as);
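
/*
 * Illustrative lifecycle (an assumption about the callers, not something
 * this header defines): the dispatch map would typically be built when an
 * address space is brought up and torn down symmetrically, e.g.
 *
 *     address_space_init_dispatch(as);     // register listener, build map
 *     ...
 *     address_space_destroy_dispatch(as);  // unregister listener, free map
 */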

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr);
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr);
void qemu_ram_free(ram_addr_t addr);
void qemu_ram_free_from_ptr(ram_addr_t addr);
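
/*
 * Illustrative pairing (assumption; `size', `host_ptr' and `mr' are made-up
 * names): a RAM block obtained from one of the allocators above is released
 * with the matching free, e.g.
 *
 *     ram_addr_t base   = qemu_ram_alloc(size, mr);
 *     ram_addr_t mapped = qemu_ram_alloc_from_ptr(size, host_ptr, mr);
 *     ...
 *     qemu_ram_free(base);
 *     qemu_ram_free_from_ptr(mapped);
 */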

struct MemoryRegion;
struct MemoryRegionSection;

void qemu_register_coalesced_mmio(hwaddr addr, ram_addr_t size);
void qemu_unregister_coalesced_mmio(hwaddr addr, ram_addr_t size);

#define VGA_DIRTY_FLAG       0x01
#define CODE_DIRTY_FLAG      0x02
#define MIGRATION_DIRTY_FLAG 0x08

static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
}

/* return 1 (dirty) only if all dirty flags are set for the page, else 0 */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    return cpu_physical_memory_get_dirty_flags(addr) == 0xff;
}

static inline int cpu_physical_memory_get_dirty(ram_addr_t start,
                                                ram_addr_t length,
                                                int dirty_flags)
{
    int ret = 0;
    ram_addr_t addr, end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        ret |= cpu_physical_memory_get_dirty_flags(addr) & dirty_flags;
    }
    return ret;
}

static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
                                                       int dirty_flags)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
}

static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    cpu_physical_memory_set_dirty_flags(addr, 0xff);
}
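
/*
 * Illustrative usage (assumption, not code from this header): a store into
 * guest RAM marks the page dirty for every client, and each client then
 * tests only the flag it owns, e.g.
 *
 *     cpu_physical_memory_set_dirty(addr);              // sets all flags
 *     if (cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
 *                                       VGA_DIRTY_FLAG)) {
 *         // redraw the affected display region
 *     }
 */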

static inline int cpu_physical_memory_clear_dirty_flags(ram_addr_t addr,
                                                         int dirty_flags)
{
    int mask = ~dirty_flags;

    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] &= mask;
}

static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                        ram_addr_t length,
                                                        int dirty_flags)
{
    ram_addr_t addr, end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        cpu_physical_memory_set_dirty_flags(addr, dirty_flags);
    }
    /* tell Xen about the whole updated range, not the post-loop cursor */
    xen_modified_memory(start, length);
}

static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
                                                         ram_addr_t length,
                                                         int dirty_flags)
{
    ram_addr_t addr, end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        cpu_physical_memory_clear_dirty_flags(addr, dirty_flags);
    }
}
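
/*
 * Illustrative range usage (assumption; `dma_addr' and `len' are made-up
 * names): after a DMA-style write the whole range is dirtied for every
 * client, and migration later clears only its own flag once the pages have
 * been sent, e.g.
 *
 *     cpu_physical_memory_set_dirty_range(dma_addr, len, 0xff);
 *     ...
 *     cpu_physical_memory_mask_dirty_range(dma_addr, len,
 *                                          MIGRATION_DIRTY_FLAG);
 */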

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);

extern const IORangeOps memory_region_iorange_ops;

#endif

#endif