/*
 * Copyright (C) 2011 Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "config.h"

#include <sys/resource.h>

#include "hw/xen_backend.h"
#include "blockdev.h"

#include <xen/hvm/params.h>
#include <sys/mman.h>

#include "xen-mapcache.h"
#include "trace.h"


//#define MAPCACHE_DEBUG

#ifdef MAPCACHE_DEBUG
# define DPRINTF(fmt, ...) do { \
    fprintf(stderr, "xen_mapcache: " fmt, ## __VA_ARGS__); \
} while (0)
#else
# define DPRINTF(fmt, ...) do { } while (0)
#endif

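/*
 * The cache is organised in buckets of guest-physical address space:
 * 64KiB buckets (1 << 16) on 32-bit hosts, 1MiB buckets (1 << 20) on
 * 64-bit hosts.
 */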
#if defined(__i386__)
# define MCACHE_BUCKET_SHIFT 16
#elif defined(__x86_64__)
# define MCACHE_BUCKET_SHIFT 20
#endif
#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)

#define BITS_PER_LONG (sizeof(long) * 8)
#define BITS_TO_LONGS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

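/*
 * One bucket of mapped guest memory.  valid_mapping holds one bit per
 * guest page in the bucket; a clear bit marks a page that could not be
 * mapped.  lock counts outstanding locked users of the mapping, and
 * next chains entries that hash to the same slot.
 */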
typedef struct MapCacheEntry {
    target_phys_addr_t paddr_index;
    uint8_t *vaddr_base;
    DECLARE_BITMAP(valid_mapping, MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT);
    uint8_t lock;
    struct MapCacheEntry *next;
} MapCacheEntry;

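/*
 * Reverse-mapping record kept for every locked mapping, so that a host
 * pointer handed out by qemu_map_cache() can later be translated back
 * to its guest bucket.
 */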
typedef struct MapCacheRev {
    uint8_t *vaddr_req;
    target_phys_addr_t paddr_index;
    QTAILQ_ENTRY(MapCacheRev) next;
} MapCacheRev;

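/*
 * The cache itself: a hash table of entries indexed by bucket number,
 * a list of reverse mappings for locked entries, and a one-element
 * cache of the last successful lookup.
 */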
typedef struct MapCache {
    MapCacheEntry *entry;
    unsigned long nr_buckets;
    QTAILQ_HEAD(map_cache_head, MapCacheRev) locked_entries;

    /* Most lookups (>99.9%) hit the same bucket as the previous one,
     * so remember the last successful lookup. */
    target_phys_addr_t last_address_index;
    uint8_t *last_address_vaddr;
    unsigned long max_mcache_size;
    unsigned int mcache_bucket_shift;
} MapCache;

static MapCache *mapcache;

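/* Local bitmap helper, mirroring the kernel's test_bit(). */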
static inline int test_bit(unsigned int bit, const unsigned long *map)
{
    return !!((map)[(bit) / BITS_PER_LONG] & (1UL << ((bit) % BITS_PER_LONG)));
}

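/*
 * Size the cache to cover the whole address-space limit: raise the
 * soft RLIMIT_AS to the hard maximum, then allocate one hash slot per
 * bucket of that range.
 */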
void qemu_map_cache_init(void)
{
    unsigned long size;
    struct rlimit rlimit_as;

    mapcache = qemu_mallocz(sizeof (MapCache));

    QTAILQ_INIT(&mapcache->locked_entries);
    mapcache->last_address_index = -1;

    getrlimit(RLIMIT_AS, &rlimit_as);
    rlimit_as.rlim_cur = rlimit_as.rlim_max;
    setrlimit(RLIMIT_AS, &rlimit_as);
    mapcache->max_mcache_size = rlimit_as.rlim_max;

    mapcache->nr_buckets =
        (((mapcache->max_mcache_size >> XC_PAGE_SHIFT) +
          (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
         (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));

    size = mapcache->nr_buckets * sizeof (MapCacheEntry);
    size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
    DPRINTF("qemu_map_cache_init, nr_buckets = %lx size %lu\n",
            mapcache->nr_buckets, size);
    mapcache->entry = qemu_mallocz(size);
}

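/*
 * (Re)populate one cache entry: drop any previous mapping, map the
 * bucket's guest frames with xc_map_foreign_bulk(), and record the
 * per-page mapping results in the valid_mapping bitmap.
 */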
static void qemu_remap_bucket(MapCacheEntry *entry,
                              target_phys_addr_t size,
                              target_phys_addr_t address_index)
{
    uint8_t *vaddr_base;
    xen_pfn_t *pfns;
    int *err;
    unsigned int i, j;
    target_phys_addr_t nb_pfn = size >> XC_PAGE_SHIFT;

    trace_qemu_remap_bucket(address_index);

    pfns = qemu_mallocz(nb_pfn * sizeof (xen_pfn_t));
    err = qemu_mallocz(nb_pfn * sizeof (int));

    if (entry->vaddr_base != NULL) {
        if (munmap(entry->vaddr_base, size) != 0) {
            perror("unmap fails");
            exit(-1);
        }
    }

    for (i = 0; i < nb_pfn; i++) {
        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) + i;
    }

    vaddr_base = xc_map_foreign_bulk(xen_xc, xen_domid, PROT_READ|PROT_WRITE,
                                     pfns, err, nb_pfn);
    if (vaddr_base == NULL) {
        perror("xc_map_foreign_bulk");
        exit(-1);
    }

    entry->vaddr_base = vaddr_base;
    entry->paddr_index = address_index;

    /* Fold the per-page err[] array into the valid_mapping bitmap, one
     * bit per page, set when the page mapped successfully. */
    for (i = 0; i < nb_pfn; i += BITS_PER_LONG) {
        unsigned long word = 0;
        if ((i + BITS_PER_LONG) > nb_pfn) {
            j = nb_pfn % BITS_PER_LONG;
        } else {
            j = BITS_PER_LONG;
        }
        while (j > 0) {
            word = (word << 1) | !err[i + --j];
        }
        entry->valid_mapping[i / BITS_PER_LONG] = word;
    }

    qemu_free(pfns);
    qemu_free(err);
}

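/*
 * Map a guest physical address and return a host pointer to it.  Fast
 * path: the bucket of the previous lookup.  Otherwise walk the
 * collision chain for a usable entry, remapping an unlocked entry that
 * holds a different bucket, or appending a new entry at the end of the
 * chain.  A non-zero lock pins the mapping until
 * qemu_invalidate_entry() releases it.
 */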
uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size,
                        uint8_t lock)
{
    MapCacheEntry *entry, *pentry = NULL;
    target_phys_addr_t address_index = phys_addr >> MCACHE_BUCKET_SHIFT;
    target_phys_addr_t address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);

    trace_qemu_map_cache(phys_addr);

    if (address_index == mapcache->last_address_index && !lock) {
        trace_qemu_map_cache_return(mapcache->last_address_vaddr + address_offset);
        return mapcache->last_address_vaddr + address_offset;
    }

    entry = &mapcache->entry[address_index % mapcache->nr_buckets];

    /* Walk the chain past locked entries that hold a different bucket. */
    while (entry && entry->lock && entry->paddr_index != address_index &&
           entry->vaddr_base) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        /* End of chain: append a fresh entry and map the bucket into it. */
        entry = qemu_mallocz(sizeof (MapCacheEntry));
        pentry->next = entry;
        qemu_remap_bucket(entry, size ? : MCACHE_BUCKET_SIZE, address_index);
    } else if (!entry->lock) {
        /* Unlocked entry: remap it unless it already maps this bucket
         * and the requested page mapped successfully. */
        if (!entry->vaddr_base || entry->paddr_index != address_index ||
            !test_bit(address_offset >> XC_PAGE_SHIFT, entry->valid_mapping)) {
            qemu_remap_bucket(entry, size ? : MCACHE_BUCKET_SIZE, address_index);
        }
    }

    if (!test_bit(address_offset >> XC_PAGE_SHIFT, entry->valid_mapping)) {
        mapcache->last_address_index = -1;
        trace_qemu_map_cache_return(NULL);
        return NULL;
    }

    mapcache->last_address_index = address_index;
    mapcache->last_address_vaddr = entry->vaddr_base;
    if (lock) {
        MapCacheRev *reventry = qemu_mallocz(sizeof(MapCacheRev));
        entry->lock++;
        reventry->vaddr_req = mapcache->last_address_vaddr + address_offset;
        reventry->paddr_index = mapcache->last_address_index;
        QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next);
    }

    trace_qemu_map_cache_return(mapcache->last_address_vaddr + address_offset);
    return mapcache->last_address_vaddr + address_offset;
}

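/*
 * Translate a host pointer previously returned by a locked
 * qemu_map_cache() call back to the guest address of its bucket.  Only
 * locked mappings are tracked, so an unknown pointer is fatal.
 */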
ram_addr_t qemu_ram_addr_from_mapcache(void *ptr)
{
    MapCacheRev *reventry;
    target_phys_addr_t paddr_index;
    int found = 0;

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == ptr) {
            paddr_index = reventry->paddr_index;
            found = 1;
            break;
        }
    }
    if (!found) {
        fprintf(stderr, "qemu_ram_addr_from_mapcache, could not find %p\n", ptr);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF(" "TARGET_FMT_plx" -> %p is present\n",
                    reventry->paddr_index, reventry->vaddr_req);
        }
        abort();
        return 0;
    }

    return paddr_index << MCACHE_BUCKET_SHIFT;
}

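/*
 * Release one locked mapping.  Drops the reverse-mapping record,
 * decrements the entry's lock count and, once the entry is unlocked
 * and is not the chain head, unmaps and frees it.
 */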
void qemu_invalidate_entry(uint8_t *buffer)
{
    MapCacheEntry *entry = NULL, *pentry = NULL;
    MapCacheRev *reventry;
    target_phys_addr_t paddr_index;
    int found = 0;

    if (mapcache->last_address_vaddr == buffer) {
        mapcache->last_address_index = -1;
    }

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == buffer) {
            paddr_index = reventry->paddr_index;
            found = 1;
            break;
        }
    }
    if (!found) {
        DPRINTF("qemu_invalidate_entry, could not find %p\n", buffer);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF(" "TARGET_FMT_plx" -> %p is present\n",
                    reventry->paddr_index, reventry->vaddr_req);
        }
        return;
    }
    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
    qemu_free(reventry);

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && entry->paddr_index != paddr_index) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to unmap address %p that is not in the mapcache!\n",
                buffer);
        return;
    }
    entry->lock--;
    /* Keep the entry while it is still locked or is the chain head;
     * only chained entries are unmapped and freed here. */
    if (entry->lock > 0 || pentry == NULL) {
        return;
    }

    pentry->next = entry->next;
    if (munmap(entry->vaddr_base, MCACHE_BUCKET_SIZE) != 0) {
        perror("unmap fails");
        exit(-1);
    }
    qemu_free(entry);
}

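/*
 * Tear down the first entry of every hash slot.  Pending AIO is
 * flushed first so no I/O is left targeting a mapping that is about to
 * disappear; any mapping still locked at this point is reported.
 */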
void qemu_invalidate_map_cache(void)
{
    unsigned long i;
    MapCacheRev *reventry;

    /* Flush pending AIO before destroying the mapcache */
    qemu_aio_flush();

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        DPRINTF("There should be no locked mappings at this time, "
                "but "TARGET_FMT_plx" -> %p is present\n",
                reventry->paddr_index, reventry->vaddr_req);
    }

    mapcache_lock();

    for (i = 0; i < mapcache->nr_buckets; i++) {
        MapCacheEntry *entry = &mapcache->entry[i];

        if (entry->vaddr_base == NULL) {
            continue;
        }

        if (munmap(entry->vaddr_base, MCACHE_BUCKET_SIZE) != 0) {
            perror("unmap fails");
            exit(-1);
        }

        entry->paddr_index = 0;
        entry->vaddr_base = NULL;
    }

    mapcache->last_address_index = -1;
    mapcache->last_address_vaddr = NULL;

    mapcache_unlock();
}

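/*
 * Map an arbitrary guest physical range outside the cache.  The caller
 * owns the returned mapping and is responsible for unmapping it.
 */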
uint8_t *xen_map_block(target_phys_addr_t phys_addr, target_phys_addr_t size)
{
    uint8_t *vaddr_base;
    xen_pfn_t *pfns;
    int *err;
    unsigned int i;
    target_phys_addr_t nb_pfn = size >> XC_PAGE_SHIFT;

    trace_xen_map_block(phys_addr, size);
    phys_addr >>= XC_PAGE_SHIFT;

    pfns = qemu_mallocz(nb_pfn * sizeof (xen_pfn_t));
    err = qemu_mallocz(nb_pfn * sizeof (int));

    for (i = 0; i < nb_pfn; i++) {
        pfns[i] = phys_addr + i;
    }

    vaddr_base = xc_map_foreign_bulk(xen_xc, xen_domid, PROT_READ|PROT_WRITE,
                                     pfns, err, nb_pfn);
    if (vaddr_base == NULL) {
        perror("xc_map_foreign_bulk");
        exit(-1);
    }

    qemu_free(pfns);
    qemu_free(err);

    return vaddr_base;
}