/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <[email protected]>
 * \author Gareth Hughes <[email protected]>
 */

/*
 * Created: Mon Jan 4 08:58:31 1999 by [email protected]
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#endif

/**
 * \c nopage method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
                                                unsigned long address)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t *map = NULL;
        drm_map_list_t *r_list;
        struct list_head *list;

        /*
         * Find the right map
         */
        if (!drm_core_has_AGP(dev))
                goto vm_nopage_error;

        if (!dev->agp || !dev->agp->cant_use_aperture)
                goto vm_nopage_error;

        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);
                map = r_list->map;
                if (!map)
                        continue;
                if (map->offset == VM_OFFSET(vma))
                        break;
        }

        if (map && map->type == _DRM_AGP) {
                unsigned long offset = address - vma->vm_start;
                unsigned long baddr = VM_OFFSET(vma) + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#ifdef __alpha__
                /*
                 * Adjust to a bus-relative address
                 */
                baddr -= dev->hose->mem_space->start;
#endif

                /*
                 * It's AGP memory - find the real physical page to map
                 */
                for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                if (!agpmem)
                        goto vm_nopage_error;

                /*
                 * Get the page, inc the use count, and return it
                 */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
                page = virt_to_page(__va(agpmem->memory->memory[offset]));
                get_page(page);

                DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
                          baddr, __va(agpmem->memory->memory[offset]), offset,
                          page_count(page));

                return page;
        }
vm_nopage_error:
        return NOPAGE_SIGBUS;   /* Disallow mremap */
}
#else                           /* __OS_HAS_AGP */
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
                                                unsigned long address)
{
        return NOPAGE_SIGBUS;
}
#endif                          /* __OS_HAS_AGP */
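
/*
 * Worked example of the translation above, with hypothetical values: a fault
 * at vma->vm_start + 0x3000 in a VMA whose VM_OFFSET() is 0xe0000000 gives
 * baddr = 0xe0003000.  If an agpmem entry is bound at 0xe0000000, then
 * offset = (0xe0003000 - 0xe0000000) >> PAGE_SHIFT = 3 (with 4 KiB pages),
 * and agpmem->memory->memory[3] holds the physical address of the page that
 * actually backs the faulting virtual address.
 */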

/**
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
                                                    unsigned long address)
{
        drm_map_t *map = (drm_map_t *)vma->vm_private_data;
        unsigned long offset;
        unsigned long i;
        struct page *page;

        if (address > vma->vm_end)
                return NOPAGE_SIGBUS;   /* Disallow mremap */
        if (!map)
                return NOPAGE_OOM;      /* Nothing allocated */

        offset = address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
        page = vmalloc_to_page((void *)i);
        if (!page)
                return NOPAGE_OOM;
        get_page(page);

        DRM_DEBUG("shm_nopage 0x%lx\n", address);
        return page;
}
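
/*
 * Note: _DRM_SHM maps are vmalloc()ed (see the vfree() of map->handle in
 * drm_vm_shm_close() below), so the page backing a fault is recovered with
 * vmalloc_to_page() rather than virt_to_page().
 */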

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last person to close a mapping and
 * it's not in the global maplist.
 */
void drm_vm_shm_close(struct vm_area_struct *vma)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_vma_entry_t *pt, *prev, *next;
        drm_map_t *map;
        drm_map_list_t *r_list;
        struct list_head *list;
        int found_maps = 0;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        map = vma->vm_private_data;

        down(&dev->struct_sem);
        for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
                next = pt->next;
                if (pt->vma->vm_private_data == map)
                        found_maps++;
                if (pt->vma == vma) {
                        if (prev) {
                                prev->next = pt->next;
                        } else {
                                dev->vmalist = pt->next;
                        }
                        drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
                } else {
                        prev = pt;
                }
        }
        /* We were the only map that was found */
        if (found_maps == 1 && (map->flags & _DRM_REMOVABLE)) {
                /* Check to see if we are in the maplist; if we are not,
                 * then we delete this mapping's information.
                 */
                found_maps = 0;
                list_for_each(list, &dev->maplist->head) {
                        r_list = list_entry(list, drm_map_list_t, head);
                        if (r_list->map == map)
                                found_maps++;
                }

                if (!found_maps) {
                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
                                if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
                                drm_ioremapfree(map->handle, map->size, dev);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;
                        case _DRM_AGP:
                        case _DRM_SCATTER_GATHER:
                                break;
                        }
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                }
        }
        up(&dev->struct_sem);
}

/**
 * \c nopage method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from
 * drm_device_dma::pagelist.
 */
static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
                                                    unsigned long address)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        unsigned long offset;
        unsigned long page_nr;
        struct page *page;

        if (!dma)
                return NOPAGE_SIGBUS;   /* Error */
        if (address > vma->vm_end)
                return NOPAGE_SIGBUS;   /* Disallow mremap */
        if (!dma->pagelist)
                return NOPAGE_OOM;      /* Nothing allocated */

        offset = address - vma->vm_start;       /* vm_[pg]off[set] should be 0 */
        page_nr = offset >> PAGE_SHIFT;
        page = virt_to_page((dma->pagelist[page_nr] +
                             (offset & (~PAGE_MASK))));

        get_page(page);

        DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
        return page;
}
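
/*
 * Worked example of the calculation above, with hypothetical values: with
 * 4 KiB pages, a fault at vma->vm_start + 0x3204 gives offset = 0x3204 and
 * page_nr = 3, so the fault is served from the page holding kernel virtual
 * address dma->pagelist[3]; the in-page remainder 0x204 added before
 * virt_to_page() is masked off by the page-granular conversion anyway.
 */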

/**
 * \c nopage method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from
 * drm_sg_mem::pagelist.
 */
static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
                                                   unsigned long address)
{
        drm_map_t *map = (drm_map_t *)vma->vm_private_data;
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_sg_mem_t *entry = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
        unsigned long page_offset;
        struct page *page;

        if (!entry)
                return NOPAGE_SIGBUS;   /* Error */
        if (address > vma->vm_end)
                return NOPAGE_SIGBUS;   /* Disallow mremap */
        if (!entry->pagelist)
                return NOPAGE_OOM;      /* Nothing allocated */

        offset = address - vma->vm_start;
        map_offset = map->offset - dev->sg->handle;
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
        get_page(page);

        return page;
}
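
/*
 * Note: page_offset combines two indices: the faulting page's offset within
 * the VMA plus the map's starting page offset within the scatter-gather
 * region, since the map need not begin at the start of that region.
 */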

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
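/*
 * The nopage prototype changed early in 2.6: newer kernels pass an int *type
 * out-parameter used for fault accounting (VM_FAULT_MINOR), older kernels a
 * plain int.  These thin wrappers adapt the shared drm_do_vm_*_nopage()
 * helpers to whichever prototype is in effect.
 */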

static struct page *drm_vm_nopage(struct vm_area_struct *vma,
                                  unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_nopage(vma, address);
}

static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
                                      unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_shm_nopage(vma, address);
}

static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
                                      unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_dma_nopage(vma, address);
}

static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
                                     unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_sg_nopage(vma, address);
}

#else                           /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */

static struct page *drm_vm_nopage(struct vm_area_struct *vma,
                                  unsigned long address, int unused)
{
        return drm_do_vm_nopage(vma, address);
}

static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
                                      unsigned long address, int unused)
{
        return drm_do_vm_shm_nopage(vma, address);
}

static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
                                      unsigned long address, int unused)
{
        return drm_do_vm_dma_nopage(vma, address);
}

static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
                                     unsigned long address, int unused)
{
        return drm_do_vm_sg_nopage(vma, address);
}

#endif

/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
        .nopage = drm_vm_nopage,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = {
        .nopage = drm_vm_shm_nopage,
        .open = drm_vm_open,
        .close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = {
        .nopage = drm_vm_dma_nopage,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = {
        .nopage = drm_vm_sg_nopage,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/**
 * \c open method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
void drm_vm_open(struct vm_area_struct *vma)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_vma_entry_t *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_inc(&dev->vma_count);

        vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
        if (vma_entry) {
                down(&dev->struct_sem);
                vma_entry->vma = vma;
                vma_entry->next = dev->vmalist;
                vma_entry->pid = current->pid;
                dev->vmalist = vma_entry;
                up(&dev->struct_sem);
        }
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
void drm_vm_close(struct vm_area_struct *vma)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_vma_entry_t *pt, *prev;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        down(&dev->struct_sem);
        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
                if (pt->vma == vma) {
                        if (prev) {
                                prev->next = pt->next;
                        } else {
                                dev->vmalist = pt->next;
                        }
                        drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
                        break;
                }
        }
        up(&dev->struct_sem);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops, sets
 * the file pointer, and calls drm_vm_open().
 */
int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev;
        drm_device_dma_t *dma;
        unsigned long length = vma->vm_end - vma->vm_start;

        lock_kernel();
        dev = priv->head->dev;
        dma = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

        /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
                unlock_kernel();
                return -EINVAL;
        }
        unlock_kernel();

        vma->vm_ops = &drm_vm_dma_ops;

#if LINUX_VERSION_CODE <= 0x02040e     /* KERNEL_VERSION(2,4,14) */
        vma->vm_flags |= VM_LOCKED | VM_SHM;    /* Don't swap */
#else
        vma->vm_flags |= VM_RESERVED;   /* Don't swap */
#endif

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open(vma);
        return 0;
}

unsigned long drm_core_get_map_ofs(drm_map_t *map)
{
        return map->offset;
}
EXPORT_SYMBOL(drm_core_get_map_ofs);

unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
        return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
        return 0;
#endif
}
EXPORT_SYMBOL(drm_core_get_reg_ofs);

/**
 * mmap DRM memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so drm_mmap_dma() is called. Otherwise searches for the map in
 * drm_device::maplist, checks that the restricted flag is not set, sets the
 * virtual memory operations according to the mapping type, and remaps the
 * pages. Finally sets the file pointer and calls drm_vm_open().
 */
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t *map = NULL;
        drm_map_list_t *r_list;
        unsigned long offset = 0;
        struct list_head *list;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

        if (!priv->authenticated)
                return -EACCES;

        /* We check for "dma". On Apple's UniNorth, it's valid to have
         * the AGP mapped at physical address 0
         * --BenH.
         */
        if (!VM_OFFSET(vma)
#if __OS_HAS_AGP
            && (!dev->agp
                || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
            )
                return drm_mmap_dma(filp, vma);

        /* A sequential search of a linked list is fine here because:
         * 1) there will only be about 5-10 entries in the list, and
         * 2) a DRI client only has to do this mapping once, so it doesn't
         * have to be optimized for performance, even if the list were a bit
         * longer.
         */
        list_for_each(list, &dev->maplist->head) {
                unsigned long off;

                r_list = list_entry(list, drm_map_list_t, head);
                map = r_list->map;
                if (!map)
                        continue;
                off = dev->driver->get_map_ofs(map);
                if (off == VM_OFFSET(vma))
                        break;
        }

        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

        /* Check for valid size. */
        if (map->size != vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought we could move
                 * this up higher and use `protection_map' instead.
                 */
                vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
                        __pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        switch (map->type) {
        case _DRM_AGP:
                if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
                        /*
                         * On some platforms the CPU cannot access AGP bus
                         * addresses directly, so for memory of type _DRM_AGP
                         * we sort out the real physical pages and mappings
                         * in nopage().
                         */
#if defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
                        vma->vm_ops = &drm_vm_ops;
                        break;
                }
                /* fall through to _DRM_FRAME_BUFFER... */
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
                if (VM_OFFSET(vma) >= __pa(high_memory)) {
#if defined(__i386__) || defined(__x86_64__)
                        if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
                                pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
                                pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
                        }
#elif defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED;
#endif
                        vma->vm_flags |= VM_IO; /* not in core dump */
                }
#if defined(__ia64__)
                if (efi_range_is_wc(vma->vm_start,
                                    vma->vm_end - vma->vm_start))
                        vma->vm_page_prot =
                            pgprot_writecombine(vma->vm_page_prot);
                else
                        vma->vm_page_prot =
                            pgprot_noncached(vma->vm_page_prot);
#endif
                offset = dev->driver->get_reg_ofs(dev);
#ifdef __sparc__
                if (io_remap_pfn_range(DRM_RPR_ARG(vma) vma->vm_start,
                                       (VM_OFFSET(vma) + offset) >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot))
#else
                if (io_remap_pfn_range(vma, vma->vm_start,
                                       (VM_OFFSET(vma) + offset) >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot))
#endif
                        return -EAGAIN;
                DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%lx\n",
                          map->type,
                          vma->vm_start, vma->vm_end, VM_OFFSET(vma) + offset);
                vma->vm_ops = &drm_vm_ops;
                break;
        case _DRM_SHM:
                vma->vm_ops = &drm_vm_shm_ops;
                vma->vm_private_data = (void *)map;
                /* Don't let this area swap.  Change when
                 * DRM_KERNEL advisory is supported.
                 */
#if LINUX_VERSION_CODE <= 0x02040e     /* KERNEL_VERSION(2,4,14) */
                vma->vm_flags |= VM_LOCKED;
#else
                vma->vm_flags |= VM_RESERVED;
#endif
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &drm_vm_sg_ops;
                vma->vm_private_data = (void *)map;
#if LINUX_VERSION_CODE <= 0x02040e     /* KERNEL_VERSION(2,4,14) */
                vma->vm_flags |= VM_LOCKED;
#else
                vma->vm_flags |= VM_RESERVED;
#endif
                break;
        default:
                return -EINVAL; /* This should never happen. */
        }
#if LINUX_VERSION_CODE <= 0x02040e     /* KERNEL_VERSION(2,4,14) */
        vma->vm_flags |= VM_LOCKED | VM_SHM;    /* Don't swap */
#else
        vma->vm_flags |= VM_RESERVED;   /* Don't swap */
#endif

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open(vma);
        return 0;
}
EXPORT_SYMBOL(drm_mmap);
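
/*
 * Usage sketch (userspace side, with hypothetical map_offset/map_size
 * values): a DRI client learns a map's offset from the X server or the map
 * ioctls and hands it to mmap(2); that offset is what drm_mmap() above
 * matches against the maplist:
 *
 *      int fd = open("/dev/dri/card0", O_RDWR);
 *      void *regs = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *                        MAP_SHARED, fd, map_offset);
 *
 * map_size must equal the map's size exactly, or drm_mmap() returns -EINVAL.
 */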