arch/tile/kernel/pci-dma.c
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>

/* Generic DMA mapping functions: */

/*
 * Allocate what Linux calls "coherent" memory.  On TILEPro this is
 * uncached memory; on TILE-Gx it is hash-for-home memory.
 */
#ifdef __tilepro__
#define PAGE_HOME_DMA PAGE_HOME_UNCACHED
#else
#define PAGE_HOME_DMA PAGE_HOME_HASH
#endif

static void *tile_dma_alloc_coherent(struct device *dev, size_t size,
                                     dma_addr_t *dma_handle, gfp_t gfp,
                                     unsigned long attrs)
{
        u64 dma_mask = (dev && dev->coherent_dma_mask) ?
                dev->coherent_dma_mask : DMA_BIT_MASK(32);
        int node = dev ? dev_to_node(dev) : 0;
        int order = get_order(size);
        struct page *pg;
        dma_addr_t addr;

        gfp |= __GFP_ZERO;

        /*
         * If the mask specifies that the memory be in the first 4 GB, then
         * we force the allocation to come from the DMA zone.  We also
         * force the node to 0 since that's the only node where the DMA
         * zone isn't empty.  If the mask size is smaller than 32 bits, we
         * may still not be able to guarantee a suitable memory address, in
         * which case we will return NULL.  But such devices are uncommon.
         */
        if (dma_mask <= DMA_BIT_MASK(32)) {
                gfp |= GFP_DMA;
                node = 0;
        }

        pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA);
        if (pg == NULL)
                return NULL;

        addr = page_to_phys(pg);
        if (addr + size > dma_mask) {
                __homecache_free_pages(pg, order);
                return NULL;
        }

        *dma_handle = addr;

        return page_address(pg);
}
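
/*
 * Worked example of the mask handling above (illustrative values only,
 * not taken from this file):
 *
 *      coherent_dma_mask = DMA_BIT_MASK(32)  ->  gfp |= GFP_DMA, node = 0;
 *              any page from the DMA zone passes the "addr + size" check.
 *      coherent_dma_mask = DMA_BIT_MASK(30)  ->  gfp |= GFP_DMA, node = 0;
 *              but a DMA-zone page at, say, physical 0x60000000 still fails
 *              the "addr + size > dma_mask" test, so the page is freed and
 *              the function returns NULL, as the comment above warns.
 */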

/*
 * Free memory that was allocated with tile_dma_alloc_coherent.
 */
static void tile_dma_free_coherent(struct device *dev, size_t size,
                                   void *vaddr, dma_addr_t dma_handle,
                                   unsigned long attrs)
{
        homecache_free_pages((unsigned long)vaddr, get_order(size));
}
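
/*
 * Usage sketch (hypothetical driver code, not part of this file): a driver
 * never calls tile_dma_alloc_coherent() directly; it goes through the
 * generic DMA API, which dispatches to the dma_map_ops installed for the
 * device.  "my_dev" and "ring_size" are made-up names.
 *
 *      void *ring;
 *      dma_addr_t ring_dma;
 *
 *      ring = dma_alloc_coherent(&my_dev->dev, ring_size, &ring_dma,
 *                                GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(&my_dev->dev, ring_size, ring, ring_dma);
 *
 * On TILE the buffer comes back zeroed (__GFP_ZERO is forced above) and
 * homed according to PAGE_HOME_DMA, so coherent buffers need no explicit
 * cache maintenance from the driver.
 */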

/*
 * The map routines "map" the specified address range for DMA
 * accesses.  The memory belongs to the device after this call is
 * issued, until it is unmapped with dma_unmap_single.
 *
 * We don't need to do any mapping, we just flush the address range
 * out of the cache and return a DMA address.
 *
 * The unmap routines do whatever is necessary before the processor
 * accesses the memory again, and must be called before the driver
 * touches the memory.  We can get away with a cache invalidate if we
 * can count on nothing having been touched.
 */

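/*
 * Streaming-DMA usage sketch (hypothetical driver code, not part of this
 * file): the map/unmap pairing the comment above describes, as seen from a
 * driver.  "skb" and "RX_LEN" are made-up names.
 *
 *      dma_addr_t busaddr;
 *
 *      busaddr = dma_map_single(dev, skb->data, RX_LEN, DMA_FROM_DEVICE);
 *      if (dma_mapping_error(dev, busaddr))
 *              return -ENOMEM;
 *
 *      // ... hand busaddr to the device and wait for the DMA to finish ...
 *
 *      dma_unmap_single(dev, busaddr, RX_LEN, DMA_FROM_DEVICE);
 *      // only now may the CPU look at skb->data again
 *
 * dma_map_single() and dma_unmap_single() reach the map_page/unmap_page
 * hooks below through the device's dma_map_ops.
 */
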
/* Set up a single page for DMA access. */
static void __dma_prep_page(struct page *page, unsigned long offset,
                            size_t size, enum dma_data_direction direction)
{
        /*
         * Flush the page from cache if necessary.
         * On tilegx, data is delivered to hash-for-home L3; on tilepro,
         * data is delivered direct to memory.
         *
         * NOTE: If we were just doing DMA_TO_DEVICE we could optimize
         * this to be a "flush" not a "finv" and keep some of the
         * state in cache across the DMA operation, but it doesn't seem
         * worth creating the necessary flush_buffer_xxx() infrastructure.
         */
        int home = page_home(page);
        switch (home) {
        case PAGE_HOME_HASH:
#ifdef __tilegx__
                return;
#endif
                break;
        case PAGE_HOME_UNCACHED:
#ifdef __tilepro__
                return;
#endif
                break;
        case PAGE_HOME_IMMUTABLE:
                /* Should be going to the device only. */
                BUG_ON(direction == DMA_FROM_DEVICE ||
                       direction == DMA_BIDIRECTIONAL);
                return;
        case PAGE_HOME_INCOHERENT:
                /* Incoherent anyway, so no need to work hard here. */
                return;
        default:
                BUG_ON(home < 0 || home >= NR_CPUS);
                break;
        }
        homecache_finv_page(page);

#ifdef DEBUG_ALIGNMENT
        /* Warn if the region isn't cacheline aligned. */
        if (offset & (L2_CACHE_BYTES - 1) || (size & (L2_CACHE_BYTES - 1)))
                pr_warn("Unaligned DMA to non-hfh memory: PA %#llx/%#lx\n",
                        PFN_PHYS(page_to_pfn(page)) + offset, size);
#endif
}
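
/*
 * Summary of the switch above (for reference, no additional logic): on
 * TILE-Gx a PAGE_HOME_HASH page needs no work, and on TILEPro a
 * PAGE_HOME_UNCACHED page needs no work, since those are the DMA-friendly
 * homes on the respective chips; immutable and incoherent pages also
 * return early; everything else (a page homed on a single cpu) is flushed
 * and invalidated with homecache_finv_page().
 */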

/* Make the page ready to be read by the core. */
static void __dma_complete_page(struct page *page, unsigned long offset,
                                size_t size, enum dma_data_direction direction)
{
#ifdef __tilegx__
        switch (page_home(page)) {
        case PAGE_HOME_HASH:
                /* I/O device delivered data the way the cpu wanted it. */
                break;
        case PAGE_HOME_INCOHERENT:
                /* Incoherent anyway, so no need to work hard here. */
                break;
        case PAGE_HOME_IMMUTABLE:
                /* Extra read-only copies are not a problem. */
                break;
        default:
                /* Flush the bogus hash-for-home I/O entries to memory. */
                homecache_finv_map_page(page, PAGE_HOME_HASH);
                break;
        }
#endif
}

static void __dma_prep_pa_range(dma_addr_t dma_addr, size_t size,
                                enum dma_data_direction direction)
{
        struct page *page = pfn_to_page(PFN_DOWN(dma_addr));
        unsigned long offset = dma_addr & (PAGE_SIZE - 1);
        size_t bytes = min(size, (size_t)(PAGE_SIZE - offset));

        while (size != 0) {
                __dma_prep_page(page, offset, bytes, direction);
                size -= bytes;
                ++page;
                offset = 0;
                bytes = min((size_t)PAGE_SIZE, size);
        }
}
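
/*
 * Worked example of the page walk above (illustrative numbers, assuming a
 * 64 KB PAGE_SIZE): a range starting at physical 0x1000f000 with size
 * 0x12000 is handled as three calls:
 *
 *      __dma_prep_page(page0, offset 0xf000, bytes 0x1000,  dir);
 *      __dma_prep_page(page1, offset 0,      bytes 0x10000, dir);
 *      __dma_prep_page(page2, offset 0,      bytes 0x1000,  dir);
 *
 * i.e. the tail of the first page, then whole pages, then the leading
 * part of the last page.
 */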

static void __dma_complete_pa_range(dma_addr_t dma_addr, size_t size,
                                    enum dma_data_direction direction)
{
        struct page *page = pfn_to_page(PFN_DOWN(dma_addr));
        unsigned long offset = dma_addr & (PAGE_SIZE - 1);
        size_t bytes = min(size, (size_t)(PAGE_SIZE - offset));

        while (size != 0) {
                __dma_complete_page(page, offset, bytes, direction);
                size -= bytes;
                ++page;
                offset = 0;
                bytes = min((size_t)PAGE_SIZE, size);
        }
}

static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist,
                           int nents, enum dma_data_direction direction,
                           unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(!valid_dma_direction(direction));

        WARN_ON(nents == 0 || sglist->length == 0);

        for_each_sg(sglist, sg, nents, i) {
                sg->dma_address = sg_phys(sg);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
                sg->dma_length = sg->length;
#endif
                if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                        continue;
                __dma_prep_pa_range(sg->dma_address, sg->length, direction);
        }

        return nents;
}
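
/*
 * Scatter-gather usage sketch (hypothetical driver code, not part of this
 * file): how a driver drives the map_sg/unmap_sg hooks above through the
 * generic API.  "my_sgt", "nbufs" and write_desc() are made-up names.
 *
 *      struct scatterlist *sg;
 *      int i, count;
 *
 *      count = dma_map_sg(dev, my_sgt->sgl, nbufs, DMA_TO_DEVICE);
 *      if (count == 0)
 *              return -EIO;
 *      for_each_sg(my_sgt->sgl, sg, count, i) {
 *              // program one descriptor per segment
 *              write_desc(sg_dma_address(sg), sg_dma_len(sg));
 *      }
 *      ...
 *      dma_unmap_sg(dev, my_sgt->sgl, nbufs, DMA_TO_DEVICE);
 *
 * Note that map_sg above never merges entries, so "count" always comes
 * back equal to "nbufs".
 */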

static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                              int nents, enum dma_data_direction direction,
                              unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(!valid_dma_direction(direction));
        for_each_sg(sglist, sg, nents, i) {
                sg->dma_address = sg_phys(sg);
                if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                        continue;
                __dma_complete_pa_range(sg->dma_address, sg->length,
                                        direction);
        }
}

static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page,
                                    unsigned long offset, size_t size,
                                    enum dma_data_direction direction,
                                    unsigned long attrs)
{
        BUG_ON(!valid_dma_direction(direction));

        BUG_ON(offset + size > PAGE_SIZE);
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                __dma_prep_page(page, offset, size, direction);

        return page_to_pa(page) + offset;
}

static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                                size_t size, enum dma_data_direction direction,
                                unsigned long attrs)
{
        BUG_ON(!valid_dma_direction(direction));

        if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                return;

        __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
                            dma_address & (PAGE_SIZE - 1), size, direction);
}

static void tile_dma_sync_single_for_cpu(struct device *dev,
                                         dma_addr_t dma_handle,
                                         size_t size,
                                         enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));

        __dma_complete_pa_range(dma_handle, size, direction);
}

static void tile_dma_sync_single_for_device(struct device *dev,
                                            dma_addr_t dma_handle, size_t size,
                                            enum dma_data_direction direction)
{
        __dma_prep_pa_range(dma_handle, size, direction);
}

static void tile_dma_sync_sg_for_cpu(struct device *dev,
                                     struct scatterlist *sglist, int nelems,
                                     enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(!valid_dma_direction(direction));
        WARN_ON(nelems == 0 || sglist->length == 0);

        for_each_sg(sglist, sg, nelems, i) {
                dma_sync_single_for_cpu(dev, sg->dma_address,
                                        sg_dma_len(sg), direction);
        }
}

static void tile_dma_sync_sg_for_device(struct device *dev,
                                        struct scatterlist *sglist, int nelems,
                                        enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(!valid_dma_direction(direction));
        WARN_ON(nelems == 0 || sglist->length == 0);

        for_each_sg(sglist, sg, nelems, i) {
                dma_sync_single_for_device(dev, sg->dma_address,
                                           sg_dma_len(sg), direction);
        }
}
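
/*
 * Sync usage sketch (hypothetical driver code, not part of this file): a
 * buffer that stays mapped across multiple DMA operations is handed back
 * and forth with the sync hooks above.  "busaddr", "buf", "BUF_LEN" and
 * process_buffer() are made-up names.
 *
 *      // device has finished writing; give the buffer to the CPU
 *      dma_sync_single_for_cpu(dev, busaddr, BUF_LEN, DMA_FROM_DEVICE);
 *      process_buffer(buf);
 *      // hand it back to the device for the next transfer
 *      dma_sync_single_for_device(dev, busaddr, BUF_LEN, DMA_FROM_DEVICE);
 *
 * for_cpu maps to __dma_complete_pa_range() (drop stale cache lines) and
 * for_device maps to __dma_prep_pa_range() (flush/invalidate before the
 * device touches memory).
 */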

static inline int
tile_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}

static inline int
tile_dma_supported(struct device *dev, u64 mask)
{
        return 1;
}

static struct dma_map_ops tile_default_dma_map_ops = {
        .alloc = tile_dma_alloc_coherent,
        .free = tile_dma_free_coherent,
        .map_page = tile_dma_map_page,
        .unmap_page = tile_dma_unmap_page,
        .map_sg = tile_dma_map_sg,
        .unmap_sg = tile_dma_unmap_sg,
        .sync_single_for_cpu = tile_dma_sync_single_for_cpu,
        .sync_single_for_device = tile_dma_sync_single_for_device,
        .sync_sg_for_cpu = tile_dma_sync_sg_for_cpu,
        .sync_sg_for_device = tile_dma_sync_sg_for_device,
        .mapping_error = tile_dma_mapping_error,
        .dma_supported = tile_dma_supported
};

struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
EXPORT_SYMBOL(tile_dma_map_ops);
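
/*
 * Dispatch sketch (simplified, for orientation only): the generic DMA API
 * looks up the per-device dma_map_ops and calls into the table above, so
 * for a device using tile_dma_map_ops a call such as
 *
 *      dma_map_page(dev, page, off, len, dir);
 *
 * is roughly
 *
 *      struct dma_map_ops *ops = get_dma_ops(dev);
 *      ops->map_page(dev, page, off, len, dir, 0);  // == tile_dma_map_page
 *
 * The architecture's get_dma_ops() falls back to tile_dma_map_ops unless
 * set_dma_ops() has installed one of the PCI variants defined below.
 */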

/* Generic PCI DMA mapping functions */

static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size,
                                         dma_addr_t *dma_handle, gfp_t gfp,
                                         unsigned long attrs)
{
        int node = dev_to_node(dev);
        int order = get_order(size);
        struct page *pg;
        dma_addr_t addr;

        gfp |= __GFP_ZERO;

        pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA);
        if (pg == NULL)
                return NULL;

        addr = page_to_phys(pg);

        *dma_handle = addr + get_dma_offset(dev);

        return page_address(pg);
}
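
/*
 * About get_dma_offset() (illustrative numbers only): the PCI variants
 * return bus addresses rather than CPU physical addresses, shifted by a
 * constant per-device offset.  For a hypothetical offset of 0x40000000:
 *
 *      CPU physical 0x10000000  ->  *dma_handle = 0x50000000
 *
 * The unmap and sync-for-cpu paths below undo the same offset
 * (dma_address -= get_dma_offset(dev)) before converting back to a page.
 */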

/*
 * Free memory that was allocated with tile_pci_dma_alloc_coherent.
 */
static void tile_pci_dma_free_coherent(struct device *dev, size_t size,
                                       void *vaddr, dma_addr_t dma_handle,
                                       unsigned long attrs)
{
        homecache_free_pages((unsigned long)vaddr, get_order(size));
}

static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist,
                               int nents, enum dma_data_direction direction,
                               unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(!valid_dma_direction(direction));

        WARN_ON(nents == 0 || sglist->length == 0);

        for_each_sg(sglist, sg, nents, i) {
                sg->dma_address = sg_phys(sg);
                __dma_prep_pa_range(sg->dma_address, sg->length, direction);

                sg->dma_address = sg->dma_address + get_dma_offset(dev);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
                sg->dma_length = sg->length;
#endif
        }

        return nents;
}

static void tile_pci_dma_unmap_sg(struct device *dev,
                                  struct scatterlist *sglist, int nents,
                                  enum dma_data_direction direction,
                                  unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(!valid_dma_direction(direction));
        for_each_sg(sglist, sg, nents, i) {
                sg->dma_address = sg_phys(sg);
                __dma_complete_pa_range(sg->dma_address, sg->length,
                                        direction);
        }
}

static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page,
                                        unsigned long offset, size_t size,
                                        enum dma_data_direction direction,
                                        unsigned long attrs)
{
        BUG_ON(!valid_dma_direction(direction));

        BUG_ON(offset + size > PAGE_SIZE);
        __dma_prep_page(page, offset, size, direction);

        return page_to_pa(page) + offset + get_dma_offset(dev);
}

static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                                    size_t size,
                                    enum dma_data_direction direction,
                                    unsigned long attrs)
{
        BUG_ON(!valid_dma_direction(direction));

        dma_address -= get_dma_offset(dev);

        __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
                            dma_address & (PAGE_SIZE - 1), size, direction);
}

static void tile_pci_dma_sync_single_for_cpu(struct device *dev,
                                             dma_addr_t dma_handle,
                                             size_t size,
                                             enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));

        dma_handle -= get_dma_offset(dev);

        __dma_complete_pa_range(dma_handle, size, direction);
}

static void tile_pci_dma_sync_single_for_device(struct device *dev,
                                                dma_addr_t dma_handle,
                                                size_t size,
                                                enum dma_data_direction
                                                direction)
{
        dma_handle -= get_dma_offset(dev);

        __dma_prep_pa_range(dma_handle, size, direction);
}

static void tile_pci_dma_sync_sg_for_cpu(struct device *dev,
                                         struct scatterlist *sglist,
                                         int nelems,
                                         enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(!valid_dma_direction(direction));
        WARN_ON(nelems == 0 || sglist->length == 0);

        for_each_sg(sglist, sg, nelems, i) {
                dma_sync_single_for_cpu(dev, sg->dma_address,
                                        sg_dma_len(sg), direction);
        }
}

static void tile_pci_dma_sync_sg_for_device(struct device *dev,
                                            struct scatterlist *sglist,
                                            int nelems,
                                            enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(!valid_dma_direction(direction));
        WARN_ON(nelems == 0 || sglist->length == 0);

        for_each_sg(sglist, sg, nelems, i) {
                dma_sync_single_for_device(dev, sg->dma_address,
                                           sg_dma_len(sg), direction);
        }
}

static inline int
tile_pci_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}

static inline int
tile_pci_dma_supported(struct device *dev, u64 mask)
{
        return 1;
}

static struct dma_map_ops tile_pci_default_dma_map_ops = {
        .alloc = tile_pci_dma_alloc_coherent,
        .free = tile_pci_dma_free_coherent,
        .map_page = tile_pci_dma_map_page,
        .unmap_page = tile_pci_dma_unmap_page,
        .map_sg = tile_pci_dma_map_sg,
        .unmap_sg = tile_pci_dma_unmap_sg,
        .sync_single_for_cpu = tile_pci_dma_sync_single_for_cpu,
        .sync_single_for_device = tile_pci_dma_sync_single_for_device,
        .sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu,
        .sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
        .mapping_error = tile_pci_dma_mapping_error,
        .dma_supported = tile_pci_dma_supported
};

struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
EXPORT_SYMBOL(gx_pci_dma_map_ops);

/* PCI DMA mapping functions for legacy PCI devices */

#ifdef CONFIG_SWIOTLB
static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size,
                                         dma_addr_t *dma_handle, gfp_t gfp,
                                         unsigned long attrs)
{
        gfp |= GFP_DMA;
        return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
}

static void tile_swiotlb_free_coherent(struct device *dev, size_t size,
                                       void *vaddr, dma_addr_t dma_addr,
                                       unsigned long attrs)
{
        swiotlb_free_coherent(dev, size, vaddr, dma_addr);
}

static struct dma_map_ops pci_swiotlb_dma_ops = {
        .alloc = tile_swiotlb_alloc_coherent,
        .free = tile_swiotlb_free_coherent,
        .map_page = swiotlb_map_page,
        .unmap_page = swiotlb_unmap_page,
        .map_sg = swiotlb_map_sg_attrs,
        .unmap_sg = swiotlb_unmap_sg_attrs,
        .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
        .sync_single_for_device = swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = swiotlb_sync_sg_for_device,
        .dma_supported = swiotlb_dma_supported,
        .mapping_error = swiotlb_dma_mapping_error,
};

static struct dma_map_ops pci_hybrid_dma_ops = {
        .alloc = tile_swiotlb_alloc_coherent,
        .free = tile_swiotlb_free_coherent,
        .map_page = tile_pci_dma_map_page,
        .unmap_page = tile_pci_dma_unmap_page,
        .map_sg = tile_pci_dma_map_sg,
        .unmap_sg = tile_pci_dma_unmap_sg,
        .sync_single_for_cpu = tile_pci_dma_sync_single_for_cpu,
        .sync_single_for_device = tile_pci_dma_sync_single_for_device,
        .sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu,
        .sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
        .mapping_error = tile_pci_dma_mapping_error,
        .dma_supported = tile_pci_dma_supported
};

struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
#else
struct dma_map_ops *gx_legacy_pci_dma_map_ops;
struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
#endif
EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops);

int dma_set_mask(struct device *dev, u64 mask)
{
        struct dma_map_ops *dma_ops = get_dma_ops(dev);

        /*
         * For PCI devices with 64-bit DMA addressing capability, promote
         * the dma_ops to hybrid, with the consistent memory DMA space limited
         * to 32-bit. For 32-bit capable devices, limit the streaming DMA
         * address range to max_direct_dma_addr.
         */
        if (dma_ops == gx_pci_dma_map_ops ||
            dma_ops == gx_hybrid_pci_dma_map_ops ||
            dma_ops == gx_legacy_pci_dma_map_ops) {
                if (mask == DMA_BIT_MASK(64) &&
                    dma_ops == gx_legacy_pci_dma_map_ops)
                        set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
                else if (mask > dev->archdata.max_direct_dma_addr)
                        mask = dev->archdata.max_direct_dma_addr;
        }

        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
EXPORT_SYMBOL(dma_set_mask);
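
/*
 * Usage sketch (hypothetical driver probe code, not part of this file):
 * the promotion logic above is triggered by an ordinary dma_set_mask()
 * call from a driver, e.g.
 *
 *      if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
 *              if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *                      return -EIO;
 *
 * A 64-bit request on a device currently using the legacy (swiotlb) ops
 * switches it to the hybrid ops, so streaming DMA bypasses the bounce
 * buffers; a smaller request is clamped to
 * dev->archdata.max_direct_dma_addr.
 */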

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
        struct dma_map_ops *dma_ops = get_dma_ops(dev);

        /*
         * For PCI devices with 64-bit DMA addressing capability, promote
         * the dma_ops to full capability for both streaming and consistent
         * memory access. For 32-bit capable devices, limit the consistent
         * memory DMA range to max_direct_dma_addr.
         */
        if (dma_ops == gx_pci_dma_map_ops ||
            dma_ops == gx_hybrid_pci_dma_map_ops ||
            dma_ops == gx_legacy_pci_dma_map_ops) {
                if (mask == DMA_BIT_MASK(64))
                        set_dma_ops(dev, gx_pci_dma_map_ops);
                else if (mask > dev->archdata.max_direct_dma_addr)
                        mask = dev->archdata.max_direct_dma_addr;
        }

        if (!dma_supported(dev, mask))
                return -EIO;
        dev->coherent_dma_mask = mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif
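
/*
 * Usage sketch (hypothetical driver code, not part of this file): drivers
 * typically pair the two mask setters, or use the combined helper:
 *
 *      err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 *      if (err)
 *              err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 *
 * With a 64-bit coherent mask the device is promoted all the way to
 * gx_pci_dma_map_ops, so coherent allocations are no longer restricted to
 * the 32-bit swiotlb pool either.
 */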

#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
/*
 * The generic dma_get_required_mask() uses the highest physical address
 * (max_pfn) to provide the hint to the PCI drivers regarding 32-bit or
 * 64-bit DMA configuration.  Since TILE-Gx has an I/O TLB/MMU that lets
 * DMA use the full 64-bit PCI address space, rather than being limited
 * by the physical memory space, we always let PCI devices use 64-bit
 * DMA if they have that capability, by returning the 64-bit DMA mask
 * here.  The device driver still has the option to use 32-bit DMA if
 * the device is not capable of 64-bit DMA.
 */
u64 dma_get_required_mask(struct device *dev)
{
        return DMA_BIT_MASK(64);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
#endif
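
/*
 * Usage sketch (hypothetical driver code, not part of this file): a driver
 * that supports both addressing modes can consult this hint at probe time.
 * use_64bit_descriptors() and "priv" are made-up names.
 *
 *      if (dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32))
 *              use_64bit_descriptors(priv);
 *
 * On TILE-Gx this always reports a 64-bit mask, steering capable devices
 * toward 64-bit DMA even when installed memory fits below 4 GB.
 */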