// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <linux/version.h>
#include <asm/cache.h>
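
/*
 * Set up the pool of uncached memory that the coherent DMA allocator
 * falls back to for requests made from atomic context, where the normal
 * remapping path cannot sleep.
 */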
static int __init atomic_pool_init(void)
{
	return dma_atomic_pool_init(GFP_KERNEL, pgprot_noncached(PAGE_KERNEL));
}
postcore_initcall(atomic_pool_init);
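
/*
 * Prepare pages for use as a coherent DMA buffer: zero them, then write
 * back and invalidate the corresponding data cache lines so no dirty
 * line can later be evicted over data written through the uncached
 * mapping.  Highmem pages have no permanent kernel mapping, so they are
 * handled one page at a time via kmap_atomic().
 */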
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	if (PageHighMem(page)) {
		unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		do {
			void *ptr = kmap_atomic(page);
			size_t _size = (size < PAGE_SIZE) ? size : PAGE_SIZE;

			memset(ptr, 0, _size);
			dma_wbinv_range((unsigned long)ptr,
					(unsigned long)ptr + _size);

			kunmap_atomic(ptr);

			page++;
			size -= PAGE_SIZE;
			count--;
		} while (count);
	} else {
		void *ptr = page_address(page);

		memset(ptr, 0, size);
		dma_wbinv_range((unsigned long)ptr, (unsigned long)ptr + size);
	}
}
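
/*
 * Apply the cache maintenance routine @fn to the physical range
 * [@paddr, @paddr + @size).  The cache routines operate on virtual
 * addresses, so each page must first be resolved to a kernel mapping.
 */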
static inline void cache_op(phys_addr_t paddr, size_t size,
			    void (*fn)(unsigned long start, unsigned long end))
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned int offset = paddr & ~PAGE_MASK;
	size_t left = size;
	unsigned long start;
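
	/*
	 * Walk the range page by page: lowmem pages are reached through
	 * the linear mapping, while highmem pages are mapped temporarily
	 * with kmap_atomic(), with the length clamped to the current
	 * page.
	 */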
	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			start = (unsigned long)(addr + offset);

			fn(start, start + len);

			kunmap_atomic(addr);
		} else {
			start = (unsigned long)phys_to_virt(paddr);

			fn(start, start + size);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}
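
/*
 * Hand a streaming DMA buffer over to the device: write back dirty
 * cache lines for DMA_TO_DEVICE, and additionally invalidate them for
 * DMA_FROM_DEVICE and DMA_BIDIRECTIONAL so that no dirty line can be
 * evicted on top of data the device writes to memory.
 */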
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
			      size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		cache_op(paddr, size, dma_wb_range);
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		cache_op(paddr, size, dma_wbinv_range);
		break;
	default:
		BUG();
	}
}
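
/*
 * Hand the buffer back to the CPU after the device is done.  This
 * mirrors arch_sync_dma_for_device(): DMA_FROM_DEVICE and
 * DMA_BIDIRECTIONAL write back and invalidate again so the CPU cannot
 * read stale cache lines in place of device-written data.
 */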
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
			   size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		cache_op(paddr, size, dma_wb_range);
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		cache_op(paddr, size, dma_wbinv_range);
		break;
	default:
		BUG();
	}
}