/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/device.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
        return ioremap(offset, size);
}
#endif

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: either MEMREMAP_WB or MEMREMAP_WT
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable.
 *
 * MEMREMAP_WB - matches the default mapping for "System RAM" on
 * the architecture.  This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is
 * RAM, memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility.  Attempts to
 * map "System RAM" with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
        int is_ram = region_intersects(offset, size, "System RAM");
        void *addr = NULL;

        if (is_ram == REGION_MIXED) {
                WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
                                &offset, (unsigned long) size);
                return NULL;
        }

        /* Try all mapping types requested until one returns non-NULL */
        if (flags & MEMREMAP_WB) {
                flags &= ~MEMREMAP_WB;
                /*
                 * MEMREMAP_WB is special in that it can be satisfied
                 * from the direct map.  Some archs depend on the
                 * capability of memremap() to autodetect cases where
                 * the requested range is potentially in "System RAM".
                 */
                if (is_ram == REGION_INTERSECTS)
                        addr = __va(offset);
                else
                        addr = ioremap_cache(offset, size);
        }

        /*
         * If we don't have a mapping yet and more request flags are
         * pending then we will be attempting to establish a new virtual
         * address mapping.  Enforce that this mapping is not aliasing
         * "System RAM".
         */
        if (!addr && is_ram == REGION_INTERSECTS && flags) {
                WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
                                &offset, (unsigned long) size);
                return NULL;
        }

        if (!addr && (flags & MEMREMAP_WT)) {
                flags &= ~MEMREMAP_WT;
                addr = ioremap_wt(offset, size);
        }

        return addr;
}
EXPORT_SYMBOL(memremap);

/*
 * memunmap() - free the return of a successful memremap() call.  Only
 * vmalloc-space mappings (those established via ioremap_cache() or
 * ioremap_wt()) need teardown; direct-map pointers are left alone.
 */
void memunmap(void *addr)
{
        if (is_vmalloc_addr(addr))
                iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);
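
/*
 * Usage sketch (hypothetical caller, illustrative names only): a driver
 * that knows a firmware-described range has no I/O side effects can map
 * it as ordinary cacheable memory, preferring write-back and falling
 * back to write-through, per the memremap() kernel-doc above.
 */
static inline int example_memremap_user(struct resource *res)
{
        /* MEMREMAP_WB is tried first; MEMREMAP_WT only if WB cannot be set up */
        void *base = memremap(res->start, resource_size(res),
                        MEMREMAP_WB | MEMREMAP_WT);

        if (!base)
                return -ENOMEM;

        /* plain loads and stores are valid here, no readl()/writel() needed */

        memunmap(base);
        return 0;
}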

static void devm_memremap_release(struct device *dev, void *res)
{
        /* the devres data area holds the pointer returned by memremap() */
        memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
        return *(void **)res == match_data;
}

/*
 * devm_memremap() - device-managed memremap(); the mapping is recorded as
 * a devres resource and is unmapped automatically when @dev is unbound.
 */
void *devm_memremap(struct device *dev, resource_size_t offset,
                size_t size, unsigned long flags)
{
        void **ptr, *addr;

        ptr = devres_alloc(devm_memremap_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return NULL;

        addr = memremap(offset, size, flags);
        if (addr) {
                *ptr = addr;
                devres_add(dev, ptr);
        } else
                devres_free(ptr);

        return addr;
}
EXPORT_SYMBOL(devm_memremap);

void devm_memunmap(struct device *dev, void *addr)
{
        WARN_ON(devres_destroy(dev, devm_memremap_release, devm_memremap_match,
                                addr));
        memunmap(addr);
}
EXPORT_SYMBOL(devm_memunmap);
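
/*
 * Usage sketch (hypothetical driver probe, illustrative names only): the
 * device-managed variant ties the mapping's lifetime to @dev, so the
 * error and remove paths need no explicit devm_memunmap() call.
 */
static inline int example_devm_memremap_user(struct device *dev,
                struct resource *res)
{
        void *base = devm_memremap(dev, res->start, resource_size(res),
                        MEMREMAP_WB);

        if (!base)
                return -ENOMEM;

        /* the mapping is released automatically when @dev is unbound */
        return 0;
}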

#ifdef CONFIG_ZONE_DEVICE
struct page_map {
        struct resource res;
};

static void devm_memremap_pages_release(struct device *dev, void *res)
{
        struct page_map *page_map = res;

        /* pages are dead and unused, undo the arch mapping */
        arch_remove_memory(page_map->res.start, resource_size(&page_map->res));
}

/*
 * devm_memremap_pages() - hotplug-add a device memory range so that it is
 * covered by the direct map and backed by struct pages.  Ranges that
 * already intersect "System RAM" are simply translated via __va(); mixed
 * ranges are rejected.  Returns an ERR_PTR() on failure.
 */
void *devm_memremap_pages(struct device *dev, struct resource *res)
{
        int is_ram = region_intersects(res->start, resource_size(res),
                        "System RAM");
        struct page_map *page_map;
        int error, nid;

        if (is_ram == REGION_MIXED) {
                WARN_ONCE(1, "%s attempted on mixed region %pr\n",
                                __func__, res);
                return ERR_PTR(-ENXIO);
        }

        if (is_ram == REGION_INTERSECTS)
                return __va(res->start);

        page_map = devres_alloc(devm_memremap_pages_release,
                        sizeof(*page_map), GFP_KERNEL);
        if (!page_map)
                return ERR_PTR(-ENOMEM);

        memcpy(&page_map->res, res, sizeof(*res));

        nid = dev_to_node(dev);
        if (nid < 0)
                nid = 0;

        error = arch_add_memory(nid, res->start, resource_size(res), true);
        if (error) {
                devres_free(page_map);
                return ERR_PTR(error);
        }

        devres_add(dev, page_map);
        return __va(res->start);
}
EXPORT_SYMBOL(devm_memremap_pages);
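
/*
 * Usage sketch (hypothetical ZONE_DEVICE consumer, e.g. a pmem-style
 * driver; names are illustrative): remap a device range with page
 * structures and check the ERR_PTR-encoded return value.
 */
static inline int example_devm_memremap_pages_user(struct device *dev,
                struct resource *res)
{
        void *base = devm_memremap_pages(dev, res);

        if (IS_ERR(base))
                return PTR_ERR(base);

        /* pfn_to_page()/virt_to_page() now resolve for this range */
        return 0;
}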
#endif /* CONFIG_ZONE_DEVICE */