// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/of_address.h>

enum devm_ioremap_type {
	DEVM_IOREMAP = 0,
	DEVM_IOREMAP_UC,
	DEVM_IOREMAP_WC,
	DEVM_IOREMAP_NP,
};

void devm_ioremap_release(struct device *dev, void *res)
{
	iounmap(*(void __iomem **)res);
}

static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
				    resource_size_t size,
				    enum devm_ioremap_type type)
{
	void __iomem **ptr, *addr = NULL;

	ptr = devres_alloc_node(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL,
				dev_to_node(dev));
	if (!ptr)
		return NULL;

	switch (type) {
	case DEVM_IOREMAP:
		addr = ioremap(offset, size);
		break;
	case DEVM_IOREMAP_UC:
		addr = ioremap_uc(offset, size);
		break;
	case DEVM_IOREMAP_WC:
		addr = ioremap_wc(offset, size);
		break;
	case DEVM_IOREMAP_NP:
		addr = ioremap_np(offset, size);
		break;
	}

	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}

/**
 * devm_ioremap - Managed ioremap()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
			   resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap);
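
/*
 * A minimal usage sketch (not part of this file), assuming a hypothetical
 * platform driver; foo_probe and the use of the first MMIO resource are
 * illustrative:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct resource *r;
 *		void __iomem *regs;
 *
 *		r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		if (!r)
 *			return -ENODEV;
 *		regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
 *		if (!regs)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 * No explicit iounmap() is needed; the mapping is released when the driver
 * detaches. devm_ioremap_resource() below additionally validates the
 * resource and requests the memory region.
 */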

/**
 * devm_ioremap_uc - Managed ioremap_uc()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_uc(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
			      resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_UC);
}
EXPORT_SYMBOL_GPL(devm_ioremap_uc);

/**
 * devm_ioremap_wc - Managed ioremap_wc()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_wc(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
			      resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_WC);
}
EXPORT_SYMBOL(devm_ioremap_wc);

/**
 * devm_iounmap - Managed iounmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed iounmap(). @addr must have been mapped using devm_ioremap*().
 */
void devm_iounmap(struct device *dev, void __iomem *addr)
{
	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
			       (__force void *)addr));
	iounmap(addr);
}
EXPORT_SYMBOL(devm_iounmap);
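
/*
 * devm_iounmap() is only needed when a managed mapping has to go away
 * before driver detach, for example when a probe path maps a region
 * speculatively and then abandons it; in the common case the devres
 * release above is sufficient. A minimal sketch, with dev and res
 * assumed to come from the surrounding driver code:
 *
 *	base = devm_ioremap(dev, res->start, resource_size(res));
 *	if (!base)
 *		return -ENOMEM;
 *	...
 *	devm_iounmap(dev, base);
 */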

static void __iomem *
__devm_ioremap_resource(struct device *dev, const struct resource *res,
			enum devm_ioremap_type type)
{
	resource_size_t size;
	void __iomem *dest_ptr;
	char *pretty_name;

	BUG_ON(!dev);

	if (!res || resource_type(res) != IORESOURCE_MEM) {
		dev_err(dev, "invalid resource\n");
		return IOMEM_ERR_PTR(-EINVAL);
	}

	if (type == DEVM_IOREMAP && res->flags & IORESOURCE_MEM_NONPOSTED)
		type = DEVM_IOREMAP_NP;

	size = resource_size(res);

	if (res->name)
		pretty_name = devm_kasprintf(dev, GFP_KERNEL, "%s %s",
					     dev_name(dev), res->name);
	else
		pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
	if (!pretty_name) {
		dev_err(dev, "can't generate pretty name for resource %pR\n", res);
		return IOMEM_ERR_PTR(-ENOMEM);
	}

	if (!devm_request_mem_region(dev, res->start, size, pretty_name)) {
		dev_err(dev, "can't request region for resource %pR\n", res);
		return IOMEM_ERR_PTR(-EBUSY);
	}

	dest_ptr = __devm_ioremap(dev, res->start, size, type);
	if (!dest_ptr) {
		dev_err(dev, "ioremap failed for resource %pR\n", res);
		devm_release_mem_region(dev, res->start, size);
		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
	}

	return dest_ptr;
}

/**
 * devm_ioremap_resource() - check, request region, and ioremap resource
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it. All operations are managed and will be undone
 * on driver detach.
 *
 * Usage example:
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_ioremap_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *devm_ioremap_resource(struct device *dev,
				    const struct resource *res)
{
	return __devm_ioremap_resource(dev, res, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap_resource);

/**
 * devm_ioremap_resource_wc() - write-combined variant of
 *				devm_ioremap_resource()
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *devm_ioremap_resource_wc(struct device *dev,
				       const struct resource *res)
{
	return __devm_ioremap_resource(dev, res, DEVM_IOREMAP_WC);
}

/*
 * devm_of_iomap - Requests a resource and maps the memory mapped IO
 *		   for a given device_node managed by a given device
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it. All operations are managed and will be undone
 * on driver detach of the device.
 *
 * This is to be used when a device requests/maps resources described
 * by other device tree nodes (children or otherwise).
 *
 * @dev: The device "managing" the resource
 * @node: The device-tree node where the resource resides
 * @index: index of the MMIO range in the "reg" property
 * @size: Returns the size of the resource (pass NULL if not needed)
 *
 * Usage example:
 *
 *	base = devm_of_iomap(&pdev->dev, node, 0, NULL);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *
 * Please note: this is not a one-to-one replacement for of_iomap(), because
 * of_iomap() does not track whether the region is already mapped. If two
 * drivers try to map the same memory, of_iomap() will succeed but
 * devm_of_iomap() will return -EBUSY.
 *
 * Return: a pointer to the requested and mapped memory or an ERR_PTR() encoded
 * error code on failure.
 */
void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
			    resource_size_t *size)
{
	struct resource res;

	if (of_address_to_resource(node, index, &res))
		return IOMEM_ERR_PTR(-EINVAL);
	if (size)
		*size = resource_size(&res);
	return devm_ioremap_resource(dev, &res);
}
EXPORT_SYMBOL(devm_of_iomap);

#ifdef CONFIG_HAS_IOPORT_MAP
/*
 * Generic iomap devres
 */
static void devm_ioport_map_release(struct device *dev, void *res)
{
	ioport_unmap(*(void __iomem **)res);
}

static int devm_ioport_map_match(struct device *dev, void *res,
				 void *match_data)
{
	return *(void **)res == match_data;
}

/**
 * devm_ioport_map - Managed ioport_map()
 * @dev: Generic device to map ioport for
 * @port: Port to map
 * @nr: Number of ports to map
 *
 * Managed ioport_map(). Map is automatically unmapped on driver
 * detach.
 *
 * Return: a pointer to the remapped memory or NULL on failure.
 */
void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
			      unsigned int nr)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc_node(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL,
				dev_to_node(dev));
	if (!ptr)
		return NULL;

	addr = ioport_map(port, nr);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioport_map);
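
/*
 * A hedged usage sketch (not part of this file): mapping a block of legacy
 * I/O ports so they can be accessed with ioread8()/iowrite8(). The port
 * base and count are illustrative values, not taken from any real device:
 *
 *	void __iomem *p;
 *
 *	p = devm_ioport_map(&pdev->dev, 0x3f8, 8);
 *	if (!p)
 *		return -ENOMEM;
 *	iowrite8(0x00, p + 1);
 *
 * The mapping is dropped automatically on driver detach, or explicitly
 * with devm_ioport_unmap() below.
 */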

/**
 * devm_ioport_unmap - Managed ioport_unmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed ioport_unmap(). @addr must have been mapped using
 * devm_ioport_map().
 */
void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
	ioport_unmap(addr);
	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
			       devm_ioport_map_match, (__force void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);
#endif /* CONFIG_HAS_IOPORT_MAP */

#ifdef CONFIG_PCI
/*
 * PCI iomap devres
 */
#define PCIM_IOMAP_MAX	PCI_STD_NUM_BARS

struct pcim_iomap_devres {
	void __iomem *table[PCIM_IOMAP_MAX];
};

static void pcim_iomap_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pcim_iomap_devres *this = res;
	int i;

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (this->table[i])
			pci_iounmap(dev, this->table[i]);
}

/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access the iomap allocation table for @pdev. If the iomap table doesn't
 * exist and @pdev is managed, it will be allocated. All iomaps
 * recorded in the iomap table are automatically unmapped on driver
 * detach.
 *
 * This function might sleep when the table is first allocated but can
 * be safely called from any context and is guaranteed to succeed once
 * allocated.
 */
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
	if (dr)
		return dr->table;

	new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL,
				   dev_to_node(&pdev->dev));
	if (!new_dr)
		return NULL;
	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
	return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);

/**
 * pcim_iomap - Managed pci_iomap()
 * @pdev: PCI device to iomap for
 * @bar: BAR to iomap
 * @maxlen: Maximum length of iomap
 *
 * Managed pci_iomap(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	void __iomem **tbl;

	BUG_ON(bar >= PCIM_IOMAP_MAX);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
		return NULL;

	tbl[bar] = pci_iomap(pdev, bar, maxlen);
	return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);
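
/*
 * A minimal usage sketch (illustrative, not from this file): iomap a single
 * BAR of a managed PCI device. BAR 0 and the probe function name are
 * assumptions:
 *
 *	static int foo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		void __iomem *bar0;
 *		int rc;
 *
 *		rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *		bar0 = pcim_iomap(pdev, 0, 0);
 *		if (!bar0)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 * Note that pcim_iomap() only maps the BAR; it does not request the
 * region. pcim_iomap_regions() below does both.
 */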

/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
 */
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	void __iomem **tbl;
	int i;

	pci_iounmap(pdev, addr);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	BUG_ON(!tbl);

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (tbl[i] == addr) {
			tbl[i] = NULL;
			return;
		}
	WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);

/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.
 */
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
	void __iomem * const *iomap;
	int i, rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		rc = -EINVAL;
		len = pci_resource_len(pdev, i);
		if (!len)
			goto err_inval;

		rc = pci_request_region(pdev, i, name);
		if (rc)
			goto err_inval;

		rc = -ENOMEM;
		if (!pcim_iomap(pdev, i, 0))
			goto err_region;
	}

	return 0;

 err_region:
	pci_release_region(pdev, i);
 err_inval:
	while (--i >= 0) {
		if (!(mask & (1 << i)))
			continue;
		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}

	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);
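
/*
 * A hedged example of the common probe pattern built on the helpers above
 * (the "foo" region name and the use of BAR 0 are illustrative):
 *
 *	rc = pcim_enable_device(pdev);
 *	if (rc)
 *		return rc;
 *	rc = pcim_iomap_regions(pdev, BIT(0), "foo");
 *	if (rc)
 *		return rc;
 *	regs = pcim_iomap_table(pdev)[0];
 *
 * This requests and maps BAR 0 under the name "foo"; both the region and
 * the mapping are released automatically on driver detach.
 */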

/**
 * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to iomap
 * @name: Name used when requesting regions
 *
 * Request all PCI BARs and iomap regions specified by @mask.
 */
int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
				   const char *name)
{
	int request_mask = ((1 << 6) - 1) & ~mask;
	int rc;

	rc = pci_request_selected_regions(pdev, request_mask, name);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, mask, name);
	if (rc)
		pci_release_selected_regions(pdev, request_mask);
	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions_request_all);

/**
 * pcim_iounmap_regions - Unmap and release PCI BARs
 * @pdev: PCI device to unmap IO resources for
 * @mask: Mask of BARs to unmap and release
 *
 * Unmap and release regions specified by @mask.
 */
void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
{
	void __iomem * const *iomap;
	int i;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return;

	for (i = 0; i < PCIM_IOMAP_MAX; i++) {
		if (!(mask & (1 << i)))
			continue;

		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}
}
EXPORT_SYMBOL(pcim_iounmap_regions);
#endif /* CONFIG_PCI */

static void devm_arch_phys_ac_add_release(struct device *dev, void *res)
{
	arch_phys_wc_del(*((int *)res));
}

/**
 * devm_arch_phys_wc_add - Managed arch_phys_wc_add()
 * @dev: Managed device
 * @base: Memory base address
 * @size: Size of memory range
 *
 * Adds a WC MTRR using arch_phys_wc_add() and sets up a release callback.
 * See arch_phys_wc_add() for more information.
 */
int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long size)
{
	int *mtrr;
	int ret;

	mtrr = devres_alloc_node(devm_arch_phys_ac_add_release, sizeof(*mtrr), GFP_KERNEL,
				 dev_to_node(dev));
	if (!mtrr)
		return -ENOMEM;

	ret = arch_phys_wc_add(base, size);
	if (ret < 0) {
		devres_free(mtrr);
		return ret;
	}

	*mtrr = ret;
	devres_add(dev, mtrr);

	return ret;
}
EXPORT_SYMBOL(devm_arch_phys_wc_add);
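
/*
 * A minimal usage sketch (illustrative): add a write-combining MTRR for a
 * framebuffer aperture, as graphics drivers typically do. fb_base and
 * fb_size are assumed to describe the aperture:
 *
 *	devm_arch_phys_wc_add(dev, fb_base, fb_size);
 *
 * Most callers treat a failure here as non-fatal, since write-combining
 * is only a performance optimization; when the MTRR was added, it is
 * removed automatically on driver detach.
 */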

struct arch_io_reserve_memtype_wc_devres {
	resource_size_t start;
	resource_size_t size;
};

static void devm_arch_io_free_memtype_wc_release(struct device *dev, void *res)
{
	const struct arch_io_reserve_memtype_wc_devres *this = res;

	arch_io_free_memtype_wc(this->start, this->size);
}

/**
 * devm_arch_io_reserve_memtype_wc - Managed arch_io_reserve_memtype_wc()
 * @dev: Managed device
 * @start: Memory base address
 * @size: Size of memory range
 *
 * Reserves a memory range with WC caching using arch_io_reserve_memtype_wc()
 * and sets up a release callback. See arch_io_reserve_memtype_wc() for more
 * information.
 */
int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
				    resource_size_t size)
{
	struct arch_io_reserve_memtype_wc_devres *dr;
	int ret;

	dr = devres_alloc_node(devm_arch_io_free_memtype_wc_release, sizeof(*dr), GFP_KERNEL,
			       dev_to_node(dev));
	if (!dr)
		return -ENOMEM;

	ret = arch_io_reserve_memtype_wc(start, size);
	if (ret < 0) {
		devres_free(dr);
		return ret;
	}

	dr->start = start;
	dr->size = size;
	devres_add(dev, dr);

	return ret;
}
EXPORT_SYMBOL(devm_arch_io_reserve_memtype_wc);
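
/*
 * A hedged sketch of how the two arch_io/arch_phys helpers above are
 * typically combined for a prefetchable aperture (vram_base and vram_size
 * are illustrative names, mirroring what several DRM drivers do):
 *
 *	ret = devm_arch_io_reserve_memtype_wc(dev, vram_base, vram_size);
 *	if (ret)
 *		return ret;
 *	devm_arch_phys_wc_add(dev, vram_base, vram_size);
 *
 * Both the memtype reservation and the WC MTRR are undone automatically
 * when the driver detaches.
 */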