drivers/dax/kmem.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-2019 Intel Corporation. All rights reserved. */
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/memory-tiers.h>
#include <linux/memory_hotplug.h>
#include "dax-private.h"
#include "bus.h"

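/*
 * dax/kmem: hotplug device-DAX ranges as driver-managed System RAM.
 *
 * Each dev_dax range is aligned to the memory block size, reserved with
 * request_mem_region(), and hot-added via add_memory_driver_managed() so
 * that future kexec'd kernels do not pick it up as ordinary RAM.
 *
 * A typical way to bind a device to this driver from userspace, assuming
 * the daxctl tool from the ndctl project is available:
 *
 *   daxctl reconfigure-device --mode=system-ram dax0.0
 */
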
/*
 * Default abstract distance assigned to the NUMA node onlined
 * by DAX/kmem if the low-level platform driver didn't initialize
 * one for this NUMA node.
 */
#define MEMTIER_DEFAULT_DAX_ADISTANCE   (MEMTIER_ADISTANCE_DRAM * 5)

/* Memory resource name used for add_memory_driver_managed(). */
static const char *kmem_name;
/* Set if any memory will remain added when the driver is unloaded. */
static bool any_hotremove_failed;

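/*
 * Clamp a dev_dax range to memory-block alignment: round the start up
 * and the end down.  Returns -ENOSPC (leaving the raw range in @r for
 * error reporting) when the range cannot hold a single aligned memory
 * block, e.g. a 100M range can never fit a full 128M-aligned block.
 */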
static int dax_kmem_range(struct dev_dax *dev_dax, int i, struct range *r)
{
        struct dev_dax_range *dax_range = &dev_dax->ranges[i];
        struct range *range = &dax_range->range;

        /* memory-block align the hotplug range */
        r->start = ALIGN(range->start, memory_block_size_bytes());
        r->end = ALIGN_DOWN(range->end + 1, memory_block_size_bytes()) - 1;
        if (r->start >= r->end) {
                r->start = range->start;
                r->end = range->end;
                return -ENOSPC;
        }
        return 0;
}

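/*
 * Per-device private data: @res_name backs the request_mem_region()
 * calls (and must outlive any region that fails to hotremove), @mgid is
 * the id of the static memory group covering all ranges, and @res[]
 * holds one reserved resource per dev_dax range.
 */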
struct dax_kmem_data {
        const char *res_name;
        int mgid;
        struct resource *res[];
};

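/*
 * One memory_dev_type is cached per abstract distance and shared by all
 * kmem devices; the mutex serializes lookup and allocation on the list.
 */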
static DEFINE_MUTEX(kmem_memory_type_lock);
static LIST_HEAD(kmem_memory_types);

static struct memory_dev_type *kmem_find_alloc_memory_type(int adist)
{
        guard(mutex)(&kmem_memory_type_lock);
        return mt_find_alloc_memory_type(adist, &kmem_memory_types);
}

static void kmem_put_memory_types(void)
{
        guard(mutex)(&kmem_memory_type_lock);
        mt_put_memory_types(&kmem_memory_types);
}

static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
{
        struct device *dev = &dev_dax->dev;
        unsigned long total_len = 0;
        struct dax_kmem_data *data;
        struct memory_dev_type *mtype;
        int i, rc, mapped = 0;
        mhp_t mhp_flags;
        int numa_node;
        int adist = MEMTIER_DEFAULT_DAX_ADISTANCE;

        /*
         * Ensure good NUMA information for the persistent memory.
         * Without this check, there is a risk that slow memory
         * could be mixed in a node with faster memory, causing
         * unavoidable performance issues.
         */
        numa_node = dev_dax->target_node;
        if (numa_node < 0) {
                dev_warn(dev, "rejecting DAX region with invalid node: %d\n",
                                numa_node);
                return -EINVAL;
        }

        mt_calc_adistance(numa_node, &adist);
        mtype = kmem_find_alloc_memory_type(adist);
        if (IS_ERR(mtype))
                return PTR_ERR(mtype);

        for (i = 0; i < dev_dax->nr_range; i++) {
                struct range range;

                rc = dax_kmem_range(dev_dax, i, &range);
                if (rc) {
                        dev_info(dev, "mapping%d: %#llx-%#llx too small after alignment\n",
                                        i, range.start, range.end);
                        continue;
                }
                total_len += range_len(&range);
        }

        if (!total_len) {
                dev_warn(dev, "rejecting DAX region without any memory after alignment\n");
                return -EINVAL;
        }

        init_node_memory_type(numa_node, mtype);

        rc = -ENOMEM;
        data = kzalloc(struct_size(data, res, dev_dax->nr_range), GFP_KERNEL);
        if (!data)
                goto err_dax_kmem_data;

        data->res_name = kstrdup(dev_name(dev), GFP_KERNEL);
        if (!data->res_name)
                goto err_res_name;

        rc = memory_group_register_static(numa_node, PFN_UP(total_len));
        if (rc < 0)
                goto err_reg_mgid;
        data->mgid = rc;

        for (i = 0; i < dev_dax->nr_range; i++) {
                struct resource *res;
                struct range range;

                rc = dax_kmem_range(dev_dax, i, &range);
                if (rc)
                        continue;

                /* Region is permanently reserved if hotremove fails. */
                res = request_mem_region(range.start, range_len(&range), data->res_name);
                if (!res) {
                        dev_warn(dev, "mapping%d: %#llx-%#llx could not reserve region\n",
                                        i, range.start, range.end);
                        /*
                         * Once some memory has been onlined we can't
                         * assume that it can be un-onlined safely.
                         */
                        if (mapped)
                                continue;
                        rc = -EBUSY;
                        goto err_request_mem;
                }
                data->res[i] = res;

                /*
                 * Set flags appropriate for System RAM.  Leave ..._BUSY clear
                 * so that add_memory() can add a child resource.  Do not
                 * inherit flags from the parent since it may set new flags
                 * unknown to us that will break add_memory() below.
                 */
                res->flags = IORESOURCE_SYSTEM_RAM;

                mhp_flags = MHP_NID_IS_MGID;
                if (dev_dax->memmap_on_memory)
                        mhp_flags |= MHP_MEMMAP_ON_MEMORY;

                /*
                 * Ensure that future kexec'd kernels will not treat
                 * this as RAM automatically.
                 */
                rc = add_memory_driver_managed(data->mgid, range.start,
                                range_len(&range), kmem_name, mhp_flags);

                if (rc) {
                        dev_warn(dev, "mapping%d: %#llx-%#llx memory add failed\n",
                                        i, range.start, range.end);
                        remove_resource(res);
                        kfree(res);
                        data->res[i] = NULL;
                        if (mapped)
                                continue;
                        goto err_request_mem;
                }
                mapped++;
        }

        dev_set_drvdata(dev, data);

        return 0;

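        /*
         * Error unwind, in reverse order of setup.  These labels are only
         * reached before any range was successfully added (mapped == 0),
         * so the node's memory-type association can safely be dropped.
         */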
err_request_mem:
        memory_group_unregister(data->mgid);
err_reg_mgid:
        kfree(data->res_name);
err_res_name:
        kfree(data);
err_dax_kmem_data:
        clear_node_memory_type(numa_node, mtype);
        return rc;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
{
        int i, success = 0;
        int node = dev_dax->target_node;
        struct device *dev = &dev_dax->dev;
        struct dax_kmem_data *data = dev_get_drvdata(dev);

        /*
         * We have one shot at removing the memory: if some memory blocks
         * were not offlined before this function is called, remove_memory()
         * will fail, and there is no way to hotremove the memory until
         * reboot, because device unbind succeeds even when we return
         * failure here.
         */
        for (i = 0; i < dev_dax->nr_range; i++) {
                struct range range;
                int rc;

                rc = dax_kmem_range(dev_dax, i, &range);
                if (rc)
                        continue;

                rc = remove_memory(range.start, range_len(&range));
                if (rc == 0) {
                        remove_resource(data->res[i]);
                        kfree(data->res[i]);
                        data->res[i] = NULL;
                        success++;
                        continue;
                }
                any_hotremove_failed = true;
                dev_err(dev,
                        "mapping%d: %#llx-%#llx cannot be hotremoved until the next reboot\n",
                                i, range.start, range.end);
        }

        if (success >= dev_dax->nr_range) {
                memory_group_unregister(data->mgid);
                kfree(data->res_name);
                kfree(data);
                dev_set_drvdata(dev, NULL);
                /*
                 * Clear the memtype association on successful unplug.
                 * Otherwise, memory blocks are left that can still be
                 * offlined/onlined later, and we need to keep the
                 * memory_dev_type for them.  This implies the reference
                 * stays around until the next reboot.
                 */
                clear_node_memory_type(node, NULL);
        }
}
#else
static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
{
        /*
         * Without hotremove, purposely leak the request_mem_region() for
         * the device-dax range and return '0' to ->remove() attempts.
         * Removing the device from the driver always succeeds, but the
         * region remains permanently pinned as reserved by the unreleased
         * request_mem_region().
         */
        any_hotremove_failed = true;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static struct dax_device_driver device_dax_kmem_driver = {
        .probe = dev_dax_kmem_probe,
        .remove = dev_dax_kmem_remove,
        .type = DAXDRV_KMEM_TYPE,
};

static int __init dax_kmem_init(void)
{
        int rc;

        /* Resource name is permanently allocated if any hotremove fails. */
        kmem_name = kstrdup_const("System RAM (kmem)", GFP_KERNEL);
        if (!kmem_name)
                return -ENOMEM;

        rc = dax_driver_register(&device_dax_kmem_driver);
        if (rc)
                goto error_dax_driver;

        return rc;

error_dax_driver:
        kmem_put_memory_types();
        kfree_const(kmem_name);
        return rc;
}

static void __exit dax_kmem_exit(void)
{
        dax_driver_unregister(&device_dax_kmem_driver);
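        /*
         * kmem_name may still be referenced by a region that could not be
         * hotremoved, so only free it when every hotremove succeeded.
         */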
        if (!any_hotremove_failed)
                kfree_const(kmem_name);
        kmem_put_memory_types();
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
module_init(dax_kmem_init);
module_exit(dax_kmem_exit);
MODULE_ALIAS_DAX_DEVICE(0);