/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <[email protected]>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
        int ch_off;
        int nr_luns;
        int *lun_offs;
};

struct nvm_dev_map {
        struct nvm_ch_map *chnls;
        int nr_chnls;
};

struct nvm_area {
        struct list_head list;
        sector_t begin;
        sector_t end;   /* end is excluded */
};

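/*
 * Illustrative example (hypothetical geometry, not taken from a device):
 * a target created on LUNs 4..11 of a device with 4 LUNs per channel spans
 * two physical channels starting at channel 1. Its nvm_dev_map then has
 * nr_chnls = 2, both chnls[].ch_off = 1 (offset added to the target-local
 * channel id) and all lun_offs[] = 0, so target ppa (ch 0, lun 0) maps to
 * device ppa (ch 1, lun 0). See nvm_create_tgt_dev() and nvm_map_to_dev().
 */
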
static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
        struct nvm_target *tgt;

        list_for_each_entry(tgt, &dev->targets, list)
                if (!strcmp(name, tgt->disk->disk_name))
                        return tgt;

        return NULL;
}

static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
        int i;

        for (i = lun_begin; i <= lun_end; i++) {
                if (test_and_set_bit(i, dev->lun_map)) {
                        pr_err("nvm: lun %d already allocated\n", i);
                        goto err;
                }
        }

        return 0;
err:
        while (--i >= lun_begin)
                clear_bit(i, dev->lun_map);

        return -EBUSY;
}

static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
                                 int lun_end)
{
        int i;

        for (i = lun_begin; i <= lun_end; i++)
                WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}

static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_dev_map *dev_map = tgt_dev->map;
        int i, j;

        for (i = 0; i < dev_map->nr_chnls; i++) {
                struct nvm_ch_map *ch_map = &dev_map->chnls[i];
                int *lun_offs = ch_map->lun_offs;
                int ch = i + ch_map->ch_off;

                if (clear) {
                        for (j = 0; j < ch_map->nr_luns; j++) {
                                int lun = j + lun_offs[j];
                                int lunid = (ch * dev->geo.luns_per_chnl) + lun;

                                WARN_ON(!test_and_clear_bit(lunid,
                                                            dev->lun_map));
                        }
                }

                kfree(ch_map->lun_offs);
        }

        kfree(dev_map->chnls);
        kfree(dev_map);

        kfree(tgt_dev->luns);
        kfree(tgt_dev);
}

static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
                                              int lun_begin, int lun_end)
{
        struct nvm_tgt_dev *tgt_dev = NULL;
        struct nvm_dev_map *dev_rmap = dev->rmap;
        struct nvm_dev_map *dev_map;
        struct ppa_addr *luns;
        int nr_luns = lun_end - lun_begin + 1;
        int luns_left = nr_luns;
        int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
        int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
        int bch = lun_begin / dev->geo.luns_per_chnl;
        int blun = lun_begin % dev->geo.luns_per_chnl;
        int lunid = 0;
        int lun_balanced = 1;
        int prev_nr_luns;
        int i, j;

        nr_chnls = nr_luns / dev->geo.luns_per_chnl;
        nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;

        dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
        if (!dev_map)
                goto err_dev;

        dev_map->chnls = kcalloc(nr_chnls, sizeof(struct nvm_ch_map),
                                 GFP_KERNEL);
        if (!dev_map->chnls)
                goto err_chnls;

        luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL);
        if (!luns)
                goto err_luns;

        prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
                                dev->geo.luns_per_chnl : luns_left;
        for (i = 0; i < nr_chnls; i++) {
                struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
                int *lun_roffs = ch_rmap->lun_offs;
                struct nvm_ch_map *ch_map = &dev_map->chnls[i];
                int *lun_offs;
                int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
                                        dev->geo.luns_per_chnl : luns_left;

                if (lun_balanced && prev_nr_luns != luns_in_chnl)
                        lun_balanced = 0;

                ch_map->ch_off = ch_rmap->ch_off = bch;
                ch_map->nr_luns = luns_in_chnl;

                lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
                if (!lun_offs)
                        goto err_ch;

                for (j = 0; j < luns_in_chnl; j++) {
                        luns[lunid].ppa = 0;
                        luns[lunid].g.ch = i;
                        luns[lunid++].g.lun = j;

                        lun_offs[j] = blun;
                        lun_roffs[j + blun] = blun;
                }

                ch_map->lun_offs = lun_offs;

                /* when starting a new channel, lun offset is reset */
                blun = 0;
                luns_left -= luns_in_chnl;
        }

        dev_map->nr_chnls = nr_chnls;

        tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
        if (!tgt_dev)
                goto err_ch;

        memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
        /* Target device only owns a portion of the physical device */
        tgt_dev->geo.nr_chnls = nr_chnls;
        tgt_dev->geo.nr_luns = nr_luns;
        tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
        tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
        tgt_dev->q = dev->q;
        tgt_dev->map = dev_map;
        tgt_dev->luns = luns;
        memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));

        tgt_dev->parent = dev;

        return tgt_dev;
err_ch:
        while (--i >= 0)
                kfree(dev_map->chnls[i].lun_offs);
        kfree(luns);
err_luns:
        kfree(dev_map->chnls);
err_chnls:
        kfree(dev_map);
err_dev:
        return tgt_dev;
}

static const struct block_device_operations nvm_fops = {
        .owner          = THIS_MODULE,
};

static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
        struct nvm_ioctl_create_simple *s = &create->conf.s;
        struct request_queue *tqueue;
        struct gendisk *tdisk;
        struct nvm_tgt_type *tt;
        struct nvm_target *t;
        struct nvm_tgt_dev *tgt_dev;
        void *targetdata;
        int ret;

        tt = nvm_find_target_type(create->tgttype, 1);
        if (!tt) {
                pr_err("nvm: target type %s not found\n", create->tgttype);
                return -EINVAL;
        }

        mutex_lock(&dev->mlock);
        t = nvm_find_target(dev, create->tgtname);
        if (t) {
                pr_err("nvm: target name already exists.\n");
                mutex_unlock(&dev->mlock);
                return -EINVAL;
        }
        mutex_unlock(&dev->mlock);

        ret = nvm_reserve_luns(dev, s->lun_begin, s->lun_end);
        if (ret)
                return ret;

        t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
        if (!t) {
                ret = -ENOMEM;
                goto err_reserve;
        }

        tgt_dev = nvm_create_tgt_dev(dev, s->lun_begin, s->lun_end);
        if (!tgt_dev) {
                pr_err("nvm: could not create target device\n");
                ret = -ENOMEM;
                goto err_t;
        }

        tdisk = alloc_disk(0);
        if (!tdisk) {
                ret = -ENOMEM;
                goto err_dev;
        }

        tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
        if (!tqueue) {
                ret = -ENOMEM;
                goto err_disk;
        }
        blk_queue_make_request(tqueue, tt->make_rq);

        strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
        tdisk->flags = GENHD_FL_EXT_DEVT;
        tdisk->major = 0;
        tdisk->first_minor = 0;
        tdisk->fops = &nvm_fops;
        tdisk->queue = tqueue;

        targetdata = tt->init(tgt_dev, tdisk, create->flags);
        if (IS_ERR(targetdata)) {
                ret = PTR_ERR(targetdata);
                goto err_init;
        }

        tdisk->private_data = targetdata;
        tqueue->queuedata = targetdata;

        blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);

        set_capacity(tdisk, tt->capacity(targetdata));
        add_disk(tdisk);

        if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
                ret = -ENOMEM;
                goto err_sysfs;
        }

        t->type = tt;
        t->disk = tdisk;
        t->dev = tgt_dev;

        mutex_lock(&dev->mlock);
        list_add_tail(&t->list, &dev->targets);
        mutex_unlock(&dev->mlock);

        __module_get(tt->owner);

        return 0;
err_sysfs:
        if (tt->exit)
                tt->exit(targetdata);
err_init:
        blk_cleanup_queue(tqueue);
        tdisk->queue = NULL;
err_disk:
        put_disk(tdisk);
err_dev:
        nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
        kfree(t);
err_reserve:
        nvm_release_luns_err(dev, s->lun_begin, s->lun_end);
        return ret;
}

static void __nvm_remove_target(struct nvm_target *t)
{
        struct nvm_tgt_type *tt = t->type;
        struct gendisk *tdisk = t->disk;
        struct request_queue *q = tdisk->queue;

        del_gendisk(tdisk);
        blk_cleanup_queue(q);

        if (tt->sysfs_exit)
                tt->sysfs_exit(tdisk);

        if (tt->exit)
                tt->exit(tdisk->private_data);

        nvm_remove_tgt_dev(t->dev, 1);
        put_disk(tdisk);
        module_put(t->type->owner);

        list_del(&t->list);
        kfree(t);
}

/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @dev:	device
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
{
        struct nvm_target *t;
        struct block_device *bdev;

        mutex_lock(&dev->mlock);
        t = nvm_find_target(dev, remove->tgtname);
        if (!t) {
                mutex_unlock(&dev->mlock);
                return 1;
        }
        bdev = bdget_disk(t->disk, 0);
        if (!bdev) {
                pr_err("nvm: removal failed, allocating bd failed\n");
                mutex_unlock(&dev->mlock);
                return -ENOMEM;
        }
        if (bdev->bd_super || bdev->bd_part_count) {
                pr_err("nvm: removal failed, block device busy\n");
                bdput(bdev);
                mutex_unlock(&dev->mlock);
                return -EBUSY;
        }
        bdput(bdev);
        __nvm_remove_target(t);
        mutex_unlock(&dev->mlock);

        return 0;
}

static int nvm_register_map(struct nvm_dev *dev)
{
        struct nvm_dev_map *rmap;
        int i, j;

        rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
        if (!rmap)
                goto err_rmap;

        rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct nvm_ch_map),
                              GFP_KERNEL);
        if (!rmap->chnls)
                goto err_chnls;

        for (i = 0; i < dev->geo.nr_chnls; i++) {
                struct nvm_ch_map *ch_rmap;
                int *lun_roffs;
                int luns_in_chnl = dev->geo.luns_per_chnl;

                ch_rmap = &rmap->chnls[i];

                ch_rmap->ch_off = -1;
                ch_rmap->nr_luns = luns_in_chnl;

                lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
                if (!lun_roffs)
                        goto err_ch;

                for (j = 0; j < luns_in_chnl; j++)
                        lun_roffs[j] = -1;

                ch_rmap->lun_offs = lun_roffs;
        }

        dev->rmap = rmap;

        return 0;
err_ch:
        while (--i >= 0)
                kfree(rmap->chnls[i].lun_offs);
err_chnls:
        kfree(rmap);
err_rmap:
        return -ENOMEM;
}

static void nvm_unregister_map(struct nvm_dev *dev)
{
        struct nvm_dev_map *rmap = dev->rmap;
        int i;

        for (i = 0; i < dev->geo.nr_chnls; i++)
                kfree(rmap->chnls[i].lun_offs);

        kfree(rmap->chnls);
        kfree(rmap);
}

static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
        struct nvm_dev_map *dev_map = tgt_dev->map;
        struct nvm_ch_map *ch_map = &dev_map->chnls[p->g.ch];
        int lun_off = ch_map->lun_offs[p->g.lun];

        p->g.ch += ch_map->ch_off;
        p->g.lun += lun_off;
}

static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_dev_map *dev_rmap = dev->rmap;
        struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch];
        int lun_roff = ch_rmap->lun_offs[p->g.lun];

        p->g.ch -= ch_rmap->ch_off;
        p->g.lun -= lun_roff;
}

static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
                               struct ppa_addr *ppa_list, int nr_ppas)
{
        int i;

        for (i = 0; i < nr_ppas; i++) {
                nvm_map_to_dev(tgt_dev, &ppa_list[i]);
                ppa_list[i] = generic_to_dev_addr(tgt_dev, ppa_list[i]);
        }
}

static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
                               struct ppa_addr *ppa_list, int nr_ppas)
{
        int i;

        for (i = 0; i < nr_ppas; i++) {
                ppa_list[i] = dev_to_generic_addr(tgt_dev, ppa_list[i]);
                nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
        }
}

static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
        if (rqd->nr_ppas == 1) {
                nvm_ppa_tgt_to_dev(tgt_dev, &rqd->ppa_addr, 1);
                return;
        }

        nvm_ppa_tgt_to_dev(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}

static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
        if (rqd->nr_ppas == 1) {
                nvm_ppa_dev_to_tgt(tgt_dev, &rqd->ppa_addr, 1);
                return;
        }

        nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}

void nvm_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
                     int len)
{
        struct nvm_geo *geo = &dev->geo;
        struct nvm_dev_map *dev_rmap = dev->rmap;
        u64 i;

        for (i = 0; i < len; i++) {
                struct nvm_ch_map *ch_rmap;
                int *lun_roffs;
                struct ppa_addr gaddr;
                u64 pba = le64_to_cpu(entries[i]);
                u64 diff;

                if (!pba)
                        continue;

                gaddr = linear_to_generic_addr(geo, pba);
                ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
                lun_roffs = ch_rmap->lun_offs;

                diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
                                (lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;

                entries[i] -= cpu_to_le64(diff);
        }
}
EXPORT_SYMBOL(nvm_part_to_tgt);

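/*
 * Worked example of the diff computation above (hypothetical numbers): with
 * luns_per_chnl = 4 and sec_per_lun = 1024, a target whose first LUN sits at
 * physical channel 1, LUN 0 has ch_off = 1 and lun_roffs[lun] = 0, so each
 * device-linear entry is shifted down by (1 * 4 + 0) * 1024 = 4096 sectors
 * to become relative to the target's own partition.
 */
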
struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
{
        struct nvm_tgt_type *tmp, *tt = NULL;

        if (lock)
                down_write(&nvm_tgtt_lock);

        list_for_each_entry(tmp, &nvm_tgt_types, list)
                if (!strcmp(name, tmp->name)) {
                        tt = tmp;
                        break;
                }

        if (lock)
                up_write(&nvm_tgtt_lock);
        return tt;
}
EXPORT_SYMBOL(nvm_find_target_type);

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
        int ret = 0;

        down_write(&nvm_tgtt_lock);
        if (nvm_find_target_type(tt->name, 0))
                ret = -EEXIST;
        else
                list_add(&tt->list, &nvm_tgt_types);
        up_write(&nvm_tgtt_lock);

        return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
        if (!tt)
                return;

        down_write(&nvm_lock);
        list_del(&tt->list);
        up_write(&nvm_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

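/*
 * Minimal sketch (hypothetical target type, not part of this file) of how a
 * target module would use the registration API above; the example_* callbacks
 * are placeholders:
 *
 *	static struct nvm_tgt_type tt_example = {
 *		.name		= "example",
 *		.version	= {1, 0, 0},
 *		.make_rq	= example_make_rq,
 *		.capacity	= example_capacity,
 *		.init		= example_init,
 *		.exit		= example_exit,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * The module would call nvm_register_tgt_type(&tt_example) on load and
 * nvm_unregister_tgt_type(&tt_example) on unload.
 */
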
void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
                        dma_addr_t *dma_handler)
{
        return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
                                                        dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
        dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
        struct nvm_dev *dev;

        list_for_each_entry(dev, &nvm_devices, devices)
                if (!strcmp(name, dev->name))
                        return dev;

        return NULL;
}

int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
                       int nr_ppas, int type)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_rq rqd;
        int ret;

        if (nr_ppas > dev->ops->max_phys_sect) {
                pr_err("nvm: unable to update all blocks atomically\n");
                return -EINVAL;
        }

        memset(&rqd, 0, sizeof(struct nvm_rq));

        nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas, 1);
        nvm_rq_tgt_to_dev(tgt_dev, &rqd);

        ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
        nvm_free_rqd_ppalist(tgt_dev, &rqd);
        if (ret) {
                pr_err("nvm: failed bb mark\n");
                return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);

int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
{
        struct nvm_dev *dev = tgt_dev->parent;

        return dev->ops->max_phys_sect;
}
EXPORT_SYMBOL(nvm_max_phys_sects);

int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
        struct nvm_dev *dev = tgt_dev->parent;
        int ret;

        if (!dev->ops->submit_io)
                return -ENODEV;

        nvm_rq_tgt_to_dev(tgt_dev, rqd);

        rqd->dev = tgt_dev;

        /* In case of error, fail with right address format */
        ret = dev->ops->submit_io(dev, rqd);
        if (ret)
                nvm_rq_dev_to_tgt(tgt_dev, rqd);
        return ret;
}
EXPORT_SYMBOL(nvm_submit_io);

static void nvm_end_io_sync(struct nvm_rq *rqd)
{
        struct completion *waiting = rqd->private;

        complete(waiting);
}

int nvm_erase_sync(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
                   int nr_ppas)
{
        struct nvm_geo *geo = &tgt_dev->geo;
        struct nvm_rq rqd;
        int ret;
        DECLARE_COMPLETION_ONSTACK(wait);

        memset(&rqd, 0, sizeof(struct nvm_rq));

        rqd.opcode = NVM_OP_ERASE;
        rqd.end_io = nvm_end_io_sync;
        rqd.private = &wait;
        rqd.flags = geo->plane_mode >> 1;

        ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas, 1);
        if (ret)
                return ret;

        ret = nvm_submit_io(tgt_dev, &rqd);
        if (ret) {
                pr_err("nvm: erase I/O submission failed: %d\n", ret);
                goto free_ppa_list;
        }
        wait_for_completion_io(&wait);

free_ppa_list:
        nvm_free_rqd_ppalist(tgt_dev, &rqd);

        return ret;
}
EXPORT_SYMBOL(nvm_erase_sync);

int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
                    nvm_l2p_update_fn *update_l2p, void *priv)
{
        struct nvm_dev *dev = tgt_dev->parent;

        if (!dev->ops->get_l2p_tbl)
                return 0;

        return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
}
EXPORT_SYMBOL(nvm_get_l2p_tbl);

int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_area *area, *prev, *next;
        sector_t begin = 0;
        sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;

        if (len > max_sectors)
                return -EINVAL;

        area = kmalloc(sizeof(struct nvm_area), GFP_KERNEL);
        if (!area)
                return -ENOMEM;

        prev = NULL;

        spin_lock(&dev->lock);
        list_for_each_entry(next, &dev->area_list, list) {
                if (begin + len > next->begin) {
                        begin = next->end;
                        prev = next;
                        continue;
                }
                break;
        }

        if ((begin + len) > max_sectors) {
                spin_unlock(&dev->lock);
                kfree(area);
                return -EINVAL;
        }

        area->begin = *lba = begin;
        area->end = begin + len;

        if (prev) /* insert into sorted order */
                list_add(&area->list, &prev->list);
        else
                list_add(&area->list, &dev->area_list);
        spin_unlock(&dev->lock);

        return 0;
}
EXPORT_SYMBOL(nvm_get_area);

void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_area *area;

        spin_lock(&dev->lock);
        list_for_each_entry(area, &dev->area_list, list) {
                if (area->begin != begin)
                        continue;

                list_del(&area->list);
                spin_unlock(&dev->lock);
                kfree(area);
                return;
        }
        spin_unlock(&dev->lock);
}
EXPORT_SYMBOL(nvm_put_area);

int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
                        const struct ppa_addr *ppas, int nr_ppas, int vblk)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_geo *geo = &tgt_dev->geo;
        int i, plane_cnt, pl_idx;
        struct ppa_addr ppa;

        if ((!vblk || geo->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
                rqd->nr_ppas = nr_ppas;
                rqd->ppa_addr = ppas[0];

                return 0;
        }

        rqd->nr_ppas = nr_ppas;
        rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
        if (!rqd->ppa_list) {
                pr_err("nvm: failed to allocate dma memory\n");
                return -ENOMEM;
        }

        if (!vblk) {
                for (i = 0; i < nr_ppas; i++)
                        rqd->ppa_list[i] = ppas[i];
        } else {
                plane_cnt = geo->plane_mode;
                rqd->nr_ppas *= plane_cnt;

                for (i = 0; i < nr_ppas; i++) {
                        for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
                                ppa = ppas[i];
                                ppa.g.pl = pl_idx;
                                rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
                        }
                }
        }

        return 0;
}
EXPORT_SYMBOL(nvm_set_rqd_ppalist);

void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
        if (!rqd->ppa_list)
                return;

        nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}
EXPORT_SYMBOL(nvm_free_rqd_ppalist);

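/*
 * Layout note (illustrative): for vblk requests the per-plane copies built
 * above are grouped by plane, not interleaved. With nr_ppas = 2 and
 * plane_mode = 2 the resulting list is
 *	[ppa0.pl0, ppa1.pl0, ppa0.pl1, ppa1.pl1]
 * since entry (pl_idx * nr_ppas) + i is written for each plane index.
 */
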
void nvm_end_io(struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *tgt_dev = rqd->dev;

        /* Convert address space */
        if (tgt_dev)
                nvm_rq_dev_to_tgt(tgt_dev, rqd);

        if (rqd->end_io)
                rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

/*
 * Folds a bad block list from its plane representation to its virtual
 * block representation. The fold is done in place and the reduced size is
 * returned.
 *
 * If any of the plane statuses is bad or grown bad, the virtual block
 * is marked bad. If not bad, the first plane state acts as the block state.
 */
int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
        struct nvm_geo *geo = &dev->geo;
        int blk, offset, pl, blktype;

        if (nr_blks != geo->blks_per_lun * geo->plane_mode)
                return -EINVAL;

        for (blk = 0; blk < geo->blks_per_lun; blk++) {
                offset = blk * geo->plane_mode;
                blktype = blks[offset];

                /* Bad blocks on any planes take precedence over other types */
                for (pl = 0; pl < geo->plane_mode; pl++) {
                        if (blks[offset + pl] &
                                        (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
                                blktype = blks[offset + pl];
                                break;
                        }
                }

                blks[blk] = blktype;
        }

        return geo->blks_per_lun;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);

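/*
 * Example of the fold (illustrative, GOOD/BAD stand in for the NVM_BLK_T_*
 * values): with plane_mode = 2 and blks[] = {GOOD, BAD, GOOD, GOOD} covering
 * two virtual blocks, the table folds in place to {BAD, GOOD}: block 0 is
 * marked bad because one of its planes is bad, block 1 keeps the state of
 * its first plane.
 */
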
int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
                       u8 *blks)
{
        struct nvm_dev *dev = tgt_dev->parent;

        nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

        return dev->ops->get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);

static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
        struct nvm_geo *geo = &dev->geo;
        int i;

        dev->lps_per_blk = geo->pgs_per_blk;
        dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
        if (!dev->lptbl)
                return -ENOMEM;

        /* Just a linear array */
        for (i = 0; i < dev->lps_per_blk; i++)
                dev->lptbl[i] = i;

        return 0;
}

static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
        int i, p;
        struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;

        if (!mlc->num_pairs)
                return 0;

        dev->lps_per_blk = mlc->num_pairs;
        dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
        if (!dev->lptbl)
                return -ENOMEM;

        /* The lower page table encoding consists of a list of bytes, where
         * each byte has a lower and an upper half. The first half byte
         * maintains the increment value and every value after it is an
         * offset added to the previous increment value.
         */
        dev->lptbl[0] = mlc->pairs[0] & 0xF;
        for (i = 1; i < dev->lps_per_blk; i++) {
                p = mlc->pairs[i >> 1];
                if (i & 0x1) /* upper */
                        dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
                else /* lower */
                        dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
        }

        return 0;
}

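/*
 * Worked example of the MLC lower page table decoding above (hypothetical
 * pairs[] bytes): pairs[0] = 0x21 gives lptbl[0] = 1 (low nibble) and
 * lptbl[1] = 1 + 2 = 3 (high nibble added); pairs[1] = 0x12 then gives
 * lptbl[2] = 3 + 2 = 5 and lptbl[3] = 5 + 1 = 6. Each nibble is an increment
 * applied to the previous table entry.
 */
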
static int nvm_core_init(struct nvm_dev *dev)
{
        struct nvm_id *id = &dev->identity;
        struct nvm_id_group *grp = &id->grp;
        struct nvm_geo *geo = &dev->geo;
        int ret;

        /* Whole device values */
        geo->nr_chnls = grp->num_ch;
        geo->luns_per_chnl = grp->num_lun;

        /* Generic device values */
        geo->pgs_per_blk = grp->num_pg;
        geo->blks_per_lun = grp->num_blk;
        geo->nr_planes = grp->num_pln;
        geo->fpg_size = grp->fpg_sz;
        geo->pfpg_size = grp->fpg_sz * grp->num_pln;
        geo->sec_size = grp->csecs;
        geo->oob_size = grp->sos;
        geo->sec_per_pg = grp->fpg_sz / grp->csecs;
        geo->mccap = grp->mccap;
        memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

        geo->plane_mode = NVM_PLANE_SINGLE;
        geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;

        if (grp->mpos & 0x020202)
                geo->plane_mode = NVM_PLANE_DOUBLE;
        if (grp->mpos & 0x040404)
                geo->plane_mode = NVM_PLANE_QUAD;

        if (grp->mtype != 0) {
                pr_err("nvm: memory type not supported\n");
                return -EINVAL;
        }

        /* calculated values */
        geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
        geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
        geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
        geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;

        dev->total_secs = geo->nr_luns * geo->sec_per_lun;
        dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
                                        sizeof(unsigned long), GFP_KERNEL);
        if (!dev->lun_map)
                return -ENOMEM;

        switch (grp->fmtype) {
        case NVM_ID_FMTYPE_SLC:
                if (nvm_init_slc_tbl(dev, grp)) {
                        ret = -ENOMEM;
                        goto err_fmtype;
                }
                break;
        case NVM_ID_FMTYPE_MLC:
                if (nvm_init_mlc_tbl(dev, grp)) {
                        ret = -ENOMEM;
                        goto err_fmtype;
                }
                break;
        default:
                pr_err("nvm: flash type not supported\n");
                ret = -EINVAL;
                goto err_fmtype;
        }

        INIT_LIST_HEAD(&dev->area_list);
        INIT_LIST_HEAD(&dev->targets);
        mutex_init(&dev->mlock);
        spin_lock_init(&dev->lock);

        ret = nvm_register_map(dev);
        if (ret)
                goto err_fmtype;

        blk_queue_logical_block_size(dev->q, geo->sec_size);
        return 0;
err_fmtype:
        kfree(dev->lun_map);
        return ret;
}

static void nvm_free(struct nvm_dev *dev)
{
        if (!dev)
                return;

        if (dev->dma_pool)
                dev->ops->destroy_dma_pool(dev->dma_pool);

        nvm_unregister_map(dev);
        kfree(dev->lptbl);
        kfree(dev->lun_map);
        kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
        struct nvm_geo *geo = &dev->geo;
        int ret = -EINVAL;

        if (dev->ops->identity(dev, &dev->identity)) {
                pr_err("nvm: device could not be identified\n");
                goto err;
        }

        pr_debug("nvm: ver:%x nvm_vendor:%x\n",
                        dev->identity.ver_id, dev->identity.vmnt);

        if (dev->identity.ver_id != 1) {
                pr_err("nvm: device not supported by kernel.\n");
                goto err;
        }

        ret = nvm_core_init(dev);
        if (ret) {
                pr_err("nvm: could not initialize core structures.\n");
                goto err;
        }

        pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
                        dev->name, geo->sec_per_pg, geo->nr_planes,
                        geo->pgs_per_blk, geo->blks_per_lun,
                        geo->nr_luns, geo->nr_chnls);
        return 0;
err:
        pr_err("nvm: failed to initialize nvm\n");
        return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
        return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
}
EXPORT_SYMBOL(nvm_alloc_dev);

int nvm_register(struct nvm_dev *dev)
{
        int ret;

        if (!dev->q || !dev->ops)
                return -EINVAL;

        if (dev->ops->max_phys_sect > 256) {
                pr_info("nvm: max sectors supported is 256.\n");
                return -EINVAL;
        }

        if (dev->ops->max_phys_sect > 1) {
                dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
                if (!dev->dma_pool) {
                        pr_err("nvm: could not create dma pool\n");
                        return -ENOMEM;
                }
        }

        ret = nvm_init(dev);
        if (ret)
                goto err_init;

        /* register device with a supported media manager */
        down_write(&nvm_lock);
        list_add(&dev->devices, &nvm_devices);
        up_write(&nvm_lock);

        return 0;
err_init:
        dev->ops->destroy_dma_pool(dev->dma_pool);
        return ret;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(struct nvm_dev *dev)
{
        struct nvm_target *t, *tmp;

        mutex_lock(&dev->mlock);
        list_for_each_entry_safe(t, tmp, &dev->targets, list) {
                if (t->dev->parent != dev)
                        continue;
                __nvm_remove_target(t);
        }
        mutex_unlock(&dev->mlock);

        down_write(&nvm_lock);
        list_del(&dev->devices);
        up_write(&nvm_lock);

        nvm_free(dev);
}
EXPORT_SYMBOL(nvm_unregister);

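/*
 * Typical driver flow (sketch, not taken from this file; the ops table name
 * is hypothetical): the host driver allocates and registers a device roughly
 * as
 *
 *	dev = nvm_alloc_dev(node);
 *	dev->q = queue;
 *	dev->ops = &my_nvm_dev_ops;
 *	strlcpy(dev->name, disk_name, sizeof(dev->name));
 *	ret = nvm_register(dev);
 *
 * and calls nvm_unregister(dev) on removal, which also tears down any
 * remaining targets on that device.
 */
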
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
        struct nvm_dev *dev;
        struct nvm_ioctl_create_simple *s;

        down_write(&nvm_lock);
        dev = nvm_find_nvm_dev(create->dev);
        up_write(&nvm_lock);

        if (!dev) {
                pr_err("nvm: device not found\n");
                return -EINVAL;
        }

        if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
                pr_err("nvm: config type not valid\n");
                return -EINVAL;
        }
        s = &create->conf.s;

        if (s->lun_begin == -1 && s->lun_end == -1) {
                s->lun_begin = 0;
                s->lun_end = dev->geo.nr_luns - 1;
        }

        if (s->lun_begin > s->lun_end || s->lun_end >= dev->geo.nr_luns) {
                pr_err("nvm: lun out of bound (%u:%u > %u)\n",
                        s->lun_begin, s->lun_end, dev->geo.nr_luns - 1);
                return -EINVAL;
        }

        return nvm_create_tgt(dev, create);
}

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
        struct nvm_ioctl_info *info;
        struct nvm_tgt_type *tt;
        int tgt_iter = 0;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
        if (IS_ERR(info))
                return -EFAULT;

        info->version[0] = NVM_VERSION_MAJOR;
        info->version[1] = NVM_VERSION_MINOR;
        info->version[2] = NVM_VERSION_PATCH;

        down_write(&nvm_lock);
        list_for_each_entry(tt, &nvm_tgt_types, list) {
                struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

                tgt->version[0] = tt->version[0];
                tgt->version[1] = tt->version[1];
                tgt->version[2] = tt->version[2];
                strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

                tgt_iter++;
        }

        info->tgtsize = tgt_iter;
        up_write(&nvm_lock);

        if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
                kfree(info);
                return -EFAULT;
        }

        kfree(info);
        return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
        struct nvm_ioctl_get_devices *devices;
        struct nvm_dev *dev;
        int i = 0;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
        if (!devices)
                return -ENOMEM;

        down_write(&nvm_lock);
        list_for_each_entry(dev, &nvm_devices, devices) {
                struct nvm_ioctl_device_info *info = &devices->info[i];

                strlcpy(info->devname, dev->name, sizeof(info->devname));

                /* kept for compatibility */
                info->bmversion[0] = 1;
                info->bmversion[1] = 0;
                info->bmversion[2] = 0;
                strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
                i++;

                if (i > 31) {
                        pr_err("nvm: max 31 devices can be reported.\n");
                        break;
                }
        }
        up_write(&nvm_lock);

        devices->nr_devices = i;

        if (copy_to_user(arg, devices,
                         sizeof(struct nvm_ioctl_get_devices))) {
                kfree(devices);
                return -EFAULT;
        }

        kfree(devices);
        return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
        struct nvm_ioctl_create create;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
                return -EFAULT;

        create.dev[DISK_NAME_LEN - 1] = '\0';
        create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
        create.tgtname[DISK_NAME_LEN - 1] = '\0';

        if (create.flags != 0) {
                __u32 flags = create.flags;

                /* Check for valid flags */
                if (flags & NVM_TARGET_FACTORY)
                        flags &= ~NVM_TARGET_FACTORY;

                if (flags) {
                        pr_err("nvm: flag not supported\n");
                        return -EINVAL;
                }
        }

        return __nvm_configure_create(&create);
}

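/*
 * Userspace sketch of driving the create path above (assumptions: the ioctl
 * is issued on the control node registered below, and the device/target
 * names are hypothetical; only target types registered via
 * nvm_register_tgt_type() are accepted):
 *
 *	struct nvm_ioctl_create c = { .flags = 0 };
 *	strcpy(c.dev, "nvme0n1");
 *	strcpy(c.tgtname, "tgt0");
 *	strcpy(c.tgttype, "pblk");
 *	c.conf.type = NVM_CONFIG_TYPE_SIMPLE;
 *	c.conf.s.lun_begin = -1;	// -1/-1 selects all LUNs, see
 *	c.conf.s.lun_end = -1;		// __nvm_configure_create()
 *	ioctl(fd, NVM_DEV_CREATE, &c);
 */
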
static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
        struct nvm_ioctl_remove remove;
        struct nvm_dev *dev;
        int ret = 0;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
                return -EFAULT;

        remove.tgtname[DISK_NAME_LEN - 1] = '\0';

        if (remove.flags != 0) {
                pr_err("nvm: no flags supported\n");
                return -EINVAL;
        }

        list_for_each_entry(dev, &nvm_devices, devices) {
                ret = nvm_remove_tgt(dev, &remove);
                if (!ret)
                        break;
        }

        return ret;
}

55696154 MB |
1326 | static long nvm_ioctl_dev_init(struct file *file, void __user *arg) |
1327 | { | |
1328 | struct nvm_ioctl_dev_init init; | |
1329 | ||
1330 | if (!capable(CAP_SYS_ADMIN)) | |
1331 | return -EPERM; | |
1332 | ||
1333 | if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init))) | |
1334 | return -EFAULT; | |
1335 | ||
1336 | if (init.flags != 0) { | |
1337 | pr_err("nvm: no flags supported\n"); | |
1338 | return -EINVAL; | |
1339 | } | |
1340 | ||
ade69e24 | 1341 | return 0; |
55696154 MB |
1342 | } |
1343 | ||
ade69e24 | 1344 | /* Kept for compatibility reasons */ |
8b4970c4 MB |
1345 | static long nvm_ioctl_dev_factory(struct file *file, void __user *arg) |
1346 | { | |
1347 | struct nvm_ioctl_dev_factory fact; | |
8b4970c4 MB |
1348 | |
1349 | if (!capable(CAP_SYS_ADMIN)) | |
1350 | return -EPERM; | |
1351 | ||
1352 | if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory))) | |
1353 | return -EFAULT; | |
1354 | ||
1355 | fact.dev[DISK_NAME_LEN - 1] = '\0'; | |
1356 | ||
1357 | if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1)) | |
1358 | return -EINVAL; | |
1359 | ||
bf643185 | 1360 | return 0; |
8b4970c4 MB |
1361 | } |
1362 | ||
static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
        void __user *argp = (void __user *)arg;

        switch (cmd) {
        case NVM_INFO:
                return nvm_ioctl_info(file, argp);
        case NVM_GET_DEVICES:
                return nvm_ioctl_get_devices(file, argp);
        case NVM_DEV_CREATE:
                return nvm_ioctl_dev_create(file, argp);
        case NVM_DEV_REMOVE:
                return nvm_ioctl_dev_remove(file, argp);
        case NVM_DEV_INIT:
                return nvm_ioctl_dev_init(file, argp);
        case NVM_DEV_FACTORY:
                return nvm_ioctl_dev_factory(file, argp);
        }
        return 0;
}

static const struct file_operations _ctl_fops = {
        .open = nonseekable_open,
        .unlocked_ioctl = nvm_ctl_ioctl,
        .owner = THIS_MODULE,
        .llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
        .minor          = MISC_DYNAMIC_MINOR,
        .name           = "lightnvm",
        .nodename       = "lightnvm/control",
        .fops           = &_ctl_fops,
};
builtin_misc_device(_nvm_misc);