/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <[email protected]>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
	int ch_off;
	int nr_luns;
	int *lun_offs;
};

struct nvm_dev_map {
	struct nvm_ch_map *chnls;
	int nr_chnls;
};

struct nvm_area {
	struct list_head list;
	sector_t begin;
	sector_t end;	/* end is excluded */
};

static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
	struct nvm_target *tgt;

	list_for_each_entry(tgt, &dev->targets, list)
		if (!strcmp(name, tgt->disk->disk_name))
			return tgt;

	return NULL;
}

static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++) {
		if (test_and_set_bit(i, dev->lun_map)) {
			pr_err("nvm: lun %d already allocated\n", i);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= lun_begin)
		clear_bit(i, dev->lun_map);

	return -EBUSY;
}
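
/*
 * Usage sketch (illustrative, not from the original source): a target
 * claiming luns 4..7 reserves them up front and releases them again on
 * any later error:
 *
 *	if (nvm_reserve_luns(dev, 4, 7))
 *		return -EBUSY;
 *	...
 *	nvm_release_luns_err(dev, 4, 7);	// error path only
 *
 * The reservation is one test_and_set_bit() per lun in dev->lun_map, so
 * it is first come, first served across all targets on the device.
 */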

static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
				 int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++)
		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}

static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_map = tgt_dev->map;
	int i, j;

	for (i = 0; i < dev_map->nr_chnls; i++) {
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs = ch_map->lun_offs;
		int ch = i + ch_map->ch_off;

		for (j = 0; j < ch_map->nr_luns; j++) {
			int lun = j + lun_offs[j];
			int lunid = (ch * dev->geo.luns_per_chnl) + lun;

			WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
		}

		kfree(ch_map->lun_offs);
	}

	kfree(dev_map->chnls);
	kfree(dev_map);

	kfree(tgt_dev->luns);
	kfree(tgt_dev);
}

static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
					      int lun_begin, int lun_end)
{
	struct nvm_tgt_dev *tgt_dev = NULL;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_dev_map *dev_map;
	struct ppa_addr *luns;
	int nr_luns = lun_end - lun_begin + 1;
	int luns_left = nr_luns;
	int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
	int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
	int bch = lun_begin / dev->geo.luns_per_chnl;
	int blun = lun_begin % dev->geo.luns_per_chnl;
	int lunid = 0;
	int lun_balanced = 1;
	int prev_nr_luns;
	int i, j;

	nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;

	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!dev_map)
		goto err_dev;

	dev_map->chnls = kcalloc(nr_chnls, sizeof(struct nvm_ch_map),
				 GFP_KERNEL);
	if (!dev_map->chnls)
		goto err_chnls;

	luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!luns)
		goto err_luns;

	prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
					dev->geo.luns_per_chnl : luns_left;
	for (i = 0; i < nr_chnls; i++) {
		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
		int *lun_roffs = ch_rmap->lun_offs;
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs;
		int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
					dev->geo.luns_per_chnl : luns_left;

		if (lun_balanced && prev_nr_luns != luns_in_chnl)
			lun_balanced = 0;

		ch_map->ch_off = ch_rmap->ch_off = bch;
		ch_map->nr_luns = luns_in_chnl;

		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_offs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++) {
			luns[lunid].ppa = 0;
			luns[lunid].g.ch = i;
			luns[lunid++].g.lun = j;

			lun_offs[j] = blun;
			lun_roffs[j + blun] = blun;
		}

		ch_map->lun_offs = lun_offs;

		/* when starting a new channel, lun offset is reset */
		blun = 0;
		luns_left -= luns_in_chnl;
	}

	dev_map->nr_chnls = nr_chnls;

	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
	if (!tgt_dev)
		goto err_ch;

	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
	/* Target device only owns a portion of the physical device */
	tgt_dev->geo.nr_chnls = nr_chnls;
	tgt_dev->geo.nr_luns = nr_luns;
	tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
	tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
	tgt_dev->q = dev->q;
	tgt_dev->map = dev_map;
	tgt_dev->luns = luns;
	memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));

	tgt_dev->parent = dev;

	return tgt_dev;
err_ch:
	while (--i >= 0)
		kfree(dev_map->chnls[i].lun_offs);
	kfree(luns);
err_luns:
	kfree(dev_map->chnls);
err_chnls:
	kfree(dev_map);
err_dev:
	return tgt_dev;
}
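
/*
 * Worked example (illustrative): with geo.luns_per_chnl = 4, a request
 * for luns 0..5 gives nr_luns = 6 and nr_chnls = 2 (4 + 2 luns). The
 * two channels carry different lun counts, so lun_balanced drops to 0
 * and the target geometry reports luns_per_chnl = -1, i.e. "not
 * uniform". A request for luns 0..7 instead yields two full channels
 * and the target sees luns_per_chnl = 4.
 */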

static const struct block_device_operations nvm_fops = {
	.owner		= THIS_MODULE,
};

static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_simple *s = &create->conf.s;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	struct nvm_tgt_dev *tgt_dev;
	void *targetdata;

	tt = nvm_find_target_type(create->tgttype, 1);
	if (!tt) {
		pr_err("nvm: target type %s not found\n", create->tgttype);
		return -EINVAL;
	}

	mutex_lock(&dev->mlock);
	t = nvm_find_target(dev, create->tgtname);
	if (t) {
		pr_err("nvm: target name already exists.\n");
		mutex_unlock(&dev->mlock);
		return -EINVAL;
	}
	mutex_unlock(&dev->mlock);

	if (nvm_reserve_luns(dev, s->lun_begin, s->lun_end))
		return -ENOMEM;

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t)
		goto err_reserve;

	tgt_dev = nvm_create_tgt_dev(dev, s->lun_begin, s->lun_end);
	if (!tgt_dev) {
		pr_err("nvm: could not create target device\n");
		goto err_t;
	}

	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
	if (!tqueue)
		goto err_dev;
	blk_queue_make_request(tqueue, tt->make_rq);

	tdisk = alloc_disk(0);
	if (!tdisk)
		goto err_queue;

	sprintf(tdisk->disk_name, "%s", create->tgtname);
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = &nvm_fops;
	tdisk->queue = tqueue;

	targetdata = tt->init(tgt_dev, tdisk);
	if (IS_ERR(targetdata))
		goto err_init;

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	if (tt->sysfs_init && tt->sysfs_init(tdisk))
		goto err_sysfs;

	t->type = tt;
	t->disk = tdisk;
	t->dev = tgt_dev;

	mutex_lock(&dev->mlock);
	list_add_tail(&t->list, &dev->targets);
	mutex_unlock(&dev->mlock);

	return 0;
err_sysfs:
	if (tt->exit)
		tt->exit(targetdata);
err_init:
	put_disk(tdisk);
err_queue:
	blk_cleanup_queue(tqueue);
err_dev:
	nvm_remove_tgt_dev(tgt_dev);
err_t:
	kfree(t);
err_reserve:
	nvm_release_luns_err(dev, s->lun_begin, s->lun_end);
	return -ENOMEM;
}

static void __nvm_remove_target(struct nvm_target *t)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->sysfs_exit)
		tt->sysfs_exit(tdisk);

	if (tt->exit)
		tt->exit(tdisk->private_data);

	nvm_remove_tgt_dev(t->dev);
	put_disk(tdisk);

	list_del(&t->list);
	kfree(t);
}

/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @dev:	device
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
{
	struct nvm_target *t;

	mutex_lock(&dev->mlock);
	t = nvm_find_target(dev, remove->tgtname);
	if (!t) {
		mutex_unlock(&dev->mlock);
		return 1;
	}
	__nvm_remove_target(t);
	mutex_unlock(&dev->mlock);

	return 0;
}

static int nvm_register_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap;
	int i, j;

	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!rmap)
		goto err_rmap;

	rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct nvm_ch_map),
			      GFP_KERNEL);
	if (!rmap->chnls)
		goto err_chnls;

	for (i = 0; i < dev->geo.nr_chnls; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		int luns_in_chnl = dev->geo.luns_per_chnl;

		ch_rmap = &rmap->chnls[i];

		ch_rmap->ch_off = -1;
		ch_rmap->nr_luns = luns_in_chnl;

		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_roffs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++)
			lun_roffs[j] = -1;

		ch_rmap->lun_offs = lun_roffs;
	}

	dev->rmap = rmap;

	return 0;
err_ch:
	while (--i >= 0)
		kfree(rmap->chnls[i].lun_offs);
	kfree(rmap->chnls);
err_chnls:
	kfree(rmap);
err_rmap:
	return -ENOMEM;
}

static void nvm_unregister_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap = dev->rmap;
	int i;

	for (i = 0; i < dev->geo.nr_chnls; i++)
		kfree(rmap->chnls[i].lun_offs);

	kfree(rmap->chnls);
	kfree(rmap);
}

static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev_map *dev_map = tgt_dev->map;
	struct nvm_ch_map *ch_map = &dev_map->chnls[p->g.ch];
	int lun_off = ch_map->lun_offs[p->g.lun];

	p->g.ch += ch_map->ch_off;
	p->g.lun += lun_off;
}

static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch];
	int lun_roff = ch_rmap->lun_offs[p->g.lun];

	p->g.ch -= ch_rmap->ch_off;
	p->g.lun -= lun_roff;
}
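
/*
 * Worked example (illustrative): a target built on device channel 1,
 * luns 2..3 (luns_per_chnl = 4) sees them as target channel 0,
 * luns 0..1. For the target address {ch = 0, lun = 1}, ch_map->ch_off
 * is 1 and lun_offs[1] is 2, so nvm_map_to_dev() rewrites it to the
 * device address {ch = 1, lun = 3}; nvm_map_to_tgt() applies the same
 * offsets in reverse.
 */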

static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		nvm_map_to_dev(tgt_dev, &ppa_list[i]);
		ppa_list[i] = generic_to_dev_addr(tgt_dev, ppa_list[i]);
	}
}

static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa_list[i] = dev_to_generic_addr(tgt_dev, ppa_list[i]);
		nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
	}
}

static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	if (rqd->nr_ppas == 1) {
		nvm_ppa_tgt_to_dev(tgt_dev, &rqd->ppa_addr, 1);
		return;
	}

	nvm_ppa_tgt_to_dev(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}

static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	if (rqd->nr_ppas == 1) {
		nvm_ppa_dev_to_tgt(tgt_dev, &rqd->ppa_addr, 1);
		return;
	}

	nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}

void nvm_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
		     int len)
{
	struct nvm_geo *geo = &dev->geo;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	u64 i;

	for (i = 0; i < len; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		struct ppa_addr gaddr;
		u64 pba = le64_to_cpu(entries[i]);
		int off;
		u64 diff;

		if (!pba)
			continue;

		gaddr = linear_to_generic_addr(geo, pba);
		ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
		lun_roffs = ch_rmap->lun_offs;

		off = gaddr.g.ch * geo->luns_per_chnl + gaddr.g.lun;

		diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
				(lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;

		entries[i] -= cpu_to_le64(diff);
	}
}
EXPORT_SYMBOL(nvm_part_to_tgt);
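
/*
 * Worked example (illustrative): for a target whose first lun is device
 * channel 1, lun 0, with luns_per_chnl = 4 and sec_per_lun = 1024,
 * ch_off = 1 and lun_roffs[0] = 0, so every non-zero entry is shifted
 * down by (1 * 4 + 0) * 1024 sectors: device pba 4096 becomes target
 * pba 0. Zero entries are skipped, since 0 marks an unmapped slot.
 */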

struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
{
	struct nvm_tgt_type *tmp, *tt = NULL;

	if (lock)
		down_write(&nvm_tgtt_lock);

	list_for_each_entry(tmp, &nvm_tgt_types, list)
		if (!strcmp(name, tmp->name)) {
			tt = tmp;
			break;
		}

	if (lock)
		up_write(&nvm_tgtt_lock);
	return tt;
}
EXPORT_SYMBOL(nvm_find_target_type);

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (nvm_find_target_type(tt->name, 0))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	/* use the same lock that protects registration and lookup */
	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
			dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}

int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (nr_ppas > dev->ops->max_phys_sect) {
		pr_err("nvm: unable to update all blocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas, 1);
	nvm_rq_tgt_to_dev(tgt_dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(tgt_dev, &rqd);
	if (ret) {
		pr_err("nvm: failed bb mark\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);

int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
{
	struct nvm_dev *dev = tgt_dev->parent;

	return dev->ops->max_phys_sect;
}
EXPORT_SYMBOL(nvm_max_phys_sects);

int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	return dev->ops->submit_io(dev, rqd);
}
EXPORT_SYMBOL(nvm_submit_io);

static void nvm_end_io_sync(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->private;

	complete(waiting);
}

int nvm_erase_sync(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		   int nr_ppas)
{
	struct nvm_geo *geo = &tgt_dev->geo;
	struct nvm_rq rqd;
	int ret;
	DECLARE_COMPLETION_ONSTACK(wait);

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.opcode = NVM_OP_ERASE;
	rqd.end_io = nvm_end_io_sync;
	rqd.private = &wait;
	rqd.flags = geo->plane_mode >> 1;

	ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas, 1);
	if (ret)
		return ret;

	ret = nvm_submit_io(tgt_dev, &rqd);
	if (ret) {
		pr_err("nvm: erase I/O submission failed: %d\n", ret);
		goto free_ppa_list;
	}
	wait_for_completion_io(&wait);

free_ppa_list:
	nvm_free_rqd_ppalist(tgt_dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_erase_sync);
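
/*
 * Note: nvm_erase_sync() turns the asynchronous submit path into a
 * blocking call with an on-stack completion. A minimal sketch of the
 * same pattern for a caller-built request (illustrative only):
 *
 *	DECLARE_COMPLETION_ONSTACK(wait);
 *
 *	rqd.end_io = nvm_end_io_sync;
 *	rqd.private = &wait;
 *	if (!nvm_submit_io(tgt_dev, &rqd))
 *		wait_for_completion_io(&wait);
 *
 * The completion fires from nvm_end_io() once the device callback runs.
 */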

int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
		    nvm_l2p_update_fn *update_l2p, void *priv)
{
	struct nvm_dev *dev = tgt_dev->parent;

	if (!dev->ops->get_l2p_tbl)
		return 0;

	return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
}
EXPORT_SYMBOL(nvm_get_l2p_tbl);

int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_area *area, *prev, *next;
	sector_t begin = 0;
	sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;

	if (len > max_sectors)
		return -EINVAL;

	area = kmalloc(sizeof(struct nvm_area), GFP_KERNEL);
	if (!area)
		return -ENOMEM;

	prev = NULL;

	spin_lock(&dev->lock);
	list_for_each_entry(next, &dev->area_list, list) {
		if (begin + len > next->begin) {
			begin = next->end;
			prev = next;
			continue;
		}
		break;
	}

	if ((begin + len) > max_sectors) {
		spin_unlock(&dev->lock);
		kfree(area);
		return -EINVAL;
	}

	area->begin = *lba = begin;
	area->end = begin + len;

	if (prev) /* insert into sorted order */
		list_add(&area->list, &prev->list);
	else
		list_add(&area->list, &dev->area_list);
	spin_unlock(&dev->lock);

	return 0;
}
EXPORT_SYMBOL(nvm_get_area);

void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_area *area;

	spin_lock(&dev->lock);
	list_for_each_entry(area, &dev->area_list, list) {
		if (area->begin != begin)
			continue;

		list_del(&area->list);
		spin_unlock(&dev->lock);
		kfree(area);
		return;
	}
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL(nvm_put_area);
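
/*
 * Worked example (illustrative): with areas [0, 100) and [150, 300)
 * already on the sorted area_list, a request for len = 50 walks the
 * list: begin starts at 0, collides with [0, 100) and is bumped to its
 * end, 100; [150, 300) no longer collides since 100 + 50 <= 150, so the
 * new area becomes [100, 150). The allocator is first-fit over a sorted
 * list.
 */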

int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
			const struct ppa_addr *ppas, int nr_ppas, int vblk)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &tgt_dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if ((!vblk || geo->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	if (!vblk) {
		for (i = 0; i < nr_ppas; i++)
			rqd->ppa_list[i] = ppas[i];
	} else {
		plane_cnt = geo->plane_mode;
		rqd->nr_ppas *= plane_cnt;

		for (i = 0; i < nr_ppas; i++) {
			for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
				ppa = ppas[i];
				ppa.g.pl = pl_idx;
				rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_rqd_ppalist);
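
/*
 * Worked example (illustrative): with vblk set, plane_mode =
 * NVM_PLANE_DOUBLE and two input addresses p0 and p1, the list is
 * expanded plane-major to four entries:
 *
 *	ppa_list[0] = p0 (pl 0)		ppa_list[1] = p1 (pl 0)
 *	ppa_list[2] = p0 (pl 1)		ppa_list[3] = p1 (pl 1)
 *
 * and rqd->nr_ppas becomes 4, so one virtual-block operation covers
 * every plane of each block.
 */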

void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}
EXPORT_SYMBOL(nvm_free_rqd_ppalist);

void nvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *tgt_dev = rqd->dev;

	/* Convert address space */
	if (tgt_dev)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);

	if (rqd->end_io)
		rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

/*
 * Fold a bad block list from its plane representation to its virtual
 * block representation. The fold is done in place and the reduced size
 * is returned.
 *
 * If the status of any plane is bad or grown bad, the virtual block is
 * marked bad. If not bad, the first plane state acts as the block state.
 */
int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
	struct nvm_geo *geo = &dev->geo;
	int blk, offset, pl, blktype;

	if (nr_blks != geo->blks_per_lun * geo->plane_mode)
		return -EINVAL;

	for (blk = 0; blk < geo->blks_per_lun; blk++) {
		offset = blk * geo->plane_mode;
		blktype = blks[offset];

		/* Bad blocks on any planes take precedence over other types */
		for (pl = 0; pl < geo->plane_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		blks[blk] = blktype;
	}

	return geo->blks_per_lun;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);
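
/*
 * Worked example (illustrative): with plane_mode = 2 and
 * blks_per_lun = 2, the per-plane table {FREE, BAD, FREE, FREE} folds
 * in place to {BAD, FREE}: block 0 inherits the bad state of its second
 * plane, block 1 keeps the state of its first plane. The function
 * returns 2, the folded length.
 */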

int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       u8 *blks)
{
	struct nvm_dev *dev = tgt_dev->parent;

	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

	return dev->ops->get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);

static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	struct nvm_geo *geo = &dev->geo;
	int i;

	dev->lps_per_blk = geo->pgs_per_blk;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* Just a linear array */
	for (i = 0; i < dev->lps_per_blk; i++)
		dev->lptbl[i] = i;

	return 0;
}

static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i, p;
	struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;

	if (!mlc->num_pairs)
		return 0;

	dev->lps_per_blk = mlc->num_pairs;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* The lower page table encoding consists of a list of bytes, where each
	 * has a lower and an upper half. The first half byte maintains the
	 * increment value and every value after is an offset added to the
	 * previous incrementation value
	 */
	dev->lptbl[0] = mlc->pairs[0] & 0xF;
	for (i = 1; i < dev->lps_per_blk; i++) {
		p = mlc->pairs[i >> 1];
		if (i & 0x1) /* upper */
			dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
		else /* lower */
			dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
	}

	return 0;
}
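
/*
 * Worked example (illustrative): num_pairs = 4 and pairs = {0x21, 0x12}
 * decode as lptbl[0] = 0x21 & 0xF = 1, lptbl[1] = 1 + (0x21 >> 4) = 3,
 * lptbl[2] = 3 + (0x12 & 0xF) = 5, lptbl[3] = 5 + (0x12 >> 4) = 6,
 * i.e. each half byte is a delta on the previous lower page number.
 */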

static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
	struct nvm_id_group *grp = &id->grp;
	struct nvm_geo *geo = &dev->geo;
	int ret;

	/* Whole device values */
	geo->nr_chnls = grp->num_ch;
	geo->luns_per_chnl = grp->num_lun;

	/* Generic device values */
	geo->pgs_per_blk = grp->num_pg;
	geo->blks_per_lun = grp->num_blk;
	geo->nr_planes = grp->num_pln;
	geo->fpg_size = grp->fpg_sz;
	geo->pfpg_size = grp->fpg_sz * grp->num_pln;
	geo->sec_size = grp->csecs;
	geo->oob_size = grp->sos;
	geo->sec_per_pg = grp->fpg_sz / grp->csecs;
	geo->mccap = grp->mccap;
	memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

	geo->plane_mode = NVM_PLANE_SINGLE;
	geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;

	if (grp->mpos & 0x020202)
		geo->plane_mode = NVM_PLANE_DOUBLE;
	if (grp->mpos & 0x040404)
		geo->plane_mode = NVM_PLANE_QUAD;

	if (grp->mtype != 0) {
		pr_err("nvm: memory type not supported\n");
		return -EINVAL;
	}

	/* calculated values */
	geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
	geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
	geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
	geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;

	dev->total_secs = geo->nr_luns * geo->sec_per_lun;
	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	switch (grp->fmtype) {
	case NVM_ID_FMTYPE_SLC:
		if (nvm_init_slc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	case NVM_ID_FMTYPE_MLC:
		if (nvm_init_mlc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	default:
		pr_err("nvm: flash type not supported\n");
		ret = -EINVAL;
		goto err_fmtype;
	}

	INIT_LIST_HEAD(&dev->area_list);
	INIT_LIST_HEAD(&dev->targets);
	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	ret = nvm_register_map(dev);
	if (ret)
		goto err_fmtype;

	blk_queue_logical_block_size(dev->q, geo->sec_size);
	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}

void nvm_free(struct nvm_dev *dev)
{
	if (!dev)
		return;

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	nvm_unregister_map(dev);
	kfree(dev->lptbl);
	kfree(dev->lun_map);
	kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (dev->ops->identity(dev, &dev->identity)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%x nvm_vendor:%x\n",
			dev->identity.ver_id, dev->identity.vmnt);

	if (dev->identity.ver_id != 1) {
		pr_err("nvm: device not supported by kernel\n");
		goto err;
	}

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures.\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
			dev->name, geo->sec_per_pg, geo->nr_planes,
			geo->pgs_per_blk, geo->blks_per_lun,
			geo->nr_luns, geo->nr_chnls);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
	return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
}
EXPORT_SYMBOL(nvm_alloc_dev);

int nvm_register(struct nvm_dev *dev)
{
	int ret;

	if (!dev->q || !dev->ops)
		return -EINVAL;

	if (dev->ops->max_phys_sect > 256) {
		pr_info("nvm: max sectors supported is 256.\n");
		return -EINVAL;
	}

	if (dev->ops->max_phys_sect > 1) {
		dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
		if (!dev->dma_pool) {
			pr_err("nvm: could not create dma pool\n");
			return -ENOMEM;
		}
	}

	ret = nvm_init(dev);
	if (ret)
		goto err_init;

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
err_init:
	dev->ops->destroy_dma_pool(dev->dma_pool);
	return ret;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(struct nvm_dev *dev)
{
	struct nvm_target *t, *tmp;

	mutex_lock(&dev->mlock);
	list_for_each_entry_safe(t, tmp, &dev->targets, list) {
		if (t->dev->parent != dev)
			continue;
		__nvm_remove_target(t);
	}
	mutex_unlock(&dev->mlock);

	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	nvm_free(dev);
}
EXPORT_SYMBOL(nvm_unregister);

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	struct nvm_ioctl_create_simple *s;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}
	s = &create->conf.s;

	if (s->lun_begin == -1 && s->lun_end == -1) {
		s->lun_begin = 0;
		s->lun_end = dev->geo.nr_luns - 1;
	}

	if (s->lun_begin > s->lun_end || s->lun_end >= dev->geo.nr_luns) {
		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
			s->lun_begin, s->lun_end, dev->geo.nr_luns - 1);
		return -EINVAL;
	}

	return nvm_create_tgt(dev, create);
}

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		sprintf(info->devname, "%s", dev->name);

		/* kept for compatibility */
		info->bmversion[0] = 1;
		info->bmversion[1] = 0;
		info->bmversion[2] = 0;
		sprintf(info->bmname, "%s", "gennvm");
		i++;

		if (i > 31) {
			pr_err("nvm: max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return __nvm_configure_create(&create);
}
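
/*
 * Userspace sketch (illustrative; device and target names are made up,
 * and "rrpc" stands in for whatever target type is registered): a
 * create request over /dev/lightnvm/control fills the same structure
 * this handler copies in:
 *
 *	struct nvm_ioctl_create c = { .flags = 0 };
 *
 *	strcpy(c.dev, "nvme0n1");
 *	strcpy(c.tgtname, "tgt0");
 *	strcpy(c.tgttype, "rrpc");
 *	c.conf.type = NVM_CONFIG_TYPE_SIMPLE;
 *	c.conf.s.lun_begin = -1;	// -1/-1 selects all luns
 *	c.conf.s.lun_end = -1;
 *	ioctl(fd, NVM_DEV_CREATE, &c);
 */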

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	list_for_each_entry(dev, &nvm_devices, devices) {
		ret = nvm_remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}

/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return 0;
}

/* kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	return 0;
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}

static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
builtin_misc_device(_nvm_misc);