/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */
#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_mgrs);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);
struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
{
	struct nvm_tgt_type *tmp, *tt = NULL;

	/* only take the semaphore if the caller does not already hold it */
	if (lock)
		down_write(&nvm_tgtt_lock);

	list_for_each_entry(tmp, &nvm_tgt_types, list)
		if (!strcmp(name, tmp->name)) {
			tt = tmp;
			break;
		}

	if (lock)
		up_write(&nvm_tgtt_lock);
	return tt;
}
EXPORT_SYMBOL(nvm_find_target_type);
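/*
 * Usage sketch (illustrative, drawn from this file): nvm_register_tgt_type()
 * below already holds nvm_tgtt_lock when it probes for duplicates, so it
 * calls nvm_find_target_type(tt->name, 0); an external caller that does not
 * hold the semaphore would pass lock = 1 and let the lookup take it.
 */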
int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (nvm_find_target_type(tt->name, 0))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);
void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_lock);
	list_del(&tt->list);
	up_write(&nvm_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);
void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
							dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);
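/*
 * Example pairing (from this file): nvm_set_rqd_ppalist() backs a multi-ppa
 * list with nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list), and
 * nvm_free_rqd_ppalist() returns it through nvm_dev_dma_free(). Both helpers
 * simply forward to the device-specific dev_dma_alloc/dev_dma_free ops,
 * using the dev->dma_pool set up in nvm_register().
 */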
static struct nvmm_type *nvm_find_mgr_type(const char *name)
{
	struct nvmm_type *mt;

	list_for_each_entry(mt, &nvm_mgrs, list)
		if (!strcmp(name, mt->name))
			return mt;

	return NULL;
}

static struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
{
	struct nvmm_type *mt;
	int ret;

	lockdep_assert_held(&nvm_lock);

	list_for_each_entry(mt, &nvm_mgrs, list) {
		if (strncmp(dev->sb.mmtype, mt->name, NVM_MMTYPE_LEN))
			continue;

		ret = mt->register_mgr(dev);
		if (ret < 0) {
			pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
								ret, dev->name);
			return NULL; /* initialization failed */
		} else if (ret > 0)
			return mt;
	}

	return NULL;
}
int nvm_register_mgr(struct nvmm_type *mt)
{
	struct nvm_dev *dev;
	int ret = 0;

	down_write(&nvm_lock);
	if (nvm_find_mgr_type(mt->name)) {
		ret = -EEXIST;
		goto finish;
	} else {
		list_add(&mt->list, &nvm_mgrs);
	}

	/* try to register media mgr if any device has none configured */
	list_for_each_entry(dev, &nvm_devices, devices) {
		if (dev->mt)
			continue;

		dev->mt = nvm_init_mgr(dev);
	}
finish:
	up_write(&nvm_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_mgr);
void nvm_unregister_mgr(struct nvmm_type *mt)
{
	if (!mt)
		return;

	down_write(&nvm_lock);
	list_del(&mt->list);
	up_write(&nvm_lock);
}
EXPORT_SYMBOL(nvm_unregister_mgr);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}
static void nvm_tgt_generic_to_addr_mode(struct nvm_tgt_dev *tgt_dev,
					 struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int i;

	if (rqd->nr_ppas > 1) {
		for (i = 0; i < rqd->nr_ppas; i++) {
			rqd->ppa_list[i] = dev->mt->trans_ppa(tgt_dev,
					rqd->ppa_list[i], TRANS_TGT_TO_DEV);
			rqd->ppa_list[i] = generic_to_dev_addr(dev,
							rqd->ppa_list[i]);
		}
	} else {
		rqd->ppa_addr = dev->mt->trans_ppa(tgt_dev, rqd->ppa_addr,
							TRANS_TGT_TO_DEV);
		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
	}
}
int nvm_set_bb_tbl(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
								int type)
{
	struct nvm_rq rqd;
	int ret;

	if (nr_ppas > dev->ops->max_phys_sect) {
		pr_err("nvm: unable to update all sysblocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
	nvm_generic_to_addr_mode(dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(dev, &rqd);
	if (ret) {
		pr_err("nvm: sysblk failed bb mark\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_bb_tbl);

int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (nr_ppas > dev->ops->max_phys_sect) {
		pr_err("nvm: unable to update all blocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
	nvm_tgt_generic_to_addr_mode(tgt_dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(dev, &rqd);
	if (ret) {
		pr_err("nvm: sysblk failed bb mark\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);
int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
{
	struct nvm_dev *dev = tgt_dev->parent;

	return dev->ops->max_phys_sect;
}
EXPORT_SYMBOL(nvm_max_phys_sects);

int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct nvm_dev *dev = tgt_dev->parent;

	return dev->mt->submit_io(tgt_dev, rqd);
}
EXPORT_SYMBOL(nvm_submit_io);

int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p, int flags)
{
	struct nvm_dev *dev = tgt_dev->parent;

	return dev->mt->erase_blk(tgt_dev, p, flags);
}
EXPORT_SYMBOL(nvm_erase_blk);
int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
		    nvm_l2p_update_fn *update_l2p, void *priv)
{
	struct nvm_dev *dev = tgt_dev->parent;

	if (!dev->ops->get_l2p_tbl)
		return 0;

	return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
}
EXPORT_SYMBOL(nvm_get_l2p_tbl);

int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
{
	struct nvm_dev *dev = tgt_dev->parent;

	return dev->mt->get_area(dev, lba, len);
}
EXPORT_SYMBOL(nvm_get_area);

void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t lba)
{
	struct nvm_dev *dev = tgt_dev->parent;

	dev->mt->put_area(dev, lba);
}
EXPORT_SYMBOL(nvm_put_area);
void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_ppas > 1) {
		for (i = 0; i < rqd->nr_ppas; i++)
			rqd->ppa_list[i] = dev_to_generic_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
	}
}
EXPORT_SYMBOL(nvm_addr_to_generic_mode);

void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_ppas > 1) {
		for (i = 0; i < rqd->nr_ppas; i++)
			rqd->ppa_list[i] = generic_to_dev_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
	}
}
EXPORT_SYMBOL(nvm_generic_to_addr_mode);
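/*
 * Note (assumption about the helpers in <linux/lightnvm.h>):
 * dev_to_generic_addr()/generic_to_dev_addr() repack a ppa_addr between the
 * kernel's generic bit layout and the device's own format described by
 * geo->ppaf (copied from the identify data in nvm_core_init() below). The
 * two functions above apply that conversion to either the single
 * rqd->ppa_addr or the whole rqd->ppa_list, depending on nr_ppas.
 */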
int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
			const struct ppa_addr *ppas, int nr_ppas, int vblk)
{
	struct nvm_geo *geo = &dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if ((!vblk || geo->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	if (!vblk) {
		for (i = 0; i < nr_ppas; i++)
			rqd->ppa_list[i] = ppas[i];
	} else {
		plane_cnt = geo->plane_mode;
		rqd->nr_ppas *= plane_cnt;

		for (i = 0; i < nr_ppas; i++) {
			for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
				ppa = ppas[i];
				ppa.g.pl = pl_idx;
				rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_rqd_ppalist);
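/*
 * Layout example (hypothetical numbers): with vblk = 1, nr_ppas = 2 and
 * geo->plane_mode == NVM_PLANE_DOUBLE, the nested loops above expand the
 * two input ppas into a four-entry, plane-major list:
 *
 *	ppa_list[0] = ppas[0], plane 0		ppa_list[2] = ppas[0], plane 1
 *	ppa_list[1] = ppas[1], plane 0		ppa_list[3] = ppas[1], plane 1
 *
 * i.e. index (pl_idx * nr_ppas) + i, with rqd->nr_ppas becoming 4.
 */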
void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
}
EXPORT_SYMBOL(nvm_free_rqd_ppalist);
int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
								int flags)
{
	struct nvm_rq rqd;
	int ret;

	if (!dev->ops->erase_block)
		return 0;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
	if (ret)
		return ret;

	nvm_generic_to_addr_mode(dev, &rqd);

	rqd.flags = flags;

	ret = dev->ops->erase_block(dev, &rqd);

	nvm_free_rqd_ppalist(dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_erase_ppa);
void nvm_end_io(struct nvm_rq *rqd, int error)
{
	rqd->error = error;
	rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

static void nvm_end_io_sync(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->wait;

	rqd->wait = NULL;

	complete(waiting);
}

static int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
						int flags, void *buf, int len)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct bio *bio;
	int ret;
	unsigned long hang_check;

	bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL);
	if (IS_ERR_OR_NULL(bio))
		return -ENOMEM;

	nvm_generic_to_addr_mode(dev, rqd);

	rqd->dev = NULL;
	rqd->opcode = opcode;
	rqd->flags = flags;
	rqd->bio = bio;
	rqd->wait = &wait;
	rqd->end_io = nvm_end_io_sync;

	ret = dev->ops->submit_io(dev, rqd);
	if (ret) {
		bio_put(bio);
		return ret;
	}

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_io_timeout(&wait,
							hang_check * (HZ/2)))
			;
	else
		wait_for_completion_io(&wait);

	return rqd->error;
}
/**
 * nvm_submit_ppa_list - submit a user-defined ppa list to the device. The
 *			 caller must take care to free the ppa list if
 *			 necessary.
 * @dev:	device
 * @ppa_list:	user created ppa_list
 * @nr_ppas:	length of ppa_list
 * @opcode:	device opcode
 * @flags:	device flags
 * @buf:	data buffer
 * @len:	data buffer length
 */
int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list,
			int nr_ppas, int opcode, int flags, void *buf, int len)
{
	struct nvm_rq rqd;

	if (dev->ops->max_phys_sect < nr_ppas)
		return -EINVAL;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.nr_ppas = nr_ppas;
	if (nr_ppas > 1)
		rqd.ppa_list = ppa_list;
	else
		rqd.ppa_addr = ppa_list[0];

	return __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
}
EXPORT_SYMBOL(nvm_submit_ppa_list);
/**
 * nvm_submit_ppa - submit PPAs to device. PPAs will automatically be unfolded
 *		    as single, dual or quad plane PPAs depending on device type.
 * @dev:	device
 * @ppa:	user created ppa_list
 * @nr_ppas:	length of ppa_list
 * @opcode:	device opcode
 * @flags:	device flags
 * @buf:	data buffer
 * @len:	data buffer length
 */
int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
		   int opcode, int flags, void *buf, int len)
{
	struct nvm_rq rqd;
	int ret;

	memset(&rqd, 0, sizeof(struct nvm_rq));
	ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas, 1);
	if (ret)
		return ret;

	ret = __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);

	nvm_free_rqd_ppalist(dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_ppa);
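/*
 * The difference between the two submit paths above: nvm_submit_ppa_list()
 * sends the caller's ppa list to the device verbatim, while nvm_submit_ppa()
 * first runs the ppas through nvm_set_rqd_ppalist() with vblk = 1, so they
 * are unfolded per plane as in the layout example after that function.
 */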
/*
 * folds a bad block list from its plane representation to its virtual
 * block representation. The fold is done in place and the reduced size
 * is returned.
 *
 * If the status of any plane is bad or grown bad, the virtual block
 * is marked bad. If not bad, the first plane state acts as the block state.
 */
int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
	struct nvm_geo *geo = &dev->geo;
	int blk, offset, pl, blktype;

	if (nr_blks != geo->blks_per_lun * geo->plane_mode)
		return -EINVAL;

	for (blk = 0; blk < geo->blks_per_lun; blk++) {
		offset = blk * geo->plane_mode;
		blktype = blks[offset];

		/* Bad blocks on any planes take precedence over other types */
		for (pl = 0; pl < geo->plane_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		blks[blk] = blktype;
	}

	return geo->blks_per_lun;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);
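/*
 * Fold example (hypothetical values): with geo->plane_mode == 2 and a
 * two-block table blks = { NVM_BLK_T_FREE, NVM_BLK_T_BAD, NVM_BLK_T_FREE,
 * NVM_BLK_T_FREE }, block 0 folds to NVM_BLK_T_BAD (a bad plane dominates)
 * and block 1 folds to NVM_BLK_T_FREE, leaving blks = { NVM_BLK_T_BAD,
 * NVM_BLK_T_FREE } and a return value of geo->blks_per_lun == 2.
 */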
int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
{
	ppa = generic_to_dev_addr(dev, ppa);

	return dev->ops->get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_bb_tbl);

int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       u8 *blks)
{
	struct nvm_dev *dev = tgt_dev->parent;

	ppa = dev->mt->trans_ppa(tgt_dev, ppa, TRANS_TGT_TO_DEV);
	return nvm_get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	struct nvm_geo *geo = &dev->geo;
	int i;

	dev->lps_per_blk = geo->pgs_per_blk;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* Just a linear array */
	for (i = 0; i < dev->lps_per_blk; i++)
		dev->lptbl[i] = i;

	return 0;
}
static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i, p;
	struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;

	if (!mlc->num_pairs)
		return 0;

	dev->lps_per_blk = mlc->num_pairs;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* The lower page table encoding consists of a list of bytes, where
	 * each byte has a lower and an upper half. The first half-byte holds
	 * the initial increment value and every later half-byte is an offset
	 * added to the previously accumulated value.
	 */
	dev->lptbl[0] = mlc->pairs[0] & 0xF;
	for (i = 1; i < dev->lps_per_blk; i++) {
		p = mlc->pairs[i >> 1];
		if (i & 0x1) /* upper */
			dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
		else /* lower */
			dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
	}

	return 0;
}
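/*
 * Decode example (hypothetical pair bytes): if mlc->pairs[0] == 0x21, then
 * lptbl[0] = 0x21 & 0xF = 1 and lptbl[1] = 1 + ((0x21 & 0xF0) >> 4) = 3;
 * each later entry adds the next packed half-byte offset, so the table is a
 * cumulative list of lower-page indexes.
 */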
static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
	struct nvm_id_group *grp = &id->groups[0];
	struct nvm_geo *geo = &dev->geo;
	int ret;

	/* Whole device values */
	geo->nr_chnls = grp->num_ch;
	geo->luns_per_chnl = grp->num_lun;

	/* Generic device values */
	geo->pgs_per_blk = grp->num_pg;
	geo->blks_per_lun = grp->num_blk;
	geo->nr_planes = grp->num_pln;
	geo->fpg_size = grp->fpg_sz;
	geo->pfpg_size = grp->fpg_sz * grp->num_pln;
	geo->sec_size = grp->csecs;
	geo->oob_size = grp->sos;
	geo->sec_per_pg = grp->fpg_sz / grp->csecs;
	geo->mccap = grp->mccap;
	memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

	geo->plane_mode = NVM_PLANE_SINGLE;
	geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;

	if (grp->mpos & 0x020202)
		geo->plane_mode = NVM_PLANE_DOUBLE;
	if (grp->mpos & 0x040404)
		geo->plane_mode = NVM_PLANE_QUAD;

	if (grp->mtype != 0) {
		pr_err("nvm: memory type not supported\n");
		return -EINVAL;
	}

	/* calculated values */
	geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
	geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
	geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
	geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;

	dev->total_secs = geo->nr_luns * geo->sec_per_lun;
	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	switch (grp->fmtype) {
	case NVM_ID_FMTYPE_SLC:
		if (nvm_init_slc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	case NVM_ID_FMTYPE_MLC:
		if (nvm_init_mlc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	default:
		pr_err("nvm: flash type not supported\n");
		ret = -EINVAL;
		goto err_fmtype;
	}

	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	blk_queue_logical_block_size(dev->q, geo->sec_size);

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}
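/*
 * Geometry arithmetic example (hypothetical identify data): num_ch = 16,
 * num_lun = 8, num_pln = 2, num_pg = 512, fpg_sz = 16384 and csecs = 4096
 * yield sec_per_pg = 4, sec_per_pl = 8, sec_per_blk = 4096, nr_luns = 128
 * and total_secs = nr_luns * sec_per_lun.
 */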
static void nvm_free_mgr(struct nvm_dev *dev)
{
	if (!dev->mt)
		return;

	dev->mt->unregister_mgr(dev);
	dev->mt = NULL;
}

void nvm_free(struct nvm_dev *dev)
{
	if (!dev)
		return;

	nvm_free_mgr(dev);

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	kfree(dev->lptbl);
	kfree(dev->lun_map);
	kfree(dev);
}
static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (!dev->q || !dev->ops)
		return ret;

	if (dev->ops->identity(dev, &dev->identity)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n",
			dev->identity.ver_id, dev->identity.vmnt,
							dev->identity.cgrps);

	if (dev->identity.ver_id != 1) {
		pr_err("nvm: device not supported by kernel.\n");
		goto err;
	}

	if (dev->identity.cgrps != 1) {
		pr_err("nvm: only one group configuration supported.\n");
		goto err;
	}

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures.\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
			dev->name, geo->sec_per_pg, geo->nr_planes,
			geo->pgs_per_blk, geo->blks_per_lun,
			geo->nr_luns, geo->nr_chnls);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}
struct nvm_dev *nvm_alloc_dev(int node)
{
	return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
}
EXPORT_SYMBOL(nvm_alloc_dev);
int nvm_register(struct nvm_dev *dev)
{
	int ret;

	ret = nvm_init(dev);
	if (ret)
		goto err_init;

	if (dev->ops->max_phys_sect > 256) {
		pr_info("nvm: max sectors supported is 256.\n");
		ret = -EINVAL;
		goto err_init;
	}

	if (dev->ops->max_phys_sect > 1) {
		dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
		if (!dev->dma_pool) {
			pr_err("nvm: could not create dma pool\n");
			ret = -ENOMEM;
			goto err_init;
		}
	}

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
		ret = nvm_get_sysblock(dev, &dev->sb);
		if (!ret)
			pr_err("nvm: device not initialized.\n");
		else if (ret < 0)
			pr_err("nvm: err (%d) on device initialization\n", ret);
	}

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	if (ret > 0)
		dev->mt = nvm_init_mgr(dev);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
err_init:
	kfree(dev->lun_map);
	return ret;
}
EXPORT_SYMBOL(nvm_register);
void nvm_unregister(struct nvm_dev *dev)
{
	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	nvm_free(dev);
}
EXPORT_SYMBOL(nvm_unregister);
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	struct nvm_ioctl_create_simple *s;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (!dev->mt) {
		pr_info("nvm: device has no media manager registered.\n");
		return -ENODEV;
	}

	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}
	s = &create->conf.s;

	if (s->lun_begin > s->lun_end || s->lun_end > dev->geo.nr_luns) {
		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
			s->lun_begin, s->lun_end, dev->geo.nr_luns);
		return -EINVAL;
	}

	return dev->mt->create_tgt(dev, create);
}
static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}
static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		sprintf(info->devname, "%s", dev->name);
		if (dev->mt) {
			info->bmversion[0] = dev->mt->version[0];
			info->bmversion[1] = dev->mt->version[1];
			info->bmversion[2] = dev->mt->version[2];
			sprintf(info->bmname, "%s", dev->mt->name);
		} else {
			sprintf(info->bmname, "none");
		}

		i++;
		if (i > 31) {
			pr_err("nvm: max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}
static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return __nvm_configure_create(&create);
}
static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	list_for_each_entry(dev, &nvm_devices, devices) {
		ret = dev->mt->remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}
static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
{
	info->seqnr = 1;
	info->erase_cnt = 0;
	info->version = 1;
}
static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
{
	struct nvm_dev *dev;
	struct nvm_sb_info info;
	int ret;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(init->dev);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	nvm_setup_nvm_sb_info(&info);

	strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
	info.fs_ppa.ppa = -1;

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
		ret = nvm_init_sysblock(dev, &info);
		if (ret)
			return ret;
	}

	memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));

	down_write(&nvm_lock);
	dev->mt = nvm_init_mgr(dev);
	up_write(&nvm_lock);

	return 0;
}
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	init.dev[DISK_NAME_LEN - 1] = '\0';

	return __nvm_ioctl_dev_init(&init);
}
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;
	struct nvm_dev *dev;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(fact.dev);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	nvm_free_mgr(dev);

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
		return nvm_dev_factory(dev, fact.flags);

	return 0;
}
static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}
static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
builtin_misc_device(_nvm_misc);