/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <[email protected]>.
 * Copyright (c) 2015, Boaz Harrosh <[email protected]>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"

static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}

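/*
 * Ask the nvdimm bus to clear poison over the given range, record any
 * sectors that were actually cleared in the badblocks list, and
 * invalidate stale cachelines covering the range.
 */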
static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
		unsigned int len)
{
	struct device *dev = to_dev(pmem);
	sector_t sector;
	long cleared;

	sector = (offset - pmem->data_offset) / 512;
	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);

	if (cleared > 0 && cleared / 512) {
		dev_dbg(dev, "%s: %#llx clear %ld sector%s\n",
				__func__, (unsigned long long) sector,
				cleared / 512, cleared / 512 > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared / 512);
	}
	invalidate_pmem(pmem->virt_addr + offset, len);
}

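/*
 * Copy between a (possibly highmem) page and pmem through a temporary
 * atomic kernel mapping; read_pmem() returns a non-zero error code if
 * the copy from media fails.
 */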
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	void *mem = kmap_atomic(page);

	memcpy_to_pmem(pmem_addr, mem + off, len);
	kunmap_atomic(mem);
}

static int read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	int rc;
	void *mem = kmap_atomic(page);

	rc = memcpy_from_pmem(mem + off, pmem_addr, len);
	kunmap_atomic(mem);
	return rc;
}

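/*
 * Service a single bio_vec segment: translate the sector to a pmem
 * offset, fail reads that intersect known-bad media, and handle poison
 * clearing on the write path per the comment in the write branch.
 */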
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, bool is_write,
			sector_t sector)
{
	int rc = 0;
	bool bad_pmem = false;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (!is_write) {
		if (unlikely(bad_pmem))
			rc = -EIO;
		else {
			rc = read_pmem(page, off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		/*
		 * Note that we write the data both before and after
		 * clearing poison.  The write before clear poison
		 * handles situations where the latest written data is
		 * preserved and the clear poison operation simply marks
		 * the address range as valid without changing the data.
		 * In this case application software can assume that an
		 * interrupted write will either return the new good
		 * data or an error.
		 *
		 * However, if pmem_clear_poison() leaves the data in an
		 * indeterminate state we need to perform the write
		 * after clear poison.
		 */
		flush_dcache_page(page);
		write_pmem(pmem_addr, page, off, len);
		if (unlikely(bad_pmem)) {
			pmem_clear_poison(pmem, pmem_off, len);
			write_pmem(pmem_addr, page, off, len);
		}
	}

	return rc;
}

/* account for REQ_FLUSH rename, replace with REQ_PREFLUSH after v4.8-rc1 */
#ifndef REQ_FLUSH
#define REQ_FLUSH REQ_PREFLUSH
#endif

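/*
 * bio submission entry point: honor a preflush request up front, copy
 * each segment synchronously, and flush again before completion when
 * the bio carries FUA semantics.
 */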
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	int rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = q->queuedata;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_FLUSH)
		nvdimm_flush(nd_region);

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, op_is_write(bio_op(bio)),
				iter.bi_sector);
		if (rc) {
			bio->bi_error = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio->bi_opf & REQ_FUA)
		nvdimm_flush(nd_region);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

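/* synchronous single-page read/write for the ->rw_page() interface */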
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, bool is_write)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	int rc;

	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, is_write, sector);

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, is_write, 0);

	return rc;
}

/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long pmem_direct_access(struct block_device *bdev, sector_t sector,
		      void **kaddr, pfn_t *pfn, long size)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	resource_size_t offset = sector * 512 + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, size)))
		return -EIO;
	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return size;
	return pmem->size - pmem->pfn_pad - offset;
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.direct_access =	pmem_direct_access,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

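/* devm release actions for the objects set up in pmem_attach_disk() */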
static void pmem_release_queue(void *q)
{
	blk_cleanup_queue(q);
}

static void pmem_release_disk(void *disk)
{
	del_gendisk(disk);
	put_disk(disk);
}

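/*
 * Set up the request queue, the memory mapping (direct memremap, or
 * page-backed via devm_memremap_pages()), the badblocks list, and the
 * gendisk.  Everything is device-managed, so error paths simply return
 * and rely on devm teardown.
 */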
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct vmem_altmap __altmap, *altmap = NULL;
	struct resource *res = &nsio->res;
	struct nd_pfn *nd_pfn = NULL;
	int nid = dev_to_node(dev);
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct resource pfn_res;
	struct request_queue *q;
	struct gendisk *disk;
	void *addr;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		altmap = nvdimm_setup_pfn(nd_pfn, &pfn_res, &__altmap);
		if (IS_ERR(altmap))
			return PTR_ERR(altmap);
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_nsio_disable(dev, nsio);

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	if (nvdimm_has_flush(nd_region) < 0)
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return -ENOMEM;

	pmem->pfn_flags = PFN_DEV;
	if (is_nd_pfn(dev)) {
		addr = devm_memremap_pages(dev, &pfn_res, &q->q_usage_counter,
				altmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) - resource_size(&pfn_res);
		pmem->pfn_flags |= PFN_MAP;
		res = &pfn_res; /* for badblocks populate */
		res->start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		addr = devm_memremap_pages(dev, &nsio->res,
				&q->q_usage_counter, NULL);
		pmem->pfn_flags |= PFN_MAP;
	} else
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);

	/*
	 * At release time the queue must be dead before
	 * devm_memremap_pages is unwound
	 */
	if (devm_add_action_or_reset(dev, pmem_release_queue, q))
		return -ENOMEM;

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = addr;

	blk_queue_write_cache(q, true, true);
	blk_queue_make_request(q, pmem_make_request);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_max_hw_sectors(q, UINT_MAX);
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
	q->queuedata = pmem;

	disk = alloc_disk_node(0, nid);
	if (!disk)
		return -ENOMEM;

	disk->fops = &pmem_fops;
	disk->queue = q;
	disk->flags = GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, res);
	disk->bb = &pmem->bb;
	device_add_disk(dev, disk);

	if (devm_add_action_or_reset(dev, pmem_release_disk, disk))
		return -ENOMEM;

	revalidate_disk(disk);

	return 0;
}

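/*
 * Resolve the namespace personality: hand btt namespaces to the btt
 * driver, attach pfn/dax-configured namespaces as disks with their
 * claimed geometry, and otherwise probe for info blocks before falling
 * back to raw pmem.
 */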
static int nd_pmem_probe(struct device *dev)
{
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
		return -ENXIO;

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	/* if we find a valid info-block we'll come back as that personality */
	if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
			|| nd_dax_probe(dev, ndns) == 0)
		return -ENXIO;

	/* ...otherwise we're just a raw pmem device */
	return pmem_attach_disk(dev, ndns);
}

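/*
 * Flush any posted writes to media on remove and shutdown; remaining
 * teardown is handled by the devm release actions.
 */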
static int nd_pmem_remove(struct device *dev)
{
	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	nvdimm_flush(to_nd_region(dev->parent));

	return 0;
}

static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent));
}

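/*
 * Re-populate the badblocks list when the bus reports that the poison
 * state may have changed, resolving the backing namespace for btt and
 * adjusting the range for pfn-mode padding.
 */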
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);
	struct nd_region *nd_region = to_region(pmem);
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct resource res;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

		ndns = nd_pfn->ndns;
		offset = pmem->data_offset + __le32_to_cpu(pfn_sb->start_pad);
		end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
	} else
		ndns = to_ndns(dev);

	nsio = to_nd_namespace_io(&ndns->dev);
	res.start = nsio->res.start + offset;
	res.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

static int __init pmem_init(void)
{
	return nd_driver_register(&nd_pmem_driver);
}
module_init(pmem_init);

static void pmem_exit(void)
{
	driver_unregister(&nd_pmem_driver.drv);
}
module_exit(pmem_exit);

MODULE_AUTHOR("Ross Zwisler <[email protected]>");
MODULE_LICENSE("GPL v2");