if (rc)
return rc;
- device_lock(dev);
+ nfit_device_lock(dev);
nd_desc = dev_get_drvdata(dev);
if (nd_desc) {
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
break;
}
}
- device_unlock(dev);
+ nfit_device_unlock(dev);
if (rc)
return rc;
return size;
ssize_t rc = -ENXIO;
bool busy;
- device_lock(dev);
+ nfit_device_lock(dev);
nd_desc = dev_get_drvdata(dev);
if (!nd_desc) {
- device_unlock(dev);
+ nfit_device_unlock(dev);
}
mutex_unlock(&acpi_desc->init_mutex);
- device_unlock(dev);
+ nfit_device_unlock(dev);
return rc;
}
if (val != 1)
return -EINVAL;
- device_lock(dev);
+ nfit_device_lock(dev);
nd_desc = dev_get_drvdata(dev);
if (nd_desc) {
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
}
- device_unlock(dev);
+ nfit_device_unlock(dev);
if (rc)
return rc;
return size;
struct acpi_device *adev = data;
struct device *dev = &adev->dev;
- device_lock(dev->parent);
+ nfit_device_lock(dev->parent);
__acpi_nvdimm_notify(dev, event);
- device_unlock(dev->parent);
+ nfit_device_unlock(dev->parent);
}
static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
offset = to_interleave_offset(offset, mmio);
writeq(cmd, mmio->addr.base + offset);
- nvdimm_flush(nfit_blk->nd_region);
+ nvdimm_flush(nfit_blk->nd_region, NULL);
if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
readq(mmio->addr.base + offset);
}
if (rw)
- nvdimm_flush(nfit_blk->nd_region);
+ nvdimm_flush(nfit_blk->nd_region, NULL);
rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
return rc;
struct device *dev = acpi_desc->dev;
/* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
- device_lock(dev);
- device_unlock(dev);
+ nfit_device_lock(dev);
+ nfit_device_unlock(dev);
/* Bounce the init_mutex to complete initial registration */
mutex_lock(&acpi_desc->init_mutex);
* acpi_nfit_ars_rescan() submissions have had a chance to
* either submit or see ->cancel set.
*/
- device_lock(bus_dev);
- device_unlock(bus_dev);
+ nfit_device_lock(bus_dev);
+ nfit_device_unlock(bus_dev);
flush_workqueue(nfit_wq);
}
static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
- device_lock(&adev->dev);
+ nfit_device_lock(&adev->dev);
__acpi_nfit_notify(&adev->dev, adev->handle, event);
- device_unlock(&adev->dev);
+ nfit_device_unlock(&adev->dev);
}
static const struct acpi_device_id acpi_nfit_ids[] = {
kobject_init(&dev->kobj, &device_ktype);
INIT_LIST_HEAD(&dev->dma_pools);
mutex_init(&dev->mutex);
+ #ifdef CONFIG_PROVE_LOCKING
+ mutex_init(&dev->lockdep_mutex);
+ #endif
lockdep_set_novalidate_class(&dev->mutex);
spin_lock_init(&dev->devres_lock);
INIT_LIST_HEAD(&dev->devres_head);
}
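The device_lock() mutex is exempted from lockdep validation via lockdep_set_novalidate_class() above, which is why subsystems historically got no deadlock coverage for it. The new lockdep_mutex regains that coverage when taken as a shadow lock immediately inside a subsystem's own wrapper. A minimal sketch of the idea behind the nd_device_lock()/nfit_device_lock() helpers used throughout these hunks (the in-tree helpers live in subsystem headers and may additionally use mutex_lock_nested() with per-device-type subclasses):

#ifdef CONFIG_PROVE_LOCKING
static inline void nd_device_lock(struct device *dev)
{
	device_lock(dev);
	/* shadow the novalidate device_lock with a lockdep-visible mutex */
	mutex_lock(&dev->lockdep_mutex);
}

static inline void nd_device_unlock(struct device *dev)
{
	mutex_unlock(&dev->lockdep_mutex);
	device_unlock(dev);
}
#else
static inline void nd_device_lock(struct device *dev)
{
	device_lock(dev);
}

static inline void nd_device_unlock(struct device *dev)
{
	device_unlock(dev);
}
#endif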
EXPORT_SYMBOL_GPL(put_device);
+ bool kill_device(struct device *dev)
+ {
+ /*
+ * Require the device lock and set the "dead" flag to guarantee that
+ * the update behavior is consistent with the other bitfields near
+ * it and that we cannot have an asynchronous probe routine trying
+ * to run while we are tearing out the bus/class/sysfs from
+ * underneath the device.
+ */
+ lockdep_assert_held(&dev->mutex);
+
+ if (dev->p->dead)
+ return false;
+ dev->p->dead = true;
+ return true;
+ }
+ EXPORT_SYMBOL_GPL(kill_device);
+
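With the "dead" transition factored out of device_del(), a bus or subsystem can revoke a device ahead of full unregistration. A hedged sketch (example_teardown() and its caller are hypothetical):

static void example_teardown(struct device *dev)
{
	device_lock(dev);
	if (kill_device(dev))
		dev_dbg(dev, "dead: async probe/bind now excluded\n");
	device_unlock(dev);

	/* device_del() calls kill_device() again; the second call is a no-op */
	device_del(dev);
}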
/**
* device_del - delete device from system.
* @dev: device.
struct kobject *glue_dir = NULL;
struct class_interface *class_intf;
- /*
- * Hold the device lock and set the "dead" flag to guarantee that
- * the update behavior is consistent with the other bitfields near
- * it and that we cannot have an asynchronous probe routine trying
- * to run while we are tearing out the bus/class/sysfs from
- * underneath the device.
- */
device_lock(dev);
- dev->p->dead = true;
+ kill_device(dev);
device_unlock(dev);
/* Notify clients of device removal. This call must come
}
EXPORT_SYMBOL_GPL(device_find_child);
+/**
+ * device_find_child_by_name - device iterator for locating a child device.
+ * @parent: parent struct device
+ * @name: name of the child device
+ *
+ * This is similar to the device_find_child() function above, but it
+ * returns a reference to a device that has the name @name.
+ *
+ * NOTE: you will need to drop the reference with put_device() after use.
+ */
+struct device *device_find_child_by_name(struct device *parent,
+ const char *name)
+{
+ struct klist_iter i;
+ struct device *child;
+
+ if (!parent)
+ return NULL;
+
+ klist_iter_init(&parent->p->klist_children, &i);
+ while ((child = next_device(&i)))
+ if (!strcmp(dev_name(child), name) && get_device(child))
+ break;
+ klist_iter_exit(&i);
+ return child;
+}
+EXPORT_SYMBOL_GPL(device_find_child_by_name);
+
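Callers own the reference returned on a match. A usage sketch (the child name is hypothetical):

struct device *child;

child = device_find_child_by_name(parent, "namespace0.0");
if (child) {
	/* ... operate on the child ... */
	put_device(child);	/* drop the reference taken by the lookup */
}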
int __init devices_init(void)
{
devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
dev->of_node_reused = true;
}
EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);
+
+int device_match_of_node(struct device *dev, const void *np)
+{
+ return dev->of_node == np;
+}
+EXPORT_SYMBOL_GPL(device_match_of_node);
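device_match_of_node() is shaped as a generic match callback; paired with the const-ified bus_find_device() signature later in this series, it replaces open-coded of_node comparisons. Sketch, assuming np is a previously obtained struct device_node pointer and using platform_bus_type for illustration:

struct device *dev;

dev = bus_find_device(&platform_bus_type, NULL, np, device_match_of_node);
if (dev) {
	/* ... */
	put_device(dev);
}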
struct nd_region *nd_region = to_nd_region(dev->parent);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __alt_name_store(dev, buf, len);
rc = nd_namespace_label_update(nd_region, dev);
dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc < 0 ? rc : len;
}
if (rc)
return rc;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __size_store(dev, val);
dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc < 0 ? rc : len;
}
} else
return -ENXIO;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
if (to_ndns(dev)->claim)
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc < 0 ? rc : len;
}
} else
return -ENXIO;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
if (to_ndns(dev)->claim)
rc = -EBUSY;
dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
buf, buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc ? rc : len;
}
struct nd_namespace_common *ndns = to_ndns(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
struct nd_region *nd_region = to_nd_region(dev->parent);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __holder_class_store(dev, buf);
rc = nd_namespace_label_update(nd_region, dev);
dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc < 0 ? rc : len;
}
struct nd_namespace_common *ndns = to_ndns(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
if (ndns->claim_class == NVDIMM_CCLASS_NONE)
rc = sprintf(buf, "\n");
else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) ||
rc = sprintf(buf, "dax\n");
else
rc = sprintf(buf, "<unknown>\n");
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
char *mode;
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
claim = ndns->claim;
if (claim && is_nd_btt(claim))
mode = "safe";
else
mode = "raw";
rc = sprintf(buf, "%s\n", mode);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
* Flush any in-progress probes / removals in the driver
* for the raw personality of this namespace.
*/
- device_lock(&ndns->dev);
- device_unlock(&ndns->dev);
+ nd_device_lock(&ndns->dev);
+ nd_device_unlock(&ndns->dev);
if (ndns->dev.driver) {
dev_dbg(&ndns->dev, "is active, can't bind %s\n",
dev_name(dev));
&& !guid_equal(&nd_set->type_guid,
&nd_label->type_guid)) {
dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
- nd_set->type_guid.b,
- nd_label->type_guid.b);
+ &nd_set->type_guid,
+ &nd_label->type_guid);
continue;
}
if (namespace_label_has(ndd, type_guid)) {
if (!guid_equal(&nd_set->type_guid, &nd_label->type_guid)) {
dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
- nd_set->type_guid.b,
- nd_label->type_guid.b);
+ &nd_set->type_guid,
+ &nd_label->type_guid);
return ERR_PTR(-EAGAIN);
}
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc = 0;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
if (dev->driver)
rc = -EBUSY;
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc ? rc : len;
}
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_size_select_store(dev, buf, &nd_pfn->align,
nd_pfn_supported_alignments());
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc ? rc : len;
}
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc ? rc : len;
}
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
if (dev->driver) {
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
u64 offset = __le64_to_cpu(pfn_sb->dataoff);
/* no address to convey if the pfn instance is disabled */
rc = -ENXIO;
}
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
if (dev->driver) {
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
u64 offset = __le64_to_cpu(pfn_sb->dataoff);
/* no size to convey if the pfn instance is disabled */
rc = -ENXIO;
}
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
return 0;
}
+/**
+ * nd_pfn_validate - read and validate info-block
+ * @nd_pfn: fsdax namespace runtime state / properties
+ * @sig: 'devdax' or 'fsdax' signature
+ *
+ * Upon return, the info-block buffer contents (->pfn_sb) are
+ * indeterminate when validation fails; otherwise they hold a
+ * coherent info-block.
+ */
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
u64 checksum, offset;
nvdimm_bus_unlock(&ndns->dev);
if (!pfn_dev)
return -ENOMEM;
- pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
+ pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
nd_pfn = to_nd_pfn(pfn_dev);
nd_pfn->pfn_sb = pfn_sb;
rc = nd_pfn_validate(nd_pfn, PFN_SIG);
}
/*
- * We hotplug memory at section granularity, pad the reserved area from
- * the previous section base to the namespace base address.
+ * We hotplug memory at sub-section granularity, so pad the reserved
+ * area from the previous sub-section base to the namespace base address.
*/
static unsigned long init_altmap_base(resource_size_t base)
{
unsigned long base_pfn = PHYS_PFN(base);
- return PFN_SECTION_ALIGN_DOWN(base_pfn);
+ return SUBSECTION_ALIGN_DOWN(base_pfn);
}
static unsigned long init_altmap_reserve(resource_size_t base)
unsigned long reserve = info_block_reserve() >> PAGE_SHIFT;
unsigned long base_pfn = PHYS_PFN(base);
- reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
+ reserve += base_pfn - SUBSECTION_ALIGN_DOWN(base_pfn);
return reserve;
}
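A worked instance of the sub-section arithmetic, assuming 4K pages and 2 MiB sub-sections (PAGES_PER_SUBSECTION == 512); the base address is hypothetical:

/*
 * base = 0x140100000          ->  base_pfn = 0x140100
 * SUBSECTION_ALIGN_DOWN(base_pfn) = 0x140000  (512-pfn boundary)
 * pad  = 0x100 pfns (1 MiB): init_altmap_base() backs pages from the
 * sub-section base, and init_altmap_reserve() folds the pad into the
 * reserve so those pfns are never handed out by the altmap.
 */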
if (offset < reserve)
return -EINVAL;
nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
- pgmap->altmap_valid = false;
} else if (nd_pfn->mode == PFN_MODE_PMEM) {
- nd_pfn->npfns = PFN_SECTION_ALIGN_UP((resource_size(res)
- - offset) / PAGE_SIZE);
+ nd_pfn->npfns = PHYS_PFN((resource_size(res) - offset));
if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
dev_info(&nd_pfn->dev,
"number of pfns truncated from %lld to %ld\n",
memcpy(altmap, &__altmap, sizeof(*altmap));
altmap->free = PHYS_PFN(offset - reserve);
altmap->alloc = 0;
- pgmap->altmap_valid = true;
+ pgmap->flags |= PGMAP_ALTMAP_VALID;
} else
return -ENXIO;
return 0;
}
-static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys)
-{
- return min_t(u64, PHYS_SECTION_ALIGN_DOWN(phys),
- ALIGN_DOWN(phys, nd_pfn->align));
-}
-
-/*
- * Check if pmem collides with 'System RAM', or other regions when
- * section aligned. Trim it accordingly.
- */
-static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trunc)
-{
- struct nd_namespace_common *ndns = nd_pfn->ndns;
- struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
- struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
- const resource_size_t start = nsio->res.start;
- const resource_size_t end = start + resource_size(&nsio->res);
- resource_size_t adjust, size;
-
- *start_pad = 0;
- *end_trunc = 0;
-
- adjust = start - PHYS_SECTION_ALIGN_DOWN(start);
- size = resource_size(&nsio->res) + adjust;
- if (region_intersects(start - adjust, size, IORESOURCE_SYSTEM_RAM,
- IORES_DESC_NONE) == REGION_MIXED
- || nd_region_conflict(nd_region, start - adjust, size))
- *start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
-
- /* Now check that end of the range does not collide. */
- adjust = PHYS_SECTION_ALIGN_UP(end) - end;
- size = resource_size(&nsio->res) + adjust;
- if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
- IORES_DESC_NONE) == REGION_MIXED
- || !IS_ALIGNED(end, nd_pfn->align)
- || nd_region_conflict(nd_region, start, size))
- *end_trunc = end - phys_pmem_align_down(nd_pfn, end);
-}
-
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
struct nd_namespace_common *ndns = nd_pfn->ndns;
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
- u32 start_pad, end_trunc, reserve = info_block_reserve();
resource_size_t start, size;
struct nd_region *nd_region;
+ unsigned long npfns, align;
struct nd_pfn_sb *pfn_sb;
- unsigned long npfns;
phys_addr_t offset;
const char *sig;
u64 checksum;
int rc;
- pfn_sb = devm_kzalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
+ pfn_sb = devm_kmalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
if (!pfn_sb)
return -ENOMEM;
sig = DAX_SIG;
else
sig = PFN_SIG;
+
rc = nd_pfn_validate(nd_pfn, sig);
if (rc != -ENODEV)
return rc;
/* no info block, do init */;
+ memset(pfn_sb, 0, sizeof(*pfn_sb));
+
nd_region = to_nd_region(nd_pfn->dev.parent);
if (nd_region->ro) {
dev_info(&nd_pfn->dev,
return -ENXIO;
}
- memset(pfn_sb, 0, sizeof(*pfn_sb));
-
- trim_pfn_device(nd_pfn, &start_pad, &end_trunc);
- if (start_pad + end_trunc)
- dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n",
- dev_name(&ndns->dev), start_pad + end_trunc);
-
/*
* Note, we use 64 here for the standard size of struct page;
* debugging options may cause it to be larger, in which case the
* implementation will limit the pfns advertised through
* ->direct_access() to those that are included in the memmap.
*/
- start = nsio->res.start + start_pad;
+ start = nsio->res.start;
size = resource_size(&nsio->res);
- npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - reserve)
- / PAGE_SIZE);
+ npfns = PHYS_PFN(size - SZ_8K);
+ align = max(nd_pfn->align, (1UL << SUBSECTION_SHIFT));
if (nd_pfn->mode == PFN_MODE_PMEM) {
/*
* The altmap should be padded out to the block size used
* when populating the vmemmap. This *should* be equal to
* PMD_SIZE for most architectures.
*/
- offset = ALIGN(start + reserve + 64 * npfns,
- max(nd_pfn->align, PMD_SIZE)) - start;
+ offset = ALIGN(start + SZ_8K + 64 * npfns, align) - start;
} else if (nd_pfn->mode == PFN_MODE_RAM)
- offset = ALIGN(start + reserve, nd_pfn->align) - start;
+ offset = ALIGN(start + SZ_8K, align) - start;
else
return -ENXIO;
- if (offset + start_pad + end_trunc >= size) {
+ if (offset >= size) {
dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
dev_name(&ndns->dev));
return -ENXIO;
}
- npfns = (size - offset - start_pad - end_trunc) / SZ_4K;
+ npfns = PHYS_PFN(size - offset);
pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
pfn_sb->dataoff = cpu_to_le64(offset);
pfn_sb->npfns = cpu_to_le64(npfns);
memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
pfn_sb->version_major = cpu_to_le16(1);
- pfn_sb->version_minor = cpu_to_le16(2);
- pfn_sb->start_pad = cpu_to_le32(start_pad);
- pfn_sb->end_trunc = cpu_to_le32(end_trunc);
+ pfn_sb->version_minor = cpu_to_le16(3);
pfn_sb->align = cpu_to_le32(nd_pfn->align);
checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
pfn_sb->checksum = cpu_to_le64(checksum);
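For scale, a worked instance of the new offset/npfns math under stated assumptions (16 GiB namespace starting on a 2 MiB boundary, PFN_MODE_PMEM, 4K pages, 64-byte struct page, 2 MiB align):

/*
 * npfns  = PHYS_PFN(16G - SZ_8K)                 ~= 4194302
 * offset = ALIGN(start + SZ_8K + 64 * npfns, 2M) - start
 *        = 258 MiB reserved for the info block plus memmap
 * npfns  = PHYS_PFN(16G - offset)                 (stored in pfn_sb->npfns)
 */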
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
+ int ret = 0;
blk_status_t rc = 0;
bool do_acct;
unsigned long start;
struct nd_region *nd_region = to_region(pmem);
if (bio->bi_opf & REQ_PREFLUSH)
- nvdimm_flush(nd_region);
+ ret = nvdimm_flush(nd_region, bio);
do_acct = nd_iostat_start(bio, &start);
bio_for_each_segment(bvec, bio, iter) {
nd_iostat_end(bio, start);
if (bio->bi_opf & REQ_FUA)
- nvdimm_flush(nd_region);
+ ret = nvdimm_flush(nd_region, bio);
+
+ if (ret)
+ bio->bi_status = errno_to_blk_status(ret);
bio_endio(bio);
return BLK_QC_T_NONE;
NULL,
};
-static void pmem_release_queue(void *q)
+static void pmem_pagemap_cleanup(struct dev_pagemap *pgmap)
{
+ struct request_queue *q =
+ container_of(pgmap->ref, struct request_queue, q_usage_counter);
+
blk_cleanup_queue(q);
}
-static void pmem_freeze_queue(struct percpu_ref *ref)
+static void pmem_release_queue(void *pgmap)
{
- struct request_queue *q;
+ pmem_pagemap_cleanup(pgmap);
+}
+
+static void pmem_pagemap_kill(struct dev_pagemap *pgmap)
+{
+ struct request_queue *q =
+ container_of(pgmap->ref, struct request_queue, q_usage_counter);
- q = container_of(ref, typeof(*q), q_usage_counter);
blk_freeze_queue_start(q);
}
put_disk(pmem->disk);
}
-static void pmem_release_pgmap_ops(void *__pgmap)
-{
- dev_pagemap_put_ops();
-}
-
-static void fsdax_pagefree(struct page *page, void *data)
+static void pmem_pagemap_page_free(struct page *page)
{
wake_up_var(&page->_refcount);
}
-static int setup_pagemap_fsdax(struct device *dev, struct dev_pagemap *pgmap)
-{
- dev_pagemap_get_ops();
- if (devm_add_action_or_reset(dev, pmem_release_pgmap_ops, pgmap))
- return -ENOMEM;
- pgmap->type = MEMORY_DEVICE_FS_DAX;
- pgmap->page_free = fsdax_pagefree;
-
- return 0;
-}
+static const struct dev_pagemap_ops fsdax_pagemap_ops = {
+ .page_free = pmem_pagemap_page_free,
+ .kill = pmem_pagemap_kill,
+ .cleanup = pmem_pagemap_cleanup,
+};
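Presumed teardown ordering that devm_memremap_pages() drives through these callbacks (a sketch inferred from the handlers above, not a quote of the memremap core):

/*
 * pgmap->ops->kill(pgmap)    -> blk_freeze_queue_start(): new references
 *                               on q_usage_counter start failing
 * pgmap->ops->cleanup(pgmap) -> blk_cleanup_queue(): waits out existing
 *                               references and releases the queue
 */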
static int pmem_attach_disk(struct device *dev,
struct nd_namespace_common *ndns)
struct gendisk *disk;
void *addr;
int rc;
+ unsigned long flags = 0UL;
pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
if (!pmem)
if (!q)
return -ENOMEM;
- if (devm_add_action_or_reset(dev, pmem_release_queue, q))
- return -ENOMEM;
-
pmem->pfn_flags = PFN_DEV;
pmem->pgmap.ref = &q->q_usage_counter;
- pmem->pgmap.kill = pmem_freeze_queue;
if (is_nd_pfn(dev)) {
- if (setup_pagemap_fsdax(dev, &pmem->pgmap))
- return -ENOMEM;
+ pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
+ pmem->pgmap.ops = &fsdax_pagemap_ops;
addr = devm_memremap_pages(dev, &pmem->pgmap);
pfn_sb = nd_pfn->pfn_sb;
pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
bb_res.start += pmem->data_offset;
} else if (pmem_should_map_pages(dev)) {
memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
- pmem->pgmap.altmap_valid = false;
- if (setup_pagemap_fsdax(dev, &pmem->pgmap))
- return -ENOMEM;
+ pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
+ pmem->pgmap.ops = &fsdax_pagemap_ops;
addr = devm_memremap_pages(dev, &pmem->pgmap);
pmem->pfn_flags |= PFN_MAP;
memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
} else {
+ if (devm_add_action_or_reset(dev, pmem_release_queue,
+ &pmem->pgmap))
+ return -ENOMEM;
addr = devm_memremap(dev, pmem->phys_addr,
pmem->size, ARCH_MEMREMAP_PMEM);
memcpy(&bb_res, &nsio->res, sizeof(bb_res));
nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_res);
disk->bb = &pmem->bb;
- dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops);
+ if (is_nvdimm_sync(nd_region))
+ flags = DAXDEV_F_SYNC;
+ dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops, flags);
if (!dax_dev) {
put_disk(disk);
return -ENOMEM;
}
dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
pmem->dax_dev = dax_dev;
-
gendev = disk_to_dev(disk);
gendev->groups = pmem_attribute_groups;
nvdimm_namespace_detach_btt(to_nd_btt(dev));
else {
/*
- * Note, this assumes device_lock() context to not race
- * nd_pmem_notify()
+ * Note, this assumes nd_device_lock() context to not
+ * race nd_pmem_notify()
*/
sysfs_put(pmem->bb_state);
pmem->bb_state = NULL;
}
- nvdimm_flush(to_nd_region(dev->parent));
+ nvdimm_flush(to_nd_region(dev->parent), NULL);
return 0;
}
static void nd_pmem_shutdown(struct device *dev)
{
- nvdimm_flush(to_nd_region(dev->parent));
+ nvdimm_flush(to_nd_region(dev->parent), NULL);
}
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
return rc;
if (!flush)
return -EINVAL;
- nvdimm_flush(nd_region);
+ rc = nvdimm_flush(nd_region, NULL);
+ if (rc)
+ return rc;
return len;
}
* the v1.1 namespace label cookie definition. To read all this
* data we need to wait for probing to settle.
*/
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
if (nd_region->ndr_mappings) {
}
}
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
if (rc)
return rc;
* memory nvdimm_bus_lock() is dropped, but that's userspace's
* problem to not race itself.
*/
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
available = nd_region_available_dpa(nd_region);
nvdimm_bus_unlock(dev);
+ nd_device_unlock(dev);
return sprintf(buf, "%llu\n", available);
}
struct nd_region *nd_region = to_nd_region(dev);
unsigned long long available = 0;
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
available = nd_region_allocatable_dpa(nd_region);
nvdimm_bus_unlock(dev);
+ nd_device_unlock(dev);
return sprintf(buf, "%llu\n", available);
}
struct nd_region *nd_region = to_nd_region(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
if (dev->driver)
rc = badblocks_show(&nd_region->bb, buf, 0);
else
rc = -ENXIO;
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
dev->of_node = ndr_desc->of_node;
nd_region->ndr_size = resource_size(ndr_desc->res);
nd_region->ndr_start = ndr_desc->res->start;
+ if (ndr_desc->flush)
+ nd_region->flush = ndr_desc->flush;
+ else
+ nd_region->flush = NULL;
+
nd_device_register(dev);
return nd_region;
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);
+int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
+{
+ int rc = 0;
+
+ if (!nd_region->flush)
+ rc = generic_nvdimm_flush(nd_region);
+ else {
+ if (nd_region->flush(nd_region, bio))
+ rc = -EIO;
+ }
+
+ return rc;
+}
/**
- * nvdimm_flush - flush any posted write queues between the cpu and pmem media
+ * generic_nvdimm_flush - flush any posted write queues between the cpu and pmem media
* @nd_region: blk or interleaved pmem region
*/
-void nvdimm_flush(struct nd_region *nd_region)
+int generic_nvdimm_flush(struct nd_region *nd_region)
{
struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
int i, idx;
if (ndrd_get_flush_wpq(ndrd, i, 0))
writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
wmb();
+
+ return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_flush);
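The indirection gives providers a hook ahead of the WPQ-based generic path. A hedged sketch of wiring a custom callback through nd_region_desc (example_flush()/example_host_flush() are hypothetical; an async provider would also set ND_REGION_ASYNC so that is_nvdimm_sync() reports false):

static int example_flush(struct nd_region *nd_region, struct bio *bio)
{
	/* non-zero here is reported by nvdimm_flush() as -EIO */
	return example_host_flush(nd_region_provider_data(nd_region));
}

	/* at region creation time: */
	ndr_desc.flush = example_flush;
	set_bit(ND_REGION_ASYNC, &ndr_desc.flags);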
}
EXPORT_SYMBOL_GPL(nvdimm_has_cache);
+bool is_nvdimm_sync(struct nd_region *nd_region)
+{
+ return is_nd_pmem(&nd_region->dev) &&
+ !test_bit(ND_REGION_ASYNC, &nd_region->flags);
+}
+EXPORT_SYMBOL_GPL(is_nvdimm_sync);
+
struct conflict_context {
struct nd_region *nd_region;
resource_size_t start, size;
* Copyright (c) 2008-2009 Novell Inc.
*
- * See Documentation/driver-model/ for more information.
+ * See Documentation/driver-api/driver-model/ for more information.
*/
#ifndef _DEVICE_H_
struct iommu_group;
struct iommu_fwspec;
struct dev_pin_info;
+struct iommu_param;
struct bus_attribute {
struct attribute attr;
struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
+int device_match_of_node(struct device *dev, const void *np);
+
int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
int (*fn)(struct device *dev, void *data));
struct device *bus_find_device(struct bus_type *bus, struct device *start,
- void *data,
- int (*match)(struct device *dev, void *data));
+ const void *data,
+ int (*match)(struct device *dev, const void *data));
struct device *bus_find_device_by_name(struct bus_type *bus,
struct device *start,
const char *name);
int (*fn)(struct device *dev,
void *));
struct device *driver_find_device(struct device_driver *drv,
- struct device *start, void *data,
- int (*match)(struct device *dev, void *data));
+ struct device *start, const void *data,
+ int (*match)(struct device *dev, const void *data));
void driver_deferred_probe_add(struct device *dev);
int driver_deferred_probe_check_state(struct device *dev);
+int driver_deferred_probe_check_state_continue(struct device *dev);
/**
* struct subsys_interface - interfaces to device functions
gfp_t gfp_mask, unsigned int order);
extern void devm_free_pages(struct device *dev, unsigned long addr);
-void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
+void __iomem *devm_ioremap_resource(struct device *dev,
+ const struct resource *res);
void __iomem *devm_of_iomap(struct device *dev,
struct device_node *node, int index,
/* allows to add/remove a custom action to devres stack */
int devm_add_action(struct device *dev, void (*action)(void *), void *data);
void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
+void devm_release_action(struct device *dev, void (*action)(void *), void *data);
static inline int devm_add_action_or_reset(struct device *dev,
void (*action)(void *), void *data)
* This identifies the device type and carries type-specific
* information.
* @mutex: Mutex to synchronize calls to its driver.
+ * @lockdep_mutex: An optional debug lock that a subsystem can use as a
+ * peer lock to gain localized lockdep coverage of the device_lock.
* @bus: Type of bus device is on.
* @driver: Which driver has allocated this
* @platform_data: Platform data specific to the device.
* device (i.e. the bus driver that discovered the device).
* @iommu_group: IOMMU group the device belongs to.
* @iommu_fwspec: IOMMU-specific properties supplied by firmware.
+ * @iommu_param: Per device generic IOMMU runtime data
*
* @offline_disabled: If set, the device is permanently online.
* @offline: Set after successful invocation of bus type's .offline().
core doesn't touch it */
void *driver_data; /* Driver data, set and get with
dev_set_drvdata/dev_get_drvdata */
+ #ifdef CONFIG_PROVE_LOCKING
+ struct mutex lockdep_mutex;
+ #endif
struct mutex mutex; /* mutex to synchronize calls to
* its driver.
*/
void (*release)(struct device *dev);
struct iommu_group *iommu_group;
struct iommu_fwspec *iommu_fwspec;
+ struct iommu_param *iommu_param;
bool offline_disabled:1;
bool offline:1;
int (*fn)(struct device *dev, void *data));
extern struct device *device_find_child(struct device *dev, void *data,
int (*match)(struct device *dev, void *data));
+extern struct device *device_find_child_by_name(struct device *parent,
+ const char *name);
extern int device_rename(struct device *dev, const char *new_name);
extern int device_move(struct device *dev, struct device *new_parent,
enum dpm_order dpm_order);
*/
extern struct device *get_device(struct device *dev);
extern void put_device(struct device *dev);
+ extern bool kill_device(struct device *dev);
#ifdef CONFIG_DEVTMPFS
extern int devtmpfs_create_node(struct device *dev);