struct dm_bufio_client {
struct mutex lock;
spinlock_t spinlock;
- unsigned long spinlock_flags;
+ bool no_sleep;
struct list_head lru[LIST_SIZE];
unsigned long n_buffers[LIST_SIZE];
s8 sectors_per_block_bits;
void (*alloc_callback)(struct dm_buffer *);
void (*write_callback)(struct dm_buffer *);
- bool no_sleep;
-
struct kmem_cache *slab_buffer;
struct kmem_cache *slab_cache;
struct dm_io_client *dm_io;
static void dm_bufio_lock(struct dm_bufio_client *c)
{
if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
- spin_lock_irqsave_nested(&c->spinlock, c->spinlock_flags, dm_bufio_in_request());
+ spin_lock_bh(&c->spinlock);
else
mutex_lock_nested(&c->lock, dm_bufio_in_request());
}
static int dm_bufio_trylock(struct dm_bufio_client *c)
{
if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
- return spin_trylock_irqsave(&c->spinlock, c->spinlock_flags);
+ return spin_trylock_bh(&c->spinlock);
else
return mutex_trylock(&c->lock);
}
static void dm_bufio_unlock(struct dm_bufio_client *c)
{
if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
- spin_unlock_irqrestore(&c->spinlock, c->spinlock_flags);
+ spin_unlock_bh(&c->spinlock);
else
mutex_unlock(&c->lock);
}
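
The switch from spin_lock_irqsave_nested() to spin_lock_bh() also lets the spinlock_flags member be dropped from the client structure: saving IRQ flags into memory shared by all CPUs is racy (concurrent lockers would overwrite each other's saved flags), and the no_sleep path only runs in process or bottom-half context, so disabling softirqs is sufficient. A client opts into this locking mode at creation time. A minimal sketch, assuming the DM_BUFIO_CLIENT_NO_SLEEP flag and the flags parameter of dm_bufio_client_create() from include/linux/dm-bufio.h; alloc_cb and write_cb are placeholder callbacks:

    /* Illustrative only: create a bufio client that must never sleep
     * under its lock, e.g. for dm-verity's try_verify_in_tasklet mode. */
    struct dm_bufio_client *c;

    c = dm_bufio_client_create(bdev, block_size, 1 /* reserved buffers */,
                               0 /* aux size */, alloc_cb, write_cb,
                               DM_BUFIO_CLIENT_NO_SLEEP);
    if (IS_ERR(c))
        return PTR_ERR(c);
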
b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
}
-static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
+static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
unsigned n_sectors, unsigned offset)
{
int r;
struct dm_io_request io_req = {
- .bi_op = rw,
- .bi_op_flags = 0,
+ .bi_opf = op,
.notify.fn = dmio_complete,
.notify.context = b,
.client = b->c->dm_io,
b->end_io(b, status);
}
-static void use_bio(struct dm_buffer *b, int rw, sector_t sector,
+static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
unsigned n_sectors, unsigned offset)
{
struct bio *bio;
bio = bio_kmalloc(vec_size, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
if (!bio) {
dmio:
- use_dmio(b, rw, sector, n_sectors, offset);
+ use_dmio(b, op, sector, n_sectors, offset);
return;
}
- bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, rw);
+ bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, op);
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = bio_complete;
bio->bi_private = b;
return sector;
}
-static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buffer *, blk_status_t))
+static void submit_io(struct dm_buffer *b, enum req_op op,
+ void (*end_io)(struct dm_buffer *, blk_status_t))
{
unsigned n_sectors;
sector_t sector;
sector = block_to_sector(b->c, b->block);
- if (rw != REQ_OP_WRITE) {
+ if (op != REQ_OP_WRITE) {
n_sectors = b->c->block_size >> SECTOR_SHIFT;
offset = 0;
} else {
}
if (b->data_mode != DATA_MODE_VMALLOC)
- use_bio(b, rw, sector, n_sectors, offset);
+ use_bio(b, op, sector, n_sectors, offset);
else
- use_dmio(b, rw, sector, n_sectors, offset);
+ use_dmio(b, op, sector, n_sectors, offset);
}
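
The int rw parameter becomes enum req_op throughout, following the block layer's consolidation of the operation and its modifier flags into a single blk_opf_t (the bi_opf fields above). The two halves can still be separated when needed; a minimal sketch, assuming the REQ_OP_MASK definition from include/linux/blk_types.h:

    #include <linux/blk_types.h>

    /* The operation and its flags travel together in one blk_opf_t... */
    blk_opf_t opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;

    /* ...and can be split back apart with the mask. */
    enum req_op op = opf & REQ_OP_MASK;       /* REQ_OP_WRITE */
    blk_opf_t flags = opf & ~REQ_OP_MASK;     /* REQ_PREFLUSH | REQ_SYNC */

This is the same masking that bio_op() performs to recover the operation from bio->bi_opf.
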
/*----------------------------------------------------------------
BUG_ON(test_bit(B_WRITING, &b->state));
BUG_ON(test_bit(B_DIRTY, &b->state));
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
+ unlikely(test_bit(B_READING, &b->state)))
+ continue;
+
if (!b->hold_count) {
__make_buffer_clean(b);
__unlink_buffer(b);
cond_resched();
}
+ if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
+ return NULL;
+
list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
BUG_ON(test_bit(B_READING, &b->state));
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
struct dm_io_request io_req = {
- .bi_op = REQ_OP_WRITE,
- .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
+ .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
.mem.type = DM_IO_KMEM,
.mem.ptr.addr = NULL,
.client = c->dm_io,
int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
{
struct dm_io_request io_req = {
- .bi_op = REQ_OP_DISCARD,
- .bi_op_flags = REQ_SYNC,
+ .bi_opf = REQ_OP_DISCARD | REQ_SYNC,
.mem.type = DM_IO_KMEM,
.mem.ptr.addr = NULL,
.client = c->dm_io,
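
In both helpers the two-field initializer collapses into one bi_opf value. For the flush, the request is paired with a zero-length region, which is what makes it a pure flush with no data payload. A sketch of how dm_bufio_issue_flush() might complete, assuming dm_io() runs synchronously when no notify callback is set in the request:

    struct dm_io_region io_reg = {
        .bdev = c->bdev,
        .sector = 0,
        .count = 0,    /* zero sectors: no payload, just the flush */
    };

    /* io_req has no .notify.fn set, so dm_io() waits for completion. */
    return dm_io(&io_req, 1, &io_reg, NULL);
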
*/
static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
{
- if (!(gfp & __GFP_FS)) {
+ if (!(gfp & __GFP_FS) ||
+ (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
if (test_bit(B_READING, &b->state) ||
test_bit(B_WRITING, &b->state) ||
test_bit(B_DIRTY, &b->state))
c->shrinker.scan_objects = dm_bufio_shrink_scan;
c->shrinker.seeks = 1;
c->shrinker.batch = 0;
- r = register_shrinker(&c->shrinker);
+ r = register_shrinker(&c->shrinker, "md-%s:(%u:%u)", slab_name,
+ MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
if (r)
goto bad;
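
register_shrinker() gained a printf-style name argument in the "mm: shrinkers: provide shrinkers with names" series (v6.0), so that per-shrinker statistics can be told apart under /sys/kernel/debug/shrinker/. The shape of the change, side by side:

    /* Before: the shrinker was anonymous. */
    r = register_shrinker(&c->shrinker);

    /* After: named with the slab name plus the device's major:minor. */
    r = register_shrinker(&c->shrinker, "md-%s:(%u:%u)", slab_name,
                          MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
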
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/scatterlist.h>
+#include <linux/string.h>
#include <linux/jump_label.h>
#define DM_MSG_PREFIX "verity"
#define DM_VERITY_OPT_AT_MOST_ONCE "check_at_most_once"
#define DM_VERITY_OPT_TASKLET_VERIFY "try_verify_in_tasklet"
-#define DM_VERITY_OPTS_MAX (3 + DM_VERITY_OPTS_FEC + \
+#define DM_VERITY_OPTS_MAX (4 + DM_VERITY_OPTS_FEC + \
DM_VERITY_ROOT_HASH_VERIFICATION_OPTS)
static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
struct dm_verity_sig_opts *verify_args,
bool only_modifier_opts)
{
- int r;
+ int r = 0;
unsigned argc;
struct dm_target *ti = v->ti;
const char *arg_name;
if (r)
return r;
continue;
+
+ } else if (only_modifier_opts) {
+ /*
+ * Ignore unrecognized opt, could easily be an extra
+ * argument to an option whose parsing was skipped.
+ * Normal parsing (@only_modifier_opts=false) will
+ * properly parse all options (and their extra args).
+ */
+ continue;
}
+ DMERR("Unrecognized verity feature request: %s", arg_name);
ti->error = "Unrecognized verity feature request";
return -EINVAL;
} while (argc && !r);
return r;
}
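
Initializing r to 0 is not cosmetic: the new continue path can reach the loop condition, and thus the final return r, without r ever having been assigned. The only_modifier_opts branch itself exists because the parser now runs twice: an early, modifiers-only pass that must tolerate strings it does not recognize (they may be arguments to options it deliberately skipped), followed by the normal full pass. A hypothetical sketch of that two-pass pattern in verity_ctr(); the surrounding setup is elided:

    /* Pass 1: pick up modifiers (e.g. try_verify_in_tasklet) before
     * the hash transform is allocated; unknown strings are tolerated. */
    r = verity_parse_opt_args(&as, v, &verify_args, true);
    if (r)
        goto bad;

    /* ... allocate the hash tfm, which can depend on the modifiers ... */

    /* Pass 2: full parse; anything still unrecognized now fails with
     * the DMERR above. */
    r = verity_parse_opt_args(&as, v, &verify_args, false);
    if (r)
        goto bad;
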
+/*
+ * Check whether a DM target is a verity target.
+ */
+bool dm_is_verity_target(struct dm_target *ti)
+{
+ return ti->type->module == THIS_MODULE;
+}
+
+/*
+ * Get the root digest of a verity target.
+ *
+ * Returns a copy of the root digest; the caller is responsible for
+ * freeing the memory of the digest.
+ */
+int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsigned int *digest_size)
+{
+ struct dm_verity *v = ti->private;
+
+ if (!dm_is_verity_target(ti))
+ return -EINVAL;
+
+ *root_digest = kmemdup(v->root_digest, v->digest_size, GFP_KERNEL);
+ if (*root_digest == NULL)
+ return -ENOMEM;
+
+ *digest_size = v->digest_size;
+
+ return 0;
+}
+
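
A sketch of how an in-kernel consumer of the two new helpers might look; the function name is hypothetical, and the declarations are assumed to be exposed via the driver's dm-verity.h:

    #include <linux/device-mapper.h>
    #include <linux/printk.h>
    #include <linux/slab.h>

    /* Hypothetical caller: fetch and dump a verity target's root digest.
     * dm_verity_get_root_digest() hands back a kmemdup()'d copy, so the
     * caller owns it and must kfree() it. */
    static int measure_verity_target(struct dm_target *ti)
    {
        u8 *root_digest;
        unsigned int digest_size;
        int r;

        r = dm_verity_get_root_digest(ti, &root_digest, &digest_size);
        if (r)
            return r;    /* -EINVAL if ti is not a verity target */

        print_hex_dump_bytes("verity root digest: ", DUMP_PREFIX_NONE,
                             root_digest, digest_size);
        kfree(root_digest);
        return 0;
    }
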
static struct target_type verity_target = {
.name = "verity",
.features = DM_TARGET_IMMUTABLE,
region.sector += wc->start_sector;
atomic_inc(&endio.count);
- req.bi_op = REQ_OP_WRITE;
- req.bi_op_flags = REQ_SYNC;
+ req.bi_opf = REQ_OP_WRITE | REQ_SYNC;
req.mem.type = DM_IO_VMA;
req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY;
req.client = wc->dm_io;
region.sector += wc->start_sector;
- req.bi_op = REQ_OP_WRITE;
- req.bi_op_flags = REQ_SYNC | REQ_FUA;
+ req.bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
req.mem.type = DM_IO_VMA;
req.mem.ptr.vma = (char *)wc->memory_map;
req.client = wc->dm_io;
region.bdev = dev->bdev;
region.sector = 0;
region.count = 0;
- req.bi_op = REQ_OP_WRITE;
- req.bi_op_flags = REQ_PREFLUSH;
+ req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
req.mem.type = DM_IO_KMEM;
req.mem.ptr.addr = NULL;
req.client = wc->dm_io;
region.bdev = wc->ssd_dev->bdev;
region.sector = wc->start_sector;
region.count = n_sectors;
- req.bi_op = REQ_OP_READ;
- req.bi_op_flags = REQ_SYNC;
+ req.bi_opf = REQ_OP_READ | REQ_SYNC;
req.mem.type = DM_IO_VMA;
req.mem.ptr.vma = (char *)wc->memory_map;
req.client = wc->dm_io;
default:
BUG();
- return -1;
+ wc_unlock(wc);
+ return DM_MAPIO_KILL;
}
}
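
Returning a bare -1 from a target's .map method was doubly wrong: it is not a valid device-mapper return code, and the path exited with the writecache lock still held. DM_MAPIO_KILL tells core DM to fail the bio with BLK_STS_IOERR, and the added wc_unlock() balances the lock. For reference, the .map return codes as defined in include/linux/device-mapper.h:

    DM_MAPIO_SUBMITTED      0    /* target has taken ownership of the I/O */
    DM_MAPIO_REMAPPED       1    /* core DM should submit the remapped I/O */
    DM_MAPIO_REQUEUE        2    /* requeue the I/O */
    DM_MAPIO_DELAY_REQUEUE  3    /* requeue the I/O after a delay */
    DM_MAPIO_KILL           4    /* fail the I/O with BLK_STS_IOERR */
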