u64 fprs[16];
u32 fpc;
u32 prefix;
- u64 todpreg;
+ u32 todpreg;
u64 timer;
u64 todcmp;
u64 vxrs_low[16];
kvec.iov_base = dst;
kvec.iov_len = count;
- iov_iter_kvec(&iter, WRITE, &kvec, 1, count);
+ iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
if (copy_oldmem_iter(&iter, src, count) < count)
return -EFAULT;
return 0;
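/*
 * A minimal sketch of the renamed iov_iter direction constants (hypothetical
 * helper, not part of these hunks; needs <linux/uio.h>): ITER_DEST marks
 * buffers that a copy lands in (the old READ value), ITER_SOURCE marks buffers
 * that a copy is taken from (the old WRITE value). Note the hunk above also
 * corrects a call site that had tagged a destination iterator with WRITE.
 */
static int read_into_kernel_buffer(void *dst, size_t count)
{
        struct kvec kvec = { .iov_base = dst, .iov_len = count };
        struct iov_iter iter;

        /* Data will be copied into 'dst', so the iterator is a destination. */
        iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
        /* ... hand 'iter' to whatever produces the data ... */
        return 0;
}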
struct iov_iter input;
int err;
- iov_iter_kvec(&input, WRITE, inputs, nr_inputs, src_total_len);
+ iov_iter_kvec(&input, ITER_SOURCE, inputs, nr_inputs, src_total_len);
err = build_test_sglist(&tsgls->src, cfg->src_divs, alignmask,
cfg->inplace_mode != OUT_OF_PLACE ?
max(dst_total_len, src_total_len) :
/* Generate a random length in range [0, max_len], but prefer smaller values */
static unsigned int generate_random_length(unsigned int max_len)
{
- unsigned int len = prandom_u32_max(max_len + 1);
+ unsigned int len = get_random_u32_below(max_len + 1);
- switch (prandom_u32_max(4)) {
+ switch (get_random_u32_below(4)) {
case 0:
return len % 64;
case 1:
{
size_t bitpos;
- bitpos = prandom_u32_max(size * 8);
+ bitpos = get_random_u32_below(size * 8);
buf[bitpos / 8] ^= 1 << (bitpos % 8);
}
/* Flip a random byte in the given nonempty data buffer */
static void flip_random_byte(u8 *buf, size_t size)
{
- buf[prandom_u32_max(size)] ^= 0xff;
+ buf[get_random_u32_below(size)] ^= 0xff;
}
/* Sometimes make some random changes to the given nonempty data buffer */
size_t i;
/* Sometimes flip some bits */
- if (prandom_u32_max(4) == 0) {
- num_flips = min_t(size_t, 1 << prandom_u32_max(8), size * 8);
+ if (get_random_u32_below(4) == 0) {
+ num_flips = min_t(size_t, 1 << get_random_u32_below(8), size * 8);
for (i = 0; i < num_flips; i++)
flip_random_bit(buf, size);
}
/* Sometimes flip some bytes */
- if (prandom_u32_max(4) == 0) {
- num_flips = min_t(size_t, 1 << prandom_u32_max(8), size);
+ if (get_random_u32_below(4) == 0) {
+ num_flips = min_t(size_t, 1 << get_random_u32_below(8), size);
for (i = 0; i < num_flips; i++)
flip_random_byte(buf, size);
}
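/*
 * Worked example of the flip-count expression above (a sketch, not from the
 * patch): get_random_u32_below(8) yields 0..7, so 1 << that is a power of two
 * in [1, 128]; min_t() then clamps the count to the buffer's byte count (or
 * bit count in the bit-flipping branch), keeping every flip in range.
 */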
if (count == 0)
return;
- switch (prandom_u32_max(8)) { /* Choose a generation strategy */
+ switch (get_random_u32_below(8)) { /* Choose a generation strategy */
case 0:
case 1:
/* All the same byte, plus optional mutations */
- switch (prandom_u32_max(4)) {
+ switch (get_random_u32_below(4)) {
case 0:
b = 0x00;
break;
unsigned int this_len;
const char *flushtype_str;
- if (div == &divs[max_divs - 1] || prandom_u32_max(2) == 0)
+ if (div == &divs[max_divs - 1] || get_random_u32_below(2) == 0)
this_len = remaining;
else
- this_len = 1 + prandom_u32_max(remaining);
+ this_len = get_random_u32_inclusive(1, remaining);
div->proportion_of_total = this_len;
- if (prandom_u32_max(4) == 0)
- div->offset = (PAGE_SIZE - 128) + prandom_u32_max(128);
- else if (prandom_u32_max(2) == 0)
- div->offset = prandom_u32_max(32);
+ if (get_random_u32_below(4) == 0)
+ div->offset = get_random_u32_inclusive(PAGE_SIZE - 128, PAGE_SIZE - 1);
+ else if (get_random_u32_below(2) == 0)
+ div->offset = get_random_u32_below(32);
else
- div->offset = prandom_u32_max(PAGE_SIZE);
- if (prandom_u32_max(8) == 0)
+ div->offset = get_random_u32_below(PAGE_SIZE);
+ if (get_random_u32_below(8) == 0)
div->offset_relative_to_alignmask = true;
div->flush_type = FLUSH_TYPE_NONE;
if (gen_flushes) {
- switch (prandom_u32_max(4)) {
+ switch (get_random_u32_below(4)) {
case 0:
div->flush_type = FLUSH_TYPE_REIMPORT;
break;
if (div->flush_type != FLUSH_TYPE_NONE &&
!(req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
- prandom_u32_max(2) == 0)
+ get_random_u32_below(2) == 0)
div->nosimd = true;
switch (div->flush_type) {
p += scnprintf(p, end - p, "random:");
- switch (prandom_u32_max(4)) {
+ switch (get_random_u32_below(4)) {
case 0:
case 1:
cfg->inplace_mode = OUT_OF_PLACE;
break;
}
- if (prandom_u32_max(2) == 0) {
+ if (get_random_u32_below(2) == 0) {
cfg->req_flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
p += scnprintf(p, end - p, " may_sleep");
}
- switch (prandom_u32_max(4)) {
+ switch (get_random_u32_below(4)) {
case 0:
cfg->finalization_type = FINALIZATION_TYPE_FINAL;
p += scnprintf(p, end - p, " use_final");
}
if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
- prandom_u32_max(2) == 0) {
+ get_random_u32_below(2) == 0) {
cfg->nosimd = true;
p += scnprintf(p, end - p, " nosimd");
}
cfg->req_flags);
p += scnprintf(p, end - p, "]");
- if (cfg->inplace_mode == OUT_OF_PLACE && prandom_u32_max(2) == 0) {
+ if (cfg->inplace_mode == OUT_OF_PLACE && get_random_u32_below(2) == 0) {
p += scnprintf(p, end - p, " dst_divs=[");
p = generate_random_sgl_divisions(cfg->dst_divs,
ARRAY_SIZE(cfg->dst_divs),
p += scnprintf(p, end - p, "]");
}
- if (prandom_u32_max(2) == 0) {
- cfg->iv_offset = 1 + prandom_u32_max(MAX_ALGAPI_ALIGNMASK);
+ if (get_random_u32_below(2) == 0) {
+ cfg->iv_offset = get_random_u32_inclusive(1, MAX_ALGAPI_ALIGNMASK);
p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset);
}
- if (prandom_u32_max(2) == 0) {
- cfg->key_offset = 1 + prandom_u32_max(MAX_ALGAPI_ALIGNMASK);
+ if (get_random_u32_below(2) == 0) {
+ cfg->key_offset = get_random_u32_inclusive(1, MAX_ALGAPI_ALIGNMASK);
p += scnprintf(p, end - p, " key_offset=%u", cfg->key_offset);
}
kv.iov_base = (void *)vec->plaintext;
kv.iov_len = vec->psize;
- iov_iter_kvec(&input, WRITE, &kv, 1, vec->psize);
+ iov_iter_kvec(&input, ITER_SOURCE, &kv, 1, vec->psize);
return build_test_sglist(tsgl, cfg->src_divs, alignmask, vec->psize,
&input, divs);
}
vec->ksize = 0;
if (maxkeysize) {
vec->ksize = maxkeysize;
- if (prandom_u32_max(4) == 0)
- vec->ksize = 1 + prandom_u32_max(maxkeysize);
+ if (get_random_u32_below(4) == 0)
+ vec->ksize = get_random_u32_inclusive(1, maxkeysize);
generate_random_bytes((u8 *)vec->key, vec->ksize);
vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key,
const unsigned int aad_tail_size = aad_iv ? ivsize : 0;
const unsigned int authsize = vec->clen - vec->plen;
- if (prandom_u32_max(2) == 0 && vec->alen > aad_tail_size) {
+ if (get_random_u32_below(2) == 0 && vec->alen > aad_tail_size) {
/* Mutate the AAD */
flip_random_bit((u8 *)vec->assoc, vec->alen - aad_tail_size);
- if (prandom_u32_max(2) == 0)
+ if (get_random_u32_below(2) == 0)
return;
}
- if (prandom_u32_max(2) == 0) {
+ if (get_random_u32_below(2) == 0) {
/* Mutate auth tag (assuming it's at the end of ciphertext) */
flip_random_bit((u8 *)vec->ctext + vec->plen, authsize);
} else {
const unsigned int ivsize = crypto_aead_ivsize(tfm);
const unsigned int authsize = vec->clen - vec->plen;
const bool inauthentic = (authsize >= MIN_COLLISION_FREE_AUTHSIZE) &&
- (prefer_inauthentic || prandom_u32_max(4) == 0);
+ (prefer_inauthentic || get_random_u32_below(4) == 0);
/* Generate the AAD. */
generate_random_bytes((u8 *)vec->assoc, vec->alen);
/* Avoid implementation-defined behavior. */
memcpy((u8 *)vec->assoc + vec->alen - ivsize, vec->iv, ivsize);
- if (inauthentic && prandom_u32_max(2) == 0) {
+ if (inauthentic && get_random_u32_below(2) == 0) {
/* Generate a random ciphertext. */
generate_random_bytes((u8 *)vec->ctext, vec->clen);
} else {
/* Key: length in [0, maxkeysize], but usually choose maxkeysize */
vec->klen = maxkeysize;
- if (prandom_u32_max(4) == 0)
- vec->klen = prandom_u32_max(maxkeysize + 1);
+ if (get_random_u32_below(4) == 0)
+ vec->klen = get_random_u32_below(maxkeysize + 1);
generate_random_bytes((u8 *)vec->key, vec->klen);
vec->setkey_error = crypto_aead_setkey(tfm, vec->key, vec->klen);
/* Tag length: in [0, maxauthsize], but usually choose maxauthsize */
authsize = maxauthsize;
- if (prandom_u32_max(4) == 0)
- authsize = prandom_u32_max(maxauthsize + 1);
+ if (get_random_u32_below(4) == 0)
+ authsize = get_random_u32_below(maxauthsize + 1);
if (prefer_inauthentic && authsize < MIN_COLLISION_FREE_AUTHSIZE)
authsize = MIN_COLLISION_FREE_AUTHSIZE;
if (WARN_ON(authsize > maxdatasize))
/* AAD, plaintext, and ciphertext lengths */
total_len = generate_random_length(maxdatasize);
- if (prandom_u32_max(4) == 0)
+ if (get_random_u32_below(4) == 0)
vec->alen = 0;
else
vec->alen = generate_random_length(total_len);
/* Key: length in [0, maxkeysize], but usually choose maxkeysize */
vec->klen = maxkeysize;
- if (prandom_u32_max(4) == 0)
- vec->klen = prandom_u32_max(maxkeysize + 1);
+ if (get_random_u32_below(4) == 0)
+ vec->klen = get_random_u32_below(maxkeysize + 1);
generate_random_bytes((u8 *)vec->key, vec->klen);
vec->setkey_error = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
ret = 0;
free_acpi_buffer:
- kfree(out_obj);
+ ACPI_FREE(out_obj);
return ret;
}
ret = 0;
free_acpi_buffer:
- kfree(out_obj);
+ ACPI_FREE(out_obj);
return ret;
}
ret = 0;
free_acpi_buffer:
- kfree(out_obj);
+ ACPI_FREE(out_obj);
return ret;
}
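/*
 * Sketch of the allocation pattern behind the kfree() -> ACPI_FREE() fix
 * (illustrative only, not taken from these hunks): objects returned through a
 * struct acpi_buffer initialised with ACPI_ALLOCATE_BUFFER come from the
 * ACPICA allocator, so they must be released with ACPI_FREE(), not kfree().
 */
static int evaluate_and_free_example(acpi_handle handle)
{
        struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *out_obj;
        acpi_status status;

        status = acpi_evaluate_object(handle, "_STA", NULL, &buf);
        if (ACPI_FAILURE(status))
                return -EIO;

        out_obj = buf.pointer;
        /* ... inspect out_obj ... */
        ACPI_FREE(out_obj);     /* not kfree(): ACPICA owns this allocation */
        return 0;
}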
iov.iov_base = (void __user *)buf;
iov.iov_len = len;
- iov_iter_init(&iter, WRITE, &iov, 1, len);
+ iov_iter_init(&iter, ITER_SOURCE, &iov, 1, len);
/* map the communication buffer */
phy_addr = (phys_addr_t)((buf_info.addr_hi << 32) | buf_info.addr_lo);
struct msghdr msg = {
.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
};
- iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, size);
+ iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, size);
return sock_recvmsg(sock, &msg, msg.msg_flags);
}
timeo = connect_int * HZ;
/* 28.5% random jitter */
- timeo += prandom_u32_max(2) ? timeo / 7 : -timeo / 7;
+ timeo += get_random_u32_below(2) ? timeo / 7 : -timeo / 7;
err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
if (err <= 0)
drbd_warn(connection, "Error receiving initial packet\n");
sock_release(s);
randomize:
- if (prandom_u32_max(2))
+ if (get_random_u32_below(2))
goto retry;
}
}
#include <linux/uaccess.h>
#include <linux/suspend.h>
#include <linux/siphash.h>
+#include <linux/sched/isolation.h>
#include <crypto/chacha.h>
#include <crypto/blake2s.h>
#include <asm/processor.h>
/* Various types of waiters for crng_init->CRNG_READY transition. */
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
static struct fasync_struct *fasync;
+static ATOMIC_NOTIFIER_HEAD(random_ready_notifier);
/* Control how we warn userspace. */
static struct ratelimit_state urandom_warning =
* Wait for the input pool to be seeded and thus guaranteed to supply
* cryptographically secure random numbers. This applies to: the /dev/urandom
* device, the get_random_bytes function, and the get_random_{u8,u16,u32,u64,
- * int,long} family of functions. Using any of these functions without first
+ * long} family of functions. Using any of these functions without first
* calling this function forfeits the guarantee of security.
*
* Returns: 0 if the input pool has been seeded.
}
EXPORT_SYMBOL(wait_for_random_bytes);
+/*
+ * Add a callback function that will be invoked when the crng is initialised,
+ * or immediately if it already has been. Only use this if you are absolutely
+ * sure it is required. Most users should instead be able to test
+ * `rng_is_initialized()` on demand, or make use of `get_random_bytes_wait()`.
+ */
+int __cold execute_with_initialized_rng(struct notifier_block *nb)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&random_ready_notifier.lock, flags);
+ if (crng_ready())
+ nb->notifier_call(nb, 0, NULL);
+ else
+ ret = raw_notifier_chain_register((struct raw_notifier_head *)&random_ready_notifier.head, nb);
+ spin_unlock_irqrestore(&random_ready_notifier.lock, flags);
+ return ret;
+}
+
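+/*
+ * Hypothetical consumer sketch (not part of this patch): defer work until the
+ * CRNG is seeded, or run it immediately if seeding already happened. Requires
+ * <linux/notifier.h> for struct notifier_block.
+ *
+ * static int demo_rng_ready(struct notifier_block *nb, unsigned long action, void *data)
+ * {
+ *         // get_random_bytes() and friends are now guaranteed to be seeded
+ *         return 0;
+ * }
+ *
+ * static struct notifier_block demo_rng_ready_nb = {
+ *         .notifier_call = demo_rng_ready,
+ * };
+ *
+ * ... and from an __init function: execute_with_initialized_rng(&demo_rng_ready_nb);
+ */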
#define warn_unseeded_randomness() \
if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
* u8 get_random_u8()
* u16 get_random_u16()
* u32 get_random_u32()
+ * u32 get_random_u32_below(u32 ceil)
+ * u32 get_random_u32_above(u32 floor)
+ * u32 get_random_u32_inclusive(u32 floor, u32 ceil)
* u64 get_random_u64()
* unsigned long get_random_long()
*
static struct {
u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
- unsigned long birth;
unsigned long generation;
spinlock_t lock;
} base_crng = {
.lock = INIT_LOCAL_LOCK(crngs.lock),
};
+/*
+ * Return the interval until the next reseeding, which is normally
+ * CRNG_RESEED_INTERVAL, but during early boot, it is at an interval
+ * proportional to the uptime.
+ */
+static unsigned int crng_reseed_interval(void)
+{
+ static bool early_boot = true;
+
+ if (unlikely(READ_ONCE(early_boot))) {
+ time64_t uptime = ktime_get_seconds();
+ if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
+ WRITE_ONCE(early_boot, false);
+ else
+ return max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
+ (unsigned int)uptime / 2 * HZ);
+ }
+ return CRNG_RESEED_INTERVAL;
+}
+
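+/*
+ * Worked example of the early-boot schedule above (derived from the code, not
+ * from the patch text): with 10 seconds of uptime the next reseed is queued
+ * uptime/2 = 5 seconds out (5 * HZ jiffies), never sooner than
+ * CRNG_RESEED_START_INTERVAL; once uptime reaches twice the steady-state
+ * interval in seconds, the function settles on CRNG_RESEED_INTERVAL for good.
+ */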
/* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */
static void extract_entropy(void *buf, size_t len);
/* This extracts a new crng key from the input pool. */
-static void crng_reseed(void)
+static void crng_reseed(struct work_struct *work)
{
+ static DECLARE_DELAYED_WORK(next_reseed, crng_reseed);
unsigned long flags;
unsigned long next_gen;
u8 key[CHACHA_KEY_SIZE];
+ /* Immediately schedule the next reseeding, so that it fires sooner rather than later. */
+ if (likely(system_unbound_wq))
+ queue_delayed_work(system_unbound_wq, &next_reseed, crng_reseed_interval());
+
extract_entropy(key, sizeof(key));
/*
if (next_gen == ULONG_MAX)
++next_gen;
WRITE_ONCE(base_crng.generation, next_gen);
- WRITE_ONCE(base_crng.birth, jiffies);
if (!static_branch_likely(&crng_is_ready))
crng_init = CRNG_READY;
spin_unlock_irqrestore(&base_crng.lock, flags);
memzero_explicit(first_block, sizeof(first_block));
}
-/*
- * Return the interval until the next reseeding, which is normally
- * CRNG_RESEED_INTERVAL, but during early boot, it is at an interval
- * proportional to the uptime.
- */
-static unsigned int crng_reseed_interval(void)
-{
- static bool early_boot = true;
-
- if (unlikely(READ_ONCE(early_boot))) {
- time64_t uptime = ktime_get_seconds();
- if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
- WRITE_ONCE(early_boot, false);
- else
- return max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
- (unsigned int)uptime / 2 * HZ);
- }
- return CRNG_RESEED_INTERVAL;
-}
-
/*
* This function returns a ChaCha state that you may use for generating
* random data. It also returns up to 32 bytes on its own of random data
return;
}
- /*
- * If the base_crng is old enough, we reseed, which in turn bumps the
- * generation counter that we check below.
- */
- if (unlikely(time_is_before_jiffies(READ_ONCE(base_crng.birth) + crng_reseed_interval())))
- crng_reseed();
-
local_lock_irqsave(&crngs.lock, flags);
crng = raw_cpu_ptr(&crngs);
}
/*
- * This function is the exported kernel interface. It returns some number of
- * good random numbers, suitable for key generation, seeding TCP sequence
- * numbers, etc. In order to ensure that the randomness returned by this
- * function is okay, the function wait_for_random_bytes() should be called and
- * return 0 at least once at any point prior.
+ * This returns random bytes in arbitrary quantities. The quality of the
+ * random bytes is as good as /dev/urandom. In order to ensure that the
+ * randomness provided by this function is okay, the function
+ * wait_for_random_bytes() should be called and return 0 at least once
+ * at any point prior.
*/
void get_random_bytes(void *buf, size_t len)
{
DEFINE_BATCHED_ENTROPY(u32)
DEFINE_BATCHED_ENTROPY(u64)
+u32 __get_random_u32_below(u32 ceil)
+{
+ /*
+ * This is the slow path for variable ceil. It is still fast, most of
+ * the time, by doing traditional reciprocal multiplication and
+ * opportunistically comparing the lower half to ceil itself, before
+ * falling back to computing a larger bound, and then rejecting samples
+ * whose lower half would indicate a range indivisible by ceil. The use
+ * of `-ceil % ceil` is analogous to `2^32 % ceil`, but is computable
+ * in 32-bits.
+ */
+ u32 rand = get_random_u32();
+ u64 mult;
+
+ /*
+ * This function is technically undefined for ceil == 0, and in fact
+ * for the non-underscored constant version in the header, we build bug
+ * on that. But for the non-constant case, it's convenient to have that
+ * evaluate to being a straight call to get_random_u32(), so that
+ * get_random_u32_inclusive() can work over its whole range without
+ * undefined behavior.
+ */
+ if (unlikely(!ceil))
+ return rand;
+
+ mult = (u64)ceil * rand;
+ if (unlikely((u32)mult < ceil)) {
+ u32 bound = -ceil % ceil;
+ while (unlikely((u32)mult < bound))
+ mult = (u64)ceil * get_random_u32();
+ }
+ return mult >> 32;
+}
+EXPORT_SYMBOL(__get_random_u32_below);
+
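+/*
+ * Worked example of the rejection step above (a sketch, not from the patch):
+ * for ceil == 6, -ceil % ceil == 2^32 % 6 == 4, so a sample is retried only
+ * when the low 32 bits of ceil * rand fall below 4. Discarding those leaves
+ * exactly floor(2^32 / 6) accepted values of rand for each of the six
+ * possible results of mult >> 32, so the output is unbiased.
+ *
+ * For reference, the related helpers cover these ranges:
+ *   get_random_u32_below(ceil)            -> [0, ceil)
+ *   get_random_u32_above(floor)           -> (floor, U32_MAX]
+ *   get_random_u32_inclusive(floor, ceil) -> [floor, ceil]
+ */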
#ifdef CONFIG_SMP
/*
* This function is called when the CPU is coming up, with entry
} while (!try_cmpxchg(&input_pool.init_bits, &orig, new));
if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
- crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
+ crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
if (static_key_initialized)
execute_in_process_context(crng_set_ready, &set_ready);
+ atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
pr_notice("crng init done\n");
* the above entropy accumulation routines:
*
* void add_device_randomness(const void *buf, size_t len);
- * void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
+ * void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after);
* void add_bootloader_randomness(const void *buf, size_t len);
* void add_vmfork_randomness(const void *unique_vm_id, size_t len);
* void add_interrupt_randomness(int irq);
*
* add_bootloader_randomness() is called by bootloader drivers, such as EFI
* and device tree, and credits its input depending on whether or not the
- * configuration option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
+ * command line option 'random.trust_bootloader' is set.
*
* add_vmfork_randomness() adds a unique (but not necessarily secret) ID
* representing the current instance of a VM to the pool, without crediting,
*
**********************************************************************/
-static bool trust_cpu __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
-static bool trust_bootloader __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
+static bool trust_cpu __initdata = true;
+static bool trust_bootloader __initdata = true;
static int __init parse_trust_cpu(char *arg)
{
return kstrtobool(arg, &trust_cpu);
if (crng_ready() && (action == PM_RESTORE_PREPARE ||
(action == PM_POST_SUSPEND && !IS_ENABLED(CONFIG_PM_AUTOSLEEP) &&
!IS_ENABLED(CONFIG_PM_USERSPACE_AUTOSLEEP)))) {
- crng_reseed();
+ crng_reseed(NULL);
pr_notice("crng reseeded on system resumption\n");
}
return 0;
#endif
for (i = 0, arch_bits = sizeof(entropy) * 8; i < ARRAY_SIZE(entropy);) {
- longs = arch_get_random_seed_longs_early(entropy, ARRAY_SIZE(entropy) - i);
+ longs = arch_get_random_seed_longs(entropy, ARRAY_SIZE(entropy) - i);
if (longs) {
_mix_pool_bytes(entropy, sizeof(*entropy) * longs);
i += longs;
continue;
}
- longs = arch_get_random_longs_early(entropy, ARRAY_SIZE(entropy) - i);
+ longs = arch_get_random_longs(entropy, ARRAY_SIZE(entropy) - i);
if (longs) {
_mix_pool_bytes(entropy, sizeof(*entropy) * longs);
i += longs;
/* Reseed if already seeded by earlier phases. */
if (crng_ready())
- crng_reseed();
+ crng_reseed(NULL);
else if (trust_cpu)
_credit_init_bits(arch_bits);
}
/* Reseed if already seeded by earlier phases. */
if (crng_ready())
- crng_reseed();
+ crng_reseed(NULL);
WARN_ON(register_pm_notifier(&pm_notifier));
EXPORT_SYMBOL(add_device_randomness);
/*
- * Interface for in-kernel drivers of true hardware RNGs.
- * Those devices may produce endless random bits and will be throttled
- * when our pool is full.
+ * Interface for in-kernel drivers of true hardware RNGs. Those devices
+ * may produce endless random bits, so this function will sleep for
+ * some amount of time afterwards, if the sleep_after parameter is true.
*/
-void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy)
+void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after)
{
mix_pool_bytes(buf, len);
credit_init_bits(entropy);
* Throttle writing to once every reseed interval, unless we're not yet
* initialized or no entropy is credited.
*/
- if (!kthread_should_stop() && (crng_ready() || !entropy))
+ if (sleep_after && !kthread_should_stop() && (crng_ready() || !entropy))
schedule_timeout_interruptible(crng_reseed_interval());
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
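/*
 * Hypothetical caller sketch (not from this patch): a hw_random style driver
 * feeding the pool. Passing sleep_after = true keeps the old throttled
 * behaviour; a caller that paces itself can pass false and return at once.
 *
 *      add_hwgenerator_randomness(buf, len, entropy_bits, true);
 */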
/*
- * Handle random seed passed by bootloader, and credit it if
- * CONFIG_RANDOM_TRUST_BOOTLOADER is set.
+ * Handle random seed passed by bootloader, and credit it depending
+ * on the command line option 'random.trust_bootloader'.
*/
void __init add_bootloader_randomness(const void *buf, size_t len)
{
{
add_device_randomness(unique_vm_id, len);
if (crng_ready()) {
- crng_reseed();
+ crng_reseed(NULL);
pr_notice("crng reseeded due to virtual machine fork\n");
}
blocking_notifier_call_chain(&vmfork_chain, 0, NULL);
struct entropy_timer_state {
unsigned long entropy;
struct timer_list timer;
- unsigned int samples, samples_per_bit;
+ atomic_t samples;
+ unsigned int samples_per_bit;
};
/*
- * Each time the timer fires, we expect that we got an unpredictable
- * jump in the cycle counter. Even if the timer is running on another
- * CPU, the timer activity will be touching the stack of the CPU that is
- * generating entropy..
+ * Each time the timer fires, we expect that we got an unpredictable jump in
+ * the cycle counter. Even if the timer is running on another CPU, the timer
+ * activity will be touching the stack of the CPU that is generating entropy.
*
- * Note that we don't re-arm the timer in the timer itself - we are
- * happy to be scheduled away, since that just makes the load more
- * complex, but we do not want the timer to keep ticking unless the
- * entropy loop is running.
+ * Note that we don't re-arm the timer in the timer itself - we are happy to be
+ * scheduled away, since that just makes the load more complex, but we do not
+ * want the timer to keep ticking unless the entropy loop is running.
*
* So the re-arming always happens in the entropy loop itself.
*/
static void __cold entropy_timer(struct timer_list *timer)
{
struct entropy_timer_state *state = container_of(timer, struct entropy_timer_state, timer);
+ unsigned long entropy = random_get_entropy();
- if (++state->samples == state->samples_per_bit) {
+ mix_pool_bytes(&entropy, sizeof(entropy));
+ if (atomic_inc_return(&state->samples) % state->samples_per_bit == 0)
credit_init_bits(1);
- state->samples = 0;
- }
}
/*
- * If we have an actual cycle counter, see if we can
- * generate enough entropy with timing noise
+ * If we have an actual cycle counter, see if we can generate enough entropy
+ * with timing noise.
*/
static void __cold try_to_generate_entropy(void)
{
enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = HZ / 15 };
- struct entropy_timer_state stack;
+ u8 stack_bytes[sizeof(struct entropy_timer_state) + SMP_CACHE_BYTES - 1];
+ struct entropy_timer_state *stack = PTR_ALIGN((void *)stack_bytes, SMP_CACHE_BYTES);
unsigned int i, num_different = 0;
unsigned long last = random_get_entropy();
+ int cpu = -1;
for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) {
- stack.entropy = random_get_entropy();
- if (stack.entropy != last)
+ stack->entropy = random_get_entropy();
+ if (stack->entropy != last)
++num_different;
- last = stack.entropy;
+ last = stack->entropy;
}
- stack.samples_per_bit = DIV_ROUND_UP(NUM_TRIAL_SAMPLES, num_different + 1);
- if (stack.samples_per_bit > MAX_SAMPLES_PER_BIT)
+ stack->samples_per_bit = DIV_ROUND_UP(NUM_TRIAL_SAMPLES, num_different + 1);
+ if (stack->samples_per_bit > MAX_SAMPLES_PER_BIT)
return;
- stack.samples = 0;
- timer_setup_on_stack(&stack.timer, entropy_timer, 0);
+ atomic_set(&stack->samples, 0);
+ timer_setup_on_stack(&stack->timer, entropy_timer, 0);
while (!crng_ready() && !signal_pending(current)) {
- if (!timer_pending(&stack.timer))
- mod_timer(&stack.timer, jiffies);
- mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
+ /*
+ * Check !timer_pending() and then ensure that any previous callback has finished
+ * executing by checking try_to_del_timer_sync(), before queueing the next one.
+ */
+ if (!timer_pending(&stack->timer) && try_to_del_timer_sync(&stack->timer) >= 0) {
+ struct cpumask timer_cpus;
+ unsigned int num_cpus;
+
+ /*
+ * Preemption must be disabled here, both to read the current CPU number
+ * and to avoid scheduling a timer on a dead CPU.
+ */
+ preempt_disable();
+
+ /* Only schedule callbacks on timer CPUs that are online. */
+ cpumask_and(&timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
+ num_cpus = cpumask_weight(&timer_cpus);
+ /* In the very bizarre case of misconfiguration, fall back to all online. */
+ if (unlikely(num_cpus == 0)) {
+ timer_cpus = *cpu_online_mask;
+ num_cpus = cpumask_weight(&timer_cpus);
+ }
+
+ /* Basic CPU round-robin, which avoids the current CPU. */
+ do {
+ cpu = cpumask_next(cpu, &timer_cpus);
+ if (cpu == nr_cpumask_bits)
+ cpu = cpumask_first(&timer_cpus);
+ } while (cpu == smp_processor_id() && num_cpus > 1);
+
+ /* Expiring the timer at `jiffies` means it's the next tick. */
+ stack->timer.expires = jiffies;
+
+ add_timer_on(&stack->timer, cpu);
+
+ preempt_enable();
+ }
+ mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
schedule();
- stack.entropy = random_get_entropy();
+ stack->entropy = random_get_entropy();
}
+ mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
- del_timer_sync(&stack.timer);
- destroy_timer_on_stack(&stack.timer);
- mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
+ del_timer_sync(&stack->timer);
+ destroy_timer_on_stack(&stack->timer);
}
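/*
 * Worked note on the stack_bytes/PTR_ALIGN arrangement above (derived from the
 * code): the array is oversized by SMP_CACHE_BYTES - 1, exactly the slack
 * PTR_ALIGN() needs to round the pointer up to the next cache-line boundary
 * while staying inside the array, so the timer state that remote CPUs now
 * touch from entropy_timer() starts on a cache-line boundary.
 */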
return ret;
}
- ret = import_single_range(READ, ubuf, len, &iov, &iter);
+ ret = import_single_range(ITER_DEST, ubuf, len, &iov, &iter);
if (unlikely(ret))
return ret;
return get_random_bytes_user(&iter);
return -EINVAL;
if (get_user(len, p++))
return -EFAULT;
- ret = import_single_range(WRITE, p, len, &iov, &iter);
+ ret = import_single_range(ITER_SOURCE, p, len, &iov, &iter);
if (unlikely(ret))
return ret;
ret = write_pool_user(&iter);
return -EPERM;
if (!crng_ready())
return -ENODATA;
- crng_reseed();
+ crng_reseed(NULL);
return 0;
default:
return -EINVAL;
refcount_set(&req->ref, 1);
req->mp_policy = clt_path->clt->mp_policy;
- iov_iter_kvec(&iter, READ, vec, 1, usr_len);
+ iov_iter_kvec(&iter, ITER_SOURCE, vec, 1, usr_len);
len = _copy_from_iter(req->iu->buf, usr_len, &iter);
WARN_ON(len != usr_len);
rtrs_clt_stop_and_destroy_conns(clt_path);
queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
msecs_to_jiffies(delay_ms +
- prandom_u32_max(RTRS_RECONNECT_SEED)));
+ get_random_u32_below(RTRS_RECONNECT_SEED)));
}
static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt,
kvec.iov_base = dst;
kvec.iov_len = count;
- iov_iter_kvec(&iter, WRITE, &kvec, 1, count);
+ iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
if (memcpy_hsa_iter(&iter, src, count) < count)
return -EIO;
return 0;
TRACE("type: nvme\n");
TRACE("fid: %x\n", ipl_info.data.nvme.fid);
TRACE("nsid: %x\n", ipl_info.data.nvme.nsid);
+ } else if (ipl_info.type == IPL_TYPE_ECKD_DUMP) {
+ TRACE("type: eckd\n");
+ TRACE("devno: %x\n", ipl_info.data.eckd.dev_id.devno);
+ TRACE("ssid: %x\n", ipl_info.data.eckd.dev_id.ssid);
}
rc = sclp_sdias_init();
}
}
+ btrfs_free_path(path);
+ path = NULL;
if (copy_to_user(argp, subvol_info, sizeof(*subvol_info)))
ret = -EFAULT;
}
out:
+ btrfs_free_path(path);
+
if (!ret || ret == -EOVERFLOW) {
rootrefs->num_items = found;
/* update min_treeid for next search */
}
kfree(rootrefs);
- btrfs_free_path(path);
return ret;
}
ipath->fspath->val[i] = rel_ptr;
}
+ btrfs_free_path(path);
+ path = NULL;
ret = copy_to_user((void __user *)(unsigned long)ipa->fspath,
ipath->fspath, size);
if (ret) {
size = min_t(u32, loi->size, SZ_16M);
}
- path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
-
inodes = init_data_container(size);
if (IS_ERR(inodes)) {
ret = PTR_ERR(inodes);
- inodes = NULL;
- goto out;
+ goto out_loi;
}
+ path = btrfs_alloc_path();
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
ret = iterate_inodes_from_logical(loi->logical, fs_info, path,
inodes, ignore_offset);
+ btrfs_free_path(path);
if (ret == -EINVAL)
ret = -ENOENT;
if (ret < 0)
ret = -EFAULT;
out:
- btrfs_free_path(path);
kvfree(inodes);
out_loi:
kfree(loi);
goto out_acct;
}
- ret = import_iovec(READ, args.iov, args.iovcnt, ARRAY_SIZE(iovstack),
+ ret = import_iovec(ITER_DEST, args.iov, args.iovcnt, ARRAY_SIZE(iovstack),
&iov, &iter);
if (ret < 0)
goto out_acct;
if (args.len > args.unencoded_len - args.unencoded_offset)
goto out_acct;
- ret = import_iovec(WRITE, args.iov, args.iovcnt, ARRAY_SIZE(iovstack),
+ ret = import_iovec(ITER_SOURCE, args.iov, args.iovcnt, ARRAY_SIZE(iovstack),
&iov, &iter);
if (ret < 0)
goto out_acct;
struct inode *inode = d_inode(cfile->dentry);
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
struct file_lock *flock;
- struct file_lock_context *flctx = inode->i_flctx;
+ struct file_lock_context *flctx = locks_inode_context(inode);
unsigned int count = 0, i;
int rc = 0, xid, type;
struct list_head locks_to_send, *el;
ctx->iter = *from;
ctx->len = len;
} else {
- rc = setup_aio_ctx_iter(ctx, from, WRITE);
+ rc = setup_aio_ctx_iter(ctx, from, ITER_SOURCE);
if (rc) {
kref_put(&ctx->refcount, cifs_aio_ctx_release);
return rc;
ctx->iter = *to;
ctx->len = len;
} else {
- rc = setup_aio_ctx_iter(ctx, to, READ);
+ rc = setup_aio_ctx_iter(ctx, to, ITER_DEST);
if (rc) {
kref_put(&ctx->refcount, cifs_aio_ctx_release);
return rc;
err = cn_printf(cn, "%lu",
rlimit(RLIMIT_CORE));
break;
+ /* CPU the task ran on */
+ case 'C':
+ err = cn_printf(cn, "%d", cprm->cpu);
+ break;
default:
break;
}
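/*
 * Usage sketch for the new specifier (illustrative pattern, not from the
 * patch): with coredump_params now recording the dumping CPU (see the cprm
 * initializer below), a core_pattern such as
 *
 *      echo 'core.%e.%p.%C' > /proc/sys/kernel/core_pattern
 *
 * appends the CPU the task ran on after the executable name (%e) and pid (%p).
 */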
static atomic_t core_dump_count = ATOMIC_INIT(0);
struct coredump_params cprm = {
.siginfo = siginfo,
- .regs = signal_pt_regs(),
.limit = rlimit(RLIMIT_CORE),
/*
* We must use the same mm->flags while dumping core to avoid
*/
.mm_flags = mm->flags,
.vma_meta = NULL,
+ .cpu = raw_smp_processor_id(),
};
audit_core_dumps(siginfo->si_signo);
if (dump_interrupted())
return 0;
pos = file->f_pos;
- iov_iter_bvec(&iter, WRITE, &bvec, 1, PAGE_SIZE);
+ iov_iter_bvec(&iter, ITER_SOURCE, &bvec, 1, PAGE_SIZE);
n = __kernel_write_iter(cprm->file, &iter, &pos);
if (n != PAGE_SIZE)
return 0;
ret = vfs_copy_file_range(src, src_pos, dst, dst_pos, count, 0);
if (ret == -EOPNOTSUPP || ret == -EXDEV)
- ret = generic_copy_file_range(src, src_pos, dst, dst_pos,
- count, 0);
+ ret = vfs_copy_file_range(src, src_pos, dst, dst_pos, count,
+ COPY_FILE_SPLICE);
return ret;
}
struct svc_rqst *rqstp = sd->u.data;
struct page *page = buf->page; // may be a compound one
unsigned offset = buf->offset;
+ struct page *last_page;
- page += offset / PAGE_SIZE;
- for (int i = sd->len; i > 0; i -= PAGE_SIZE)
- svc_rqst_replace_page(rqstp, page++);
+ last_page = page + (offset + sd->len - 1) / PAGE_SIZE;
+ for (page += offset / PAGE_SIZE; page <= last_page; page++)
+ svc_rqst_replace_page(rqstp, page);
if (rqstp->rq_res.page_len == 0) // first call
rqstp->rq_res.page_base = offset % PAGE_SIZE;
rqstp->rq_res.page_len += sd->len;
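/*
 * Worked example for the loop bound fix above (assuming 4 KiB pages): with
 * offset = 3584 and sd->len = 1024 the data straddles two pages. The old
 * "for (i = sd->len; i > 0; i -= PAGE_SIZE)" loop ran once and replaced only
 * the first page; the new last_page = page + (3584 + 1024 - 1) / 4096 =
 * page + 1 makes the loop visit both pages that the buffer actually covers.
 */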
ssize_t host_err;
trace_nfsd_read_vector(rqstp, fhp, offset, *count);
- iov_iter_kvec(&iter, READ, vec, vlen, *count);
+ iov_iter_kvec(&iter, ITER_DEST, vec, vlen, *count);
host_err = vfs_iter_read(file, &iter, &ppos, 0);
return nfsd_finish_read(rqstp, fhp, file, offset, count, eof, host_err);
}
if (stable && !use_wgather)
flags |= RWF_SYNC;
- iov_iter_kvec(&iter, WRITE, vec, vlen, *cnt);
+ iov_iter_kvec(&iter, ITER_SOURCE, vec, vlen, *cnt);
since = READ_ONCE(file->f_wb_err);
if (verf)
nfsd_copy_write_verifier(verf, nn);
{
struct kvec vec = { .iov_len = len, .iov_base = data, };
struct msghdr msg = { .msg_flags = MSG_DONTWAIT, };
- iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, len);
+ iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1, len);
return sock_recvmsg(sock, &msg, MSG_DONTWAIT);
}
}
/* Get a map of all nodes to which this node is currently connected to */
-void o2net_fill_node_map(unsigned long *map, unsigned bytes)
+void o2net_fill_node_map(unsigned long *map, unsigned int bits)
{
struct o2net_sock_container *sc;
int node, ret;
- BUG_ON(bytes < (BITS_TO_LONGS(O2NM_MAX_NODES) * sizeof(unsigned long)));
-
- memset(map, 0, bytes);
+ bitmap_zero(map, bits);
for (node = 0; node < O2NM_MAX_NODES; ++node) {
if (!o2net_tx_can_proceed(o2net_nn_from_num(node), &sc, &ret))
continue;
struct kvec kvec = { .iov_base = buf, .iov_len = count };
struct iov_iter iter;
- iov_iter_kvec(&iter, READ, &kvec, 1, count);
+ iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
return read_from_oldmem(&iter, count, ppos, false);
}
struct kvec kvec = { .iov_base = buf, .iov_len = count };
struct iov_iter iter;
- iov_iter_kvec(&iter, READ, &kvec, 1, count);
+ iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
return read_from_oldmem(&iter, count, ppos,
cc_platform_has(CC_ATTR_MEM_ENCRYPT));
offset = (loff_t) index << PAGE_SHIFT;
kvec.iov_base = page_address(page);
kvec.iov_len = PAGE_SIZE;
- iov_iter_kvec(&iter, READ, &kvec, 1, PAGE_SIZE);
+ iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, PAGE_SIZE);
rc = __read_vmcore(&iter, &offset);
if (rc < 0) {
return rc;
rc = parse_crash_elf_headers();
if (rc) {
+ elfcorehdr_free(elfcorehdr_addr);
pr_warn("Kdump: vmcore not initialized\n");
return rc;
}
init_sync_kiocb(&kiocb, filp);
kiocb.ki_pos = (ppos ? *ppos : 0);
- iov_iter_ubuf(&iter, READ, buf, len);
+ iov_iter_ubuf(&iter, ITER_DEST, buf, len);
ret = call_read_iter(filp, &kiocb, &iter);
BUG_ON(ret == -EIOCBQUEUED);
init_sync_kiocb(&kiocb, file);
kiocb.ki_pos = pos ? *pos : 0;
- iov_iter_kvec(&iter, READ, &iov, 1, iov.iov_len);
+ iov_iter_kvec(&iter, ITER_DEST, &iov, 1, iov.iov_len);
ret = file->f_op->read_iter(&kiocb, &iter);
if (ret > 0) {
if (pos)
init_sync_kiocb(&kiocb, filp);
kiocb.ki_pos = (ppos ? *ppos : 0);
- iov_iter_ubuf(&iter, WRITE, (void __user *)buf, len);
+ iov_iter_ubuf(&iter, ITER_SOURCE, (void __user *)buf, len);
ret = call_write_iter(filp, &kiocb, &iter);
BUG_ON(ret == -EIOCBQUEUED);
.iov_len = min_t(size_t, count, MAX_RW_COUNT),
};
struct iov_iter iter;
- iov_iter_kvec(&iter, WRITE, &iov, 1, iov.iov_len);
+ iov_iter_kvec(&iter, ITER_SOURCE, &iov, 1, iov.iov_len);
return __kernel_write_iter(file, &iter, pos);
}
/*
struct iov_iter iter;
ssize_t ret;
- ret = import_iovec(READ, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
+ ret = import_iovec(ITER_DEST, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
if (ret >= 0) {
ret = do_iter_read(file, &iter, pos, flags);
kfree(iov);
struct iov_iter iter;
ssize_t ret;
- ret = import_iovec(WRITE, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
+ ret = import_iovec(ITER_SOURCE, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
if (ret >= 0) {
file_start_write(file);
ret = do_iter_write(file, &iter, pos, flags);
struct file *file_out, loff_t pos_out,
size_t len, unsigned int flags)
{
+ lockdep_assert(sb_write_started(file_inode(file_out)->i_sb));
+
return do_splice_direct(file_in, &pos_in, file_out, &pos_out,
len > MAX_RW_COUNT ? MAX_RW_COUNT : len, 0);
}
* and several different sets of file_operations, but they all end up
* using the same ->copy_file_range() function pointer.
*/
- if (file_out->f_op->copy_file_range) {
+ if (flags & COPY_FILE_SPLICE) {
+ /* cross sb splice is allowed */
+ } else if (file_out->f_op->copy_file_range) {
if (file_in->f_op->copy_file_range !=
file_out->f_op->copy_file_range)
return -EXDEV;
size_t len, unsigned int flags)
{
ssize_t ret;
+ bool splice = flags & COPY_FILE_SPLICE;
- if (flags != 0)
+ if (flags & ~COPY_FILE_SPLICE)
return -EINVAL;
ret = generic_copy_file_checks(file_in, pos_in, file_out, pos_out, &len,
* same sb using clone, but for filesystems where both clone and copy
* are supported (e.g. nfs,cifs), we only call the copy method.
*/
- if (file_out->f_op->copy_file_range) {
+ if (!splice && file_out->f_op->copy_file_range) {
ret = file_out->f_op->copy_file_range(file_in, pos_in,
file_out, pos_out,
len, flags);
goto done;
}
- if (file_in->f_op->remap_file_range &&
+ if (!splice && file_in->f_op->remap_file_range &&
file_inode(file_in)->i_sb == file_inode(file_out)->i_sb) {
ret = file_in->f_op->remap_file_range(file_in, pos_in,
file_out, pos_out,
* consistent story about which filesystems support copy_file_range()
* and which filesystems do not, that will allow userspace tools to
* make consistent decisions w.r.t. using copy_file_range().
+ *
+ * We also get here if the caller (e.g. nfsd) requested COPY_FILE_SPLICE.
*/
ret = generic_copy_file_range(file_in, pos_in, file_out, pos_out, len,
flags);
pos_out = f_out.file->f_pos;
}
+ ret = -EINVAL;
+ if (flags != 0)
+ goto out;
+
ret = vfs_copy_file_range(f_in.file, pos_in, f_out.file, pos_out, len,
flags);
if (ret > 0) {
group = current_user_event_group();
- if (!group)
+ if (!group) {
+ kfree(name);
return -ENOENT;
+ }
mutex_lock(&group->reg_mutex);
if (unlikely(*ppos != 0))
return -EFAULT;
- if (unlikely(import_single_range(WRITE, (char __user *)ubuf,
+ if (unlikely(import_single_range(ITER_SOURCE, (char __user *)ubuf,
count, &iov, &i)))
return -EFAULT;
* Application no longer needs these pages. If the pages are dirty,
* it's OK to just throw them away. The app will be more careful about
* data it wants to keep. Be sure to free swap resources too. The
- * zap_page_range call sets things up for shrink_active_list to actually free
- * these pages later if no one else has touched them in the meantime,
+ * zap_page_range_single call sets things up for shrink_active_list to actually
+ * free these pages later if no one else has touched them in the meantime,
* although we could add these pages to a global reuse list for
* shrink_active_list to pick up before reclaiming other pages.
*
static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- zap_page_range(vma, start, end - start);
+ zap_page_range_single(vma, start, end - start, NULL);
return 0;
}
goto out;
}
- ret = import_iovec(READ, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
+ ret = import_iovec(ITER_DEST, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
if (ret < 0)
goto out;
iv.iov_len = skb->len;
memset(&msg, 0, sizeof(msg));
- iov_iter_kvec(&msg.msg_iter, WRITE, &iv, 1, skb->len);
+ iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iv, 1, skb->len);
err = l2cap_chan_send(chan, &msg, skb->len);
if (err > 0) {
hci_dev_lock(hdev);
hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type);
hci_dev_unlock(hdev);
+ hci_dev_put(hdev);
if (!hcon)
return -ENOENT;
if (copy_address != zc->copybuf_address)
return -EINVAL;
- err = import_single_range(READ, (void __user *)copy_address,
+ err = import_single_range(ITER_DEST, (void __user *)copy_address,
inq, &iov, &msg.msg_iter);
if (err)
return err;
if (copy_address != zc->copybuf_address)
return -EINVAL;
- err = import_single_range(READ, (void __user *)copy_address,
+ err = import_single_range(ITER_DEST, (void __user *)copy_address,
copylen, &iov, &msg.msg_iter);
if (err)
return err;
inet->inet_dport = 0;
- if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
- inet_reset_saddr(sk);
+ inet_bhash2_reset_saddr(sk);
sk->sk_shutdown = 0;
sock_reset_flag(sk, SOCK_DONE);
xs_read_kvec(struct socket *sock, struct msghdr *msg, int flags,
struct kvec *kvec, size_t count, size_t seek)
{
- iov_iter_kvec(&msg->msg_iter, READ, kvec, 1, count);
+ iov_iter_kvec(&msg->msg_iter, ITER_DEST, kvec, 1, count);
return xs_sock_recvmsg(sock, msg, flags, seek);
}
struct bio_vec *bvec, unsigned long nr, size_t count,
size_t seek)
{
- iov_iter_bvec(&msg->msg_iter, READ, bvec, nr, count);
+ iov_iter_bvec(&msg->msg_iter, ITER_DEST, bvec, nr, count);
return xs_sock_recvmsg(sock, msg, flags, seek);
}
xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
size_t count)
{
- iov_iter_discard(&msg->msg_iter, READ, count);
+ iov_iter_discard(&msg->msg_iter, ITER_DEST, count);
return sock_recvmsg(sock, msg, flags);
}
if (max < min)
return -EADDRINUSE;
range = max - min + 1;
- rand = prandom_u32_max(range);
+ rand = get_random_u32_below(range);
return rand + min;
}
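/*
 * Equivalence note (a sketch, not from the patch): with range = max - min + 1,
 * get_random_u32_below(range) + min picks uniformly from [min, max], the same
 * distribution the new get_random_u32_inclusive(min, max) helper provides;
 * e.g. min = 49152, max = 65535 gives range = 16384 and a result in
 * [49152, 65535].
 */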
conn_put(con);
}
-static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s)
+static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s, struct socket *sock)
{
struct tipc_conn *con;
int ret;
}
con->conid = ret;
s->idr_in_use++;
- spin_unlock_bh(&s->idr_lock);
set_bit(CF_CONNECTED, &con->flags);
con->server = s;
+ con->sock = sock;
+ conn_get(con);
+ spin_unlock_bh(&s->idr_lock);
return con;
}
iov.iov_base = &s;
iov.iov_len = sizeof(s);
msg.msg_name = NULL;
- iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, iov.iov_len);
+ iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, iov.iov_len);
ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
if (ret == -EWOULDBLOCK)
return -EWOULDBLOCK;
ret = kernel_accept(lsock, &newsock, O_NONBLOCK);
if (ret < 0)
return;
- con = tipc_conn_alloc(srv);
+ con = tipc_conn_alloc(srv, newsock);
if (IS_ERR(con)) {
ret = PTR_ERR(con);
sock_release(newsock);
newsk->sk_data_ready = tipc_conn_data_ready;
newsk->sk_write_space = tipc_conn_write_space;
newsk->sk_user_data = con;
- con->sock = newsock;
write_unlock_bh(&newsk->sk_callback_lock);
/* Wake up receive process in case of 'SYN+' message */
newsk->sk_data_ready(newsk);
+ conn_put(con);
}
}
sub.filter = filter;
*(u64 *)&sub.usr_handle = (u64)port;
- con = tipc_conn_alloc(tipc_topsrv(net));
+ con = tipc_conn_alloc(tipc_topsrv(net), NULL);
if (IS_ERR(con))
return false;
*conid = con->conid;
- con->sock = NULL;
rc = tipc_conn_rcv_sub(tipc_topsrv(net), con, &sub);
- if (rc >= 0)
- return true;
+ if (rc)
+ conn_put(con);
+
conn_put(con);
- return false;
+ return !rc;
}
void tipc_topsrv_kern_unsubscr(struct net *net, int conid)