* the bit offsets of all zero areas this function finds are multiples of that
* power of 2. A @align_mask of 0 means no alignment is required.
*/
- static inline unsigned long
- bitmap_find_next_zero_area(unsigned long *map,
- unsigned long size,
- unsigned long start,
- unsigned int nr,
- unsigned long align_mask)
+ static __always_inline
+ unsigned long bitmap_find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask)
{
return bitmap_find_next_zero_area_off(map, size, start, nr,
align_mask, 0);
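
As a hedged aside on the @align_mask convention documented above: passing one less than a power of two constrains where the zero run may start. A minimal caller sketch (the helper name, sizes and error handling are illustrative, not part of this patch):

#include <linux/bitmap.h>
#include <linux/errno.h>

/* Reserve 8 clear bits whose offset is a multiple of 4 (align_mask = 4 - 1). */
static int example_reserve_aligned(unsigned long *map, unsigned long size,
				   unsigned long *out)
{
	unsigned long pos = bitmap_find_next_zero_area(map, size, 0, 8, 4 - 1);

	if (pos >= size)
		return -ENOSPC;		/* no suitable zero area found */
	bitmap_set(map, pos, 8);	/* claim the area */
	*out = pos;
	return 0;
}
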
#define bitmap_size(nbits) (ALIGN(nbits, BITS_PER_LONG) / BITS_PER_BYTE)
- static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
+ static __always_inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
unsigned int len = bitmap_size(nbits);
memset(dst, 0, len);
}
- static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
+ static __always_inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
unsigned int len = bitmap_size(nbits);
memset(dst, 0xff, len);
}
- static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
- unsigned int nbits)
+ static __always_inline
+ void bitmap_copy(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
unsigned int len = bitmap_size(nbits);
/*
* Copy bitmap and clear tail bits in last word.
*/
- static inline void bitmap_copy_clear_tail(unsigned long *dst,
- const unsigned long *src, unsigned int nbits)
+ static __always_inline
+ void bitmap_copy_clear_tail(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
bitmap_copy(dst, src, nbits);
if (nbits % BITS_PER_LONG)
dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits);
}
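
A quick sketch of the tail-clearing behaviour noted in the comment above (the 18-bit width is an arbitrary example): even if the source's last word has bits set beyond @nbits, they do not leak into the destination.

static void example_clear_tail(void)
{
	DECLARE_BITMAP(src, 18);
	DECLARE_BITMAP(dst, 18);

	bitmap_fill(src, 18);			/* sets the whole last word */
	bitmap_copy_clear_tail(dst, src, 18);	/* dst keeps bits 0..17, tail is 0 */
}
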
+static inline void bitmap_copy_and_extend(unsigned long *to,
+ const unsigned long *from,
+ unsigned int count, unsigned int size)
+{
+ unsigned int copy = BITS_TO_LONGS(count);
+
+ memcpy(to, from, copy * sizeof(long));
+ if (count % BITS_PER_LONG)
+ to[copy - 1] &= BITMAP_LAST_WORD_MASK(count);
+ memset(to + copy, 0, bitmap_size(size) - copy * sizeof(long));
+}
+
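
A usage sketch for the newly added helper (the sizes are made up for illustration): the first @count bits are copied, the tail of the last copied word is cleared, and the rest of the @size-bit destination is zero-filled.

static void example_copy_and_extend(void)
{
	DECLARE_BITMAP(small, 20);
	DECLARE_BITMAP(big, 128);

	bitmap_fill(small, 20);
	/* big bits 0..19 <- small bits 0..19, big bits 20..127 <- 0 */
	bitmap_copy_and_extend(big, small, 20, 128);
}
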
/*
* On 32-bit systems bitmaps are represented as u32 arrays internally. On LE64
* machines the order of hi and lo parts of numbers matches the bitmap structure.
bitmap_copy_clear_tail((unsigned long *)(buf), (const unsigned long *)(bitmap), (nbits))
#endif
- static inline bool bitmap_and(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+ static __always_inline
+ bool bitmap_and(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0;
return __bitmap_and(dst, src1, src2, nbits);
}
- static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+ static __always_inline
+ void bitmap_or(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src1 | *src2;
__bitmap_or(dst, src1, src2, nbits);
}
- static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+ static __always_inline
+ void bitmap_xor(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src1 ^ *src2;
__bitmap_xor(dst, src1, src2, nbits);
}
- static inline bool bitmap_andnot(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+ static __always_inline
+ bool bitmap_andnot(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return (*dst = *src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
return __bitmap_andnot(dst, src1, src2, nbits);
}
- static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
- unsigned int nbits)
+ static __always_inline
+ void bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = ~(*src);
#endif
#define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
- static inline bool bitmap_equal(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+ static __always_inline
+ bool bitmap_equal(const unsigned long *src1, const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
*
* Returns: True if (*@src1 | *@src2) == *@src3, false otherwise
*/
- static inline bool bitmap_or_equal(const unsigned long *src1,
- const unsigned long *src2,
- const unsigned long *src3,
- unsigned int nbits)
+ static __always_inline
+ bool bitmap_or_equal(const unsigned long *src1, const unsigned long *src2,
+ const unsigned long *src3, unsigned int nbits)
{
if (!small_const_nbits(nbits))
return __bitmap_or_equal(src1, src2, src3, nbits);
return !(((*src1 | *src2) ^ *src3) & BITMAP_LAST_WORD_MASK(nbits));
}
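
For illustration of the return convention (values chosen arbitrarily): with src1 = 0x5 and src2 = 0x2, their OR equals 0x7, so the check succeeds.

static bool example_or_equal(void)
{
	unsigned long a = 0x5, b = 0x2, c = 0x7;

	/* (a | b) == c within the low 4 bits, so this returns true. */
	return bitmap_or_equal(&a, &b, &c, 4);
}
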
- static inline bool bitmap_intersects(const unsigned long *src1,
- const unsigned long *src2,
- unsigned int nbits)
+ static __always_inline
+ bool bitmap_intersects(const unsigned long *src1, const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
return __bitmap_intersects(src1, src2, nbits);
}
- static inline bool bitmap_subset(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+ static __always_inline
+ bool bitmap_subset(const unsigned long *src1, const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
return __bitmap_subset(src1, src2, nbits);
}
- static inline bool bitmap_empty(const unsigned long *src, unsigned nbits)
+ static __always_inline
+ bool bitmap_empty(const unsigned long *src, unsigned nbits)
{
if (small_const_nbits(nbits))
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
return find_first_bit(src, nbits) == nbits;
}
- static inline bool bitmap_full(const unsigned long *src, unsigned int nbits)
+ static __always_inline
+ bool bitmap_full(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
return __bitmap_weight_andnot(src1, src2, nbits);
}
- static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
- unsigned int nbits)
+ static __always_inline
+ void bitmap_set(unsigned long *map, unsigned int start, unsigned int nbits)
{
if (__builtin_constant_p(nbits) && nbits == 1)
__set_bit(start, map);
__bitmap_set(map, start, nbits);
}
- static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
- unsigned int nbits)
+ static __always_inline
+ void bitmap_clear(unsigned long *map, unsigned int start, unsigned int nbits)
{
if (__builtin_constant_p(nbits) && nbits == 1)
__clear_bit(start, map);
__bitmap_clear(map, start, nbits);
}
- static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
- unsigned int shift, unsigned int nbits)
+ static __always_inline
+ void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
__bitmap_shift_right(dst, src, shift, nbits);
}
- static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
- unsigned int shift, unsigned int nbits)
+ static __always_inline
+ void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src << shift) & BITMAP_LAST_WORD_MASK(nbits);
__bitmap_shift_left(dst, src, shift, nbits);
}
- static inline void bitmap_replace(unsigned long *dst,
- const unsigned long *old,
- const unsigned long *new,
- const unsigned long *mask,
- unsigned int nbits)
+ static __always_inline
+ void bitmap_replace(unsigned long *dst,
+ const unsigned long *old,
+ const unsigned long *new,
+ const unsigned long *mask,
+ unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*old & ~(*mask)) | (*new & *mask);
* bitmap_gather() can be seen as the 'reverse' bitmap_scatter() operation.
* See bitmap_scatter() for details related to this relationship.
*/
- static inline void bitmap_scatter(unsigned long *dst, const unsigned long *src,
- const unsigned long *mask, unsigned int nbits)
+ static __always_inline
+ void bitmap_scatter(unsigned long *dst, const unsigned long *src,
+ const unsigned long *mask, unsigned int nbits)
{
unsigned int n = 0;
unsigned int bit;
* bitmap_scatter(res, src, mask, n) and a call to
* bitmap_gather(res, result, mask, n) will lead to the same res value.
*/
- static inline void bitmap_gather(unsigned long *dst, const unsigned long *src,
- const unsigned long *mask, unsigned int nbits)
+ static __always_inline
+ void bitmap_gather(unsigned long *dst, const unsigned long *src,
+ const unsigned long *mask, unsigned int nbits)
{
unsigned int n = 0;
unsigned int bit;
__assign_bit(n++, dst, test_bit(bit, src));
}
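
A round-trip sketch of the scatter/gather relationship, reusing the values from the kernel-doc examples (single-word bitmaps, so plain unsigned longs are enough): scattering 0x005a through mask 0x1313 gives 0x0302, and gathering that back recovers the low popcount(mask) bits of the original.

static void example_scatter_gather(void)
{
	unsigned long src = 0x005a, mask = 0x1313;
	unsigned long scattered = 0, gathered = 0;

	bitmap_scatter(&scattered, &src, &mask, 16);	 /* scattered == 0x0302 */
	bitmap_gather(&gathered, &scattered, &mask, 16); /* gathered == 0x001a  */
}
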
- static inline void bitmap_next_set_region(unsigned long *bitmap,
- unsigned int *rs, unsigned int *re,
- unsigned int end)
+ static __always_inline
+ void bitmap_next_set_region(unsigned long *bitmap, unsigned int *rs,
+ unsigned int *re, unsigned int end)
{
*rs = find_next_bit(bitmap, end, *rs);
*re = find_next_zero_bit(bitmap, end, *rs + 1);
* This is the complement to __bitmap_find_free_region() and releases
* the found region (by clearing it in the bitmap).
*/
- static inline void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
+ static __always_inline
+ void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
{
bitmap_clear(bitmap, pos, BIT(order));
}
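
A hedged sketch tying the region helpers together (the order value and usage are illustrative): bitmap_find_free_region() locates and claims an aligned 2^order-bit region, and bitmap_release_region() gives it back.

static int example_region(unsigned long *map, unsigned int bits)
{
	int pos = bitmap_find_free_region(map, bits, 2);	/* claim 4 bits */

	if (pos < 0)
		return pos;	/* -errno: no free region of that size */

	/* ... use bits pos .. pos + 3 ... */

	bitmap_release_region(map, pos, 2);	/* clear them again */
	return 0;
}
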
* Returns: 0 on success, or %-EBUSY if specified region wasn't
* free (not all bits were zero).
*/
- static inline int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
+ static __always_inline
+ int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
{
unsigned int len = BIT(order);
* Returns: the bit offset in bitmap of the allocated region,
* or -errno on failure.
*/
- static inline int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
+ static __always_inline
+ int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
{
unsigned int pos, end; /* scans bitmap by regions of size order */
* That is ``(u32 *)(&val)[0]`` gets the upper 32 bits,
* but we expect the lower 32-bits of u64.
*/
- static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
+ static __always_inline void bitmap_from_u64(unsigned long *dst, u64 mask)
{
bitmap_from_arr64(dst, &mask, 64);
}
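
A small usage sketch of the word-order handling described above (the chosen bits are arbitrary): regardless of endianness or word size, bit N of the u64 ends up as bit N of the bitmap.

static void example_from_u64(void)
{
	DECLARE_BITMAP(dst, 64);

	bitmap_from_u64(dst, BIT_ULL(40) | BIT_ULL(0));
	/* test_bit(0, dst) and test_bit(40, dst) are now both true. */
}
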
* @map memory region. For @nbits = 0 and @nbits > BITS_PER_LONG the return
* value is undefined.
*/
- static inline unsigned long bitmap_read(const unsigned long *map,
- unsigned long start,
- unsigned long nbits)
+ static __always_inline
+ unsigned long bitmap_read(const unsigned long *map, unsigned long start, unsigned long nbits)
{
size_t index = BIT_WORD(start);
unsigned long offset = start % BITS_PER_LONG;
*
* For @nbits == 0 and @nbits > BITS_PER_LONG no writes are performed.
*/
- static inline void bitmap_write(unsigned long *map, unsigned long value,
- unsigned long start, unsigned long nbits)
+ static __always_inline
+ void bitmap_write(unsigned long *map, unsigned long value,
+ unsigned long start, unsigned long nbits)
{
size_t index;
unsigned long offset;
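
A field round-trip sketch for bitmap_read()/bitmap_write() under the constraints above (field position and width are made up, and 0 < nbits <= BITS_PER_LONG is respected):

static unsigned long example_field_roundtrip(void)
{
	DECLARE_BITMAP(map, 128);

	bitmap_zero(map, 128);
	bitmap_write(map, 0x2d, 70, 6);	/* store a 6-bit value at bit offset 70 */
	return bitmap_read(map, 70, 6);	/* reads back 0x2d */
}
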
extern unsigned int nr_cpu_ids;
#endif
- static inline void set_nr_cpu_ids(unsigned int nr)
+ static __always_inline void set_nr_cpu_ids(unsigned int nr)
{
#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
WARN_ON(nr != nr_cpu_ids);
*
* Return: >= nr_cpu_ids if no cpus set.
*/
- static inline unsigned int cpumask_first(const struct cpumask *srcp)
+ static __always_inline unsigned int cpumask_first(const struct cpumask *srcp)
{
return find_first_bit(cpumask_bits(srcp), small_cpumask_bits);
}
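
A hedged sketch of the ">= nr_cpu_ids when nothing is found" convention (in real code the for_each_cpu() macro is the usual idiom; this spells the scan out with cpumask_first()/cpumask_next()):

#include <linux/cpumask.h>
#include <linux/printk.h>

static void example_walk(const struct cpumask *mask)
{
	unsigned int cpu;

	for (cpu = cpumask_first(mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, mask))
		pr_info("cpu%u is set\n", cpu);
}
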
*
* Return: >= nr_cpu_ids if all cpus are set.
*/
- static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
+ static __always_inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
{
return find_first_zero_bit(cpumask_bits(srcp), small_cpumask_bits);
}
*
* Return: >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
*/
- static inline
+ static __always_inline
unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
return find_first_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
*
* Return: >= nr_cpu_ids if no cpus set in all.
*/
- static inline
+ static __always_inline
unsigned int cpumask_first_and_and(const struct cpumask *srcp1,
const struct cpumask *srcp2,
const struct cpumask *srcp3)
*
* Return: >= nr_cpumask_bits if no CPUs set.
*/
- static inline unsigned int cpumask_last(const struct cpumask *srcp)
+ static __always_inline unsigned int cpumask_last(const struct cpumask *srcp)
{
return find_last_bit(cpumask_bits(srcp), small_cpumask_bits);
}
*
* Return: >= nr_cpu_ids if no further cpus set.
*/
- static inline
+ static __always_inline
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
/* -1 is a legal arg here. */
*
* Return: >= nr_cpu_ids if no further cpus unset.
*/
- static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+ static __always_inline
+ unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
/* -1 is a legal arg here. */
if (n != -1)
#if NR_CPUS == 1
/* Uniprocessor: there is only one valid CPU */
- static inline unsigned int cpumask_local_spread(unsigned int i, int node)
+ static __always_inline
+ unsigned int cpumask_local_spread(unsigned int i, int node)
{
return 0;
}
- static inline unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
- const struct cpumask *src2p)
+ static __always_inline
+ unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
return cpumask_first_and(src1p, src2p);
}
- static inline unsigned int cpumask_any_distribute(const struct cpumask *srcp)
+ static __always_inline
+ unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
return cpumask_first(srcp);
}
*
* Return: >= nr_cpu_ids if no further cpus set in both.
*/
- static inline
+ static __always_inline
unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
- const struct cpumask *src2p)
+ const struct cpumask *src2p)
{
/* -1 is a legal arg here. */
if (n != -1)
for_each_set_bit(cpu, cpumask_bits(mask), small_cpumask_bits)
#if NR_CPUS == 1
- static inline
+ static __always_inline
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
cpumask_check(start);
* Often used to find any cpu but smp_processor_id() in a mask.
* Return: >= nr_cpu_ids if no cpus set.
*/
- static inline
+ static __always_inline
unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
unsigned int i;
*
* Return: >= nr_cpu_ids if no cpus set.
*/
- static inline
+ static __always_inline
unsigned int cpumask_any_and_but(const struct cpumask *mask1,
const struct cpumask *mask2,
unsigned int cpu)
*
* Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
- static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp)
+ static __always_inline
+ unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp)
{
return find_nth_bit(cpumask_bits(srcp), small_cpumask_bits, cpumask_check(cpu));
}
*
* Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
- static inline
+ static __always_inline
unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
const struct cpumask *srcp2)
{
*
* Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
- static inline
+ static __always_inline
unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1,
const struct cpumask *srcp2)
{
* @cpu: cpu number (< nr_cpu_ids)
* @dstp: the cpumask pointer
*/
- static __always_inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+ static __always_inline
+ void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
- static __always_inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+ static __always_inline
+ void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
*
* Return: true if @cpu is set in @cpumask, else returns false
*/
- static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
+ static __always_inline
+ bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
}
*
* Return: true if @cpu is set in old bitmap of @cpumask, else returns false
*/
- static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
+ static __always_inline
+ bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
*
* Return: true if @cpu is set in old bitmap of @cpumask, else returns false
*/
- static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
+ static __always_inline
+ bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
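
A small sketch of how the old-bit return value is typically used (the 'busy' mask is illustrative): only the caller that actually flips the bit from clear to set wins the claim.

static bool example_claim_cpu(int cpu, struct cpumask *busy)
{
	/* true only if @cpu was not already marked busy */
	return !cpumask_test_and_set_cpu(cpu, busy);
}
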
* cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
* @dstp: the cpumask pointer
*/
- static inline void cpumask_setall(struct cpumask *dstp)
+ static __always_inline void cpumask_setall(struct cpumask *dstp)
{
if (small_const_nbits(small_cpumask_bits)) {
cpumask_bits(dstp)[0] = BITMAP_LAST_WORD_MASK(nr_cpumask_bits);
* cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
* @dstp: the cpumask pointer
*/
- static inline void cpumask_clear(struct cpumask *dstp)
+ static __always_inline void cpumask_clear(struct cpumask *dstp)
{
bitmap_zero(cpumask_bits(dstp), large_cpumask_bits);
}
*
* Return: false if *@dstp is empty, else returns true
*/
- static inline bool cpumask_and(struct cpumask *dstp,
- const struct cpumask *src1p,
- const struct cpumask *src2p)
+ static __always_inline
+ bool cpumask_and(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), small_cpumask_bits);
* @src1p: the first input
* @src2p: the second input
*/
- static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
- const struct cpumask *src2p)
+ static __always_inline
+ void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), small_cpumask_bits);
* @src1p: the first input
* @src2p: the second input
*/
- static inline void cpumask_xor(struct cpumask *dstp,
- const struct cpumask *src1p,
- const struct cpumask *src2p)
+ static __always_inline
+ void cpumask_xor(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), small_cpumask_bits);
*
* Return: false if *@dstp is empty, else returns true
*/
- static inline bool cpumask_andnot(struct cpumask *dstp,
- const struct cpumask *src1p,
- const struct cpumask *src2p)
+ static __always_inline
+ bool cpumask_andnot(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), small_cpumask_bits);
*
* Return: true if the cpumasks are equal, false if not
*/
- static inline bool cpumask_equal(const struct cpumask *src1p,
- const struct cpumask *src2p)
+ static __always_inline
+ bool cpumask_equal(const struct cpumask *src1p, const struct cpumask *src2p)
{
return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
small_cpumask_bits);
* Return: true if first cpumask ORed with second cpumask == third cpumask,
* otherwise false
*/
- static inline bool cpumask_or_equal(const struct cpumask *src1p,
- const struct cpumask *src2p,
- const struct cpumask *src3p)
+ static __always_inline
+ bool cpumask_or_equal(const struct cpumask *src1p, const struct cpumask *src2p,
+ const struct cpumask *src3p)
{
return bitmap_or_equal(cpumask_bits(src1p), cpumask_bits(src2p),
cpumask_bits(src3p), small_cpumask_bits);
* Return: true if first cpumask ANDed with second cpumask is non-empty,
* otherwise false
*/
- static inline bool cpumask_intersects(const struct cpumask *src1p,
- const struct cpumask *src2p)
+ static __always_inline
+ bool cpumask_intersects(const struct cpumask *src1p, const struct cpumask *src2p)
{
return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
small_cpumask_bits);
*
* Return: true if *@src1p is a subset of *@src2p, else returns false
*/
- static inline bool cpumask_subset(const struct cpumask *src1p,
- const struct cpumask *src2p)
+ static __always_inline
+ bool cpumask_subset(const struct cpumask *src1p, const struct cpumask *src2p)
{
return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
small_cpumask_bits);
*
* Return: true if srcp is empty (has no bits set), else false
*/
- static inline bool cpumask_empty(const struct cpumask *srcp)
+ static __always_inline bool cpumask_empty(const struct cpumask *srcp)
{
return bitmap_empty(cpumask_bits(srcp), small_cpumask_bits);
}
*
* Return: true if srcp is full (has all bits set), else false
*/
- static inline bool cpumask_full(const struct cpumask *srcp)
+ static __always_inline bool cpumask_full(const struct cpumask *srcp)
{
return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
}
*
* Return: count of bits set in *srcp
*/
- static inline unsigned int cpumask_weight(const struct cpumask *srcp)
+ static __always_inline unsigned int cpumask_weight(const struct cpumask *srcp)
{
return bitmap_weight(cpumask_bits(srcp), small_cpumask_bits);
}
*
* Return: count of bits set in both *srcp1 and *srcp2
*/
- static inline unsigned int cpumask_weight_and(const struct cpumask *srcp1,
- const struct cpumask *srcp2)
+ static __always_inline
+ unsigned int cpumask_weight_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
return bitmap_weight_and(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}
*
* Return: count of bits set in *srcp1 and not set in *srcp2
*/
- static inline unsigned int cpumask_weight_andnot(const struct cpumask *srcp1,
- const struct cpumask *srcp2)
+ static __always_inline
+ unsigned int cpumask_weight_andnot(const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
{
return bitmap_weight_andnot(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}
* @srcp: the input to shift
* @n: the number of bits to shift by
*/
- static inline void cpumask_shift_right(struct cpumask *dstp,
- const struct cpumask *srcp, int n)
+ static __always_inline
+ void cpumask_shift_right(struct cpumask *dstp, const struct cpumask *srcp, int n)
{
bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
small_cpumask_bits);
* @srcp: the input to shift
* @n: the number of bits to shift by
*/
- static inline void cpumask_shift_left(struct cpumask *dstp,
- const struct cpumask *srcp, int n)
+ static __always_inline
+ void cpumask_shift_left(struct cpumask *dstp, const struct cpumask *srcp, int n)
{
bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
nr_cpumask_bits);
* @dstp: the result
* @srcp: the input cpumask
*/
- static inline void cpumask_copy(struct cpumask *dstp,
- const struct cpumask *srcp)
+ static __always_inline
+ void cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp)
{
bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), large_cpumask_bits);
}
*
* Return: -errno, or 0 for success.
*/
- static inline int cpumask_parse_user(const char __user *buf, int len,
- struct cpumask *dstp)
+ static __always_inline
+ int cpumask_parse_user(const char __user *buf, int len, struct cpumask *dstp)
{
return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}
*
* Return: -errno, or 0 for success.
*/
- static inline int cpumask_parselist_user(const char __user *buf, int len,
- struct cpumask *dstp)
+ static __always_inline
+ int cpumask_parselist_user(const char __user *buf, int len, struct cpumask *dstp)
{
return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
nr_cpumask_bits);
*
* Return: -errno, or 0 for success.
*/
- static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
+ static __always_inline int cpumask_parse(const char *buf, struct cpumask *dstp)
{
return bitmap_parse(buf, UINT_MAX, cpumask_bits(dstp), nr_cpumask_bits);
}
*
* Return: -errno, or 0 for success.
*/
- static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
+ static __always_inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
}
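
A usage sketch of the parse helpers' error convention (the input string is an example; it fails with -ERANGE if the system has fewer CPUs than the list names):

static int example_parse(struct cpumask *dst)
{
	int err = cpulist_parse("0-3,8", dst);

	if (err)
		return err;		/* -EINVAL, -ERANGE, ... */
	return cpumask_weight(dst);	/* number of CPUs parsed */
}
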
*
* Return: size to allocate for a &struct cpumask in bytes
*/
- static inline unsigned int cpumask_size(void)
+ static __always_inline unsigned int cpumask_size(void)
{
return bitmap_size(large_cpumask_bits);
}
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
- static inline
+ static __always_inline
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
*
* Return: %true if allocation succeeded, %false if not
*/
- static inline
+ static __always_inline
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
- static inline
+ static __always_inline
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return alloc_cpumask_var(mask, flags | __GFP_ZERO);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);
- static inline bool cpumask_available(cpumask_var_t mask)
+ static __always_inline bool cpumask_available(cpumask_var_t mask)
{
return mask != NULL;
}
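
A minimal allocation sketch for the cpumask_var_t API above (error handling is illustrative): with CONFIG_CPUMASK_OFFSTACK the mask lives on the heap, otherwise these calls reduce to the stub variants further below and cost nothing.

static int example_cpumask_var(void)
{
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(0, mask);
	/* ... use mask ... */

	free_cpumask_var(mask);
	return 0;
}
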
#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
#define __cpumask_var_read_mostly
- static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+ static __always_inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return true;
}
- static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
+ static __always_inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
int node)
{
return true;
}
- static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+ static __always_inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
cpumask_clear(*mask);
return true;
}
- static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
+ static __always_inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
int node)
{
cpumask_clear(*mask);
return true;
}
- static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
+ static __always_inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
}
- static inline void free_cpumask_var(cpumask_var_t mask)
+ static __always_inline void free_cpumask_var(cpumask_var_t mask)
{
}
- static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
+ static __always_inline void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}
- static inline bool cpumask_available(cpumask_var_t mask)
+ static __always_inline bool cpumask_available(cpumask_var_t mask)
{
return true;
}
assign_bit(cpumask_check(cpu), cpumask_bits(mask), (val))
#define set_cpu_possible(cpu, possible) assign_cpu((cpu), &__cpu_possible_mask, (possible))
-#define set_cpu_enabled(cpu, enabled) assign_cpu((cpu), &__cpu_possible_mask, (enabled))
+#define set_cpu_enabled(cpu, enabled) assign_cpu((cpu), &__cpu_enabled_mask, (enabled))
#define set_cpu_present(cpu, present) assign_cpu((cpu), &__cpu_present_mask, (present))
#define set_cpu_active(cpu, active) assign_cpu((cpu), &__cpu_active_mask, (active))
#define set_cpu_dying(cpu, dying) assign_cpu((cpu), &__cpu_dying_mask, (dying))
((struct cpumask *)(1 ? (bitmap) \
: (void *)sizeof(__check_is_bitmap(bitmap))))
- static inline int __check_is_bitmap(const unsigned long *bitmap)
+ static __always_inline int __check_is_bitmap(const unsigned long *bitmap)
{
return 1;
}
extern const unsigned long
cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
- static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
+ static __always_inline const struct cpumask *get_cpu_mask(unsigned int cpu)
{
const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
p -= cpu / BITS_PER_LONG;
#define num_present_cpus() cpumask_weight(cpu_present_mask)
#define num_active_cpus() cpumask_weight(cpu_active_mask)
- static inline bool cpu_online(unsigned int cpu)
+ static __always_inline bool cpu_online(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_online_mask);
}
- static inline bool cpu_enabled(unsigned int cpu)
+ static __always_inline bool cpu_enabled(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_enabled_mask);
}
- static inline bool cpu_possible(unsigned int cpu)
+ static __always_inline bool cpu_possible(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_possible_mask);
}
- static inline bool cpu_present(unsigned int cpu)
+ static __always_inline bool cpu_present(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_present_mask);
}
- static inline bool cpu_active(unsigned int cpu)
+ static __always_inline bool cpu_active(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_active_mask);
}
- static inline bool cpu_dying(unsigned int cpu)
+ static __always_inline bool cpu_dying(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_dying_mask);
}
#define num_present_cpus() 1U
#define num_active_cpus() 1U
- static inline bool cpu_online(unsigned int cpu)
+ static __always_inline bool cpu_online(unsigned int cpu)
{
return cpu == 0;
}
- static inline bool cpu_possible(unsigned int cpu)
+ static __always_inline bool cpu_possible(unsigned int cpu)
{
return cpu == 0;
}
- static inline bool cpu_enabled(unsigned int cpu)
+ static __always_inline bool cpu_enabled(unsigned int cpu)
{
return cpu == 0;
}
- static inline bool cpu_present(unsigned int cpu)
+ static __always_inline bool cpu_present(unsigned int cpu)
{
return cpu == 0;
}
- static inline bool cpu_active(unsigned int cpu)
+ static __always_inline bool cpu_active(unsigned int cpu)
{
return cpu == 0;
}
- static inline bool cpu_dying(unsigned int cpu)
+ static __always_inline bool cpu_dying(unsigned int cpu)
{
return false;
}
* Return: the length of the (null-terminated) @buf string, zero if
* nothing is copied.
*/
- static inline ssize_t
+ static __always_inline ssize_t
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
{
return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
* Return: the number of bytes copied, excluding the
* terminating '\0'.
*/
- static inline ssize_t
- cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
- loff_t off, size_t count)
+ static __always_inline
+ ssize_t cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
+ loff_t off, size_t count)
{
return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask),
nr_cpu_ids, off, count) - 1;
* Return: the number of bytes copied, excluding the
* terminating '\0'.
*/
- static inline ssize_t
- cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
- loff_t off, size_t count)
+ static __always_inline
+ ssize_t cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
+ loff_t off, size_t count)
{
return bitmap_print_list_to_buf(buf, cpumask_bits(mask),
nr_cpu_ids, off, count) - 1;