#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
- #define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */
- #define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
}
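/*
 * Why PF_SPREAD_PAGE/PF_SPREAD_SLAB move out of task->flags (sketch,
 * not part of the patch): cpuset flips these bits on *other* tasks,
 * roughly
 *
 *	if (is_spread_page(cs))
 *		tsk->flags |= PF_SPREAD_PAGE;
 *	else
 *		tsk->flags &= ~PF_SPREAD_PAGE;
 *
 * and a plain read-modify-write on tsk->flags can race with the task
 * updating its own flags, silently dropping an unrelated PF_* bit.
 * Dedicated bits in p->atomic_flags can instead be updated with the
 * atomic set_bit()/clear_bit() helpers.
 */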
/* Per-process atomic flags. */
- #define PFA_NO_NEW_PRIVS 0x00000001 /* May not gain new privileges. */
-
- static inline bool task_no_new_privs(struct task_struct *p)
- {
- return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
- }
-
- static inline void task_set_no_new_privs(struct task_struct *p)
- {
- set_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
- }
+ #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
+ #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
+ #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
+
+ #define TASK_PFA_TEST(name, func) \
+ static inline bool task_##func(struct task_struct *p) \
+ { return test_bit(PFA_##name, &p->atomic_flags); }
+ #define TASK_PFA_SET(name, func) \
+ static inline void task_set_##func(struct task_struct *p) \
+ { set_bit(PFA_##name, &p->atomic_flags); }
+ #define TASK_PFA_CLEAR(name, func) \
+ static inline void task_clear_##func(struct task_struct *p) \
+ { clear_bit(PFA_##name, &p->atomic_flags); }
+
+ TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
+ TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
+
+ TASK_PFA_TEST(SPREAD_PAGE, spread_page)
+ TASK_PFA_SET(SPREAD_PAGE, spread_page)
+ TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
+
+ TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
+ TASK_PFA_SET(SPREAD_SLAB, spread_slab)
+ TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
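/*
 * For reference only (this is what the preprocessor generates from the
 * SPREAD_PAGE trio above, not extra code added by the patch):
 *
 *	static inline bool task_spread_page(struct task_struct *p)
 *	{ return test_bit(PFA_SPREAD_PAGE, &p->atomic_flags); }
 *	static inline void task_set_spread_page(struct task_struct *p)
 *	{ set_bit(PFA_SPREAD_PAGE, &p->atomic_flags); }
 *	static inline void task_clear_spread_page(struct task_struct *p)
 *	{ clear_bit(PFA_SPREAD_PAGE, &p->atomic_flags); }
 *
 * so callers use e.g. task_set_spread_page(tsk) instead of the old
 * non-atomic tsk->flags |= PF_SPREAD_PAGE.
 */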
/*
* task->jobctl flags
task_thread_info(p)->task = p;
}
+/*
+ * Return the address of the last usable long on the stack.
+ *
+ * When the stack grows down, this is just above the thread
+ * info struct. Going any lower will corrupt the threadinfo.
+ *
+ * When the stack grows up, this is the highest address.
+ * Beyond that position, we corrupt data on the next page.
+ */
static inline unsigned long *end_of_stack(struct task_struct *p)
{
+#ifdef CONFIG_STACK_GROWSUP
+ return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
+#else
return (unsigned long *)(task_thread_info(p) + 1);
+#endif
}
#endif
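/*
 * Getting end_of_stack() right for both growth directions matters
 * because that last usable long is a natural place for a stack canary.
 * A minimal sketch of such a check (the helper name below is
 * hypothetical; it assumes a STACK_END_MAGIC canary value was written
 * at end_of_stack() when the task was set up):
 */
static inline bool task_stack_end_corrupted_sketch(struct task_struct *p)
{
	return *end_of_stack(p) != STACK_END_MAGIC;	/* canary overwritten? */
}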
int
__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
{
- size_t left_over, freelist_size, ralign;
+ size_t left_over, freelist_size;
+ size_t ralign = BYTES_PER_WORD;
gfp_t gfp;
int err;
size_t size = cachep->size;
size &= ~(BYTES_PER_WORD - 1);
}
- /*
- * Redzoning and user store require word alignment or possibly larger.
- * Note this will be overridden by architecture or caller mandated
- * alignment if either is greater than BYTES_PER_WORD.
- */
- if (flags & SLAB_STORE_USER)
- ralign = BYTES_PER_WORD;
-
if (flags & SLAB_RED_ZONE) {
ralign = REDZONE_ALIGN;
/* If redzoning, ensure that the second redzone is suitably
#ifdef CONFIG_NUMA
/*
- * Try allocating on another node if PF_SPREAD_SLAB is a mempolicy is set.
+ * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set.
*
* If we are in_interrupt, then process context, including cpusets and
* mempolicy, may not apply and should not be used for allocation policy.
{
void *objp;
- if (current->mempolicy || unlikely(current->flags & PF_SPREAD_SLAB)) {
+ if (current->mempolicy || cpuset_do_slab_mem_spread()) {
objp = alternate_node_alloc(cache, flags);
if (objp)
goto out;
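/*
 * cpuset_do_slab_mem_spread() keeps the flag lookup out of slab
 * internals.  With the per-process atomic flags above, one would expect
 * it to reduce to roughly the following (sketch only; see
 * <linux/cpuset.h> for the real definition):
 */
static inline int cpuset_do_slab_mem_spread_sketch(void)
{
	return task_spread_slab(current);
}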