--- a/include/linux/sched.h
+++ b/include/linux/sched.h
 	struct tlbflush_unmap_batch	tlb_ubc;
 
-	struct rcu_head			rcu;
+	union {
+		refcount_t		rcu_users;
+		struct rcu_head		rcu;
+	};
 
 	/* Cache last used pipe for splice(): */
 	struct pipe_inode_info		*splice_pipe;
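The union saves space because the two members are never needed at the same time: rcu_users is only touched while references remain, and the rcu_head is only written after the count has already dropped to zero (see put_task_struct_rcu_user() below). A minimal user-space sketch of the same pattern; every name here is made up, and fake_call_rcu() runs its callback immediately instead of deferring it past a grace period the way the kernel's call_rcu() does:

#include <stdatomic.h>
#include <stddef.h>
#include <stdlib.h>

struct fake_rcu_head {
	void (*func)(struct fake_rcu_head *head);
};

struct object {
	int payload;
	union {
		atomic_int rcu_users;		/* live while users remain */
		struct fake_rcu_head rcu;	/* reused after the last put */
	};
};

static void fake_call_rcu(struct fake_rcu_head *head,
			  void (*func)(struct fake_rcu_head *head))
{
	head->func = func;	/* overwrites rcu_users: safe, the count is 0 */
	head->func(head);	/* a real call_rcu() would defer this */
}

static void delayed_free(struct fake_rcu_head *head)
{
	/* container_of() in miniature */
	struct object *obj =
		(struct object *)((char *)head - offsetof(struct object, rcu));
	free(obj);
}

static void put_object_rcu_user(struct object *obj)
{
	/* last reference: the refcount storage becomes the callback head */
	if (atomic_fetch_sub(&obj->rcu_users, 1) == 1)
		fake_call_rcu(&obj->rcu, delayed_free);
}

int main(void)
{
	struct object *obj = malloc(sizeof(*obj));

	atomic_init(&obj->rcu_users, 1);
	put_object_rcu_user(obj);	/* frees obj via delayed_free() */
	return 0;
}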
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
 struct task_struct *task_rcu_dereference(struct task_struct **ptask);
+void put_task_struct_rcu_user(struct task_struct *task);
 
 #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
 extern int arch_task_struct_size __read_mostly;
--- a/kernel/exit.c
+++ b/kernel/exit.c
 	put_task_struct(tsk);
 }
 
+void put_task_struct_rcu_user(struct task_struct *task)
+{
+	if (refcount_dec_and_test(&task->rcu_users))
+		call_rcu(&task->rcu, delayed_put_task_struct);
+}
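Only the caller that drops the last rcu_users reference sees refcount_dec_and_test() return true, so delayed_put_task_struct() is queued exactly once, and any reader that found the task inside an RCU read-side critical section keeps a valid pointer until the grace period ends. A hedged sketch of such a reader, not part of the patch; grab_task() is a hypothetical helper mirroring the kernel's find_get_task_by_vpid() pattern:

#include <linux/pid.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/task.h>

/* Sketch only: grab_task() is a made-up name, not from this patch. */
static struct task_struct *grab_task(struct pid *pid)
{
	struct task_struct *task;

	rcu_read_lock();
	task = pid_task(pid, PIDTYPE_PID);	/* RCU-protected lookup */
	if (task)
		get_task_struct(task);		/* pin a 'usage' reference */
	rcu_read_unlock();

	return task;	/* caller drops it with put_task_struct() */
}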
 void release_task(struct task_struct *p)
 {
 	...
 	write_unlock_irq(&tasklist_lock);
 	release_thread(p);
-	call_rcu(&p->rcu, delayed_put_task_struct);
+	put_task_struct_rcu_user(p);
 
 	p = leader;
 	if (unlikely(zap_leader))
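Because rcu_users starts at 1 (see the kernel/fork.c hunk below), this call is behaviorally identical to the call_rcu() it replaces: release_task() is still the only RCU user, and the callback still runs one grace period after the task is reaped. The indirection is what lets a later patch in the series make the scheduler a second RCU user and retire the subtle task_rcu_dereference().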
--- a/kernel/fork.c
+++ b/kernel/fork.c
 	if (orig->cpus_ptr == &orig->cpus_mask)
 		tsk->cpus_ptr = &tsk->cpus_mask;
 
-	/*
-	 * One for us, one for whoever does the "release_task()" (usually
-	 * parent)
-	 */
+	/* One for the user space visible state that goes away when reaped. */
+	refcount_set(&tsk->rcu_users, 1);
+	/* One for the rcu users, and one for the scheduler */
 	refcount_set(&tsk->usage, 2);
 
 #ifdef CONFIG_BLK_DEV_IO_TRACE
 	tsk->btrace_seq = 0;
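Taken together, the counts after this patch describe the following teardown. This is a sketch of one possible ordering; the finish_task_switch() step is the scheduler's preexisting final put, which this diff does not touch:

/*
 * Lifetime sketch after this patch (one possible ordering):
 *
 *   dup_task_struct():          rcu_users = 1, usage = 2
 *   release_task():             put_task_struct_rcu_user(), rcu_users 1 -> 0,
 *                               delayed_put_task_struct() is queued
 *   ...RCU grace period...
 *   delayed_put_task_struct():  put_task_struct(), usage 2 -> 1
 *   finish_task_switch():       put_task_struct(), usage 1 -> 0, task freed
 */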