Use 'atomic_dec_and_lock()' to make sure that we always hold the
tty_ldisc_lock when the ldisc count goes to zero. That way we can never
race against 'tty_ldisc_try()' increasing the count again.
Reported-by: OGAWA Hirofumi <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Tested-by: Sergey Senozhatsky <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
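
[ Note: the sketch below is illustrative only and is not part of the patch;
  'example_obj', 'example_users_lock' and 'example_put()' are made-up names.
  It shows the general atomic_dec_and_lock() pattern the fix relies on: the
  decrement that reaches zero returns with the spinlock held, so a concurrent
  "try" path that takes the same lock before bumping the count can never
  resurrect an object whose count has already hit zero. ]

	#include <linux/spinlock.h>
	#include <linux/slab.h>
	#include <asm/atomic.h>

	struct example_obj {
		atomic_t users;
	};

	static DEFINE_SPINLOCK(example_users_lock);

	static void example_put(struct example_obj *obj)
	{
		unsigned long flags;

		/*
		 * atomic_dec_and_lock() only takes a plain spin_lock(),
		 * so disable interrupts by hand, as the patch does.
		 */
		local_irq_save(flags);
		if (atomic_dec_and_lock(&obj->users, &example_users_lock)) {
			/*
			 * The count hit zero and the lock is held, so no
			 * "try" path can take a new reference under the
			 * same lock: tear the object down safely.
			 */
			spin_unlock_irqrestore(&example_users_lock, flags);
			kfree(obj);
			return;
		}
		local_irq_restore(flags);
	}
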
-static inline void put_ldisc(struct tty_ldisc *ld)
+static void put_ldisc(struct tty_ldisc *ld)
 {
+	unsigned long flags;
+
 	if (WARN_ON_ONCE(!ld))
 		return;
 
 	/*
 	 * If this is the last user, free the ldisc, and
 	 * release the ldisc ops.
+	 *
+	 * We really want an "atomic_dec_and_lock_irqsave()",
+	 * but we don't have it, so this does it by hand.
 	 */
-	if (atomic_dec_and_test(&ld->users)) {
-		unsigned long flags;
+	local_irq_save(flags);
+	if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
 		struct tty_ldisc_ops *ldo = ld->ops;
 
-		kfree(ld);
-		spin_lock_irqsave(&tty_ldisc_lock, flags);
 		ldo->refcount--;
 		module_put(ldo->owner);
 		spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+		kfree(ld);
+		return;
 	}
+	local_irq_restore(flags);
 }