#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include "internal.h"
#include "mount.h"

static DEFINE_SPINLOCK(pin_lock);

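/*
 * Undo pin_insert()/pin_insert_group(): take the pin off the mount and
 * superblock lists and mark it done, waking anyone sleeping in pin_kill().
 * Called by the pin's owner when the pinned object is being torn down,
 * typically from its ->kill() instance.
 */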
void pin_remove(struct fs_pin *pin)
{
	spin_lock(&pin_lock);
	hlist_del_init(&pin->m_list);
	hlist_del_init(&pin->s_list);
	spin_unlock(&pin_lock);
	spin_lock_irq(&pin->wait.lock);
	pin->done = 1;
	wake_up_locked(&pin->wait);
	spin_unlock_irq(&pin->wait.lock);
}

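/*
 * Attach a pin to a mount and, if @p is non-NULL, to a group list as well
 * (e.g. a superblock's ->s_pins), so that the pin's ->kill() gets run when
 * the mount or the group is being shut down.
 */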
void pin_insert_group(struct fs_pin *pin, struct vfsmount *m, struct hlist_head *p)
{
	spin_lock(&pin_lock);
	if (p)
		hlist_add_head(&pin->s_list, p);
	hlist_add_head(&pin->m_list, &real_mount(m)->mnt_pins);
	spin_unlock(&pin_lock);
}

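/*
 * Attach a pin to both the mount and its superblock's ->s_pins list.
 */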
void pin_insert(struct fs_pin *pin, struct vfsmount *m)
{
	pin_insert_group(pin, m, &m->mnt_sb->s_pins);
}

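/*
 * Run a pin's ->kill(), or wait for a kill already in progress to complete.
 *
 * Called with rcu_read_lock() held; that lock is dropped before returning.
 * ->done tracks the pin's state: 0 - live, -1 - ->kill() running elsewhere,
 * 1 - pin_remove() has been called and the pin is gone.
 */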
void pin_kill(struct fs_pin *p)
{
	wait_queue_entry_t wait;

	if (!p) {
		rcu_read_unlock();
		return;
	}
	init_wait(&wait);
	spin_lock_irq(&p->wait.lock);
	if (likely(!p->done)) {
		/* nobody has started killing it yet - it's ours to kill */
		p->done = -1;
		spin_unlock_irq(&p->wait.lock);
		rcu_read_unlock();
		p->kill(p);
		return;
	}
	if (p->done > 0) {
		/* already removed by pin_remove() */
		spin_unlock_irq(&p->wait.lock);
		rcu_read_unlock();
		return;
	}
	/* a kill is in progress elsewhere - wait for pin_remove() to wake us */
	__add_wait_queue(&p->wait, &wait);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&p->wait.lock);
		rcu_read_unlock();
		schedule();
		rcu_read_lock();
		if (likely(list_empty(&wait.entry)))
			break;
		/* OK, we know p couldn't have been freed yet */
		spin_lock_irq(&p->wait.lock);
		if (p->done > 0) {
			spin_unlock_irq(&p->wait.lock);
			break;
		}
	}
	rcu_read_unlock();
}

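/*
 * Kill every pin hanging off a mount.  Each iteration peeks at the first
 * entry of ->mnt_pins under rcu_read_lock(); pin_kill() drops that lock
 * on every path.
 */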
void mnt_pin_kill(struct mount *m)
{
	while (1) {
		struct hlist_node *p;
		rcu_read_lock();
		p = ACCESS_ONCE(m->mnt_pins.first);
		if (!p) {
			rcu_read_unlock();
			break;
		}
		pin_kill(hlist_entry(p, struct fs_pin, m_list));
	}
}

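/*
 * Kill every pin on a group list (e.g. a superblock's ->s_pins).
 */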
void group_pin_kill(struct hlist_head *p)
{
	while (1) {
		struct hlist_node *q;
		rcu_read_lock();
		q = ACCESS_ONCE(p->first);
		if (!q) {
			rcu_read_unlock();
			break;
		}
		pin_kill(hlist_entry(q, struct fs_pin, s_list));
	}
}