Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  highmem: Fix debug_kmap_atomic() to also handle KM_IRQ_PTE, KM_NMI, and KM_NMI_PTE
  highmem: Fix race in debug_kmap_atomic() which could cause warn_count to underflow
  rcu: Fix long-grace-period race between forcing and initialization
  uids: Prevent tear down race
commit 961767b75d

4 changed files with 28 additions and 14 deletions
kernel/rcutree.c

@@ -59,7 +59,7 @@
 		NUM_RCU_LVL_2, \
 		NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \
 	}, \
-	.signaled = RCU_SIGNAL_INIT, \
+	.signaled = RCU_GP_IDLE, \
 	.gpnum = -300, \
 	.completed = -300, \
 	.onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \
@@ -657,14 +657,17 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	 * irqs disabled.
 	 */
 	rcu_for_each_node_breadth_first(rsp, rnp) {
-		spin_lock(&rnp->lock);	/* irqs already disabled. */
+		spin_lock(&rnp->lock);		/* irqs already disabled. */
 		rcu_preempt_check_blocked_tasks(rnp);
 		rnp->qsmask = rnp->qsmaskinit;
 		rnp->gpnum = rsp->gpnum;
-		spin_unlock(&rnp->lock);	/* irqs already disabled. */
+		spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 	}
 
+	rnp = rcu_get_root(rsp);
+	spin_lock(&rnp->lock);		/* irqs already disabled. */
+	rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
+	spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 	spin_unlock_irqrestore(&rsp->onofflock, flags);
 }
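The hunk above defers setting rsp->signaled until every rcu_node has been initialized under its own lock, so forcing cannot run against half-initialized state. A minimal user-space sketch of that publish-after-init ordering, with hypothetical names and pthread mutexes standing in for the kernel's spinlocks (not the kernel code itself):

/*
 * Sketch only: illustrative names, pthread mutexes in place of
 * rnp->lock and the root lock.
 */
#include <pthread.h>
#include <stdio.h>

#define NNODES 4

static pthread_mutex_t node_lock[NNODES] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};
static pthread_mutex_t root_lock = PTHREAD_MUTEX_INITIALIZER;

static int node_ready[NNODES];
static int signaled;	/* 0: hold off forcing, 1: forcing may proceed */

static void start_grace_period(void)
{
	int i;

	/* Initialize every node first, each under its own lock. */
	for (i = 0; i < NNODES; i++) {
		pthread_mutex_lock(&node_lock[i]);
		node_ready[i] = 1;
		pthread_mutex_unlock(&node_lock[i]);
	}

	/* Only after all nodes are ready, publish that forcing is allowed. */
	pthread_mutex_lock(&root_lock);
	signaled = 1;
	pthread_mutex_unlock(&root_lock);
}

int main(void)
{
	start_grace_period();
	printf("signaled = %d\n", signaled);
	return 0;
}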
@@ -706,6 +709,7 @@ static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
 {
 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
 	rsp->completed = rsp->gpnum;
+	rsp->signaled = RCU_GP_IDLE;
 	rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
 	rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
 }
@@ -1162,9 +1166,10 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 	}
 	spin_unlock(&rnp->lock);
 	switch (signaled) {
+	case RCU_GP_IDLE:
 	case RCU_GP_INIT:
 
-		break; /* grace period still initializing, ignore. */
+		break; /* grace period idle or initializing, ignore. */
 
 	case RCU_SAVE_DYNTICK:
@@ -1178,7 +1183,8 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 
 		/* Update state, record completion counter. */
 		spin_lock(&rnp->lock);
-		if (lastcomp == rsp->completed) {
+		if (lastcomp == rsp->completed &&
+		    rsp->signaled == RCU_SAVE_DYNTICK) {
 			rsp->signaled = RCU_FORCE_QS;
 			dyntick_record_completed(rsp, lastcomp);
 		}
kernel/rcutree.h

@@ -201,9 +201,10 @@ struct rcu_data {
 };
 
 /* Values for signaled field in struct rcu_state. */
-#define RCU_GP_INIT		0	/* Grace period being initialized. */
-#define RCU_SAVE_DYNTICK	1	/* Need to scan dyntick state. */
-#define RCU_FORCE_QS		2	/* Need to force quiescent state. */
+#define RCU_GP_IDLE		0	/* No grace period in progress. */
+#define RCU_GP_INIT		1	/* Grace period being initialized. */
+#define RCU_SAVE_DYNTICK	2	/* Need to scan dyntick state. */
+#define RCU_FORCE_QS		3	/* Need to force quiescent state. */
 #ifdef CONFIG_NO_HZ
 #define RCU_SIGNAL_INIT		RCU_SAVE_DYNTICK
 #else /* #ifdef CONFIG_NO_HZ */
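With RCU_GP_IDLE added, the signaled field walks an explicit progression: idle, initializing, save-dyntick, force-qs, and forcing is a no-op in the first two states. A minimal user-space sketch of that dispatch, using illustrative names rather than the kernel's definitions or force_quiescent_state() itself:

/*
 * Sketch only: an illustrative enum mirroring the signaled values above.
 */
#include <stdio.h>

enum gp_state {
	GP_IDLE,		/* no grace period in progress */
	GP_INIT,		/* grace period being initialized */
	GP_SAVE_DYNTICK,	/* need to scan dyntick state */
	GP_FORCE_QS		/* need to force quiescent states */
};

static const char *force_action(enum gp_state signaled)
{
	switch (signaled) {
	case GP_IDLE:
	case GP_INIT:
		return "ignore: grace period idle or still initializing";
	case GP_SAVE_DYNTICK:
		return "record dyntick-idle CPUs, then advance to force-qs";
	case GP_FORCE_QS:
		return "force quiescent states on holdout CPUs";
	}
	return "unknown";
}

int main(void)
{
	int s;

	for (s = GP_IDLE; s <= GP_FORCE_QS; s++)
		printf("state %d: %s\n", s, force_action((enum gp_state)s));
	return 0;
}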
kernel/user.c

@@ -330,9 +330,9 @@ done:
  */
 static void free_user(struct user_struct *up, unsigned long flags)
 {
-	spin_unlock_irqrestore(&uidhash_lock, flags);
 	INIT_DELAYED_WORK(&up->work, cleanup_user_struct);
 	schedule_delayed_work(&up->work, msecs_to_jiffies(1000));
+	spin_unlock_irqrestore(&uidhash_lock, flags);
 }
 
 #else /* CONFIG_USER_SCHED && CONFIG_SYSFS */
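The reordering above keeps uidhash_lock held while the deferred cleanup is armed, closing the window in which a concurrent lookup could find the user_struct that is about to be torn down. A rough user-space sketch of the fixed ordering, with hypothetical names and a pthread mutex in place of the kernel spinlock and workqueue:

/*
 * Sketch only: hypothetical types and names; a flag replaces the
 * delayed-work machinery.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;

struct user_obj {
	int uid;
	int cleanup_armed;
};

/* Stand-in for INIT_DELAYED_WORK() + schedule_delayed_work(). */
static void arm_deferred_cleanup(struct user_obj *up)
{
	up->cleanup_armed = 1;
}

/* Called with hash_lock held once the refcount has hit zero. */
static void free_user_fixed(struct user_obj *up)
{
	arm_deferred_cleanup(up);		/* still under the lock */
	pthread_mutex_unlock(&hash_lock);	/* drop the lock only afterwards */
}

int main(void)
{
	struct user_obj u = { 1000, 0 };

	pthread_mutex_lock(&hash_lock);
	free_user_fixed(&u);
	printf("uid %d: cleanup armed = %d\n", u.uid, u.cleanup_armed);
	return 0;
}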
mm/highmem.c (17 changed lines)

@@ -426,16 +426,21 @@ void __init page_address_init(void)
 
 void debug_kmap_atomic(enum km_type type)
 {
-	static unsigned warn_count = 10;
+	static int warn_count = 10;
 
-	if (unlikely(warn_count == 0))
+	if (unlikely(warn_count < 0))
 		return;
 
 	if (unlikely(in_interrupt())) {
-		if (in_irq()) {
+		if (in_nmi()) {
+			if (type != KM_NMI && type != KM_NMI_PTE) {
+				WARN_ON(1);
+				warn_count--;
+			}
+		} else if (in_irq()) {
 			if (type != KM_IRQ0 && type != KM_IRQ1 &&
 			    type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
-			    type != KM_BOUNCE_READ) {
+			    type != KM_BOUNCE_READ && type != KM_IRQ_PTE) {
 				WARN_ON(1);
 				warn_count--;
 			}
@@ -452,7 +457,9 @@ void debug_kmap_atomic(enum km_type type)
 	}
 
 	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
-			type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ) {
+			type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ ||
+			type == KM_IRQ_PTE || type == KM_NMI ||
+			type == KM_NMI_PTE ) {
 		if (!irqs_disabled()) {
 			WARN_ON(1);
 			warn_count--;
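The switch from an unsigned counter tested with == 0 to a signed counter tested with < 0 matters because concurrent callers can both decrement past the limit; unsigned arithmetic would wrap and the warnings would never stop. A standalone, single-threaded sketch of the pattern, with hypothetical names, not mm/highmem.c itself:

/*
 * Sketch only: a stand-in for the warn_count rate limiting.
 */
#include <stdio.h>

static int warn_count = 10;	/* signed, as in the fixed code */

static void maybe_warn(int misuse)
{
	if (warn_count < 0)	/* over the limit: stay quiet from now on */
		return;
	if (misuse) {
		fprintf(stderr, "WARN: unexpected kmap slot for this context\n");
		warn_count--;	/* a signed count may safely drop below zero */
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 20; i++)	/* more misuses than the limit allows */
		maybe_warn(1);
	printf("final warn_count = %d\n", warn_count);
	return 0;
}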