aio: protect reqs_available updates from changes in interrupt handlers
As of commit f8567a3845
it is now possible to
have put_reqs_available() called from irq context. While put_reqs_available()
is per cpu, it did not protect itself from interrupts on the same CPU. This
led to aio_complete() corrupting the available io requests count when run
under heavy O_DIRECT workloads as reported by Robert Elliott. Fix this by
disabling irq updates around the per cpu batch updates of reqs_available.
Many thanks to Robert and folks for testing and tracking this down.
Reported-by: Robert Elliott <Elliott@hp.com>
Tested-by: Robert Elliott <Elliott@hp.com>
Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
Cc: Jens Axboe <axboe@kernel.dk>, Christoph Hellwig <hch@infradead.org>
Cc: stable@vger.kernel.org
This commit is contained in:
parent
1795cd9b3a
commit
263782c1c9
1 changed file with 7 additions and 0 deletions
7
fs/aio.c
7
fs/aio.c
|
@ -830,16 +830,20 @@ void exit_aio(struct mm_struct *mm)
|
||||||
static void put_reqs_available(struct kioctx *ctx, unsigned nr)
|
static void put_reqs_available(struct kioctx *ctx, unsigned nr)
|
||||||
{
|
{
|
||||||
struct kioctx_cpu *kcpu;
|
struct kioctx_cpu *kcpu;
|
||||||
|
unsigned long flags;
|
||||||
|
|
||||||
preempt_disable();
|
preempt_disable();
|
||||||
kcpu = this_cpu_ptr(ctx->cpu);
|
kcpu = this_cpu_ptr(ctx->cpu);
|
||||||
|
|
||||||
|
local_irq_save(flags);
|
||||||
kcpu->reqs_available += nr;
|
kcpu->reqs_available += nr;
|
||||||
|
|
||||||
while (kcpu->reqs_available >= ctx->req_batch * 2) {
|
while (kcpu->reqs_available >= ctx->req_batch * 2) {
|
||||||
kcpu->reqs_available -= ctx->req_batch;
|
kcpu->reqs_available -= ctx->req_batch;
|
||||||
atomic_add(ctx->req_batch, &ctx->reqs_available);
|
atomic_add(ctx->req_batch, &ctx->reqs_available);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
local_irq_restore(flags);
|
||||||
preempt_enable();
|
preempt_enable();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -847,10 +851,12 @@ static bool get_reqs_available(struct kioctx *ctx)
|
||||||
{
|
{
|
||||||
struct kioctx_cpu *kcpu;
|
struct kioctx_cpu *kcpu;
|
||||||
bool ret = false;
|
bool ret = false;
|
||||||
|
unsigned long flags;
|
||||||
|
|
||||||
preempt_disable();
|
preempt_disable();
|
||||||
kcpu = this_cpu_ptr(ctx->cpu);
|
kcpu = this_cpu_ptr(ctx->cpu);
|
||||||
|
|
||||||
|
local_irq_save(flags);
|
||||||
if (!kcpu->reqs_available) {
|
if (!kcpu->reqs_available) {
|
||||||
int old, avail = atomic_read(&ctx->reqs_available);
|
int old, avail = atomic_read(&ctx->reqs_available);
|
||||||
|
|
||||||
|
@ -869,6 +875,7 @@ static bool get_reqs_available(struct kioctx *ctx)
|
||||||
ret = true;
|
ret = true;
|
||||||
kcpu->reqs_available--;
|
kcpu->reqs_available--;
|
||||||
out:
|
out:
|
||||||
|
local_irq_restore(flags);
|
||||||
preempt_enable();
|
preempt_enable();
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in a new issue