/*
 * include/linux/eventpoll.h ( Efficient event polling implementation )
 * Copyright (C) 2001,...,2006 Davide Libenzi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Davide Libenzi <davidel@xmailserver.org>
 *
 */

#ifndef _LINUX_EVENTPOLL_H
#define _LINUX_EVENTPOLL_H

/* For O_CLOEXEC */
#include <linux/fcntl.h>
#include <linux/types.h>

/* Flags for epoll_create1. */
#define EPOLL_CLOEXEC O_CLOEXEC
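/*
 * Illustrative user-space sketch (hedged, not part of this header):
 * creating an epoll instance with the close-on-exec flag defined above,
 * assuming the glibc wrappers from <sys/epoll.h>:
 *
 *	#include <sys/epoll.h>
 *	#include <stdio.h>
 *
 *	int epfd = epoll_create1(EPOLL_CLOEXEC);
 *	if (epfd < 0)
 *		perror("epoll_create1");
 */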

/* Valid opcodes to issue to sys_epoll_ctl() */
#define EPOLL_CTL_ADD 1
#define EPOLL_CTL_DEL 2
#define EPOLL_CTL_MOD 3
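/*
 * Illustrative user-space sketch of the three opcodes above (hedged, not
 * part of this header; "sock" is a hypothetical connected socket, "epfd"
 * an epoll instance, and error handling is omitted for brevity):
 *
 *	struct epoll_event ev;
 *
 *	ev.events = EPOLLIN;
 *	ev.data.fd = sock;
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, sock, &ev);	// start watching
 *
 *	ev.events = EPOLLIN | EPOLLOUT;
 *	epoll_ctl(epfd, EPOLL_CTL_MOD, sock, &ev);	// change the mask
 *
 *	epoll_ctl(epfd, EPOLL_CTL_DEL, sock, NULL);	// stop watching
 */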

/*
 * Request the handling of system wakeup events so as to prevent system suspends
 * from happening while those events are being processed.
 *
 * Assuming neither EPOLLET nor EPOLLONESHOT is set, system suspends will not be
 * re-allowed until epoll_wait is called again after consuming the wakeup
 * event(s).
 *
 * Requires CAP_EPOLLWAKEUP
 */
#define EPOLLWAKEUP (1 << 29)
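/*
 * Illustrative sketch of requesting wakeup-event handling (hedged, not
 * part of this header; assumes the caller holds the capability named in
 * the comment above and "tfd" is a hypothetical timer-like descriptor):
 *
 *	struct epoll_event ev;
 *
 *	ev.events = EPOLLIN | EPOLLWAKEUP;	// hold off suspend while
 *	ev.data.fd = tfd;			// this event is in flight
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, tfd, &ev);
 */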

/* Set the One Shot behaviour for the target file descriptor */
#define EPOLLONESHOT (1 << 30)

/* Set the Edge Triggered behaviour for the target file descriptor */
#define EPOLLET (1 << 31)
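/*
 * Illustrative edge-triggered consumer (hedged sketch, not part of this
 * header). With EPOLLET, readiness is reported only on transitions, so a
 * non-blocking "fd" must be drained until read() fails with EAGAIN; with
 * EPOLLONESHOT the mask must additionally be re-armed via EPOLL_CTL_MOD
 * after each delivered event:
 *
 *	char buf[4096];
 *	ssize_t n;
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		consume(buf, n);		// hypothetical helper
 *	if (n < 0 && errno != EAGAIN)
 *		handle_error();			// hypothetical helper
 */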

/*
 * On x86-64 make the 64bit structure have the same alignment as the
 * 32bit structure. This makes 32bit emulation easier.
 *
 * UML/x86_64 needs the same packing as x86_64
 */
#ifdef __x86_64__
#define EPOLL_PACKED __attribute__((packed))
#else
#define EPOLL_PACKED
#endif

struct epoll_event {
	__u32 events;
	__u64 data;
} EPOLL_PACKED;
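/*
 * Layout illustration for the packing above (a hedged sketch, not part
 * of this header): without EPOLL_PACKED, x86-64 would pad the structure
 * to 16 bytes and align it to 8, while i386 lays it out as 12 bytes with
 * 4-byte alignment; packing makes both ABIs agree. A compile-time check
 * one could write inside a function on x86-64:
 *
 *	#ifdef __x86_64__
 *	BUILD_BUG_ON(sizeof(struct epoll_event) != 12);
 *	#endif
 */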

#ifdef __KERNEL__

/* Forward declarations to avoid compiler errors */
struct file;

#ifdef CONFIG_EPOLL

/* Used to initialize the epoll bits inside the "struct file" */
static inline void eventpoll_init_file(struct file *file)
{
	INIT_LIST_HEAD(&file->f_ep_links);
	INIT_LIST_HEAD(&file->f_tfile_llink);
}

/* Used to release the epoll bits inside the "struct file" */
void eventpoll_release_file(struct file *file);

/*
 * This is called from inside fs/file_table.c:__fput() to unlink files
 * from the eventpoll interface. We need this facility to correctly
 * clean up files that are closed without first being removed from the
 * eventpoll interface.
 */
static inline void eventpoll_release(struct file *file)
{
	/*
	 * Fast check to avoid the get/release of the semaphore. Since
	 * we're doing this outside the semaphore lock, it might return
	 * false negatives, but we don't care. In 99.99% of cases it lets
	 * us avoid taking the semaphore lock. False positives simply
	 * cannot happen because the file is on its way to being removed
	 * and nobody ( but eventpoll ) still has a reference to it.
	 */
	if (likely(list_empty(&file->f_ep_links)))
		return;

	/*
	 * The file is being closed while it is still linked to an epoll
	 * descriptor. We need to handle this by correctly unlinking it
	 * from its containers.
	 */
	eventpoll_release_file(file);
}
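/*
 * Illustrative caller side (a hedged, simplified sketch of the
 * fs/file_table.c usage referred to above, not the exact kernel code):
 *
 *	static void __fput(struct file *file)
 *	{
 *		...
 *		eventpoll_release(file);	// unlink from epoll sets
 *		...
 *	}
 */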

#else

static inline void eventpoll_init_file(struct file *file) {}
static inline void eventpoll_release(struct file *file) {}

#endif

#endif /* #ifdef __KERNEL__ */

#endif /* #ifndef _LINUX_EVENTPOLL_H */