[PATCH] sched: sched_cacheflush is now unused
Since Ingo's recent scheduler rewrite, which was merged as commit 0437e109e1, sched_cacheflush is unused.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit c41917df8a
parent ce8c2293be
16 changed files with 0 additions and 149 deletions
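For context, the one remaining caller of sched_cacheflush() was the old scheduler's migration-cost autodetection in kernel/sched.c, which flushed the CPU caches between timed buffer walks so that every measurement started cold; that code went away with the rewrite, leaving all the per-arch definitions below dead. A rough, paraphrased sketch of the shape of that former call site (reconstructed from memory, not the verbatim removed code; the real code also migrated the measuring task between the two CPUs being probed, which is omitted here):

/*
 * Paraphrased sketch of the former call site in the pre-CFS
 * kernel/sched.c migration-cost measurement: flush the caches,
 * then time one walk over a 'size'-byte buffer.
 */
static unsigned long long measure_one(void *cache, unsigned long size)
{
	unsigned long long t0, t1;

	sched_cacheflush();		/* start the run from cold caches */

	t0 = sched_clock();
	touch_cache(cache, size);	/* time one walk over the buffer */
	t1 = sched_clock();

	return t1 - t0;
}

With no caller left, every architecture's sched_cacheflush() can simply be deleted.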
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -980,15 +980,6 @@ cpu_init (void)
 	pm_idle = default_idle;
 }
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- */
-void sched_cacheflush(void)
-{
-	ia64_sal_cache_flush(3);
-}
-
 void __init
 check_bugs (void)
 {
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -139,16 +139,6 @@ extern void halt(void) __attribute__((noreturn));
 struct task_struct;
 extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 #define imb() \
 __asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
 
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -254,16 +254,6 @@ do { \
 	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
 } while (0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
 /*
  * On the StrongARM, "swp" is terminally broken since it bypasses the
--- a/include/asm-arm26/system.h
+++ b/include/asm-arm26/system.h
@@ -109,16 +109,6 @@ do { \
 	last = __switch_to(prev,task_thread_info(prev),task_thread_info(next)); \
 } while (0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 /*
  * Save the current interrupt enable state & disable IRQs
  */
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -310,15 +310,6 @@ void enable_hlt(void);
 extern int es7000_plat;
 void cpu_idle_wait(void);
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible:
- */
-static inline void sched_cacheflush(void)
-{
-	wbinvd();
-}
-
 extern unsigned long arch_align_stack(unsigned long sp);
 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -259,7 +259,6 @@ extern void ia64_load_extra (struct task_struct *task);
 #define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
 
 void cpu_idle_wait(void);
-void sched_cacheflush(void);
 
 #define arch_align_stack(x) (x)
 
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -54,16 +54,6 @@
 	); \
 } while(0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 /* Interrupt Control */
 #if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
 #define local_irq_enable() \
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -71,16 +71,6 @@ do { \
 	write_c0_userlocal(task_thread_info(current)->tp_value);\
 } while(0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 {
 	__u32 retval;
--- a/include/asm-parisc/system.h
+++ b/include/asm-parisc/system.h
@@ -48,17 +48,6 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *
 	(last) = _switch_to(prev, next); \
 } while(0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
-
 /* interrupt control */
 #define local_save_flags(x) __asm__ __volatile__("ssm 0, %0" : "=r" (x) : : "memory")
 #define local_irq_disable() __asm__ __volatile__("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory" )
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -184,16 +184,6 @@ struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 extern unsigned int rtas_data;
 extern int mem_init_done;	/* set on boot once kmalloc can be called */
 extern unsigned long memory_limit;
--- a/include/asm-ppc/system.h
+++ b/include/asm-ppc/system.h
@@ -129,16 +129,6 @@ extern struct task_struct *__switch_to(struct task_struct *,
 	struct task_struct *);
 #define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -97,16 +97,6 @@ static inline void restore_access_regs(unsigned int *acrs)
 	prev = __switch_to(prev,next); \
 } while (0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 extern void account_vtime(struct task_struct *);
 extern void account_tick_vtime(struct task_struct *);
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -64,16 +64,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	last = __last; \
 } while (0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 #ifdef CONFIG_CPU_SH4A
 #define __icbi() \
 { \
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -164,16 +164,6 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
 	"o0", "o1", "o2", "o3", "o7"); \
 } while(0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 /*
  * Changing the IRQ level on the Sparc.
  */
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -204,16 +204,6 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
 	} \
 } while(0)
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
 static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
 {
 	unsigned long tmp1, tmp2;
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -111,15 +111,6 @@ static inline void write_cr4(unsigned long val)
 #define wbinvd() \
 	__asm__ __volatile__ ("wbinvd": : :"memory");
 
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- */
-static inline void sched_cacheflush(void)
-{
-	wbinvd();
-}
-
 #endif /* __KERNEL__ */
 
 #define nop() __asm__ __volatile__ ("nop")