mirror of
https://github.com/openwall/lkrg.git
synced 2023-12-13 21:30:29 +01:00
Simplify synchronization with JUMP_LABEL engine
We don't need to introduce a custom LKRG counter lock to synchronize with the JUMP_LABEL engine and avoid a potential deadlock with FTRACE. Instead, we can check whether the jump_label lock is taken after acquiring the ftrace lock and before taking text_mutex. This simplification changes the p_text_section_(un)lock API. It also fixes a problem reported by Jacek.
This commit is contained in:
parent
83cb64640a
commit
f98da1b17c
6 changed files with 26 additions and 34 deletions
|
@@ -138,9 +138,8 @@ int p_cpu_callback(struct notifier_block *p_block, unsigned long p_action, void
|
|||
int p_cpu_online_action(unsigned int p_cpu) {
|
||||
|
||||
int tmp_online_CPUs = p_db.p_cpu.online_CPUs;
|
||||
unsigned long p_flags;
|
||||
|
||||
p_text_section_lock(&p_flags);
|
||||
p_text_section_lock();
|
||||
spin_lock(&p_db_lock);
|
||||
|
||||
smp_call_function_single(p_cpu,p_dump_CPU_metadata,p_db.p_CPU_metadata_array,true);
|
||||
|
@@ -196,7 +195,7 @@ int p_cpu_online_action(unsigned int p_cpu) {
|
|||
/* God mode off ;) */
|
||||
// spin_unlock_irqrestore(&p_db_lock,p_db_flags);
|
||||
spin_unlock(&p_db_lock);
|
||||
p_text_section_unlock(&p_flags);
|
||||
p_text_section_unlock();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@@ -204,9 +203,8 @@ int p_cpu_online_action(unsigned int p_cpu) {
|
|||
int p_cpu_dead_action(unsigned int p_cpu) {
|
||||
|
||||
int tmp_online_CPUs = p_db.p_cpu.online_CPUs;
|
||||
unsigned long p_flags;
|
||||
|
||||
p_text_section_lock(&p_flags);
|
||||
p_text_section_lock();
|
||||
spin_lock(&p_db_lock);
|
||||
|
||||
p_db.p_CPU_metadata_array[p_cpu].p_cpu_online = P_CPU_OFFLINE;
|
||||
|
@@ -270,7 +268,7 @@ int p_cpu_dead_action(unsigned int p_cpu) {
|
|||
/* God mode off ;) */
|
||||
// spin_unlock_irqrestore(&p_db_lock,p_db_flags);
|
||||
spin_unlock(&p_db_lock);
|
||||
p_text_section_unlock(&p_flags);
|
||||
p_text_section_unlock();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@@ -190,10 +190,15 @@ int p_create_database(void) {
|
|||
|
||||
int p_tmp;
|
||||
// int p_tmp_cpu;
|
||||
unsigned long p_flags;
|
||||
|
||||
memset(&p_db,0x0,sizeof(p_hash_database));
|
||||
|
||||
if ( (P_SYM(p_jump_label_mutex) = (struct mutex *)P_SYM(p_kallsyms_lookup_name)("jump_label_mutex")) == NULL) {
|
||||
p_print_log(P_LKRG_ERR,
|
||||
"CREATING DATABASE: error! Can't find 'jump_label_mutex' variable :( Exiting...\n");
|
||||
return P_LKRG_GENERAL_ERROR;
|
||||
}
|
||||
|
||||
if ( (P_SYM(p_text_mutex) = (struct mutex *)P_SYM(p_kallsyms_lookup_name)("text_mutex")) == NULL) {
|
||||
p_print_log(P_LKRG_ERR,
|
||||
"CREATING DATABASE: error! Can't find 'text_mutex' variable :( Exiting...\n");
|
||||
|
@@ -307,7 +312,7 @@ int p_create_database(void) {
|
|||
}
|
||||
|
||||
|
||||
p_text_section_lock(&p_flags);
|
||||
p_text_section_lock();
|
||||
|
||||
/*
|
||||
* Memory allocation may fail... let's loop here!
|
||||
|
@@ -327,7 +332,7 @@ int p_create_database(void) {
|
|||
(unsigned int)p_db.p_module_kobj_nr * sizeof(p_module_kobj_mem));
|
||||
*/
|
||||
|
||||
p_text_section_unlock(&p_flags);
|
||||
p_text_section_unlock();
|
||||
|
||||
/* Register module notification routine - must be outside p_text_section_(un)lock */
|
||||
p_register_module_notifier();
|
||||
|
@@ -352,14 +357,14 @@ int p_create_database(void) {
|
|||
|
||||
|
||||
#if !defined(CONFIG_GRKERNSEC)
|
||||
p_text_section_lock(&p_flags);
|
||||
p_text_section_lock();
|
||||
if (hash_from_kernel_stext() != P_LKRG_SUCCESS) {
|
||||
p_print_log(P_LKRG_CRIT,
|
||||
"CREATING DATABASE ERROR: HASH FROM _STEXT!\n");
|
||||
p_text_section_unlock(&p_flags);
|
||||
p_text_section_unlock();
|
||||
return P_LKRG_GENERAL_ERROR;
|
||||
}
|
||||
p_text_section_unlock(&p_flags);
|
||||
p_text_section_unlock();
|
||||
#endif
|
||||
|
||||
return P_LKRG_SUCCESS;
|
||||
|
|
|
@@ -178,33 +178,23 @@ int hash_from_kernel_stext(void);
|
|||
int hash_from_kernel_rodata(void);
|
||||
int hash_from_iommu_table(void);
|
||||
|
||||
static inline void p_text_section_lock(unsigned long *p_arg) {
|
||||
static inline void p_text_section_lock(void) {
|
||||
|
||||
#if defined(CONFIG_FUNCTION_TRACER)
|
||||
mutex_lock(P_SYM(p_ftrace_lock));
|
||||
#endif
|
||||
do {
|
||||
while (!p_lkrg_counter_lock_trylock(&p_jl_lock, p_arg))
|
||||
schedule();
|
||||
if (p_lkrg_counter_lock_val_read(&p_jl_lock)) {
|
||||
p_lkrg_counter_lock_unlock(&p_jl_lock, p_arg);
|
||||
schedule();
|
||||
continue;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
} while(1);
|
||||
while (mutex_is_locked(P_SYM(p_jump_label_mutex)))
|
||||
schedule();
|
||||
mutex_lock(P_SYM(p_text_mutex));
|
||||
/* We are heavily consuming module list here - take 'module_mutex' */
|
||||
mutex_lock(&module_mutex);
|
||||
}
|
||||
|
||||
static inline void p_text_section_unlock(unsigned long *p_arg) {
|
||||
static inline void p_text_section_unlock(void) {
|
||||
|
||||
/* Release the 'module_mutex' */
|
||||
mutex_unlock(&module_mutex);
|
||||
mutex_unlock(P_SYM(p_text_mutex));
|
||||
p_lkrg_counter_lock_unlock(&p_jl_lock, p_arg);
|
||||
#if defined(CONFIG_FUNCTION_TRACER)
|
||||
mutex_unlock(P_SYM(p_ftrace_lock));
|
||||
#endif
|
||||
|
|
|
@@ -117,7 +117,6 @@ void p_check_integrity(struct work_struct *p_work) {
|
|||
struct module *p_tmp_mod;
|
||||
unsigned int p_tmp = 0;
|
||||
int p_ret;
|
||||
unsigned long p_flags;
|
||||
|
||||
if (!P_CTRL(p_kint_validate) || (!p_manual && P_CTRL(p_kint_validate) == 1))
|
||||
goto p_check_integrity_tasks;
|
||||
|
@@ -212,7 +211,7 @@ void p_check_integrity(struct work_struct *p_work) {
|
|||
p_tmp_hash = hash_from_CPU_data(p_tmp_cpus);
|
||||
put_online_cpus();
|
||||
|
||||
p_text_section_lock(&p_flags);
|
||||
p_text_section_lock();
|
||||
|
||||
/*
|
||||
* Memory allocation may fail... let's loop here!
|
||||
|
@@ -229,7 +228,7 @@ void p_check_integrity(struct work_struct *p_work) {
|
|||
schedule();
|
||||
}
|
||||
/*
|
||||
p_text_section_unlock(&p_flags);
|
||||
p_text_section_unlock();
|
||||
*/
|
||||
|
||||
spin_lock_irqsave(&p_db_lock,p_db_flags);
|
||||
|
@@ -1772,7 +1771,7 @@ void p_check_integrity(struct work_struct *p_work) {
|
|||
|
||||
p_check_integrity_cancel:
|
||||
|
||||
p_text_section_unlock(&p_flags);
|
||||
p_text_section_unlock();
|
||||
if (p_tmp_cpus) {
|
||||
kzfree(p_tmp_cpus);
|
||||
p_tmp_cpus = NULL;
|
||||
|
|
|
@@ -57,7 +57,6 @@ static void p_module_notifier_wrapper(unsigned long p_event, struct module *p_km
|
|||
static int p_module_event_notifier(struct notifier_block *p_this, unsigned long p_event, void *p_kmod) {
|
||||
|
||||
struct module *p_tmp = p_kmod;
|
||||
unsigned long p_flags;
|
||||
|
||||
// STRONG_DEBUG
|
||||
#ifdef P_LKRG_DEBUG
|
||||
|
@@ -110,7 +109,7 @@ static int p_module_event_notifier(struct notifier_block *p_this, unsigned long
|
|||
* We must keep in track that information ;)
|
||||
*/
|
||||
|
||||
p_text_section_lock(&p_flags);
|
||||
p_text_section_lock();
|
||||
/*
|
||||
* First, synchronize possible database changes with other LKRG components...
|
||||
* We want to be as fast as possible to get this lock! :)
|
||||
|
@@ -179,7 +178,7 @@ static int p_module_event_notifier(struct notifier_block *p_this, unsigned long
|
|||
* and recalculate global module hashes...
|
||||
*/
|
||||
|
||||
p_text_section_lock(&p_flags);
|
||||
p_text_section_lock();
|
||||
|
||||
/*
|
||||
* First, synchronize possible database changes with other LKRG components...
|
||||
|
@@ -233,7 +232,7 @@ p_module_event_notifier_unlock_out:
|
|||
/* God mode off ;) */
|
||||
// spin_unlock_irqrestore(&p_db_lock,p_db_flags);
|
||||
spin_unlock(&p_db_lock);
|
||||
p_text_section_unlock(&p_flags);
|
||||
p_text_section_unlock();
|
||||
|
||||
p_module_event_notifier_activity_out:
|
||||
|
||||
|
|
|
@@ -168,6 +168,7 @@ typedef struct _p_lkrg_global_symbols_structure {
|
|||
#endif
|
||||
int (*p_core_kernel_text)(unsigned long p_addr);
|
||||
pmd_t *(*p_mm_find_pmd)(struct mm_struct *mm, unsigned long address);
|
||||
struct mutex *p_jump_label_mutex;
|
||||
struct mutex *p_text_mutex;
|
||||
struct text_poke_loc **p_tp_vec;
|
||||
int *p_tp_vec_nr;
|
||||
|
|
Loading…
Reference in a new issue