/*
 * kmod - the kernel usermode helper.  Provides request_module(), which
 * loads modules via /sbin/modprobe, and the call_usermodehelper() family
 * for running one-shot userspace programs from the kernel.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <linux/ptrace.h>
#include <linux/async.h>
#include <asm/uaccess.h>

#include <trace/events/module.h>

extern int max_threads;

static struct workqueue_struct *khelper_wq;

#define CAP_BSET	(void *)1
#define CAP_PI		(void *)2

static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
static DECLARE_RWSEM(umhelper_sem);

#ifdef CONFIG_MODULES

/*
 * modprobe_path is set via /proc/sys/kernel/modprobe.
 */
char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";

static void free_modprobe_argv(struct subprocess_info *info)
{
	kfree(info->argv[3]);	/* the module name, kstrdup'd in call_modprobe() */
	kfree(info->argv);
}

static int call_modprobe(char *module_name, int wait)
{
	static char *envp[] = {
		"HOME=/",
		"TERM=linux",
		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
		NULL
	};

	char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
	if (!argv)
		goto out;

	module_name = kstrdup(module_name, GFP_KERNEL);
	if (!module_name)
		goto free_argv;

	argv[0] = modprobe_path;
	argv[1] = "-q";
	argv[2] = "--";
	argv[3] = module_name;	/* freed by free_modprobe_argv() */
	argv[4] = NULL;

	return call_usermodehelper_fns(modprobe_path, argv, envp,
				       wait | UMH_KILLABLE,
				       NULL, free_modprobe_argv, NULL);
free_argv:
	kfree(argv);
out:
	return -ENOMEM;
}

/**
 * __request_module - try to load a kernel module
 * @wait: wait (or not) for the operation to complete
 * @fmt: printf style format string for the name of the module
 * @...: arguments as specified in the format string
 *
 * Load a module using the user mode module loader.  Returns zero on
 * success or a negative errno code on failure.  Note that a successful
 * module load does not mean the module did not then unload and exit on
 * an error of its own; callers must check that the service they
 * requested is now available, not blindly invoke it.
 */
int __request_module(bool wait, const char *fmt, ...)
{
	va_list args;
	char module_name[MODULE_NAME_LEN];
	unsigned int max_modprobes;
	int ret;
	static atomic_t kmod_concurrent = ATOMIC_INIT(0);
#define MAX_KMOD_CONCURRENT 50
	static int kmod_loop_msg;

	va_start(args, fmt);
	ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
	va_end(args);
	if (ret >= MODULE_NAME_LEN)
		return -ENAMETOOLONG;

	ret = security_kernel_module_request(module_name);
	if (ret)
		return ret;

	/*
	 * If modprobe needs a service that is in a module, we get a
	 * recursive loop.  Limit the number of running kmod threads to
	 * max_threads/2 or MAX_KMOD_CONCURRENT, whichever is smaller.
	 */
	max_modprobes = min(max_threads/2, MAX_KMOD_CONCURRENT);
	atomic_inc(&kmod_concurrent);
	if (atomic_read(&kmod_concurrent) > max_modprobes) {
		/* We may be blaming an innocent here, but unlikely */
		if (kmod_loop_msg < 5) {
			printk(KERN_ERR
			       "request_module: runaway loop modprobe %s\n",
			       module_name);
			kmod_loop_msg++;
		}
		atomic_dec(&kmod_concurrent);
		return -ENOMEM;
	}

	trace_module_request(module_name, wait, _RET_IP_);

	ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);

	atomic_dec(&kmod_concurrent);
	return ret;
}
EXPORT_SYMBOL(__request_module);
#endif /* CONFIG_MODULES */
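/*
 * Illustrative sketch (not part of this file): callers normally use the
 * request_module()/request_module_nowait() wrappers from <linux/kmod.h>,
 * which expand to __request_module(true, ...) and __request_module(false,
 * ...) respectively.  The VFS, for instance, auto-loads filesystem
 * drivers with roughly this shape (alias pattern shown for illustration
 * only):
 *
 *	if (request_module("fs-%s", fstype) != 0)
 *		return -ENODEV;
 */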
/*
 * This is the task which runs the usermode helper application.
 */
static int ____call_usermodehelper(void *data)
{
	struct subprocess_info *sub_info = data;
	struct cred *new;
	int retval;

	spin_lock_irq(&current->sighand->siglock);
	flush_signal_handlers(current, 1);
	spin_unlock_irq(&current->sighand->siglock);

	/* Unlike our parent worker thread, we can run on any CPU. */
	set_cpus_allowed_ptr(current, cpu_all_mask);

	/* Don't inherit the worker thread's elevated scheduling priority. */
	set_user_nice(current, 0);

	retval = -ENOMEM;
	new = prepare_kernel_cred(current);
	if (!new)
		goto fail;

	spin_lock(&umh_sysctl_lock);
	new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
	new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
					     new->cap_inheritable);
	spin_unlock(&umh_sysctl_lock);

	if (sub_info->init) {
		retval = sub_info->init(sub_info, new);
		if (retval) {
			abort_creds(new);
			goto fail;
		}
	}

	commit_creds(new);

	retval = kernel_execve(sub_info->path,
			       (const char *const *)sub_info->argv,
			       (const char *const *)sub_info->envp);

	/* Exec failed? */
fail:
	sub_info->retval = retval;
	return 0;
}

void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
	if (info->cleanup)
		(*info->cleanup)(info);
	kfree(info);
}
EXPORT_SYMBOL(call_usermodehelper_freeinfo);

static void umh_complete(struct subprocess_info *sub_info)
{
	struct completion *comp = xchg(&sub_info->complete, NULL);
	/*
	 * See call_usermodehelper_exec(): if xchg() returns NULL, the
	 * UMH_KILLABLE caller has gone away and we own sub_info.
	 */
	if (comp)
		complete(comp);
	else
		call_usermodehelper_freeinfo(sub_info);
}

/* The worker thread can't block, but this (a child) can. */
static int wait_for_helper(void *data)
{
	struct subprocess_info *sub_info = data;
	pid_t pid;

	/* If SIGCHLD is ignored, sys_wait4 won't populate the status. */
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[SIGCHLD-1].sa.sa_handler = SIG_DFL;
	spin_unlock_irq(&current->sighand->siglock);

	pid = kernel_thread(____call_usermodehelper, sub_info, SIGCHLD);
	if (pid < 0) {
		sub_info->retval = pid;
	} else {
		int ret = -ECHILD;
		/*
		 * The __user cast is valid here: for a kernel thread the
		 * mm_segment_t spans the whole address space, so put_user()
		 * to this kernel address works.
		 */
		sys_wait4(pid, (int __user *)&ret, 0, NULL);

		/*
		 * If ret is 0, either ____call_usermodehelper failed and
		 * the real error is already in sub_info->retval, or
		 * sub_info->retval is 0 anyway; don't overwrite it then.
		 */
		if (ret)
			sub_info->retval = ret;
	}

	umh_complete(sub_info);
	return 0;
}

/* This is run by the khelper thread. */
static void __call_usermodehelper(struct work_struct *work)
{
	struct subprocess_info *sub_info =
		container_of(work, struct subprocess_info, work);
	int wait = sub_info->wait & ~UMH_KILLABLE;
	pid_t pid;

	/*
	 * CLONE_VFORK: wait until the usermode helper has execve'd
	 * successfully; we need the data structures to stay around
	 * until that is done.
	 */
	if (wait == UMH_WAIT_PROC)
		pid = kernel_thread(wait_for_helper, sub_info,
				    CLONE_FS | CLONE_FILES | SIGCHLD);
	else
		pid = kernel_thread(____call_usermodehelper, sub_info,
				    CLONE_VFORK | SIGCHLD);

	switch (wait) {
	case UMH_NO_WAIT:
		call_usermodehelper_freeinfo(sub_info);
		break;

	case UMH_WAIT_PROC:
		if (pid > 0)
			break;
		/* FALLTHROUGH */
	case UMH_WAIT_EXEC:
		if (pid < 0)
			sub_info->retval = pid;
		umh_complete(sub_info);
	}
}

/*
 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY.
 * Used to prevent new helpers from being started while userland is frozen
 * during a system-wide hibernation or suspend operation.  Should always be
 * manipulated under umhelper_sem acquired for write.
 */
static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;

/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);

/* Wait for all running helpers to finish (see __usermodehelper_disable()). */
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);

/* Wait for usermodehelper_disabled to become false. */
static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);

/*
 * Time to wait for running_helpers to become zero before
 * __usermodehelper_disable() gives up and fails.
 */
#define RUNNING_HELPERS_TIMEOUT	(5 * HZ)

int usermodehelper_read_trylock(void)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_INTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		if (usermodehelper_disabled == UMH_DISABLED)
			ret = -EAGAIN;

		up_read(&umhelper_sem);

		if (ret)
			break;

		schedule();
		try_to_freeze();

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return ret;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);
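/*
 * Illustrative sketch (not from this file): a read-side user, such as the
 * firmware loader, brackets its work with the trylock pair so it backs
 * off cleanly while helpers are disabled across suspend/resume:
 *
 *	ret = usermodehelper_read_trylock();
 *	if (ret)
 *		return ret;		(-EAGAIN: helpers disabled)
 *	...set up and run the helper...
 *	usermodehelper_read_unlock();	(defined below)
 */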
long usermodehelper_read_lock_wait(long timeout)
{
	DEFINE_WAIT(wait);

	if (timeout < 0)
		return -EINVAL;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		up_read(&umhelper_sem);

		timeout = schedule_timeout(timeout);
		if (!timeout)
			break;

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return timeout;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);

void usermodehelper_read_unlock(void)
{
	up_read(&umhelper_sem);
}
EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);

/**
 * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Change the value of usermodehelper_disabled (under umhelper_sem locked
 * for writing) and wake up tasks waiting for it to change.
 */
void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
{
	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	wake_up(&usermodehelper_disabled_waitq);
	up_write(&umhelper_sem);
}

/**
 * __usermodehelper_disable - Prevent new helpers from being started.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Set usermodehelper_disabled to @depth and wait for running helpers to
 * exit.  If they do not finish within RUNNING_HELPERS_TIMEOUT, re-enable
 * helpers and return -EAGAIN.
 */
int __usermodehelper_disable(enum umh_disable_depth depth)
{
	long retval;

	if (!depth)
		return -EINVAL;

	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	up_write(&umhelper_sem);

	/*
	 * From now on call_usermodehelper_exec() won't start any new
	 * helpers, so it is sufficient if running_helpers turns out to
	 * be zero at one point (it may be increased later, but that
	 * doesn't matter).
	 */
	retval = wait_event_timeout(running_helpers_waitq,
				    atomic_read(&running_helpers) == 0,
				    RUNNING_HELPERS_TIMEOUT);
	if (retval)
		return 0;

	__usermodehelper_set_disable_depth(UMH_ENABLED);
	return -EAGAIN;
}

static void helper_lock(void)
{
	atomic_inc(&running_helpers);
	smp_mb__after_atomic_inc();
}

static void helper_unlock(void)
{
	if (atomic_dec_and_test(&running_helpers))
		wake_up(&running_helpers_waitq);
}

/**
 * call_usermodehelper_setup - prepare to call a usermode helper
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @gfp_mask: gfp mask for memory allocation
 *
 * Returns either NULL on allocation failure, or a subprocess_info
 * structure.  This should be passed to call_usermodehelper_exec to
 * exec the process and free the structure.
 */
struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
						  char **envp, gfp_t gfp_mask)
{
	struct subprocess_info *sub_info;

	sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
	if (!sub_info)
		goto out;

	INIT_WORK(&sub_info->work, __call_usermodehelper);
	sub_info->path = path;
	sub_info->argv = argv;
	sub_info->envp = envp;
out:
	return sub_info;
}
EXPORT_SYMBOL(call_usermodehelper_setup);

/**
 * call_usermodehelper_setfns - set init/cleanup functions and private data
 * @info: a subprocess_info returned by call_usermodehelper_setup
 * @init: init function, called just before exec; a non-zero return code
 *        aborts the helper and is reported back to the caller
 * @cleanup: cleanup function, called just before the subprocess_info is
 *           freed; can be used to free argv and envp
 * @data: arbitrary context-sensitive data
 */
void call_usermodehelper_setfns(struct subprocess_info *info,
		    int (*init)(struct subprocess_info *info, struct cred *new),
		    void (*cleanup)(struct subprocess_info *info),
		    void *data)
{
	info->cleanup = cleanup;
	info->init = init;
	info->data = data;
}
EXPORT_SYMBOL(call_usermodehelper_setfns);

/**
 * call_usermodehelper_exec - start a usermode application
 * @sub_info: information about the subprocess
 * @wait: wait for the application to finish and return status.
 *        When UMH_NO_WAIT, don't wait at all, but you get no useful error
 *        back when the program couldn't be exec'ed; this makes it safe to
 *        call from interrupt context.
 *
 * Runs a user-space application.  The application is started
 * asynchronously if wait is not set, and runs as a child of khelper.
 */
int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int retval = 0;

	helper_lock();
	if (sub_info->path[0] == '\0')
		goto out;

	if (!khelper_wq || usermodehelper_disabled) {
		retval = -EBUSY;
		goto out;
	}

	sub_info->complete = &done;
	sub_info->wait = wait;

	queue_work(khelper_wq, &sub_info->work);
	if (wait == UMH_NO_WAIT)	/* task has freed sub_info */
		goto unlock;

	if (wait & UMH_KILLABLE) {
		retval = wait_for_completion_killable(&done);
		if (!retval)
			goto wait_done;

		/* umh_complete() will see NULL and free sub_info */
		if (xchg(&sub_info->complete, NULL))
			goto unlock;
		/* fallthrough, umh_complete() was already called */
	}

	wait_for_completion(&done);
wait_done:
	retval = sub_info->retval;
out:
	call_usermodehelper_freeinfo(sub_info);
unlock:
	helper_unlock();
	return retval;
}
EXPORT_SYMBOL(call_usermodehelper_exec);
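/*
 * Illustrative sketch (not part of the original file): the usual one-shot
 * pattern combining the three primitives above; the helper path and
 * arguments are made up for the example.
 *
 *	struct subprocess_info *info;
 *	char *argv[] = { "/sbin/example-helper", "arg", NULL };
 *	char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
 *
 *	info = call_usermodehelper_setup(argv[0], argv, envp, GFP_KERNEL);
 *	if (!info)
 *		return -ENOMEM;
 *	call_usermodehelper_setfns(info, NULL, NULL, NULL);
 *	return call_usermodehelper_exec(info, UMH_WAIT_EXEC);
 *
 * The call_usermodehelper() and call_usermodehelper_fns() wrappers in
 * <linux/kmod.h> package up exactly this sequence.
 */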
static int proc_cap_handler(struct ctl_table *table, int write,
			    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
	kernel_cap_t new_cap;
	int err, i;

	if (write && (!capable(CAP_SETPCAP) ||
		      !capable(CAP_SYS_MODULE)))
		return -EPERM;

	spin_lock(&umh_sysctl_lock);
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++) {
		if (table->data == CAP_BSET)
			cap_array[i] = usermodehelper_bset.cap[i];
		else if (table->data == CAP_PI)
			cap_array[i] = usermodehelper_inheritable.cap[i];
		else
			BUG();
	}
	spin_unlock(&umh_sysctl_lock);

	t = *table;
	t.data = &cap_array;

	err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;

	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)
		new_cap.cap[i] = cap_array[i];

	spin_lock(&umh_sysctl_lock);
	if (write) {
		if (table->data == CAP_BSET)
			usermodehelper_bset =
				cap_intersect(usermodehelper_bset, new_cap);
		if (table->data == CAP_PI)
			usermodehelper_inheritable =
				cap_intersect(usermodehelper_inheritable,
					      new_cap);
	}
	spin_unlock(&umh_sysctl_lock);

	return 0;
}

struct ctl_table usermodehelper_table[] = {
	{
		.procname	= "bset",
		.data		= CAP_BSET,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{
		.procname	= "inheritable",
		.data		= CAP_PI,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{ }
};

void __init usermodehelper_init(void)
{
	khelper_wq = create_singlethread_workqueue("khelper");
	BUG_ON(!khelper_wq);
}
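/*
 * Illustrative note (not part of the original file): this table is
 * expected to be registered under the "kernel" sysctl directory, making
 * the knobs visible as /proc/sys/kernel/usermodehelper/bset and
 * /proc/sys/kernel/usermodehelper/inheritable.  Since proc_cap_handler()
 * only ever cap_intersect()s the written value, root can drop
 * capabilities from future helpers but never grant them back, e.g.:
 *
 *	echo "0 0" > /proc/sys/kernel/usermodehelper/bset
 *
 * leaves all subsequent usermode helpers with an empty bounding set
 * until reboot (the two values are the low and high capability words).
 */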