module.c
static inline unsigned int find_pcpusec(Elf_Ehdr *hdr,
					Elf_Shdr *sechdrs,
					const char *secstrings)
{
	return 0;
}

static inline void percpu_modcopy(void *pcpudst, const void *src,
				  unsigned long size)
{
	/* pcpusec should be 0, and size of that section should be 0. */
	BUG_ON(size != 0);
}
#endif /* CONFIG_SMP */

#define MODINFO_ATTR(field)	\
static void setup_modinfo_##field(struct module *mod, const char *s)  \
{                                                                     \
	mod->field = kstrdup(s, GFP_KERNEL);                          \
}                                                                     \
static ssize_t show_modinfo_##field(struct module_attribute *mattr,   \
	                struct module *mod, char *buffer)             \
{                                                                     \
	return sprintf(buffer, "%s\n", mod->field);                   \
}                                                                     \
static int modinfo_##field##_exists(struct module *mod)               \
{                                                                     \
	return mod->field != NULL;                                    \
}                                                                     \
static void free_modinfo_##field(struct module *mod)                  \
{                                                                     \
	kfree(mod->field);                                            \
	mod->field = NULL;                                            \
}                                                                     \
static struct module_attribute modinfo_##field = {                    \
	.attr = { .name = __stringify(field), .mode = 0444 },         \
	.show = show_modinfo_##field,                                 \
	.setup = setup_modinfo_##field,                               \
	.test = modinfo_##field##_exists,                             \
	.free = free_modinfo_##field,                                 \
};

MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);

static char last_unloaded_module[MODULE_NAME_LEN+1];

#ifdef CONFIG_MODULE_UNLOAD
/* Init the unload section of the module. */
static void module_unload_init(struct module *mod)
{
	unsigned int i;

	INIT_LIST_HEAD(&mod->modules_which_use_me);
	for (i = 0; i < NR_CPUS; i++)
		local_set(&mod->ref[i].count, 0);
	/* Hold reference count during initialization. */
	local_set(&mod->ref[raw_smp_processor_id()].count, 1);
	/* Backwards compatibility macros put refcount during init. */
	mod->waiter = current;
}

/* modules using other modules */
struct module_use
{
	struct list_head list;
	struct module *module_which_uses;
};

/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;

	list_for_each_entry(use, &b->modules_which_use_me, list) {
		if (use->module_which_uses == a) {
			DEBUGP("%s uses %s!\n", a->name, b->name);
			return 1;
		}
	}
	DEBUGP("%s does not use %s!\n", a->name, b->name);
	return 0;
}

/* Module a uses b */
static int use_module(struct module *a, struct module *b)
{
	struct module_use *use;
	int no_warn, err;

	if (b == NULL || already_uses(a, b))
		return 1;

	/* If we're interrupted or time out, we fail. */
	if (wait_event_interruptible_timeout(
		    module_wq,
		    (err = strong_try_module_get(b)) != -EBUSY,
		    30 * HZ) <= 0) {
		printk("%s: gave up waiting for init of module %s.\n",
		       a->name, b->name);
		return 0;
	}

	/* If strong_try_module_get() returned a different error, we fail. */
	if (err)
		return 0;

	DEBUGP("Allocating new usage for %s.\n", a->name);
	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (!use) {
		printk("%s: out of memory loading\n", a->name);
		module_put(b);
		return 0;
	}

	use->module_which_uses = a;
	list_add(&use->list, &b->modules_which_use_me);
	no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name);
	return 1;
}

/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
{
	struct module *i;

	list_for_each_entry(i, &modules, list) {
		struct module_use *use;

		list_for_each_entry(use, &i->modules_which_use_me, list) {
			if (use->module_which_uses == mod) {
				DEBUGP("%s unusing %s\n", mod->name, i->name);
				module_put(i);
				list_del(&use->list);
				kfree(use);
				sysfs_remove_link(i->holders_dir, mod->name);
				/* There can be at most one match. */
				break;
			}
		}
	}
}
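/*
 * Illustrative sketch, not part of the original module.c: how the "a uses b"
 * bookkeeping above is typically exercised.  When module a resolves a symbol
 * exported by module b at load time, the loader calls use_module(a, b), which
 * pins b via strong_try_module_get(), records the dependency on
 * b->modules_which_use_me, and adds the /sys/module/<b>/holders/<a> symlink.
 * module_unload_free(a) later walks every module's list and drops the
 * references a took.  The helper below is hypothetical and only shows the
 * intended call pattern.
 */
static int example_record_dependency(struct module *a, struct module *b)
{
	if (!use_module(a, b))
		/* b is going away, still initializing, or we are out of memory. */
		return -ENOENT;
	return 0;
}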
#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
{
	int ret = (flags & O_TRUNC);
	if (ret)
		add_taint(TAINT_FORCED_RMMOD);
	return ret;
}
#else
static inline int try_force_unload(unsigned int flags)
{
	return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */

struct stopref
{
	struct module *mod;
	int flags;
	int *forced;
};

/* Whole machine is stopped with interrupts off when this runs. */
static int __try_stop_module(void *_sref)
{
	struct stopref *sref = _sref;

	/* If it's not unused, quit unless we're forcing. */
	if (module_refcount(sref->mod) != 0) {
		if (!(*sref->forced = try_force_unload(sref->flags)))
			return -EWOULDBLOCK;
	}

	/* Mark it as dying. */
	sref->mod->state = MODULE_STATE_GOING;
	return 0;
}

static int try_stop_module(struct module *mod, int flags, int *forced)
{
	if (flags & O_NONBLOCK) {
		struct stopref sref = { mod, flags, forced };

		return stop_machine(__try_stop_module, &sref, NULL);
	} else {
		/* We don't need to stop the machine for this. */
		mod->state = MODULE_STATE_GOING;
		synchronize_sched();
		return 0;
	}
}

unsigned int module_refcount(struct module *mod)
{
	unsigned int i, total = 0;

	for (i = 0; i < NR_CPUS; i++)
		total += local_read(&mod->ref[i].count);
	return total;
}
EXPORT_SYMBOL(module_refcount);

/* This exists whether we can unload or not */
static void free_module(struct module *mod);

static void wait_for_zero_refcount(struct module *mod)
{
	/* Since we might sleep for some time, release the mutex first */
	mutex_unlock(&module_mutex);
	for (;;) {
		DEBUGP("Looking at refcount...\n");
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (module_refcount(mod) == 0)
			break;
		schedule();
	}
	current->state = TASK_RUNNING;
	mutex_lock(&module_mutex);
}
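/*
 * Illustrative sketch, not part of the original module.c: sys_delete_module()
 * below is reached from userspace through the delete_module(2) system call,
 * which is what rmmod uses.  A minimal caller might look like this; the
 * wrapper name is made up for the example:
 *
 *	#include <fcntl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int remove_module(const char *name, int force)
 *	{
 *		unsigned int flags = O_NONBLOCK | (force ? O_TRUNC : 0);
 *		return syscall(__NR_delete_module, name, flags);
 *	}
 *
 * With O_NONBLOCK set, try_stop_module() runs __try_stop_module() under
 * stop_machine() and the call fails with -EWOULDBLOCK while the module is
 * still in use; without it, sys_delete_module() may sleep in
 * wait_for_zero_refcount() until the reference count reaches zero.  O_TRUNC
 * requests a forced unload, which try_force_unload() only honours under
 * CONFIG_MODULE_FORCE_UNLOAD and which taints the kernel (TAINT_FORCED_RMMOD).
 */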
asmlinkage long
sys_delete_module(const char __user *name_user, unsigned int flags)
{
	struct module *mod;
	char name[MODULE_NAME_LEN];
	int ret, forced = 0;

	if (!capable(CAP_SYS_MODULE))
		return -EPERM;

	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
		return -EFAULT;
	name[MODULE_NAME_LEN-1] = '\0';

	if (mutex_lock_interruptible(&module_mutex) != 0)
		return -EINTR;

	mod = find_module(name);
	if (!mod) {
		ret = -ENOENT;
		goto out;
	}

	if (!list_empty(&mod->modules_which_use_me)) {
		/* Other modules depend on us: get rid of them first. */
		ret = -EWOULDBLOCK;
		goto out;
	}

	/* Doing init or already dying? */
	if (mod->state != MODULE_STATE_LIVE) {
		/* FIXME: if (force), slam module count and wake up
		   waiter --RR */
		DEBUGP("%s already dying\n", mod->name);
		ret = -EBUSY;
		goto out;
	}

	/* If it has an init func, it must have an exit func to unload */
	if (mod->init && !mod->exit) {
		forced = try_force_unload(flags);
		if (!forced) {
			/* This module can't be removed */
			ret = -EBUSY;
			goto out;
		}
	}

	/* Set this up before setting mod->state */
	mod->waiter = current;

	/* Stop the machine so refcounts can't move and disable module. */
	ret = try_stop_module(mod, flags, &forced);
	if (ret != 0)
		goto out;

	/* Never wait if forced. */
	if (!forced && module_refcount(mod) != 0)
		wait_for_zero_refcount(mod);

	mutex_unlock(&module_mutex);
	/* Final destruction now no one is using it. */
	if (mod->exit != NULL)
		mod->exit();
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	mutex_lock(&module_mutex);
	/* Store the name of the last unloaded module for diagnostic purposes */
	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
	free_module(mod);

 out:
	mutex_unlock(&module_mutex);
	return ret;
}

static void print_unload_info(struct seq_file *m, struct module *mod)
{
	struct module_use *use;
	int printed_something = 0;

	seq_printf(m, " %u ", module_refcount(mod));

	/* Always include a trailing , so userspace can differentiate
	   between this and the old multi-field proc format. */
	list_for_each_entry(use, &mod->modules_which_use_me, list) {
		printed_something = 1;
		seq_printf(m, "%s,", use->module_which_uses->name);
	}

	if (mod->init != NULL && mod->exit == NULL) {
		printed_something = 1;
		seq_printf(m, "[permanent],");
	}

	if (!printed_something)
		seq_printf(m, "-");
}

void __symbol_put(const char *symbol)
{
	struct module *owner;

	preempt_disable();
	if (IS_ERR_VALUE(find_symbol(symbol, &owner, NULL, true, false)))
		BUG();
	module_put(owner);
	preempt_enable();
}
EXPORT_SYMBOL(__symbol_put);

void symbol_put_addr(void *addr)
{
	struct module *modaddr;

	if (core_kernel_text((unsigned long)addr))
		return;

	if (!(modaddr = module_text_address((unsigned long)addr)))
		BUG();
	module_put(modaddr);
}
EXPORT_SYMBOL_GPL(symbol_put_addr);

static ssize_t show_refcnt(struct module_attribute *mattr,
			   struct module *mod, char *buffer)
{
	return sprintf(buffer, "%u\n", module_refcount(mod));
}

static struct module_attribute refcnt = {
	.attr = { .name = "refcnt", .mode = 0444 },
	.show = show_refcnt,
};

void module_put(struct module *module)
{
	if (module) {
		unsigned int cpu = get_cpu();
		local_dec(&module->ref[cpu].count);
		/* Maybe they're waiting for us to drop reference? */
		if (unlikely(!module_is_live(module)))
			wake_up_process(module->waiter);
		put_cpu();
	}
}
EXPORT_SYMBOL(module_put);
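/*
 * Illustrative sketch, not part of the original module.c: module_put() above
 * is the release half of the per-CPU reference count and is normally paired
 * with try_module_get() by code that wants to keep another module alive.  The
 * function below is hypothetical and only demonstrates the pairing.
 */
static int example_pin_module(struct module *owner)
{
	if (!try_module_get(owner))	/* fails once owner starts unloading */
		return -ENODEV;

	/* ... safely use code or data belonging to 'owner' here ... */

	module_put(owner);	/* may wake a task in wait_for_zero_refcount() */
	return 0;
}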
#else /* !CONFIG_MODULE_UNLOAD */
static void print_unload_info(struct seq_file *m, struct module *mod)
{
	/* We don't know the usage count, or what modules are using. */
	seq_printf(m, " - -");
}

static inline void module_unload_free(struct module *mod)
{
}

static inline int use_module(struct module *a, struct module *b)
{
	return strong_try_module_get(b) == 0;
}

static inline void module_unload_init(struct module *mod)
{
}
#endif /* CONFIG_MODULE_UNLOAD */

static ssize_t show_initstate(struct module_attribute *mattr,
			   struct module *mod, char *buffer)
{
	const char *state = "unknown";

	switch (mod->state) {
	case MODULE_STATE_LIVE:
		state = "live";
		break;
	case MODULE_STATE_COMING:
		state = "coming";
		break;
	case MODULE_STATE_GOING:
		state = "going";
		break;
	}
	return sprintf(buffer, "%s\n", state);
}

static struct module_attribute initstate = {
	.attr = { .name = "initstate", .mode = 0444 },
	.show = show_initstate,
};

static struct module_attribute *modinfo_attrs[] = {
	&modinfo_version,
	&modinfo_srcversion,
	&initstate,
#ifdef CONFIG_MODULE_UNLOAD
	&refcnt,
#endif
	NULL,
};

static const char vermagic[] = VERMAGIC_STRING;

static int try_to_force_load(struct module *mod, const char *symname)
{
#ifdef CONFIG_MODULE_FORCE_LOAD
	if (!(tainted & TAINT_FORCED_MODULE))
		printk("%s: no version for \"%s\" found: kernel tainted.\n",
		       mod->name, symname);
	add_taint_module(mod, TAINT_FORCED_MODULE);
	return 0;
#else
	return -ENOEXEC;
#endif
}

#ifdef CONFIG_MODVERSIONS
static int check_version(Elf_Shdr *sechdrs,
			 unsigned int versindex,
			 const char *symname,
			 struct module *mod,
			 const unsigned long *crc)
{
	unsigned int i, num_versions;
	struct modversion_info *versions;

	/* Exporting module didn't supply crcs?  OK, we're already tainted. */
	if (!crc)
		return 1;

	/* No versions at all?  modprobe --force does this. */
	if (versindex == 0)
		return try_to_force_load(mod, symname) == 0;

	versions = (void *) sechdrs[versindex].sh_addr;
	num_versions = sechdrs[versindex].sh_size
		/ sizeof(struct modversion_info);

	for (i = 0; i < num_versions; i++) {
		if (strcmp(versions[i].name, symname) != 0)
			continue;

		if (versions[i].crc == *crc)
			return 1;
		DEBUGP("Found checksum %lX vs module %lX\n",
		       *crc, versions[i].crc);
		goto bad_version;
	}

	printk(KERN_WARNING "%s: no symbol version for %s\n",
	       mod->name, symname);
	return 0;

bad_version:
	printk("%s: disagrees about version of symbol %s\n",
	       mod->name, symname);
	return 0;
}

static inline int check_modstruct_version(Elf_Shdr *sechdrs,
					  unsigned int versindex,
					  struct module *mod)
{
	const unsigned long *crc;

	if (IS_ERR_VALUE(find_symbol("struct_module", NULL, &crc, true, false)))
		BUG();
	return check_version(sechdrs, versindex, "struct_module", mod, crc);
}

/* First part is kernel version, which we ignore if module has crcs. */
static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	if (has_crcs) {