📄 1.txt
字号:
2 * linux/fs/exec.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
6
7 /*
8 * #!-checking implemented by tytso.
9 */
10 /*
11 * Demand-loading implemented 01.12.91 - no need to read anything but
12 * the header into memory. The inode of the executable is put into
13 * "current->executable", and page faults do the actual loading. Clean.
14 *
15 * Once more I can proudly say that linux stood up to being changed: it
16 * was less than 2 hours work to get demand-loading completely implemented.
17 *
18 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
19 * current->executable is only used by the procfs. This allows a dispatch
20 * table to check for several different types of binary formats. We keep
21 * trying until we recognize the file or we run out of supported binary
22 * formats.
23 */
24
25 #include <linux/slab.h>
26 #include <linux/file.h>
27 #include <linux/mman.h>
28 #include <linux/a.out.h>
29 #include <linux/stat.h>
30 #include <linux/fcntl.h>
31 #include <linux/smp_lock.h>
32 #include <linux/init.h>
33 #include <linux/pagemap.h>
34 #include <linux/highmem.h>
35 #include <linux/spinlock.h>
36 #include <linux/key.h>
37 #include <linux/personality.h>
38 #include <linux/binfmts.h>
39 #include <linux/swap.h>
40 #include <linux/utsname.h>
41 #include <linux/pid_namespace.h>
42 #include <linux/module.h>
43 #include <linux/namei.h>
44 #include <linux/proc_fs.h>
45 #include <linux/ptrace.h>
46 #include <linux/mount.h>
47 #include <linux/security.h>
48 #include <linux/syscalls.h>
49 #include <linux/rmap.h>
50 #include <linux/tsacct_kern.h>
51 #include <linux/cn_proc.h>
52 #include <linux/audit.h>
53 #include <linux/signalfd.h>
54
55 #include <asm/uaccess.h>
56 #include <asm/mmu_context.h>
57
58 #ifdef CONFIG_KMOD
59 #include <linux/kmod.h>
60 #endif
61
/*
 * Core-dump tunables; presumably exposed via /proc/sys/kernel sysctls
 * (sysctl.c is referenced below) — TODO confirm against sysctl.c.
 */
int core_uses_pid;
char core_pattern[CORENAME_MAX_SIZE] = "core";
int suid_dumpable = 0;

EXPORT_SYMBOL(suid_dumpable);
/* The maximal length of core_pattern is also specified in sysctl.c */

/*
 * Singly-linked list of registered binary-format handlers, newest
 * first.  Both the list head and every ->next link are protected by
 * binfmt_lock (readers take the read lock, (un)register the write lock).
 */
static struct linux_binfmt *formats;
static DEFINE_RWLOCK(binfmt_lock);
71
72 int register_binfmt(struct linux_binfmt * fmt)
73 {
74 struct linux_binfmt ** tmp = &formats;
75
76 if (!fmt)
77 return -EINVAL;
78 if (fmt->next)
79 return -EBUSY;
80 write_lock(&binfmt_lock);
81 while (*tmp) {
82 if (fmt == *tmp) {
83 write_unlock(&binfmt_lock);
84 return -EBUSY;
85 }
86 tmp = &(*tmp)->next;
87 }
88 fmt->next = formats;
89 formats = fmt;
90 write_unlock(&binfmt_lock);
91 return 0;
92 }
93
94 EXPORT_SYMBOL(register_binfmt);
95
96 int unregister_binfmt(struct linux_binfmt * fmt)
97 {
98 struct linux_binfmt ** tmp = &formats;
99
100 write_lock(&binfmt_lock);
101 while (*tmp) {
102 if (fmt == *tmp) {
103 *tmp = fmt->next;
104 fmt->next = NULL;
105 write_unlock(&binfmt_lock);
106 return 0;
107 }
108 tmp = &(*tmp)->next;
109 }
110 write_unlock(&binfmt_lock);
111 return -EINVAL;
112 }
113
114 EXPORT_SYMBOL(unregister_binfmt);
115
/*
 * Drop the module reference that was taken (via try_module_get) when
 * this binary-format handler was selected for use.
 */
static inline void put_binfmt(struct linux_binfmt * fmt)
{
	module_put(fmt->module);
}
120
/*
 * Note that a shared library must be both readable and executable due to
 * security reasons.
 *
 * Also note that we take the address to load from from the file itself.
 */
asmlinkage long sys_uselib(const char __user * library)
{
	struct file * file;
	struct nameidata nd;
	int error;

	/* Path lookup with an open intent so nameidata_to_filp() can
	 * turn it into a struct file later without a second lookup. */
	error = __user_path_lookup_open(library, LOOKUP_FOLLOW, &nd, FMODE_READ|FMODE_EXEC);
	if (error)
		goto out;

	/* Refuse libraries on noexec mounts. */
	error = -EACCES;
	if (nd.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;
	/* Only regular files can be libraries. */
	error = -EINVAL;
	if (!S_ISREG(nd.dentry->d_inode->i_mode))
		goto exit;

	error = vfs_permission(&nd, MAY_READ | MAY_EXEC);
	if (error)
		goto exit;

	/* Consumes the open intent held in nd.  NOTE(review): on failure
	 * we jump straight to "out" without release_open_intent()/
	 * path_release() — this assumes nameidata_to_filp() cleans up nd
	 * on error; verify against its implementation. */
	file = nameidata_to_filp(&nd, O_RDONLY);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	/* Try each registered binfmt's load_shlib() until one accepts the
	 * file (anything other than -ENOEXEC ends the search). */
	error = -ENOEXEC;
	if(file->f_op) {
		struct linux_binfmt * fmt;

		read_lock(&binfmt_lock);
		for (fmt = formats ; fmt ; fmt = fmt->next) {
			if (!fmt->load_shlib)
				continue;
			/* Pin the module so we can drop binfmt_lock while
			 * calling into it. */
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			error = fmt->load_shlib(file);
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (error != -ENOEXEC)
				break;
		}
		read_unlock(&binfmt_lock);
	}
	fput(file);
out:
	return error;
exit:
	/* Error before the nameidata was converted to a file: drop the
	 * open intent and the path reference ourselves. */
	release_open_intent(&nd);
	path_release(&nd);
	goto out;
}
180
181 /*
182 * count() counts the number of strings in array ARGV.
183 */
184 static int count(char __user * __user * argv, int max)
185 {
186 int i = 0;
187
188 if (argv != NULL) {
189 for (;;) {
190 char __user * p;
191
192 if (get_user(p, argv))
193 return -EFAULT;
194 if (!p)
195 break;
196 argv++;
197 if(++i > max)
198 return -E2BIG;
199 cond_resched();
200 }
201 }
202 return i;
203 }
204
/*
 * 'copy_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 *
 * Strings are copied last-to-first, each one placed immediately below
 * the previous at bprm->p, which moves downward through the
 * MAX_ARG_PAGES-sized bprm->page[] area.  Pages are allocated lazily.
 * Returns 0 on success, -EFAULT/-E2BIG/-ENOMEM on failure.
 */
static int copy_strings(int argc, char __user * __user * argv,
			struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;	/* page currently kmap()ed */
	char *kaddr = NULL;			/* its kernel mapping */
	int ret;

	while (argc-- > 0) {
		char __user *str;
		int len;
		unsigned long pos;

		/* strnlen_user() counts the trailing NUL and returns 0 on
		 * fault; bprm->p bounds how far it may scan. */
		if (get_user(str, argv+argc) ||
				!(len = strnlen_user(str, bprm->p))) {
			ret = -EFAULT;
			goto out;
		}

		/* No room left below bprm->p for this string. */
		if (bprm->p < len) {
			ret = -E2BIG;
			goto out;
		}

		bprm->p -= len;
		/* XXX: add architecture specific overflow check here. */
		pos = bprm->p;

		/* Copy one page-fragment per iteration, allocating bprm
		 * pages on demand. */
		while (len > 0) {
			int i, new, err;
			int offset, bytes_to_copy;
			struct page *page;

			offset = pos % PAGE_SIZE;
			i = pos/PAGE_SIZE;
			page = bprm->page[i];
			new = 0;
			if (!page) {
				page = alloc_page(GFP_HIGHUSER);
				bprm->page[i] = page;
				if (!page) {
					ret = -ENOMEM;
					goto out;
				}
				new = 1;
			}

			/* Keep at most one page mapped; remap only when the
			 * target page changes. */
			if (page != kmapped_page) {
				if (kmapped_page)
					kunmap(kmapped_page);
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
			}
			/* Zero the parts of a freshly allocated page that
			 * this copy will not overwrite (before and after
			 * the fragment). */
			if (new && offset)
				memset(kaddr, 0, offset);
			bytes_to_copy = PAGE_SIZE - offset;
			if (bytes_to_copy > len) {
				bytes_to_copy = len;
				if (new)
					memset(kaddr+offset+len, 0,
						PAGE_SIZE-offset-len);
			}
			err = copy_from_user(kaddr+offset, str, bytes_to_copy);
			if (err) {
				ret = -EFAULT;
				goto out;
			}

			pos += bytes_to_copy;
			str += bytes_to_copy;
			len -= bytes_to_copy;
		}
	}
	ret = 0;
out:
	/* Drop the last mapping on both success and failure. */
	if (kmapped_page)
		kunmap(kmapped_page);
	return ret;
}
288
289 /*
290 * Like copy_strings, but get argv and its values from kernel memory.
291 */
292 int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
293 {
294 int r;
295 mm_segment_t oldfs = get_fs();
296 set_fs(KERNEL_DS);
297 r = copy_strings(argc, (char __user * __user *)argv, bprm);
298 set_fs(oldfs);
299 return r;
300 }
301
302 EXPORT_SYMBOL(copy_strings_kernel);
303
304 #ifdef CONFIG_MMU
/*
 * This routine is used to map in a page into an address space: needed by
 * execve() for the initial stack and environment pages.
 *
 * vma->vm_mm->mmap_sem is held for writing.
 *
 * Consumes the reference on @page: on success the reference is handed to
 * the page tables/rmap; on any failure the page is freed and the task is
 * killed with SIGKILL (there is no way to back out of exec here).
 */
void install_arg_page(struct vm_area_struct *vma,
			struct page *page, unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t * pte;
	spinlock_t *ptl;

	/* The vma needs an anon_vma before an anonymous page can be
	 * added to it. */
	if (unlikely(anon_vma_prepare(vma)))
		goto out;

	flush_dcache_page(page);
	/* Allocate intermediate page tables as needed and return the pte
	 * with its page-table lock held. */
	pte = get_locked_pte(mm, address, &ptl);
	if (!pte)
		goto out;
	/* Slot already populated - do not overwrite it. */
	if (!pte_none(*pte)) {
		pte_unmap_unlock(pte, ptl);
		goto out;
	}
	inc_mm_counter(mm, anon_rss);
	lru_cache_add_active(page);
	/* Writable + dirty: these are freshly written argument pages. */
	set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
					page, vma->vm_page_prot))));
	page_add_new_anon_rmap(page, vma, address);
	pte_unmap_unlock(pte, ptl);

	/* no need for flush_tlb */
	return;
out:
	__free_page(page);
	force_sig(SIGKILL, current);
}
342
343 #define EXTRA_STACK_VM_PAGES 20 /* random */
344
345 int setup_arg_pages(struct linux_binprm *bprm,
346 unsigned long stack_top,
347 int executable_stack)
348 {
349 unsigned long stack_base;
350 struct vm_area_struct *mpnt;
351 struct mm_struct *mm = current->mm;
352 int i, ret;
353 long arg_size;
354
355 #ifdef CONFIG_STACK_GROWSUP
356 /* Move the argument and environment strings to the bottom of the
357 * stack space.
358 */
359 int offset, j;
360 char *to, *from;
361
362 /* Start by shifting all the pages down */
363 i = 0;
364 for (j = 0; j < MAX_ARG_PAGES; j++) {
365 struct page *page = bprm->page[j];
366 if (!page)
367 continue;
368 bprm->page[i++] = page;
369 }
370
371 /* Now move them within their pages */
372 offset = bprm->p % PAGE_SIZE;
373 to = kmap(bprm->page[0]);
374 for (j = 1; j < i; j++) {
375 memmove(to, to + offset, PAGE_SIZE - offset);
376 from = kmap(bprm->page[j]);
377 memcpy(to + PAGE_SIZE - offset, from, offset);
378 kunmap(bprm->page[j - 1]);
379 to = from;
380 }
381 memmove(to, to + offset, PAGE_SIZE - offset);
382 kunmap(bprm->page[j - 1]);
383
384 /* Limit stack size to 1GB */
385 stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
386 if (stack_base > (1 << 30))
387 stack_base = 1 << 30;
388 stack_base = PAGE_ALIGN(stack_top - stack_base);
389
390 /* Adjust bprm->p to point to the end of the strings. */
391 bprm->p = stack_base + PAGE_SIZE * i - offset;
392
393 mm->arg_start = stack_base;
394 arg_size = i << PAGE_SHIFT;
395
396 /* zero pages that were copied above */
397 while (i < MAX_ARG_PAGES)
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -