		bprm->page[i++] = NULL;
#else
	stack_base = arch_align_stack(stack_top - MAX_ARG_PAGES*PAGE_SIZE);
	stack_base = PAGE_ALIGN(stack_base);
	bprm->p += stack_base;
	mm->arg_start = bprm->p;
	arg_size = stack_top - (PAGE_MASK & (unsigned long) mm->arg_start);
#endif

	arg_size += EXTRA_STACK_VM_PAGES * PAGE_SIZE;

	if (bprm->loader)
		bprm->loader += stack_base;
	bprm->exec += stack_base;

	mpnt = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!mpnt)
		return -ENOMEM;

	down_write(&mm->mmap_sem);
	{
		mpnt->vm_mm = mm;
#ifdef CONFIG_STACK_GROWSUP
		mpnt->vm_start = stack_base;
		mpnt->vm_end = stack_base + arg_size;
#else
		mpnt->vm_end = stack_top;
		mpnt->vm_start = mpnt->vm_end - arg_size;
#endif
		/* Adjust stack execute permissions; explicitly enable
		 * for EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X
		 * and leave alone (arch default) otherwise. */
		if (unlikely(executable_stack == EXSTACK_ENABLE_X))
			mpnt->vm_flags = VM_STACK_FLAGS | VM_EXEC;
		else if (executable_stack == EXSTACK_DISABLE_X)
			mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC;
		else
			mpnt->vm_flags = VM_STACK_FLAGS;
		mpnt->vm_flags |= mm->def_flags;
		mpnt->vm_page_prot = protection_map[mpnt->vm_flags & 0x7];
		if ((ret = insert_vm_struct(mm, mpnt))) {
			up_write(&mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, mpnt);
			return ret;
		}
		mm->stack_vm = mm->total_vm = vma_pages(mpnt);
	}

	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page *page = bprm->page[i];
		if (page) {
			bprm->page[i] = NULL;
			install_arg_page(mpnt, page, stack_base);
		}
		stack_base += PAGE_SIZE;
	}
	up_write(&mm->mmap_sem);

	return 0;
}

EXPORT_SYMBOL(setup_arg_pages);
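
/*
 * Usage sketch (illustrative, not part of this file): setup_arg_pages() is
 * meant to be called by a binary-format loader after flush_old_exec(), once
 * the new mm is in place.  Assuming the ELF loader of this kernel era, the
 * call looks roughly like the following; the error handling shown is only
 * an approximation of what load_elf_binary() does:
 *
 *	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
 *				 executable_stack);
 *	if (retval < 0) {
 *		send_sig(SIGKILL, current, 0);
 *		goto out_free_dentry;
 *	}
 */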

#define free_arg_pages(bprm) do { } while (0)

#else

static inline void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++) {
		if (bprm->page[i])
			__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}

#endif /* CONFIG_MMU */

struct file *open_exec(const char *name)
{
	struct nameidata nd;
	int err;
	struct file *file;

	err = path_lookup_open(AT_FDCWD, name, LOOKUP_FOLLOW, &nd, FMODE_READ|FMODE_EXEC);
	file = ERR_PTR(err);

	if (!err) {
		struct inode *inode = nd.dentry->d_inode;
		file = ERR_PTR(-EACCES);
		if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
		    S_ISREG(inode->i_mode)) {
			int err = vfs_permission(&nd, MAY_EXEC);
			file = ERR_PTR(err);
			if (!err) {
				file = nameidata_to_filp(&nd, O_RDONLY);
				if (!IS_ERR(file)) {
					err = deny_write_access(file);
					if (err) {
						fput(file);
						file = ERR_PTR(err);
					}
				}
out:
				return file;
			}
		}
		release_open_intent(&nd);
		path_release(&nd);
	}
	goto out;
}

EXPORT_SYMBOL(open_exec);
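
/*
 * Usage sketch (illustrative, not taken verbatim from this file): callers
 * such as do_execve() and the binfmt loaders treat the return value as an
 * ERR_PTR-encoded struct file pointer, roughly:
 *
 *	struct file *file = open_exec(filename);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);	   (e.g. -ENOENT or -EACCES)
 *	... use the file, then undo the write-access denial ...
 *	allow_write_access(file);
 *	fput(file);
 */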

int kernel_read(struct file *file, unsigned long offset,
	char *addr, unsigned long count)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	int result;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	result = vfs_read(file, (void __user *)addr, count, &pos);
	set_fs(old_fs);
	return result;
}

EXPORT_SYMBOL(kernel_read);
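
/*
 * Usage sketch (illustrative): the typical in-kernel user is
 * prepare_binprm(), which uses kernel_read() to pull the first
 * BINPRM_BUF_SIZE bytes of the executable into bprm->buf so the binfmt
 * handlers can inspect the header, roughly:
 *
 *	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
 *	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
 */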

static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct *old_mm, *active_mm;

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	mm_release(tsk, old_mm);

	if (old_mm) {
		/*
		 * Make sure that if there is a core dump in progress
		 * for the old mm, we get out and die instead of going
		 * through with the exec. We must hold mmap_sem around
		 * checking core_waiters and changing tsk->mm. The
		 * core-inducing thread will increment core_waiters for
		 * each thread whose ->mm == old_mm.
		 */
		down_read(&old_mm->mmap_sem);
		if (unlikely(old_mm->core_waiters)) {
			up_read(&old_mm->mmap_sem);
			return -EINTR;
		}
	}
	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(active_mm, mm);
	task_unlock(tsk);
	arch_pick_mmap_layout(mm);
	if (old_mm) {
		up_read(&old_mm->mmap_sem);
		BUG_ON(active_mm != old_mm);
		mmput(old_mm);
		return 0;
	}
	mmdrop(active_mm);
	return 0;
}
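
/*
 * Usage sketch (illustrative, assuming the call chain of this kernel era):
 * flush_old_exec() hands exec_mmap() the fresh mm that do_execve() allocated
 * with mm_alloc(), roughly:
 *
 *	retval = exec_mmap(bprm->mm);
 *	if (retval)
 *		goto mmap_failed;
 *
 * On success the old mm is released with mmput(); if the task was only
 * borrowing a lazy-TLB active_mm (no old user mm), that reference is dropped
 * with mmdrop() instead, as the function above does.
 */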

/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes. (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int de_thread(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;
	struct task_struct *leader = NULL;
	int count;

	/*
	 * If we don't share sighandlers, then we aren't sharing anything
	 * and we can just re-use it all.
	 */
	if (atomic_read(&oldsighand->count) <= 1) {
		BUG_ON(atomic_read(&sig->count) != 1);
		signalfd_detach(tsk);
		exit_itimers(sig);
		return 0;
	}

	newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	if (!newsighand)
		return -ENOMEM;

	if (thread_group_empty(tsk))
		goto no_thread_group;

	/*
	 * Kill all other threads in the thread group.
	 * We must hold tasklist_lock to call zap_other_threads.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irq(lock);
	if (sig->flags & SIGNAL_GROUP_EXIT) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		read_unlock(&tasklist_lock);
		kmem_cache_free(sighand_cachep, newsighand);
		return -EAGAIN;
	}

	/*
	 * child_reaper ignores SIGKILL, change it now.
	 * Reparenting needs write_lock on tasklist_lock,
	 * so it is safe to do it under read_lock.
	 */
	if (unlikely(tsk->group_leader == child_reaper(tsk)))
		tsk->nsproxy->pid_ns->child_reaper = tsk;

	zap_other_threads(tsk);
	read_unlock(&tasklist_lock);

	/*
	 * Account for the thread group leader hanging around:
	 */
	count = 1;
	if (!thread_group_leader(tsk)) {
		count = 2;
		/*
		 * The SIGALRM timer survives the exec, but needs to point
		 * at us as the new group leader now. We have a race with
		 * a timer firing now getting the old leader, so we need to
		 * synchronize with any firing (by calling del_timer_sync)
		 * before we can safely let the old group leader die.
		 */
		sig->tsk = tsk;
		spin_unlock_irq(lock);
		if (hrtimer_cancel(&sig->real_timer))
			hrtimer_restart(&sig->real_timer);
		spin_lock_irq(lock);
	}
	while (atomic_read(&sig->count) > count) {
		sig->group_exit_task = tsk;
		sig->notify_count = count;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(lock);
		schedule();
		spin_lock_irq(lock);
	}
	sig->group_exit_task = NULL;
	sig->notify_count = 0;
	spin_unlock_irq(lock);

	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (!thread_group_leader(tsk)) {
		/*
		 * Wait for the thread group leader to be a zombie.
		 * It should already be zombie at this point, most
		 * of the time.
		 */
		leader = tsk->group_leader;
		while (leader->exit_state != EXIT_ZOMBIE)
			yield();

		/*
		 * The only record we have of the real-time age of a
		 * process, regardless of execs it's done, is start_time.
		 * All the past CPU time is accumulated in signal_struct
		 * from sister threads now dead. But in this non-leader
		 * exec, nothing survives from the original leader thread,
		 * whose birth marks the true age of this process now.
		 * When we take on its identity by switching to its PID, we
		 * also take its birthdate (always earlier than our own).
		 */
		tsk->start_time = leader->start_time;

		write_lock_irq(&tasklist_lock);

		BUG_ON(leader->tgid != tsk->tgid);
		BUG_ON(tsk->pid == tsk->tgid);
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */

		/* Become a process group leader with the old leader's pid.
		 * The old leader becomes a thread of this thread group.
		 * Note: The old leader also uses this pid until release_task
		 *       is called. Odd but simple and correct.
		 */
		detach_pid(tsk, PIDTYPE_PID);
		tsk->pid = leader->pid;
		attach_pid(tsk, PIDTYPE_PID, find_pid(tsk->pid));
		transfer_pid(leader, tsk, PIDTYPE_PGID);
		transfer_pid(leader, tsk, PIDTYPE_SID);
		list_replace_rcu(&leader->tasks, &tsk->tasks);

		tsk->group_leader = tsk;
		leader->group_leader = tsk;

		tsk->exit_signal = SIGCHLD;

		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
		leader->exit_state = EXIT_DEAD;

		write_unlock_irq(&tasklist_lock);
	}

	/*
	 * There may be one thread left which is just exiting,
	 * but it's safe to stop telling the group to kill themselves.
	 */
	sig->flags = 0;

no_thread_group:
	signalfd_detach(tsk);
	exit_itimers(sig);
	if (leader)
		release_task(leader);

	BUG_ON(atomic_read(&sig->count) != 1);

	if (atomic_read(&oldsighand->count) == 1) {
		/*
		 * Now that we nuked the rest of the thread group,
		 * it turns out we are not sharing sighand any more either.
		 * So we can just keep it.
		 */
		kmem_cache_free(sighand_cachep, newsighand);
	} else {
		/*
		 * Move our state over to newsighand and switch it in.
		 */
		atomic_set(&newsighand->count, 1);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));

		write_lock_irq(&tasklist_lock);
		spin_lock(&oldsighand->siglock);
		spin_lock_nested(&newsighand->siglock, SINGLE_DEPTH_NESTING);

		rcu_assign_pointer(tsk->sighand, newsighand);
		recalc_sigpending();

		spin_unlock(&newsighand->siglock);
		spin_unlock(&oldsighand->siglock);
		write_unlock_irq(&tasklist_lock);

		__cleanup_sighand(oldsighand);
	}

	BUG_ON(!thread_group_leader(tsk));
	return 0;
}
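
/*
 * Usage sketch (illustrative): de_thread() is the first step of
 * flush_old_exec(), run before the old address space is torn down, roughly:
 *
 *	retval = de_thread(current);
 *	if (retval)
 *		goto out;
 *
 * A return of -EAGAIN here means another group exit was already in progress,
 * and the caller is expected to abort the exec.
 */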

/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started
 */

static void flush_old_files(struct files_struct *files)
{
	long j = -1;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	for (;;) {
		unsigned long set, i;

		j++;
		i = j * __NFDBITS;
		fdt = files_fdtable(files);
		if (i >= fdt->max_fds)
			break;
		set = fdt->close_on_exec->fds_bits[j];
		if (!set)