linux/fs/coredump.c
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/timekeeping.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/exec.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>

int core_uses_pid;
unsigned int core_pipe_limit;
char core_pattern[CORENAME_MAX_SIZE] = "core";
static int core_name_size = CORENAME_MAX_SIZE;

struct core_name {
        char *corename;
        int used, size;
};

/* The maximal length of core_pattern is also specified in sysctl.c */

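/*
 * Grow cn->corename to at least @size bytes with krealloc() and record the
 * actually usable allocation size.  Also bumps the global core_name_size
 * hint so the next format_corename() starts with a large enough buffer.
 */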
static int expand_corename(struct core_name *cn, int size)
{
        char *corename = krealloc(cn->corename, size, GFP_KERNEL);

        if (!corename)
                return -ENOMEM;

        if (size > core_name_size) /* racy but harmless */
                core_name_size = size;

        cn->size = ksize(corename);
        cn->corename = corename;
        return 0;
}

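/*
 * vsnprintf() into the corename buffer, growing it on demand.  Returns 0 on
 * success or -ENOMEM if the buffer could not be expanded.
 */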
static __printf(2, 0) int cn_vprintf(struct core_name *cn, const char *fmt,
                                     va_list arg)
{
        int free, need;
        va_list arg_copy;

again:
        free = cn->size - cn->used;

        va_copy(arg_copy, arg);
        need = vsnprintf(cn->corename + cn->used, free, fmt, arg_copy);
        va_end(arg_copy);

        if (need < free) {
                cn->used += need;
                return 0;
        }

        if (!expand_corename(cn, cn->size + need - free + 1))
                goto again;

        return -ENOMEM;
}

static __printf(2, 3) int cn_printf(struct core_name *cn, const char *fmt, ...)
{
        va_list arg;
        int ret;

        va_start(arg, fmt);
        ret = cn_vprintf(cn, fmt, arg);
        va_end(arg);

        return ret;
}

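/*
 * Like cn_printf(), but sanitize the expanded component: "." and ".." are
 * defused, an empty expansion is replaced with "!", and any '/' characters
 * are rewritten to '!' so a single pattern specifier cannot change the
 * directory the corefile lands in.
 */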
static __printf(2, 3)
int cn_esc_printf(struct core_name *cn, const char *fmt, ...)
{
        int cur = cn->used;
        va_list arg;
        int ret;

        va_start(arg, fmt);
        ret = cn_vprintf(cn, fmt, arg);
        va_end(arg);

        if (ret == 0) {
                /*
                 * Ensure that this coredump name component can't cause the
                 * resulting corefile path to consist of a ".." or ".".
                 */
                if ((cn->used - cur == 1 && cn->corename[cur] == '.') ||
                                (cn->used - cur == 2 && cn->corename[cur] == '.'
                                && cn->corename[cur+1] == '.'))
                        cn->corename[cur] = '!';

                /*
                 * Empty names are fishy and could be used to create a "//" in a
                 * corefile name, causing the coredump to happen one directory
                 * level too high. Enforce that all components of the core
                 * pattern are at least one character long.
                 */
                if (cn->used == cur)
                        ret = cn_printf(cn, "!");
        }

        for (; cur < cn->used; ++cur) {
                if (cn->corename[cur] == '/')
                        cn->corename[cur] = '!';
        }
        return ret;
}

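/* Append the escaped path of the dumping task's executable (used for %E). */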
static int cn_print_exe_file(struct core_name *cn)
{
        struct file *exe_file;
        char *pathbuf, *path;
        int ret;

        exe_file = get_mm_exe_file(current->mm);
        if (!exe_file)
                return cn_esc_printf(cn, "%s (path unknown)", current->comm);

        pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY);
        if (!pathbuf) {
                ret = -ENOMEM;
                goto put_exe_file;
        }

        path = file_path(exe_file, pathbuf, PATH_MAX);
        if (IS_ERR(path)) {
                ret = PTR_ERR(path);
                goto free_buf;
        }

        ret = cn_esc_printf(cn, "%s", path);

free_buf:
        kfree(pathbuf);
put_exe_file:
        fput(exe_file);
        return ret;
}

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
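/*
 * Recognized specifiers (see the switch below): %p/%P pid, %i/%I tid,
 * %u uid, %g gid, %d dumpable, %s signal number, %t dump time, %h hostname,
 * %e comm, %E executable path, %c core size limit, %% a literal '%'.
 * A leading '|' selects pipe mode; e.g. the pattern "core.%e.%p" might
 * expand to something like "core.myprog.1234".
 */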
static int format_corename(struct core_name *cn, struct coredump_params *cprm)
{
        const struct cred *cred = current_cred();
        const char *pat_ptr = core_pattern;
        int ispipe = (*pat_ptr == '|');
        int pid_in_pattern = 0;
        int err = 0;

        cn->used = 0;
        cn->corename = NULL;
        if (expand_corename(cn, core_name_size))
                return -ENOMEM;
        cn->corename[0] = '\0';

        if (ispipe)
                ++pat_ptr;

        /* Repeat as long as we have more pattern to process and more output
           space */
        while (*pat_ptr) {
                if (*pat_ptr != '%') {
                        err = cn_printf(cn, "%c", *pat_ptr++);
                } else {
                        switch (*++pat_ptr) {
                        /* single % at the end, drop that */
                        case 0:
                                goto out;
                        /* Double percent, output one percent */
                        case '%':
                                err = cn_printf(cn, "%c", '%');
                                break;
                        /* pid */
                        case 'p':
                                pid_in_pattern = 1;
                                err = cn_printf(cn, "%d",
                                              task_tgid_vnr(current));
                                break;
                        /* global pid */
                        case 'P':
                                err = cn_printf(cn, "%d",
                                              task_tgid_nr(current));
                                break;
                        case 'i':
                                err = cn_printf(cn, "%d",
                                              task_pid_vnr(current));
                                break;
                        case 'I':
                                err = cn_printf(cn, "%d",
                                              task_pid_nr(current));
                                break;
                        /* uid */
                        case 'u':
                                err = cn_printf(cn, "%u",
                                                from_kuid(&init_user_ns,
                                                          cred->uid));
                                break;
                        /* gid */
                        case 'g':
                                err = cn_printf(cn, "%u",
                                                from_kgid(&init_user_ns,
                                                          cred->gid));
                                break;
                        case 'd':
                                err = cn_printf(cn, "%d",
                                        __get_dumpable(cprm->mm_flags));
                                break;
                        /* signal that caused the coredump */
                        case 's':
                                err = cn_printf(cn, "%d",
                                                cprm->siginfo->si_signo);
                                break;
                        /* UNIX time of coredump */
                        case 't': {
                                time64_t time;

                                time = ktime_get_real_seconds();
                                err = cn_printf(cn, "%lld", time);
                                break;
                        }
                        /* hostname */
                        case 'h':
                                down_read(&uts_sem);
                                err = cn_esc_printf(cn, "%s",
                                              utsname()->nodename);
                                up_read(&uts_sem);
                                break;
                        /* executable */
                        case 'e':
                                err = cn_esc_printf(cn, "%s", current->comm);
                                break;
                        case 'E':
                                err = cn_print_exe_file(cn);
                                break;
                        /* core limit size */
                        case 'c':
                                err = cn_printf(cn, "%lu",
                                              rlimit(RLIMIT_CORE));
                                break;
                        default:
                                break;
                        }
                        ++pat_ptr;
                }

                if (err)
                        return err;
        }

out:
        /* Backward compatibility with core_uses_pid:
         *
         * If core_pattern does not include a %p (as is the default)
         * and core_uses_pid is set, then .%pid will be appended to
         * the filename. Do not do this for piped commands. */
        if (!ispipe && !pid_in_pattern && core_uses_pid) {
                err = cn_printf(cn, ".%d", task_tgid_vnr(current));
                if (err)
                        return err;
        }
        return ispipe;
}

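/*
 * Kill every thread in @start's group that shares current's mm, marking the
 * signal struct as a group coredump.  Returns the number of threads that
 * still have to enter exit_mm() and report in to the dumper.
 */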
static int zap_process(struct task_struct *start, int exit_code, int flags)
{
        struct task_struct *t;
        int nr = 0;

        /* ignore all signals except SIGKILL, see prepare_signal() */
        start->signal->flags = SIGNAL_GROUP_COREDUMP | flags;
        start->signal->group_exit_code = exit_code;
        start->signal->group_stop_count = 0;

        for_each_thread(start, t) {
                task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
                if (t != current && t->mm) {
                        sigaddset(&t->pending.signal, SIGKILL);
                        signal_wake_up(t, 1);
                        nr++;
                }
        }

        return nr;
}

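/*
 * Stop all tasks that use this mm: first the dumping thread group, then (if
 * the mm is shared via CLONE_VM without CLONE_THREAD) every other process
 * that maps it.  The big comment below explains why walking the task list
 * without tasklist_lock is safe here.  Returns the number of threads the
 * dumper must wait for, or -EAGAIN if a group exit is already in progress.
 */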
static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
                        struct core_state *core_state, int exit_code)
{
        struct task_struct *g, *p;
        unsigned long flags;
        int nr = -EAGAIN;

        spin_lock_irq(&tsk->sighand->siglock);
        if (!signal_group_exit(tsk->signal)) {
                mm->core_state = core_state;
                tsk->signal->group_exit_task = tsk;
                nr = zap_process(tsk, exit_code, 0);
                clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
        }
        spin_unlock_irq(&tsk->sighand->siglock);
        if (unlikely(nr < 0))
                return nr;

        tsk->flags |= PF_DUMPCORE;
        if (atomic_read(&mm->mm_users) == nr + 1)
                goto done;
        /*
         * We should find and kill all tasks which use this mm, and we should
         * count them correctly into ->nr_threads. We don't take tasklist
         * lock, but this is safe wrt:
         *
         * fork:
         *      None of sub-threads can fork after zap_process(leader). All
         *      processes which were created before this point should be
         *      visible to zap_threads() because copy_process() adds the new
         *      process to the tail of init_task.tasks list, and lock/unlock
         *      of ->siglock provides a memory barrier.
         *
         * do_exit:
         *      The caller holds mm->mmap_sem. This means that the task which
         *      uses this mm can't pass exit_mm(), so it can't exit or clear
         *      its ->mm.
         *
         * de_thread:
         *      It does list_replace_rcu(&leader->tasks, &current->tasks),
         *      we must see either old or new leader, this does not matter.
         *      However, it can change p->sighand, so lock_task_sighand(p)
         *      must be used. Since p->mm != NULL and we hold ->mmap_sem
         *      it can't fail.
         *
         *      Note also that "g" can be the old leader with ->mm == NULL
         *      and already unhashed and thus removed from ->thread_group.
         *      This is OK, __unhash_process()->list_del_rcu() does not
         *      clear the ->next pointer, we will find the new leader via
         *      next_thread().
         */
        rcu_read_lock();
        for_each_process(g) {
                if (g == tsk->group_leader)
                        continue;
                if (g->flags & PF_KTHREAD)
                        continue;

                for_each_thread(g, p) {
                        if (unlikely(!p->mm))
                                continue;
                        if (unlikely(p->mm == mm)) {
                                lock_task_sighand(p, &flags);
                                nr += zap_process(p, exit_code,
                                                        SIGNAL_GROUP_EXIT);
                                unlock_task_sighand(p, &flags);
                        }
                        break;
                }
        }
        rcu_read_unlock();
done:
        atomic_set(&core_state->nr_threads, nr);
        return nr;
}

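/*
 * Freeze the address space for dumping: zap all users of current->mm under
 * mmap_sem, then wait until every victim thread has reached exit_mm() and
 * gone inactive so its register state has been saved.  Returns the number
 * of waiting threads or a negative error.
 */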
static int coredump_wait(int exit_code, struct core_state *core_state)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        int core_waiters = -EBUSY;

        init_completion(&core_state->startup);
        core_state->dumper.task = tsk;
        core_state->dumper.next = NULL;

        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;

        if (!mm->core_state)
                core_waiters = zap_threads(tsk, mm, core_state, exit_code);
        up_write(&mm->mmap_sem);

        if (core_waiters > 0) {
                struct core_thread *ptr;

                freezer_do_not_count();
                wait_for_completion(&core_state->startup);
                freezer_count();
                /*
                 * Wait for all the threads to become inactive, so that
                 * all the thread context (extended register state, like
                 * fpu etc) gets copied to the memory.
                 */
                ptr = core_state->dumper.next;
                while (ptr != NULL) {
                        wait_task_inactive(ptr->task, 0);
                        ptr = ptr->next;
                }
        }

        return core_waiters;
}

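/*
 * Undo coredump_wait(): fix up the group exit code, detach core_state from
 * the mm and wake up every thread that parked itself in exit_mm() waiting
 * for the dump to complete.
 */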
static void coredump_finish(struct mm_struct *mm, bool core_dumped)
{
        struct core_thread *curr, *next;
        struct task_struct *task;

        spin_lock_irq(&current->sighand->siglock);
        if (core_dumped && !__fatal_signal_pending(current))
                current->signal->group_exit_code |= 0x80;
        current->signal->group_exit_task = NULL;
        current->signal->flags = SIGNAL_GROUP_EXIT;
        spin_unlock_irq(&current->sighand->siglock);

        next = mm->core_state->dumper.next;
        while ((curr = next) != NULL) {
                next = curr->next;
                task = curr->task;
                /*
                 * see exit_mm(), curr->task must not see
                 * ->task == NULL before we read ->next.
                 */
                smp_mb();
                curr->task = NULL;
                wake_up_process(task);
        }

        mm->core_state = NULL;
}

static bool dump_interrupted(void)
{
        /*
         * SIGKILL or freezing() interrupt the coredumping. Perhaps we
         * can do try_to_freeze() and check __fatal_signal_pending(),
         * but then we need to teach dump_write() to restart and clear
         * TIF_SIGPENDING.
         */
        return signal_pending(current);
}

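/*
 * Pipe mode only: pose as an extra reader on the dump pipe and wait until
 * the usermode helper (the last real reader) has closed its end, so the
 * helper can finish consuming the core before we exit.
 */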
static void wait_for_dump_helpers(struct file *file)
{
        struct pipe_inode_info *pipe = file->private_data;

        pipe_lock(pipe);
        pipe->readers++;
        pipe->writers--;
        wake_up_interruptible_sync(&pipe->wait);
        kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        pipe_unlock(pipe);

        /*
         * We actually want wait_event_freezable() but then we need
         * to clear TIF_SIGPENDING and improve dump_interrupted().
         */
        wait_event_interruptible(pipe->wait, pipe->readers == 1);

        pipe_lock(pipe);
        pipe->readers--;
        pipe->writers++;
        pipe_unlock(pipe);
}

/*
 * umh_pipe_setup
 * helper function to customize the process used
 * to collect the core in userspace.  Specifically
 * it sets up a pipe and installs it as fd 0 (stdin)
 * for the process.  Returns 0 on success, or
 * PTR_ERR on failure.
 * Note that it also sets the core limit to 1.  This
 * is a special value that we use to trap recursive
 * core dumps
 */
static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
{
        struct file *files[2];
        struct coredump_params *cp = (struct coredump_params *)info->data;
        int err = create_pipe_files(files, 0);
        if (err)
                return err;

        cp->file = files[1];

        err = replace_fd(0, files[0], 0);
        fput(files[0]);
        /* and disallow core files too */
        current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};

        return err;
}

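/*
 * Main entry point, called from the signal code when a process is about to
 * die on a core-dumping signal.  Formats core_pattern, then either spawns
 * the pipe helper or opens the corefile (with extra care for suid dumps),
 * and finally lets the binfmt handler write the actual dump.
 */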
void do_coredump(const siginfo_t *siginfo)
{
        struct core_state core_state;
        struct core_name cn;
        struct mm_struct *mm = current->mm;
        struct linux_binfmt * binfmt;
        const struct cred *old_cred;
        struct cred *cred;
        int retval = 0;
        int ispipe;
        struct files_struct *displaced;
        /* require nonrelative corefile path and be extra careful */
        bool need_suid_safe = false;
        bool core_dumped = false;
        static atomic_t core_dump_count = ATOMIC_INIT(0);
        struct coredump_params cprm = {
                .siginfo = siginfo,
                .regs = signal_pt_regs(),
                .limit = rlimit(RLIMIT_CORE),
                /*
                 * We must use the same mm->flags while dumping core to avoid
                 * inconsistency of bit flags, since this flag is not protected
                 * by any locks.
                 */
                .mm_flags = mm->flags,
        };

        audit_core_dumps(siginfo->si_signo);

        binfmt = mm->binfmt;
        if (!binfmt || !binfmt->core_dump)
                goto fail;
        if (!__get_dumpable(cprm.mm_flags))
                goto fail;

        cred = prepare_creds();
        if (!cred)
                goto fail;
        /*
         * We cannot trust fsuid as being the "true" uid of the process
         * nor do we know its entire history. We only know it was tainted
         * so we dump it as root in mode 2, and only into a controlled
         * environment (pipe handler or fully qualified path).
         */
        if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
                /* Setuid core dump mode */
                cred->fsuid = GLOBAL_ROOT_UID;  /* Dump root private */
                need_suid_safe = true;
        }

        retval = coredump_wait(siginfo->si_signo, &core_state);
        if (retval < 0)
                goto fail_creds;

        old_cred = override_creds(cred);

        ispipe = format_corename(&cn, &cprm);

        if (ispipe) {
                int dump_count;
                char **helper_argv;
                struct subprocess_info *sub_info;

                if (ispipe < 0) {
                        printk(KERN_WARNING "format_corename failed\n");
                        printk(KERN_WARNING "Aborting core\n");
                        goto fail_unlock;
                }

                if (cprm.limit == 1) {
                        /* See umh_pipe_setup() which sets RLIMIT_CORE = 1.
                         *
                         * Normally core limits are irrelevant to pipes, since
                         * we're not writing to the file system, but we use
                         * cprm.limit of 1 here as a special value, this is a
                         * consistent way to catch recursive crashes.
                         * We can still crash if the core_pattern binary sets
                         * RLIM_CORE = !1, but it runs as root, and can do
                         * lots of stupid things.
                         *
                         * Note that we use task_tgid_vnr here to grab the pid
                         * of the process group leader.  That way we get the
                         * right pid if a thread in a multi-threaded
                         * core_pattern process dies.
                         */
                        printk(KERN_WARNING
                                "Process %d(%s) has RLIMIT_CORE set to 1\n",
                                task_tgid_vnr(current), current->comm);
                        printk(KERN_WARNING "Aborting core\n");
                        goto fail_unlock;
                }
                cprm.limit = RLIM_INFINITY;

                dump_count = atomic_inc_return(&core_dump_count);
                if (core_pipe_limit && (core_pipe_limit < dump_count)) {
                        printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
                               task_tgid_vnr(current), current->comm);
                        printk(KERN_WARNING "Skipping core dump\n");
                        goto fail_dropcount;
                }

                helper_argv = argv_split(GFP_KERNEL, cn.corename, NULL);
                if (!helper_argv) {
                        printk(KERN_WARNING "%s failed to allocate memory\n",
                               __func__);
                        goto fail_dropcount;
                }

                retval = -ENOMEM;
                sub_info = call_usermodehelper_setup(helper_argv[0],
                                                helper_argv, NULL, GFP_KERNEL,
                                                umh_pipe_setup, NULL, &cprm);
                if (sub_info)
                        retval = call_usermodehelper_exec(sub_info,
                                                          UMH_WAIT_EXEC);

                argv_free(helper_argv);
                if (retval) {
                        printk(KERN_INFO "Core dump to |%s pipe failed\n",
                               cn.corename);
                        goto close_fail;
                }
        } else {
                struct inode *inode;
                int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW |
                                 O_LARGEFILE | O_EXCL;

                if (cprm.limit < binfmt->min_coredump)
                        goto fail_unlock;

                if (need_suid_safe && cn.corename[0] != '/') {
                        printk(KERN_WARNING "Pid %d(%s) can only dump core "\
                                "to fully qualified path!\n",
                                task_tgid_vnr(current), current->comm);
                        printk(KERN_WARNING "Skipping core dump\n");
                        goto fail_unlock;
                }

                /*
                 * Unlink the file if it exists unless this is a SUID
                 * binary - in that case, we're running around with root
                 * privs and don't want to unlink another user's coredump.
                 */
                if (!need_suid_safe) {
                        mm_segment_t old_fs;

                        old_fs = get_fs();
                        set_fs(KERNEL_DS);
                        /*
                         * If it doesn't exist, that's fine. If there's some
                         * other problem, we'll catch it at the filp_open().
                         */
                        (void) sys_unlink((const char __user *)cn.corename);
                        set_fs(old_fs);
                }

                /*
                 * There is a race between unlinking and creating the
                 * file, but if that causes an EEXIST here, that's
                 * fine - another process raced with us while creating
                 * the corefile, and the other process won. To userspace,
                 * what matters is that at least one of the two processes
                 * writes its coredump successfully, not which one.
                 */
                if (need_suid_safe) {
                        /*
                         * Using user namespaces, normal user tasks can change
                         * their current->fs->root to point to arbitrary
                         * directories. Since the intention of the "only dump
                         * with a fully qualified path" rule is to control where
                         * coredumps may be placed using root privileges,
                         * current->fs->root must not be used. Instead, use the
                         * root directory of init_task.
                         */
                        struct path root;

                        task_lock(&init_task);
                        get_fs_root(init_task.fs, &root);
                        task_unlock(&init_task);
                        cprm.file = file_open_root(root.dentry, root.mnt,
                                cn.corename, open_flags, 0600);
                        path_put(&root);
                } else {
                        cprm.file = filp_open(cn.corename, open_flags, 0600);
                }
                if (IS_ERR(cprm.file))
                        goto fail_unlock;

                inode = file_inode(cprm.file);
                if (inode->i_nlink > 1)
                        goto close_fail;
                if (d_unhashed(cprm.file->f_path.dentry))
                        goto close_fail;
                /*
                 * AK: actually i see no reason to not allow this for named
                 * pipes etc, but keep the previous behaviour for now.
                 */
                if (!S_ISREG(inode->i_mode))
                        goto close_fail;
                /*
                 * Don't dump core if the filesystem changed owner or mode
                 * of the file during file creation. This is an issue when
                 * a process dumps core while its cwd is e.g. on a vfat
                 * filesystem.
                 */
                if (!uid_eq(inode->i_uid, current_fsuid()))
                        goto close_fail;
                if ((inode->i_mode & 0677) != 0600)
                        goto close_fail;
                if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
                        goto close_fail;
                if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
                        goto close_fail;
        }

        /* get us an unshared descriptor table; almost always a no-op */
        retval = unshare_files(&displaced);
        if (retval)
                goto close_fail;
        if (displaced)
                put_files_struct(displaced);
        if (!dump_interrupted()) {
                file_start_write(cprm.file);
                core_dumped = binfmt->core_dump(&cprm);
                file_end_write(cprm.file);
        }
        if (ispipe && core_pipe_limit)
                wait_for_dump_helpers(cprm.file);
close_fail:
        if (cprm.file)
                filp_close(cprm.file, NULL);
fail_dropcount:
        if (ispipe)
                atomic_dec(&core_dump_count);
fail_unlock:
        kfree(cn.corename);
        coredump_finish(mm, core_dumped);
        revert_creds(old_cred);
fail_creds:
        put_cred(cred);
fail:
        return;
}

/*
 * Core dumping helper functions.  These are the only things you should
 * do on a core-file: use only these functions to write out all the
 * necessary info.
 */
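/*
 * Write @nr bytes from @addr to the corefile, honouring RLIMIT_CORE and
 * aborting if a SIGKILL or freeze interrupted the dump.  Returns 1 on
 * success, 0 on failure (callers treat 0 as "stop dumping").
 */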
int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
        struct file *file = cprm->file;
        loff_t pos = file->f_pos;
        ssize_t n;
        if (cprm->written + nr > cprm->limit)
                return 0;
        while (nr) {
                if (dump_interrupted())
                        return 0;
                n = __kernel_write(file, addr, nr, &pos);
                if (n <= 0)
                        return 0;
                file->f_pos = pos;
                cprm->written += n;
                cprm->pos += n;
                nr -= n;
        }
        return 1;
}
EXPORT_SYMBOL(dump_emit);

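/*
 * Advance the corefile position by @nr bytes: seek if the file supports it
 * (leaving a sparse hole), otherwise emit explicit zero pages.
 */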
int dump_skip(struct coredump_params *cprm, size_t nr)
{
        static char zeroes[PAGE_SIZE];
        struct file *file = cprm->file;
        if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
                if (dump_interrupted() ||
                    file->f_op->llseek(file, nr, SEEK_CUR) < 0)
                        return 0;
                cprm->pos += nr;
                return 1;
        } else {
                while (nr > PAGE_SIZE) {
                        if (!dump_emit(cprm, zeroes, PAGE_SIZE))
                                return 0;
                        nr -= PAGE_SIZE;
                }
                return dump_emit(cprm, zeroes, nr);
        }
}
EXPORT_SYMBOL(dump_skip);

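/*
 * Round the current dump position up to @align bytes (which must be a power
 * of two), padding with zeroes or a hole via dump_skip().
 */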
int dump_align(struct coredump_params *cprm, int align)
{
        unsigned mod = cprm->pos & (align - 1);
        if (align & (align - 1))
                return 0;
        return mod ? dump_skip(cprm, align - mod) : 1;
}
EXPORT_SYMBOL(dump_align);
