linux/kernel/kcov.c
#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, repeated enable/disable for a task (only one task at a time is allowed)
 * A user-space sketch of this sequence follows the struct definition below.
 */
struct kcov {
        /*
         * Reference counter. We keep one for:
         *  - opened file descriptor
         *  - task with enabled coverage (we can't unwire it from another task)
         */
        atomic_t                refcount;
        /* The lock protects mode, size, area and t. */
        spinlock_t              lock;
        enum kcov_mode          mode;
        /* Size of arena (in longs for KCOV_MODE_TRACE). */
        unsigned                size;
        /* Coverage buffer shared with user space. */
        void                    *area;
        /* Task for which we collect coverage, or NULL. */
        struct task_struct      *t;
};
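
/*
 * A minimal user-space sketch of the sequence above. It assumes debugfs is
 * mounted at /sys/kernel/debug and that the KCOV_INIT_TRACE, KCOV_ENABLE
 * and KCOV_DISABLE ioctl definitions come from the uapi <linux/kcov.h>
 * header; error handling is omitted for brevity.
 *
 *      #include <stdio.h>
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *      #include <sys/ioctl.h>
 *      #include <sys/mman.h>
 *      #include <linux/kcov.h>
 *
 *      #define COVER_SIZE (64 << 10)   // buffer size, in unsigned longs
 *
 *      int main(void)
 *      {
 *              unsigned long *cover, n, i;
 *              int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *
 *              // Switch the descriptor into trace mode and set the size.
 *              ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *              // Map the coverage buffer shared with the kernel.
 *              cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *                           PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *              // Attach coverage collection to the current task.
 *              ioctl(fd, KCOV_ENABLE, 0);
 *              cover[0] = 0;                   // reset the PC counter
 *              read(-1, NULL, 0);              // the code we want coverage for
 *              n = cover[0];                   // number of PCs collected
 *              for (i = 0; i < n; i++)
 *                      printf("0x%lx\n", cover[i + 1]);
 *              // Detach so another task can be traced with this fd.
 *              ioctl(fd, KCOV_DISABLE, 0);
 *              munmap(cover, COVER_SIZE * sizeof(unsigned long));
 *              close(fd);
 *              return 0;
 *      }
 */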

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
        struct task_struct *t;
        enum kcov_mode mode;

        t = current;
        /*
         * We are interested in code coverage as a function of syscall inputs,
         * so we ignore code executed in interrupts.
         * The checks for whether we are in an interrupt are open-coded, because
         * 1. We can't use in_interrupt() here, since it also returns true
         *    when we are inside a local_bh_disable() section.
         * 2. We don't want to use (in_irq() | in_serving_softirq() | in_nmi()),
         *    since that leads to slower generated code (three separate tests,
         *    one for each of the flags).
         */
        if (!t || (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET
                                                        | NMI_MASK)))
                return;
        mode = READ_ONCE(t->kcov_mode);
        if (mode == KCOV_MODE_TRACE) {
                unsigned long *area;
                unsigned long pos;

                /*
                 * There is some code that runs in interrupts but for which
                 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
                 * READ_ONCE()/barrier() effectively provides load-acquire wrt
                 * interrupts; the paired barrier()/WRITE_ONCE() is in
                 * kcov_ioctl_locked().
                 */
                barrier();
                area = t->kcov_area;
                /* The first word is the number of subsequent PCs. */
                pos = READ_ONCE(area[0]) + 1;
                if (likely(pos < t->kcov_size)) {
                        area[pos] = _RET_IP_;
                        WRITE_ONCE(area[0], pos);
                }
        }
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
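
/*
 * Read-side sketch of the buffer layout maintained above, assuming "cover"
 * is the user-space mapping of the same area (see the usage sketch near
 * struct kcov):
 *
 *      n = cover[0];                           // number of PCs recorded so far
 *      for (i = 1; i <= n; i++)
 *              printf("0x%lx\n", cover[i]);    // _RET_IP_ values, in execution order
 *
 * Since the store above requires pos < t->kcov_size, a buffer of N words
 * holds at most N - 1 PCs; once it is full, further hits are dropped rather
 * than wrapping around.
 */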

static void kcov_get(struct kcov *kcov)
{
        atomic_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
        if (atomic_dec_and_test(&kcov->refcount)) {
                vfree(kcov->area);
                kfree(kcov);
        }
}

void kcov_task_init(struct task_struct *t)
{
        t->kcov_mode = KCOV_MODE_DISABLED;
        t->kcov_size = 0;
        t->kcov_area = NULL;
        t->kcov = NULL;
}

void kcov_task_exit(struct task_struct *t)
{
        struct kcov *kcov;

        kcov = t->kcov;
        if (kcov == NULL)
                return;
        spin_lock(&kcov->lock);
        if (WARN_ON(kcov->t != t)) {
                spin_unlock(&kcov->lock);
                return;
        }
        /* Just to not leave dangling references behind. */
        kcov_task_init(t);
        kcov->t = NULL;
        spin_unlock(&kcov->lock);
        kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
        int res = 0;
        void *area;
        struct kcov *kcov = vma->vm_file->private_data;
        unsigned long size, off;
        struct page *page;

        area = vmalloc_user(vma->vm_end - vma->vm_start);
        if (!area)
                return -ENOMEM;

        spin_lock(&kcov->lock);
        size = kcov->size * sizeof(unsigned long);
        if (kcov->mode == KCOV_MODE_DISABLED || vma->vm_pgoff != 0 ||
            vma->vm_end - vma->vm_start != size) {
                res = -EINVAL;
                goto exit;
        }
        if (!kcov->area) {
                kcov->area = area;
                vma->vm_flags |= VM_DONTEXPAND;
                spin_unlock(&kcov->lock);
                for (off = 0; off < size; off += PAGE_SIZE) {
                        page = vmalloc_to_page(kcov->area + off);
                        if (vm_insert_page(vma, vma->vm_start + off, page))
                                WARN_ONCE(1, "vm_insert_page() failed");
                }
                return 0;
        }
exit:
        spin_unlock(&kcov->lock);
        vfree(area);
        return res;
}
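
/*
 * The checks above tie the mapping to the size passed to KCOV_INIT_TRACE:
 * the mapping length must be exactly kcov->size unsigned longs and the file
 * offset must be zero, so the expected user-space call looks like this
 * sketch (with "size" being the KCOV_INIT_TRACE argument):
 *
 *      cover = mmap(NULL, size * sizeof(unsigned long),
 *                   PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * Any other length or offset fails with -EINVAL and the freshly vmalloc'ed
 * area is freed again.
 */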

static int kcov_open(struct inode *inode, struct file *filep)
{
        struct kcov *kcov;

        kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
        if (!kcov)
                return -ENOMEM;
        atomic_set(&kcov->refcount, 1);
        spin_lock_init(&kcov->lock);
        filep->private_data = kcov;
        return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
        kcov_put(filep->private_data);
        return 0;
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
                             unsigned long arg)
{
        struct task_struct *t;
        unsigned long size, unused;

        switch (cmd) {
        case KCOV_INIT_TRACE:
                /*
                 * Enable kcov in trace mode and set up the buffer size.
                 * Must happen before anything else.
                 */
                if (kcov->mode != KCOV_MODE_DISABLED)
                        return -EBUSY;
                /*
                 * Size must be at least 2 to hold the current position and
                 * one PC. Later we allocate size * sizeof(unsigned long)
                 * bytes of memory, which must not overflow.
                 */
                size = arg;
                if (size < 2 || size > INT_MAX / sizeof(unsigned long))
                        return -EINVAL;
                kcov->size = size;
                kcov->mode = KCOV_MODE_TRACE;
                return 0;
        case KCOV_ENABLE:
                /*
                 * Enable coverage for the current task.
                 * At this point the user must have enabled trace mode and
                 * mmapped the file. Coverage collection is disabled only at
                 * task exit or voluntarily via KCOV_DISABLE. After that it
                 * can be enabled for another task.
                 */
                unused = arg;
                if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED ||
                    kcov->area == NULL)
                        return -EINVAL;
                if (kcov->t != NULL)
                        return -EBUSY;
                t = current;
                /* Cache in task struct for performance. */
                t->kcov_size = kcov->size;
                t->kcov_area = kcov->area;
                /* See comment in __sanitizer_cov_trace_pc(). */
                barrier();
                WRITE_ONCE(t->kcov_mode, kcov->mode);
                t->kcov = kcov;
                kcov->t = t;
                /* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
                kcov_get(kcov);
                return 0;
        case KCOV_DISABLE:
                /* Disable coverage for the current task. */
                unused = arg;
                if (unused != 0 || current->kcov != kcov)
                        return -EINVAL;
                t = current;
                if (WARN_ON(kcov->t != t))
                        return -EINVAL;
                kcov_task_init(t);
                kcov->t = NULL;
                kcov_put(kcov);
                return 0;
        default:
                return -ENOTTY;
        }
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
        struct kcov *kcov;
        int res;

        kcov = filep->private_data;
        spin_lock(&kcov->lock);
        res = kcov_ioctl_locked(kcov, cmd, arg);
        spin_unlock(&kcov->lock);
        return res;
}

static const struct file_operations kcov_fops = {
        .open           = kcov_open,
        .unlocked_ioctl = kcov_ioctl,
        .mmap           = kcov_mmap,
        .release        = kcov_close,
};

static int __init kcov_init(void)
{
        /*
         * The kcov debugfs file won't ever get removed and thus,
         * there is no need to protect it against removal races. The
         * use of debugfs_create_file_unsafe() is actually safe here.
         */
        if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
                pr_err("failed to create kcov in debugfs\n");
                return -ENOMEM;
        }
        return 0;
}

device_initcall(kcov_init);