/**
* @file backtrace.c
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon
* @author David Smith
*/

#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/ptrace.h>
#include <asm/uaccess.h>
#include <asm/stacktrace.h>
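
/*
 * Callbacks used by dump_trace() when walking kernel-mode stacks.
 * Only backtrace_address() does real work; the others exist to
 * satisfy the stacktrace_ops interface.
 */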

static void backtrace_warning_symbol(void *data, char *msg,
                                     unsigned long symbol)
{
        /* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
        /* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
        /* Yes, we want all stacks: returning 0 tells dump_trace() to keep walking. */
        return 0;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
        unsigned int *depth = data;

        /* Stop at zero instead of wrapping the unsigned counter. */
        if (*depth) {
                oprofile_add_trace(addr);
                (*depth)--;
        }
}

static struct stacktrace_ops backtrace_ops = {
        .warning        = backtrace_warning,
        .warning_symbol = backtrace_warning_symbol,
        .stack          = backtrace_stack,
        .address        = backtrace_address,
};

/* from arch/x86/kernel/cpu/perf_event.c: */
/*
 * Best-effort, GUP-based copy_from_user() that assumes IRQ or NMI context.
 */
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
        unsigned long offset, addr = (unsigned long)from;
        unsigned long size, len = 0;
        struct page *page;
        void *map;
        int ret;

        do {
                /*
                 * Pin the user page without sleeping or taking mmap_sem;
                 * a plain copy_from_user() could fault, which must not
                 * happen in NMI context.
                 */
                ret = __get_user_pages_fast(addr, 1, 0, &page);
                if (!ret)
                        break;

                offset = addr & (PAGE_SIZE - 1);
                size = min(PAGE_SIZE - offset, n - len);

                /* Copy out of the page through a temporary atomic mapping. */
                map = kmap_atomic(page, KM_USER0);
                memcpy(to, map + offset, size);
                kunmap_atomic(map, KM_USER0);
                put_page(page);

                len += size;
                to += size;
                addr += size;
        } while (len < n);

        /* May be short: the caller must check the returned length. */
        return len;
}
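
/*
 * With frame pointers enabled (-fno-omit-frame-pointer), each user
 * frame begins with the saved frame pointer followed by the return
 * address; frame_head mirrors that layout so the chain can be walked
 * from a register snapshot.
 */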
struct frame_head {
        struct frame_head *bp;
        unsigned long ret;
} __attribute__((packed));

static struct frame_head *dump_user_backtrace(struct frame_head *head)
{
        /* Also check accessibility of one struct frame_head beyond: */
        struct frame_head bufhead[2];
        unsigned long bytes;

        bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
        if (bytes != sizeof(bufhead))
                return NULL;

        oprofile_add_trace(bufhead[0].ret);

        /*
         * Frame pointers should strictly progress back up the stack
         * (towards higher addresses).
         */
        if (head >= bufhead[0].bp)
                return NULL;

        return bufhead[0].bp;
}
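
/*
 * Entry point from the oprofile sample path: kernel-mode samples are
 * walked with dump_trace() and the callbacks above, user-mode samples
 * by following the saved frame-pointer chain with NMI-safe copies.
 */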
void
x86_backtrace(struct pt_regs * const regs, unsigned int depth)
{
        struct frame_head *head = (struct frame_head *)frame_pointer(regs);

        if (!user_mode_vm(regs)) {
                unsigned long stack = kernel_stack_pointer(regs);

                if (depth)
                        dump_trace(NULL, regs, (unsigned long *)stack, 0,
                                   &backtrace_ops, &depth);
                return;
        }

        while (depth-- && head)
                head = dump_user_backtrace(head);
}
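
/*
 * Usage sketch (not part of this file): the x86 oprofile setup code,
 * e.g. oprofile_arch_init() in arch/x86/oprofile/nmi_int.c, is expected
 * to install this walker as the architecture backtrace hook:
 *
 *      ops->backtrace = x86_backtrace;
 *
 * after which the core calls it with the sampled pt_regs and the
 * user-configured backtrace depth.
 */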