/*
 * Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Nov	Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
 *		for PPC64
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/uaccess.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

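/*
 * Validate the probe address (it must be word-aligned and must not sit on
 * an mtmsr[d]/rfi[d] instruction), then copy the probed instruction into
 * an insn slot so it can be single-stepped out of line.
 */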
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	int ret = 0;
	kprobe_opcode_t insn = *p->addr;

	if ((unsigned long)p->addr & 0x03) {
		printk("Attempt to register kprobe at an unaligned address\n");
		ret = -EINVAL;
	} else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
		printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
		ret = -EINVAL;
	}

	/* insn must be on a special executable page on ppc64 */
	if (!ret) {
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			ret = -ENOMEM;
	}

	if (!ret) {
		memcpy(p->ainsn.insn, p->addr,
				MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
		p->opcode = *p->addr;
		flush_icache_range((unsigned long)p->ainsn.insn,
			(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
	}

	p->ainsn.boostable = 0;
	return ret;
}

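/* Write the breakpoint instruction at the probe address and flush the icache. */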
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

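/* Restore the original instruction at the probe address and flush the icache. */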
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

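/* Free the instruction slot allocated by arch_prepare_kprobe(). */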
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	mutex_lock(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn, 0);
	mutex_unlock(&kprobe_mutex);
}

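/* Set MSR_SE and point nip at the out-of-line copy of the probed instruction. */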
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	regs->msr |= MSR_SE;

	/*
	 * On powerpc we should single step on the original
	 * instruction even if the probed insn is a trap
	 * variant as values in regs could play a part in
	 * whether the trap is taken or not
	 */
	regs->nip = (unsigned long)p->ainsn.insn;
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_msr = regs->msr;
}

/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->link;

	/* Replace the return addr with trampoline addr */
	regs->link = (unsigned long)kretprobe_trampoline;
}

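/*
 * Entry point for the breakpoint trap: handles recursive probe hits, runs
 * the pre_handler, and either emulates the probed instruction or sets up
 * an out-of-line single step.
 */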
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned int *addr = (unsigned int *)regs->nip;
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			kprobe_opcode_t insn = *p->ainsn.insn;
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
					is_trap(insn)) {
				regs->msr &= ~MSR_SE;
				regs->msr |= kcb->kprobe_saved_msr;
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * Here we save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_saved_msr = regs->msr;
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			if (*addr != BREAKPOINT_INSTRUCTION) {
				/* If trap variant, it does not belong to us */
				kprobe_opcode_t cur_insn = *addr;
				if (is_trap(cur_insn))
					goto no_kprobe;
				/* The breakpoint instruction was removed by
				 * another cpu right after we hit it, so no
				 * further handling of this interrupt is
				 * appropriate
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * PowerPC has multiple variants of the "trap"
			 * instruction. If the current instruction is a
			 * trap variant, it could belong to someone else
			 */
			kprobe_opcode_t cur_insn = *addr;
			if (is_trap(cur_insn))
				goto no_kprobe;
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs, kcb);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	if (p->ainsn.boostable >= 0) {
		unsigned int insn = *p->ainsn.insn;

		/* regs->nip is also adjusted if emulate_step returns 1 */
		ret = emulate_step(regs, insn);
		if (ret > 0) {
			/*
			 * Once this instruction has been boosted
			 * successfully, set the boostable flag
			 */
			if (unlikely(p->ainsn.boostable == 0))
				p->ainsn.boostable = 1;

			if (p->post_handler)
				p->post_handler(p, regs, 0);

			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			reset_current_kprobe();
			preempt_enable_no_resched();
			return 1;
		} else if (ret < 0) {
			/*
			 * We don't allow kprobes on mtmsr(d)/rfi(d), etc.
			 * So, we should never get here... but, it's still
			 * good to catch them, just in case...
			 */
			printk("Can't step on instruction %x\n", insn);
			BUG();
		} else if (ret == 0)
			/* This instruction can't be boosted */
			p->ainsn.boostable = -1;
	}
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *		causes the handlers to fire
 */
void kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
			"kretprobe_trampoline:\n"
			"nop\n");
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	regs->nip = orig_ret_address;

	reset_current_kprobe();
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction that has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	int ret;
	unsigned int insn = *p->ainsn.insn;

	regs->nip = (unsigned long)p->addr;
	ret = emulate_step(regs, insn);
	if (ret == 0)
		regs->nip = (unsigned long)p->addr + 4;
}

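/*
 * Entry point for the single-step trap, taken once the probed instruction
 * has been stepped out of line: run the post_handler, fix up regs->nip and
 * the saved MSR, and restore any previously active probe state.
 */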
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);
	regs->msr |= kcb->kprobe_saved_msr;

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, msr
	 * will have SE set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->msr & MSR_SE)
		return 0;

	return 1;
}

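/*
 * Called when a fault is taken while a kprobe is active, either while
 * single-stepping the probed instruction or from within a user handler;
 * decides whether the fault can be handled here or must be passed on.
 */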
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch(kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault.  We reset the current
		 * kprobe, point nip back to the probe address and allow
		 * the page fault handler to continue as a normal page fault.
		 */
		regs->nip = (unsigned long)cur->addr;
		regs->msr &= ~MSR_SE;
		regs->msr |= kcb->kprobe_saved_msr;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * we can also use the npre/npostfault counts for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault; this could happen
		 * if the handler tries to access user space via
		 * copy_from_user(), get_user() etc.  Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if ((entry = search_exception_tables(regs->nip)) != NULL) {
			regs->nip = entry->fixup;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * Let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}

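/*
 * On ppc64, a function pointer refers to a function descriptor;
 * dereference it to obtain the actual entry point address.
 */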
#ifdef CONFIG_PPC64
unsigned long arch_deref_entry_point(void *entry)
{
	return (unsigned long)(((func_descr_t *)entry)->entry);
}
#endif

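/*
 * jprobe support: save the register state, then redirect nip (and the TOC
 * pointer on ppc64) to the user-supplied jprobe handler.
 */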
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->nip = arch_deref_entry_point(jp->entry);
#ifdef CONFIG_PPC64
	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
#endif

	return 1;
}

void __kprobes jprobe_return(void)
{
	asm volatile("trap" ::: "memory");
}

void __kprobes jprobe_return_end(void)
{
};

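/*
 * Called for the trap issued by jprobe_return(): restore the register
 * state saved by setjmp_pre_handler().
 */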
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/*
	 * FIXME - we should ideally be validating that we got here because
	 * of the "trap" in jprobe_return() above, before restoring the
	 * saved regs...
	 */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	preempt_enable_no_resched();
	return 1;
}

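/*
 * Probe registered on the kretprobe trampoline at init time so that
 * returns through the trampoline invoke trampoline_probe_handler().
 */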
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return 1;

	return 0;
}