/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */
15 #include <linux/kernel.h>
16 #include <linux/string.h>
18 #include <asm/backtrace.h>
20 #include <arch/chip.h>
25 #include <asm/opcode-tile.h>
32 /** A decoded bundle used for backtracer analysis. */
34 tile_bundle_bits bits;
36 struct tile_decoded_instruction
37 insns[TILE_MAX_INSTRUCTIONS_PER_BUNDLE];
41 /* This implementation only makes sense for native tools. */
42 /** Default function to read memory. */
44 bt_read_memory(void *result, VirtualAddress addr, size_t size, void *extra)
46 /* FIXME: this should do some horrible signal stuff to catch
47 * SEGV cleanly and fail.
49 * Or else the caller should do the setjmp for efficiency.
52 memcpy(result, (const void *)addr, size);
57 /** Locates an instruction inside the given bundle that
58 * has the specified mnemonic, and whose first 'num_operands_to_match'
59 * operands exactly match those in 'operand_values'.
61 static const struct tile_decoded_instruction*
62 find_matching_insn(const BacktraceBundle *bundle,
63 tile_mnemonic mnemonic,
64 const int *operand_values,
65 int num_operands_to_match)
70 for (i = 0; i < bundle->num_insns; i++) {
71 const struct tile_decoded_instruction *insn =
74 if (insn->opcode->mnemonic != mnemonic)
78 for (j = 0; j < num_operands_to_match; j++) {
79 if (operand_values[j] != insn->operand_values[j]) {
92 /** Does this bundle contain an 'iret' instruction? */
94 bt_has_iret(const BacktraceBundle *bundle)
96 return find_matching_insn(bundle, TILE_OPC_IRET, NULL, 0) != NULL;
99 /** Does this bundle contain an 'addi sp, sp, OFFSET' or
100 * 'addli sp, sp, OFFSET' instruction, and if so, what is OFFSET?
103 bt_has_addi_sp(const BacktraceBundle *bundle, int *adjust)
105 static const int vals[2] = { TREG_SP, TREG_SP };
107 const struct tile_decoded_instruction *insn =
108 find_matching_insn(bundle, TILE_OPC_ADDI, vals, 2);
110 insn = find_matching_insn(bundle, TILE_OPC_ADDLI, vals, 2);
114 *adjust = insn->operand_values[2];
118 /** Does this bundle contain any 'info OP' or 'infol OP'
119 * instruction, and if so, what are their OP? Note that OP is interpreted
120 * as an unsigned value by this code since that's what the caller wants.
121 * Returns the number of info ops found.
124 bt_get_info_ops(const BacktraceBundle *bundle,
125 int operands[MAX_INFO_OPS_PER_BUNDLE])
130 for (i = 0; i < bundle->num_insns; i++) {
131 const struct tile_decoded_instruction *insn =
134 if (insn->opcode->mnemonic == TILE_OPC_INFO ||
135 insn->opcode->mnemonic == TILE_OPC_INFOL) {
136 operands[num_ops++] = insn->operand_values[0];
143 /** Does this bundle contain a jrp instruction, and if so, to which
144 * register is it jumping?
147 bt_has_jrp(const BacktraceBundle *bundle, int *target_reg)
149 const struct tile_decoded_instruction *insn =
150 find_matching_insn(bundle, TILE_OPC_JRP, NULL, 0);
154 *target_reg = insn->operand_values[0];
158 /** Does this bundle modify the specified register in any way? */
160 bt_modifies_reg(const BacktraceBundle *bundle, int reg)
163 for (i = 0; i < bundle->num_insns; i++) {
164 const struct tile_decoded_instruction *insn =
167 if (insn->opcode->implicitly_written_register == reg)
170 for (j = 0; j < insn->opcode->num_operands; j++)
171 if (insn->operands[j]->is_dest_reg &&
172 insn->operand_values[j] == reg)
179 /** Does this bundle modify sp? */
181 bt_modifies_sp(const BacktraceBundle *bundle)
183 return bt_modifies_reg(bundle, TREG_SP);
186 /** Does this bundle modify lr? */
188 bt_modifies_lr(const BacktraceBundle *bundle)
190 return bt_modifies_reg(bundle, TREG_LR);
193 /** Does this bundle contain the instruction 'move fp, sp'? */
195 bt_has_move_r52_sp(const BacktraceBundle *bundle)
197 static const int vals[2] = { 52, TREG_SP };
198 return find_matching_insn(bundle, TILE_OPC_MOVE, vals, 2) != NULL;
201 /** Does this bundle contain the instruction 'sw sp, lr'? */
203 bt_has_sw_sp_lr(const BacktraceBundle *bundle)
205 static const int vals[2] = { TREG_SP, TREG_LR };
206 return find_matching_insn(bundle, TILE_OPC_SW, vals, 2) != NULL;
/*
 * NOTE(review): this extract is missing many of the original source
 * lines (the number embedded at the start of each line is the original
 * file's line number, and the sequence is not contiguous), so the
 * return-type line, several braces, 'break'/'continue' statements,
 * some local declarations (e.g. 'pc', 'next_bundle', 'i', 'has_jrp',
 * 'adjust') and some conditions are absent below.  The surviving lines
 * are kept byte-for-byte; do not treat this fragment as compilable.
 *
 * Purpose, as far as the visible logic shows: scan instruction bundles
 * forward from start_pc, interpreting prolog/epilog instructions
 * (addi/addli sp, move r52 sp, sw sp lr, jrp, iret) and compiler-emitted
 * 'info' annotation operands to decide where the caller's SP and PC
 * live (at an offset from SP, in r52, in lr, on the stack, or unknown),
 * recording the conclusion in *location.
 */
209 /** Locates the caller's PC and SP for a program starting at the
213 find_caller_pc_and_caller_sp(CallerLocation *location,
214 const VirtualAddress start_pc,
215 BacktraceMemoryReader read_memory_func,
216 void *read_memory_func_extra)
218 /* Have we explicitly decided what the sp is,
219 * rather than just the default?
221 bool sp_determined = false;
223 /* Has any bundle seen so far modified lr? */
224 bool lr_modified = false;
226 /* Have we seen a move from sp to fp? */
227 bool sp_moved_to_r52 = false;
229 /* Have we seen a terminating bundle? */
230 bool seen_terminating_bundle = false;
232 /* Cut down on round-trip reading overhead by reading several
235 tile_bundle_bits prefetched_bundles[32];
236 int num_bundles_prefetched = 0;
240 /* Default to assuming that the caller's sp is the current sp.
241 * This is necessary to handle the case where we start backtracing
242 * right at the end of the epilog.
244 location->sp_location = SP_LOC_OFFSET;
245 location->sp_offset = 0;
247 /* Default to having no idea where the caller PC is. */
248 location->pc_location = PC_LOC_UNKNOWN;
250 /* Don't even try if the PC is not aligned. */
251 if (start_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0)
/* Main scan loop: walk forward one bundle at a time from start_pc. */
254 for (pc = start_pc;; pc += sizeof(tile_bundle_bits)) {
256 BacktraceBundle bundle;
257 int num_info_ops, info_operands[MAX_INFO_OPS_PER_BUNDLE];
258 int one_ago, jrp_reg;
/* Refill the prefetch buffer when it has been consumed. */
261 if (next_bundle >= num_bundles_prefetched) {
262 /* Prefetch some bytes, but don't cross a page
263 * boundary since that might cause a read failure we
264 * don't care about if we only need the first few
265 * bytes. Note: we don't care what the actual page
266 * size is; using the minimum possible page size will
267 * prevent any problems.
269 unsigned int bytes_to_prefetch = 4096 - (pc & 4095);
270 if (bytes_to_prefetch > sizeof prefetched_bundles)
271 bytes_to_prefetch = sizeof prefetched_bundles;
273 if (!read_memory_func(prefetched_bundles, pc,
275 read_memory_func_extra)) {
276 if (pc == start_pc) {
277 /* The program probably called a bad
278 * address, such as a NULL pointer.
279 * So treat this as if we are at the
280 * start of the function prolog so the
281 * backtrace will show how we got here.
283 location->pc_location = PC_LOC_IN_LR;
287 /* Unreadable address. Give up. */
292 num_bundles_prefetched =
293 bytes_to_prefetch / sizeof(tile_bundle_bits);
296 /* Decode the next bundle. */
297 bundle.bits = prefetched_bundles[next_bundle++];
299 parse_insn_tile(bundle.bits, pc, bundle.insns);
300 num_info_ops = bt_get_info_ops(&bundle, info_operands);
302 /* First look at any one_ago info ops if they are interesting,
303 * since they should shadow any non-one-ago info ops.
305 for (one_ago = (pc != start_pc) ? 1 : 0;
306 one_ago >= 0; one_ago--) {
308 for (i = 0; i < num_info_ops; i++) {
309 int info_operand = info_operands[i];
310 if (info_operand < CALLER_UNKNOWN_BASE) {
311 /* Weird; reserved value, ignore it. */
315 /* Skip info ops which are not in the
316 * "one_ago" mode we want right now.
318 if (((info_operand & ONE_BUNDLE_AGO_FLAG) != 0)
322 /* Clear the flag to make later checking
324 info_operand &= ~ONE_BUNDLE_AGO_FLAG;
326 /* Default to looking at PC_IN_LR_FLAG. */
327 if (info_operand & PC_IN_LR_FLAG)
328 location->pc_location =
331 location->pc_location =
/* Specific info-op encodings override the PC/SP defaults above. */
334 switch (info_operand) {
335 case CALLER_UNKNOWN_BASE:
336 location->pc_location = PC_LOC_UNKNOWN;
337 location->sp_location = SP_LOC_UNKNOWN;
340 case CALLER_SP_IN_R52_BASE:
341 case CALLER_SP_IN_R52_BASE | PC_IN_LR_FLAG:
342 location->sp_location = SP_LOC_IN_R52;
/* Remaining encodings carry an SP offset, scaled by 8. */
347 const unsigned int val = info_operand
348 - CALLER_SP_OFFSET_BASE;
349 const unsigned int sp_offset =
350 (val >> NUM_INFO_OP_FLAGS) * 8;
351 if (sp_offset < 32768) {
352 /* This is a properly encoded
354 location->sp_location =
356 location->sp_offset =
360 /* This looked like an SP
361 * offset, but it's outside
362 * the legal range, so this
363 * must be an unrecognized
364 * info operand. Ignore it.
373 if (seen_terminating_bundle) {
374 /* We saw a terminating bundle during the previous
375 * iteration, so we were only looking for an info op.
380 if (bundle.bits == 0) {
381 /* Wacky terminating bundle. Stop looping, and hope
382 * we've already seen enough to find the caller.
388 * Try to determine caller's SP.
391 if (!sp_determined) {
393 if (bt_has_addi_sp(&bundle, &adjust)) {
394 location->sp_location = SP_LOC_OFFSET;
397 /* We are in prolog about to adjust
399 location->sp_offset = 0;
401 /* We are in epilog restoring SP. */
402 location->sp_offset = adjust;
405 sp_determined = true;
407 if (bt_has_move_r52_sp(&bundle)) {
408 /* Maybe in prolog, creating an
409 * alloca-style frame. But maybe in
410 * the middle of a fixed-size frame
411 * clobbering r52 with SP.
413 sp_moved_to_r52 = true;
416 if (bt_modifies_sp(&bundle)) {
417 if (sp_moved_to_r52) {
418 /* We saw SP get saved into
419 * r52 earlier (or now), which
420 * must have been in the
421 * prolog, so we now know that
422 * SP is still holding the
425 location->sp_location =
427 location->sp_offset = 0;
429 /* Someone must have saved
430 * aside the caller's SP value
431 * into r52, so r52 holds the
434 location->sp_location =
437 sp_determined = true;
442 if (bt_has_iret(&bundle)) {
443 /* This is a terminating bundle. */
444 seen_terminating_bundle = true;
449 * Try to determine caller's PC.
/* NOTE(review): 'has_jrp' is declared on a line missing from this
 * extract; a jrp also terminates the scan (see line below).
 */
453 has_jrp = bt_has_jrp(&bundle, &jrp_reg);
455 seen_terminating_bundle = true;
457 if (location->pc_location == PC_LOC_UNKNOWN) {
459 if (jrp_reg == TREG_LR && !lr_modified) {
460 /* Looks like a leaf function, or else
461 * lr is already restored. */
462 location->pc_location =
465 location->pc_location =
468 } else if (bt_has_sw_sp_lr(&bundle)) {
469 /* In prolog, spilling initial lr to stack. */
470 location->pc_location = PC_LOC_IN_LR;
471 } else if (bt_modifies_lr(&bundle)) {
/*
 * NOTE(review): this fragment is extraction-damaged — the return-type
 * line, braces, the bodies of both switch statements and several
 * state->... assignments are missing (the embedded per-line numbers
 * from the original file are not contiguous).  Lines are kept
 * byte-for-byte.
 *
 * Purpose, from the visible logic: initialize a BacktraceIterator for
 * the frame described by (pc, lr, sp, r52).  It resolves the current
 * frame's caller SP/PC via find_caller_pc_and_caller_sp(), derives the
 * frame pointer 'fp' (validity-checked for alignment), and records an
 * initial_frame_caller_pc of -1 to mean "read the caller PC from the
 * stack later".  Falls back to bt_read_memory when the caller supplies
 * no memory reader.
 */
479 backtrace_init(BacktraceIterator *state,
480 BacktraceMemoryReader read_memory_func,
481 void *read_memory_func_extra,
482 VirtualAddress pc, VirtualAddress lr,
483 VirtualAddress sp, VirtualAddress r52)
485 CallerLocation location;
486 VirtualAddress fp, initial_frame_caller_pc;
/* Use the native in-process reader if the caller gave us none. */
488 if (read_memory_func == NULL) {
489 read_memory_func = bt_read_memory;
492 /* Find out where we are in the initial frame. */
493 find_caller_pc_and_caller_sp(&location, pc,
494 read_memory_func, read_memory_func_extra);
/* NOTE(review): other switch cases (e.g. SP in r52 / unknown) are on
 * lines missing from this extract.
 */
496 switch (location.sp_location) {
507 fp = sp + location.sp_offset;
516 /* The frame pointer should theoretically be aligned mod 8. If
517 * it's not even aligned mod 4 then something terrible happened
518 * and we should mark it as invalid.
523 /* -1 means "don't know initial_frame_caller_pc". */
524 initial_frame_caller_pc = -1;
526 switch (location.pc_location) {
/* A zero or misaligned lr cannot be a valid caller PC. */
533 if (lr == 0 || lr % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) {
537 initial_frame_caller_pc = lr;
541 case PC_LOC_ON_STACK:
542 /* Leave initial_frame_caller_pc as -1,
543 * meaning check the stack.
556 state->initial_frame_caller_pc = initial_frame_caller_pc;
557 state->read_memory_func = read_memory_func;
558 state->read_memory_func_extra = read_memory_func_extra;
/*
 * NOTE(review): extraction-damaged fragment — the return-type line,
 * braces, the 'return false'/'return true' statements and some state
 * updates are on lines missing from this extract (embedded original
 * line numbers are not contiguous).  Lines kept byte-for-byte.
 *
 * Purpose, from the visible logic: advance the iterator to the
 * caller's frame.  Reads a two-word frame linkage area at state->fp
 * (next_frame[0] = caller PC slot, next_frame[1] = caller FP slot),
 * rejects misaligned/suspect values, prefers the remembered
 * initial_frame_caller_pc when still in the initial frame, and then
 * updates the iterator's sp/fp/pc to describe the caller.
 */
562 backtrace_next(BacktraceIterator *state)
564 VirtualAddress next_fp, next_pc, next_frame[2];
/* fp == -1 is the sentinel meaning "frame pointer unknown". */
566 if (state->fp == -1) {
567 /* No parent frame. */
571 /* Try to read the frame linkage data chaining to the next function. */
572 if (!state->read_memory_func(&next_frame, state->fp, sizeof next_frame,
573 state->read_memory_func_extra)) {
577 next_fp = next_frame[1];
578 if (next_fp % 4 != 0) {
579 /* Caller's frame pointer is suspect, so give up.
580 * Technically it should be aligned mod 8, but we will
586 if (state->initial_frame_caller_pc != -1) {
587 /* We must be in the initial stack frame and already know the
590 next_pc = state->initial_frame_caller_pc;
592 /* Force reading stack next time, in case we were in the
593 * initial frame. We don't do this above just to paranoidly
594 * avoid changing the struct at all when we return false.
596 state->initial_frame_caller_pc = -1;
598 /* Get the caller PC from the frame linkage area. */
599 next_pc = next_frame[0];
/* NOTE(review): the first half of this condition (likely a zero-PC
 * check) is on a line missing from this extract.
 */
601 next_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) {
602 /* The PC is suspect, so give up. */
607 /* Update state to become the caller's stack frame. */
609 state->sp = state->fp;
615 #else /* TILE_CHIP < 10 */
/*
 * NOTE(review): per the surrounding '#else / * TILE_CHIP < 10 * /'
 * marker, this appears to be the stub backtrace_init for newer chips
 * where the backtracer above is unavailable — TODO confirm against the
 * missing '#if' line.  The return-type line, braces, and any
 * state->pc/sp/fp assignments are on lines missing from this extract;
 * surviving lines kept byte-for-byte.  Setting
 * initial_frame_caller_pc = -1 matches the "don't know caller PC"
 * sentinel used by the full implementation.
 */
618 backtrace_init(BacktraceIterator *state,
619 BacktraceMemoryReader read_memory_func,
620 void *read_memory_func_extra,
621 VirtualAddress pc, VirtualAddress lr,
622 VirtualAddress sp, VirtualAddress r52)
627 state->initial_frame_caller_pc = -1;
628 state->read_memory_func = read_memory_func;
629 state->read_memory_func_extra = read_memory_func_extra;
632 bool backtrace_next(BacktraceIterator *state) { return false; }
634 #endif /* TILE_CHIP < 10 */