1// Copyright (c) 2010 Google Inc.
2// All rights reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are
6// met:
7//
8// * Redistributions of source code must retain the above copyright
9// notice, this list of conditions and the following disclaimer.
10// * Redistributions in binary form must reproduce the above
11// copyright notice, this list of conditions and the following disclaimer
12// in the documentation and/or other materials provided with the
13// distribution.
14// * Neither the name of Google Inc. nor the names of its
15// contributors may be used to endorse or promote products derived from
16// this software without specific prior written permission.
17//
18// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30// stackwalker_amd64.cc: amd64-specific stackwalker.
31//
32// See stackwalker_amd64.h for documentation.
33//
34// Author: Mark Mentovai, Ted Mielczarek
35
36#include <assert.h>
37
38#include "common/scoped_ptr.h"
39#include "google_breakpad/processor/call_stack.h"
40#include "google_breakpad/processor/memory_region.h"
41#include "google_breakpad/processor/source_line_resolver_interface.h"
42#include "google_breakpad/processor/stack_frame_cpu.h"
43#include "google_breakpad/processor/system_info.h"
44#include "processor/cfi_frame_info.h"
45#include "processor/logging.h"
46#include "processor/stackwalker_amd64.h"
47
48namespace google_breakpad {
49
50
// Table driving CFI-based recovery: maps each STACK CFI register name
// (e.g. "$rax") to its validity flag and its member in MDRawContextAMD64.
// The second field is an alternate CFI name (".cfa" for $rsp, ".ra" for
// $rip); the bool marks registers the walker may assume are callee-saved.
const StackwalkerAMD64::CFIWalker::RegisterSet
StackwalkerAMD64::cfi_register_map_[] = {
  // It may seem like $rip and $rsp are callee-saves, because the callee is
  // responsible for having them restored upon return. But the callee_saves
  // flags here really means that the walker should assume they're
  // unchanged if the CFI doesn't mention them --- clearly wrong for $rip
  // and $rsp.
  { "$rax", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RAX, &MDRawContextAMD64::rax },
  { "$rdx", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RDX, &MDRawContextAMD64::rdx },
  { "$rcx", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RCX, &MDRawContextAMD64::rcx },
  { "$rbx", NULL, true,
    StackFrameAMD64::CONTEXT_VALID_RBX, &MDRawContextAMD64::rbx },
  { "$rsi", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RSI, &MDRawContextAMD64::rsi },
  { "$rdi", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RDI, &MDRawContextAMD64::rdi },
  { "$rbp", NULL, true,
    StackFrameAMD64::CONTEXT_VALID_RBP, &MDRawContextAMD64::rbp },
  { "$rsp", ".cfa", false,
    StackFrameAMD64::CONTEXT_VALID_RSP, &MDRawContextAMD64::rsp },
  { "$r8", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_R8, &MDRawContextAMD64::r8 },
  { "$r9", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_R9, &MDRawContextAMD64::r9 },
  { "$r10", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_R10, &MDRawContextAMD64::r10 },
  { "$r11", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_R11, &MDRawContextAMD64::r11 },
  { "$r12", NULL, true,
    StackFrameAMD64::CONTEXT_VALID_R12, &MDRawContextAMD64::r12 },
  { "$r13", NULL, true,
    StackFrameAMD64::CONTEXT_VALID_R13, &MDRawContextAMD64::r13 },
  { "$r14", NULL, true,
    StackFrameAMD64::CONTEXT_VALID_R14, &MDRawContextAMD64::r14 },
  { "$r15", NULL, true,
    StackFrameAMD64::CONTEXT_VALID_R15, &MDRawContextAMD64::r15 },
  { "$rip", ".ra", false,
    StackFrameAMD64::CONTEXT_VALID_RIP, &MDRawContextAMD64::rip },
};
93
// Constructs a walker over `memory` for a thread whose captured CPU state
// is `context`. The base Stackwalker keeps non-owning pointers to all
// arguments; they must outlive this object. The CFI walker is seeded with
// the full register map above (array length computed at compile time).
StackwalkerAMD64::StackwalkerAMD64(const SystemInfo* system_info,
                                   const MDRawContextAMD64* context,
                                   MemoryRegion* memory,
                                   const CodeModules* modules,
                                   StackFrameSymbolizer* resolver_helper)
    : Stackwalker(system_info, memory, modules, resolver_helper),
      context_(context),
      cfi_walker_(cfi_register_map_,
                  (sizeof(cfi_register_map_) / sizeof(cfi_register_map_[0]))) {
}
104
// Returns the address this frame will return to, which on amd64 is simply
// the recovered %rip. Callers must not ask unless %rip is marked valid.
uint64_t StackFrameAMD64::ReturnAddress() const {
  assert(context_validity & StackFrameAMD64::CONTEXT_VALID_RIP);
  return context.rip;
}
109
110StackFrame* StackwalkerAMD64::GetContextFrame() {
111 if (!context_) {
112 BPLOG(ERROR) << "Can't get context frame without context";
113 return NULL;
114 }
115
116 StackFrameAMD64* frame = new StackFrameAMD64();
117
118 // The instruction pointer is stored directly in a register, so pull it
119 // straight out of the CPU context structure.
120 frame->context = *context_;
121 frame->context_validity = StackFrameAMD64::CONTEXT_VALID_ALL;
122 frame->trust = StackFrame::FRAME_TRUST_CONTEXT;
123 frame->instruction = frame->context.rip;
124
125 return frame;
126}
127
128StackFrameAMD64* StackwalkerAMD64::GetCallerByCFIFrameInfo(
129 const vector<StackFrame*>& frames,
130 CFIFrameInfo* cfi_frame_info) {
131 StackFrameAMD64* last_frame = static_cast<StackFrameAMD64*>(frames.back());
132
133 scoped_ptr<StackFrameAMD64> frame(new StackFrameAMD64());
134 if (!cfi_walker_
135 .FindCallerRegisters(*memory_, *cfi_frame_info,
136 last_frame->context, last_frame->context_validity,
137 &frame->context, &frame->context_validity))
138 return NULL;
139
140 // Make sure we recovered all the essentials.
141 static const int essentials = (StackFrameAMD64::CONTEXT_VALID_RIP
142 | StackFrameAMD64::CONTEXT_VALID_RSP);
143 if ((frame->context_validity & essentials) != essentials)
144 return NULL;
145
146 if (!frame->context.rip || !frame->context.rsp) {
147 BPLOG(ERROR) << "invalid rip/rsp";
148 return NULL;
149 }
150
151 frame->trust = StackFrame::FRAME_TRUST_CFI;
152 return frame.release();
153}
154
// Returns true if `ptr` is not in x86-64 canonical form. A canonical
// address has bits 63..47 all equal: all zero (low half, up to
// 0x00007FFFFFFFFFFF) or all one (high half, from 0xFFFF800000000000).
// https://en.wikipedia.org/wiki/X86-64#Virtual_address_space_details
static bool is_non_canonical(uint64_t ptr) {
  const uint64_t top_bits = ptr >> 47;  // bits 63..47 of the address
  return top_bits != 0 && top_bits != 0x1FFFF;
}
160
// Recovers the caller's frame by treating the callee's %rbp as a
// conventional frame pointer. Returns a new heap-allocated frame (trust
// FRAME_TRUST_FP; ownership passes to the caller) with %rip, %rsp, and
// %rbp recovered, or NULL if %rbp fails any plausibility check.
StackFrameAMD64* StackwalkerAMD64::GetCallerByFramePointerRecovery(
    const vector<StackFrame*>& frames) {
  StackFrameAMD64* last_frame = static_cast<StackFrameAMD64*>(frames.back());
  uint64_t last_rbp = last_frame->context.rbp;

  // Assume the presence of a frame pointer. This is not mandated by the
  // AMD64 ABI, c.f. section 3.2.2 footnote 7, though it is typical for
  // compilers to still preserve the frame pointer and not treat %rbp as a
  // general purpose register.
  //
  // With this assumption, the CALL instruction pushes the return address
  // onto the stack and sets %rip to the procedure to enter. The procedure
  // then establishes the stack frame with a prologue that PUSHes the current
  // %rbp onto the stack, MOVes the current %rsp to %rbp, and then allocates
  // space for any local variables. Using this procedure linking information,
  // it is possible to locate frame information for the callee:
  //
  // %caller_rsp = *(%callee_rbp + 16)
  // %caller_rip = *(%callee_rbp + 8)
  // %caller_rbp = *(%callee_rbp)

  // If rbp is not 8-byte aligned it can't be a frame pointer.
  if (last_rbp % 8 != 0) {
    return NULL;
  }

  // Read the saved return address and saved %rbp from the presumed frame.
  // If either read falls outside the captured stack memory, give up.
  uint64_t caller_rip, caller_rbp;
  if (memory_->GetMemoryAtAddress(last_rbp + 8, &caller_rip) &&
      memory_->GetMemoryAtAddress(last_rbp, &caller_rbp)) {
    uint64_t caller_rsp = last_rbp + 16;

    // If the recovered rip is not a canonical address it can't be
    // the return address, so rbp must not have been a frame pointer.
    if (is_non_canonical(caller_rip)) {
      return NULL;
    }

    // Check that rbp is within the right frame
    if (caller_rsp <= last_rbp || caller_rbp < caller_rsp) {
      return NULL;
    }

    // Sanity check that resulting rbp is still inside stack memory.
    uint64_t unused;
    if (!memory_->GetMemoryAtAddress(caller_rbp, &unused)) {
      return NULL;
    }

    // All checks passed: build the caller frame. Start from the callee's
    // context so unrecovered registers keep plausible values, but only
    // rip/rsp/rbp are marked valid.
    StackFrameAMD64* frame = new StackFrameAMD64();
    frame->trust = StackFrame::FRAME_TRUST_FP;
    frame->context = last_frame->context;
    frame->context.rip = caller_rip;
    frame->context.rsp = caller_rsp;
    frame->context.rbp = caller_rbp;
    frame->context_validity = StackFrameAMD64::CONTEXT_VALID_RIP |
                              StackFrameAMD64::CONTEXT_VALID_RSP |
                              StackFrameAMD64::CONTEXT_VALID_RBP;
    return frame;
  }

  return NULL;
}
223
// Last-resort recovery: scans stack memory above the callee's %rsp for a
// value that looks like a return address. Returns a new heap-allocated
// frame (trust FRAME_TRUST_SCAN; ownership passes to the caller), or NULL
// if no plausible return address is found.
StackFrameAMD64* StackwalkerAMD64::GetCallerByStackScan(
    const vector<StackFrame*>& frames) {
  StackFrameAMD64* last_frame = static_cast<StackFrameAMD64*>(frames.back());
  uint64_t last_rsp = last_frame->context.rsp;
  uint64_t caller_rip_address, caller_rip;

  if (!ScanForReturnAddress(last_rsp, &caller_rip_address, &caller_rip,
                            frames.size() == 1 /* is_context_frame */)) {
    // No plausible return address was found.
    return NULL;
  }

  // Create a new stack frame (ownership will be transferred to the caller)
  // and fill it in.
  StackFrameAMD64* frame = new StackFrameAMD64();

  frame->trust = StackFrame::FRAME_TRUST_SCAN;
  frame->context = last_frame->context;
  frame->context.rip = caller_rip;
  // The caller's %rsp is directly underneath the return address pushed by
  // the call.
  frame->context.rsp = caller_rip_address + 8;
  frame->context_validity = StackFrameAMD64::CONTEXT_VALID_RIP |
                            StackFrameAMD64::CONTEXT_VALID_RSP;

  // Other unwinders give up if they don't have an %rbp value, so see if we
  // can pass some plausible value on.
  if (last_frame->context_validity & StackFrameAMD64::CONTEXT_VALID_RBP) {
    // Functions typically push their caller's %rbp immediately upon entry,
    // and then set %rbp to point to that. So if the callee's %rbp is
    // pointing to the first word below the alleged return address, presume
    // that the caller's %rbp is saved there.
    if (caller_rip_address - 8 == last_frame->context.rbp) {
      uint64_t caller_rbp = 0;
      // Only accept a saved %rbp that points higher up the stack than the
      // return address itself; anything lower can't be the caller's frame.
      if (memory_->GetMemoryAtAddress(last_frame->context.rbp, &caller_rbp) &&
          caller_rbp > caller_rip_address) {
        frame->context.rbp = caller_rbp;
        frame->context_validity |= StackFrameAMD64::CONTEXT_VALID_RBP;
      }
    } else if (last_frame->context.rbp >= caller_rip_address + 8) {
      // If the callee's %rbp is plausible as a value for the caller's
      // %rbp, presume that the callee left it unchanged.
      frame->context.rbp = last_frame->context.rbp;
      frame->context_validity |= StackFrameAMD64::CONTEXT_VALID_RBP;
    }
  }

  return frame;
}
273
// Produces the frame that called the most recent frame on `stack`, trying
// recovery strategies from most to least reliable: CFI, then frame-pointer
// following, then (if allowed) stack scanning. Returns a new heap-allocated
// frame owned by the caller, or NULL if no caller frame can be recovered or
// the walk should terminate.
StackFrame* StackwalkerAMD64::GetCallerFrame(const CallStack* stack,
                                             bool stack_scan_allowed) {
  if (!memory_ || !stack) {
    BPLOG(ERROR) << "Can't get caller frame without memory or stack";
    return NULL;
  }

  const vector<StackFrame*>& frames = *stack->frames();
  StackFrameAMD64* last_frame = static_cast<StackFrameAMD64*>(frames.back());
  scoped_ptr<StackFrameAMD64> new_frame;

  // If we have DWARF CFI information, use it.
  scoped_ptr<CFIFrameInfo> cfi_frame_info(
      frame_symbolizer_->FindCFIFrameInfo(last_frame));
  if (cfi_frame_info.get())
    new_frame.reset(GetCallerByCFIFrameInfo(frames, cfi_frame_info.get()));

  // If CFI was not available or failed, try using frame pointer recovery.
  if (!new_frame.get()) {
    new_frame.reset(GetCallerByFramePointerRecovery(frames));
  }

  // If all else fails, fall back to stack scanning.
  if (stack_scan_allowed && !new_frame.get()) {
    new_frame.reset(GetCallerByStackScan(frames));
  }

  // If nothing worked, tell the caller.
  if (!new_frame.get())
    return NULL;

  if (system_info_->os_short == "nacl") {
    // Apply constraints from Native Client's x86-64 sandbox. These
    // registers have the 4GB-aligned sandbox base address (from r15)
    // added to them, and only the bottom 32 bits are relevant for
    // stack walking.
    new_frame->context.rip = static_cast<uint32_t>(new_frame->context.rip);
    new_frame->context.rsp = static_cast<uint32_t>(new_frame->context.rsp);
    new_frame->context.rbp = static_cast<uint32_t>(new_frame->context.rbp);
  }

  // Should we terminate the stack walk? (end-of-stack or broken invariant)
  if (TerminateWalk(new_frame->context.rip, new_frame->context.rsp,
                    last_frame->context.rsp, frames.size() == 1)) {
    return NULL;
  }

  // new_frame->context.rip is the return address, which is the instruction
  // after the CALL that caused us to arrive at the callee. Set
  // new_frame->instruction to one less than that, so it points within the
  // CALL instruction. See StackFrame::instruction for details, and
  // StackFrameAMD64::ReturnAddress.
  new_frame->instruction = new_frame->context.rip - 1;

  return new_frame.release();
}
330
331} // namespace google_breakpad
332