// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Produce stack trace

#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_
#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_

#if defined(__linux__) && (defined(__i386__) || defined(__x86_64__))
#include <ucontext.h>  // for ucontext_t
#endif

#if !defined(_WIN32)
#include <unistd.h>
#endif

#include <cassert>
#include <cstdint>

#include "absl/base/macros.h"
#include "absl/base/port.h"
#include "absl/debugging/internal/address_is_readable.h"
#include "absl/debugging/internal/vdso_support.h"  // a no-op on non-elf or non-glibc systems
#include "absl/debugging/stacktrace.h"

#include "absl/base/internal/raw_logging.h"

#if defined(__linux__) && defined(__i386__)
// Count "push %reg" instructions in VDSO __kernel_vsyscall(),
// preceding "syscall" or "sysenter".
// If __kernel_vsyscall uses a frame pointer, the answer is 0.
//
// kMaxBytes tells how many instruction bytes of __kernel_vsyscall
// to analyze before giving up. Up to kMaxBytes+1 bytes of
// instructions could be accessed.
//
// Here are known __kernel_vsyscall instruction sequences:
//
// SYSENTER (linux-2.6.26/arch/x86/vdso/vdso32/sysenter.S).
// Used on Intel.
// 0xffffe400 <__kernel_vsyscall+0>: push %ecx
// 0xffffe401 <__kernel_vsyscall+1>: push %edx
// 0xffffe402 <__kernel_vsyscall+2>: push %ebp
// 0xffffe403 <__kernel_vsyscall+3>: mov %esp,%ebp
// 0xffffe405 <__kernel_vsyscall+5>: sysenter
//
// SYSCALL (see linux-2.6.26/arch/x86/vdso/vdso32/syscall.S).
// Used on AMD.
// 0xffffe400 <__kernel_vsyscall+0>: push %ebp
// 0xffffe401 <__kernel_vsyscall+1>: mov %ecx,%ebp
// 0xffffe403 <__kernel_vsyscall+3>: syscall
//
// i386 (see linux-2.6.26/arch/x86/vdso/vdso32/int80.S).
// Not expected in practice; listed only for completeness.
// 0xffffe400 <__kernel_vsyscall+0>: int $0x80
// 0xffffe401 <__kernel_vsyscall+1>: ret
//
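// For the sequences above, CountPushInstructions() below returns 0 for
// SYSENTER (it stops at "mov %esp,%ebp", so %ebp-based unwinding works),
// 1 for SYSCALL (a single "push %ebp" precedes the frame-pointer-less
// syscall), and 0 for the int $0x80 variant.
//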
static const int kMaxBytes = 10;

// We use assert()s instead of DCHECK()s -- this is too low level
// for DCHECK().

static int CountPushInstructions(const unsigned char *const addr) {
  int result = 0;
  for (int i = 0; i < kMaxBytes; ++i) {
    if (addr[i] == 0x89) {
      // "mov reg,reg"
      if (addr[i + 1] == 0xE5) {
        // Found "mov %esp,%ebp".
        return 0;
      }
      ++i;  // Skip register encoding byte.
    } else if (addr[i] == 0x0F &&
               (addr[i + 1] == 0x34 || addr[i + 1] == 0x05)) {
      // Found "sysenter" or "syscall".
      return result;
    } else if ((addr[i] & 0xF0) == 0x50) {
      // Found "push %reg".
      ++result;
    } else if (addr[i] == 0xCD && addr[i + 1] == 0x80) {
      // Found "int $0x80".
      assert(result == 0);
      return 0;
    } else {
      // Unexpected instruction.
      assert(false && "unexpected instruction in __kernel_vsyscall");
      return 0;
    }
  }
  // Unexpected: didn't find SYSENTER or SYSCALL in
  // [__kernel_vsyscall, __kernel_vsyscall + kMaxBytes) interval.
  assert(false && "did not find SYSENTER or SYSCALL in __kernel_vsyscall");
  return 0;
}
#endif

// Assume stack frames larger than 100,000 bytes are bogus.
static const int kMaxFrameBytes = 100000;

// Returns the stack frame pointer from signal context, 0 if unknown.
// vuc is a ucontext_t *. We use void* to avoid the use
// of ucontext_t on non-POSIX systems.
static uintptr_t GetFP(const void *vuc) {
#if !defined(__linux__)
  static_cast<void>(vuc);  // Avoid an unused argument compiler warning.
#else
  if (vuc != nullptr) {
    auto *uc = reinterpret_cast<const ucontext_t *>(vuc);
#if defined(__i386__)
    const auto bp = uc->uc_mcontext.gregs[REG_EBP];
    const auto sp = uc->uc_mcontext.gregs[REG_ESP];
#elif defined(__x86_64__)
    const auto bp = uc->uc_mcontext.gregs[REG_RBP];
    const auto sp = uc->uc_mcontext.gregs[REG_RSP];
#else
    const uintptr_t bp = 0;
    const uintptr_t sp = 0;
#endif
    // Sanity-check that the base pointer is valid. It should be, as long as
    // SHRINK_WRAP_FRAME_POINTER is not set, but it's possible that some code
    // in the process is compiled with --copt=-fomit-frame-pointer or
    // --copt=-momit-leaf-frame-pointer.
    //
    // TODO(bcmills): -momit-leaf-frame-pointer is currently the default
    // behavior when building with clang. Talk to the C++ toolchain team about
    // fixing that.
    if (bp >= sp && bp - sp <= kMaxFrameBytes) return bp;

    // If bp isn't a plausible frame pointer, return the stack pointer instead.
    // If we're lucky, it points to the start of a stack frame; otherwise, we'll
    // get one frame of garbage in the stack trace and fail the sanity check on
    // the next iteration.
    return sp;
  }
#endif
  return 0;
}

// Given a pointer to a stack frame, locate and return the calling
// stackframe, or return null if no stackframe can be found. Perform sanity
// checks (the strictness of which is controlled by the boolean parameter
// "STRICT_UNWINDING") to reduce the chance that a bad pointer is returned.
template <bool STRICT_UNWINDING, bool WITH_CONTEXT>
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
static void **NextStackFrame(void **old_fp, const void *uc) {
  void **new_fp = (void **)*old_fp;

#if defined(__linux__) && defined(__i386__)
  if (WITH_CONTEXT && uc != nullptr) {
    // How many "push %reg" instructions are there at __kernel_vsyscall?
    // This is constant for a given kernel and processor, so compute
    // it only once.
    static int num_push_instructions = -1;  // Sentinel: not computed yet.
    // Initialize with sentinel value: __kernel_rt_sigreturn cannot possibly
    // be there.
    static const unsigned char *kernel_rt_sigreturn_address = nullptr;
    static const unsigned char *kernel_vsyscall_address = nullptr;
    if (num_push_instructions == -1) {
      absl::debugging_internal::VDSOSupport vdso;
      if (vdso.IsPresent()) {
        absl::debugging_internal::VDSOSupport::SymbolInfo
            rt_sigreturn_symbol_info;
        absl::debugging_internal::VDSOSupport::SymbolInfo vsyscall_symbol_info;
        if (!vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_2.5", STT_FUNC,
                               &rt_sigreturn_symbol_info) ||
            !vdso.LookupSymbol("__kernel_vsyscall", "LINUX_2.5", STT_FUNC,
                               &vsyscall_symbol_info) ||
            rt_sigreturn_symbol_info.address == nullptr ||
            vsyscall_symbol_info.address == nullptr) {
          // Unexpected: 32-bit VDSO is present, yet one of the expected
          // symbols is missing or null.
          assert(false && "VDSO is present, but doesn't have expected symbols");
          num_push_instructions = 0;
        } else {
          kernel_rt_sigreturn_address =
              reinterpret_cast<const unsigned char *>(
                  rt_sigreturn_symbol_info.address);
          kernel_vsyscall_address =
              reinterpret_cast<const unsigned char *>(
                  vsyscall_symbol_info.address);
          num_push_instructions =
              CountPushInstructions(kernel_vsyscall_address);
        }
      } else {
        num_push_instructions = 0;
      }
    }
    if (num_push_instructions != 0 && kernel_rt_sigreturn_address != nullptr &&
        old_fp[1] == kernel_rt_sigreturn_address) {
      const ucontext_t *ucv = static_cast<const ucontext_t *>(uc);
      // This kernel does not use a frame pointer in its VDSO code,
      // and so %ebp is not suitable for unwinding.
      void **const reg_ebp =
          reinterpret_cast<void **>(ucv->uc_mcontext.gregs[REG_EBP]);
      const unsigned char *const reg_eip =
          reinterpret_cast<unsigned char *>(ucv->uc_mcontext.gregs[REG_EIP]);
      if (new_fp == reg_ebp && kernel_vsyscall_address <= reg_eip &&
          reg_eip - kernel_vsyscall_address < kMaxBytes) {
        // We "stepped up" to __kernel_vsyscall, but %ebp is not usable.
        // Restore from 'ucv' instead.
        void **const reg_esp =
            reinterpret_cast<void **>(ucv->uc_mcontext.gregs[REG_ESP]);
        // Check that alleged %esp is not null and is reasonably aligned.
        if (reg_esp &&
            ((uintptr_t)reg_esp & (sizeof(reg_esp) - 1)) == 0) {
          // Check that alleged %esp is actually readable. This is to prevent
          // "double fault" in case we hit the first fault due to e.g. stack
          // corruption.
          void *const reg_esp2 = reg_esp[num_push_instructions - 1];
          if (absl::debugging_internal::AddressIsReadable(reg_esp2)) {
            // Alleged %esp is readable, use it for further unwinding.
            new_fp = reinterpret_cast<void **>(reg_esp2);
          }
        }
      }
    }
  }
#endif

  const uintptr_t old_fp_u = reinterpret_cast<uintptr_t>(old_fp);
  const uintptr_t new_fp_u = reinterpret_cast<uintptr_t>(new_fp);

  // Check that the transition from frame pointer old_fp to frame
  // pointer new_fp isn't clearly bogus. Skip the checks if new_fp
  // matches the signal context, so that we don't skip out early when
  // using an alternate signal stack.
  //
  // TODO(bcmills): The GetFP call should be completely unnecessary when
  // SHRINK_WRAP_FRAME_POINTER is set (because we should be back in the thread's
  // stack by this point), but it is empirically still needed (e.g. when the
  // stack includes a call to abort). unw_get_reg returns UNW_EBADREG for some
  // frames. Figure out why GetValidFrameAddr and/or libunwind isn't doing what
  // it's supposed to.
  if (STRICT_UNWINDING &&
      (!WITH_CONTEXT || uc == nullptr || new_fp_u != GetFP(uc))) {
    // With the stack growing downwards, an older stack frame must be
    // at a greater address than the current one.
    if (new_fp_u <= old_fp_u) return nullptr;
    if (new_fp_u - old_fp_u > kMaxFrameBytes) return nullptr;
  } else {
    if (new_fp == nullptr) return nullptr;  // skip AddressIsReadable() below
    // In the non-strict mode, allow discontiguous stack frames.
    // (alternate-signal-stacks for example).
    if (new_fp == old_fp) return nullptr;
  }

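  // A valid frame pointer should be aligned to the machine word size; an
  // unaligned value is almost certainly not a real frame.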
  if (new_fp_u & (sizeof(void *) - 1)) return nullptr;
#ifdef __i386__
  // On 32-bit machines, the stack pointer can be very close to
  // 0xffffffff, so we explicitly check for a pointer into the
  // last two pages in the address space.
  if (new_fp_u >= 0xffffe000) return nullptr;
#endif
#if !defined(_WIN32)
  if (!STRICT_UNWINDING) {
    // Lax sanity checks cause a crash in 32-bit tcmalloc/crash_reason_test
    // on AMD-based machines with VDSO-enabled kernels.
    // Make an extra sanity check to ensure new_fp is readable.
    // Note: NextStackFrame<false>() is only called while the program
    // is already on its last leg, so it's ok to be slow here.

    if (!absl::debugging_internal::AddressIsReadable(new_fp)) {
      return nullptr;
    }
  }
#endif
  return new_fp;
}

template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS  // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY   // May read random elements from stack.
ABSL_ATTRIBUTE_NOINLINE
static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
                      const void *ucp, int *min_dropped_frames) {
  int n = 0;
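  // Unwinding starts at this function's own frame (via
  // __builtin_frame_address); callers use skip_count to drop the innermost
  // frames they don't want reported.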
  void **fp = reinterpret_cast<void **>(__builtin_frame_address(0));

  while (fp && n < max_depth) {
    if (*(fp + 1) == reinterpret_cast<void *>(0)) {
      // In 64-bit code, we often see a frame that
      // points to itself and has a return address of 0.
      break;
    }
    void **next_fp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(fp, ucp);
    if (skip_count > 0) {
      skip_count--;
    } else {
      result[n] = *(fp + 1);
      if (IS_STACK_FRAMES) {
        if (next_fp > fp) {
          sizes[n] = (uintptr_t)next_fp - (uintptr_t)fp;
        } else {
          // A frame-size of 0 is used to indicate unknown frame size.
          sizes[n] = 0;
        }
      }
      n++;
    }
    fp = next_fp;
  }
  if (min_dropped_frames != nullptr) {
    // Implementation detail: we clamp the number of frames we are willing to
    // count, so as not to spend too much time in the loop below.
    const int kMaxUnwind = 1000;
    int j = 0;
    for (; fp != nullptr && j < kMaxUnwind; j++) {
      fp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(fp, ucp);
    }
    *min_dropped_frames = j;
  }
  return n;
}
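
// Usage sketch (illustrative only, not part of this file's interface): this
// file is included by absl/debugging/stacktrace.cc, which exposes UnwindImpl
// through the public absl::GetStackTrace() / absl::GetStackFrames() entry
// points declared in absl/debugging/stacktrace.h. A typical caller looks
// roughly like:
//
//   void* frames[32];
//   int depth = absl::GetStackTrace(frames, /*max_depth=*/32,
//                                   /*skip_count=*/1);
//   // Each frames[i] is a return address that can be passed to a
//   // symbolizer such as absl::Symbolize().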

namespace absl {
namespace debugging_internal {
bool StackTraceWorksForTest() {
  return true;
}
}  // namespace debugging_internal
}  // namespace absl

#endif  // ABSL_DEBUGGING_INTERNAL_STACKTRACE_X86_INL_INC_