// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Allow dynamic symbol lookup in the kernel VDSO page.
//
// VDSOSupport -- a class representing kernel VDSO (if present).
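//
// A minimal usage sketch (illustrative only; it relies on nothing beyond the
// VDSOSupport interface declared in vdso_support.h):
//
//   absl::debugging_internal::VDSOSupport vdso;
//   if (vdso.IsPresent()) {
//     absl::debugging_internal::VDSOSupport::SymbolInfo info;
//     if (vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
//       // info.address points at the kernel-provided implementation.
//     }
//   }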

#include "absl/debugging/internal/vdso_support.h"

#ifdef ABSL_HAVE_VDSO_SUPPORT  // defined in vdso_support.h

#include <errno.h>
#include <fcntl.h>
#include <sys/syscall.h>
#include <unistd.h>

// glibc 2.16+ implements getauxval; __GLIBC_PREREQ may be undefined elsewhere.
#if defined(__GLIBC__) && \
    (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 16))
#define ABSL_HAVE_GETAUXVAL
#include <sys/auxv.h>
#endif

#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/port.h"

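// AT_SYSINFO_EHDR is the auxv tag whose value is the base address of the
// vDSO ELF image; define it ourselves for toolchains whose headers predate it.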
#ifndef AT_SYSINFO_EHDR
#define AT_SYSINFO_EHDR 33  // for crosstoolv10
#endif

namespace absl {
namespace debugging_internal {

ABSL_CONST_INIT
std::atomic<const void *> VDSOSupport::vdso_base_(
    debugging_internal::ElfMemImage::kInvalidBase);

std::atomic<VDSOSupport::GetCpuFn> VDSOSupport::getcpu_fn_(&InitAndGetCPU);
VDSOSupport::VDSOSupport()
    // If vdso_base_ is still set to kInvalidBase, we got here
    // before VDSOSupport::Init has been called. Call it now.
    : image_(vdso_base_.load(std::memory_order_relaxed) ==
                     debugging_internal::ElfMemImage::kInvalidBase
                 ? Init()
                 : vdso_base_.load(std::memory_order_relaxed)) {}

// NOTE: we can't use GoogleOnceInit() below, because we can be
// called by tcmalloc, and none of the *once* stuff may be functional yet.
//
// In addition, we hope that the VDSOInitHelper constructor
// causes this code to run before there are any threads, and before
// InitGoogle() has executed any chroot or setuid calls.
//
// Finally, even if there is a race here, it is harmless, because
// the operation should be idempotent.
const void *VDSOSupport::Init() {
  const auto kInvalidBase = debugging_internal::ElfMemImage::kInvalidBase;
#ifdef ABSL_HAVE_GETAUXVAL
  if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
    errno = 0;
    const void *const sysinfo_ehdr =
        reinterpret_cast<const void *>(getauxval(AT_SYSINFO_EHDR));
    if (errno == 0) {
      vdso_base_.store(sysinfo_ehdr, std::memory_order_relaxed);
    }
  }
#endif  // ABSL_HAVE_GETAUXVAL
  if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
    // Valgrind zaps AT_SYSINFO_EHDR and friends from the auxv[]
    // on stack, and so glibc works as if VDSO was not present.
    // But going directly to kernel via /proc/self/auxv below bypasses
    // Valgrind zapping. So we check for Valgrind separately.
    if (RunningOnValgrind()) {
      vdso_base_.store(nullptr, std::memory_order_relaxed);
      getcpu_fn_.store(&GetCPUViaSyscall, std::memory_order_relaxed);
      return nullptr;
    }
    int fd = open("/proc/self/auxv", O_RDONLY);
    if (fd == -1) {
      // Kernel too old to have a VDSO.
      vdso_base_.store(nullptr, std::memory_order_relaxed);
      getcpu_fn_.store(&GetCPUViaSyscall, std::memory_order_relaxed);
      return nullptr;
    }
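    // /proc/self/auxv is a sequence of ElfW(auxv_t) records -- {a_type,
    // a_un.a_val} pairs terminated by an AT_NULL entry.  The AT_SYSINFO_EHDR
    // entry, when present, holds the address of the vDSO's ELF header.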
    ElfW(auxv_t) aux;
    while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
      if (aux.a_type == AT_SYSINFO_EHDR) {
        vdso_base_.store(reinterpret_cast<void *>(aux.a_un.a_val),
                         std::memory_order_relaxed);
        break;
      }
    }
    close(fd);
    if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
      // Didn't find AT_SYSINFO_EHDR in auxv[].
      vdso_base_.store(nullptr, std::memory_order_relaxed);
    }
  }
  GetCpuFn fn = &GetCPUViaSyscall;  // default if VDSO not present.
  if (vdso_base_.load(std::memory_order_relaxed)) {
    VDSOSupport vdso;
    SymbolInfo info;
    if (vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
      fn = reinterpret_cast<GetCpuFn>(const_cast<void *>(info.address));
    }
  }
  // Subtle: this code runs outside of any locks; prevent compiler
  // from assigning to getcpu_fn_ more than once.
  getcpu_fn_.store(fn, std::memory_order_relaxed);
  return vdso_base_.load(std::memory_order_relaxed);
}

const void *VDSOSupport::SetBase(const void *base) {
  ABSL_RAW_CHECK(base != debugging_internal::ElfMemImage::kInvalidBase,
                 "internal error");
  const void *old_base = vdso_base_.load(std::memory_order_relaxed);
  vdso_base_.store(base, std::memory_order_relaxed);
  image_.Init(base);
  // Also reset getcpu_fn_, so GetCPU could be tested with simulated VDSO.
  getcpu_fn_.store(&InitAndGetCPU, std::memory_order_relaxed);
  return old_base;
}
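// An illustrative (test-only) use of SetBase(); the surrounding test code and
// the mock_vdso_base name are hypothetical, not part of this file:
//
//   const void *old_base = vdso.SetBase(mock_vdso_base);  // simulated VDSO
//   ... exercise the code under test ...
//   vdso.SetBase(old_base);  // restore the real state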

bool VDSOSupport::LookupSymbol(const char *name,
                               const char *version,
                               int type,
                               SymbolInfo *info) const {
  return image_.LookupSymbol(name, version, type, info);
}

bool VDSOSupport::LookupSymbolByAddress(const void *address,
                                        SymbolInfo *info_out) const {
  return image_.LookupSymbolByAddress(address, info_out);
}

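// GetCPUViaSyscall() below and the kernel's __vdso_getcpu both follow the
// sys_getcpu() interface (a cpu out-parameter, a node out-parameter, and an
// unused cache argument) and return 0 on success.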
// NOLINT on 'long' because this routine mimics kernel api.
long VDSOSupport::GetCPUViaSyscall(unsigned *cpu,  // NOLINT(runtime/int)
                                   void *, void *) {
#ifdef SYS_getcpu
  return syscall(SYS_getcpu, cpu, nullptr, nullptr);
#else
  // x86_64 never implemented sys_getcpu(), except as a VDSO call.
  static_cast<void>(cpu);  // Avoid an unused argument compiler warning.
  errno = ENOSYS;
  return -1;
#endif
}

// Use fast __vdso_getcpu if available.
long VDSOSupport::InitAndGetCPU(unsigned *cpu,  // NOLINT(runtime/int)
                                void *x, void *y) {
  Init();
  GetCpuFn fn = getcpu_fn_.load(std::memory_order_relaxed);
  ABSL_RAW_CHECK(fn != &InitAndGetCPU, "Init() did not set getcpu_fn_");
  return (*fn)(cpu, x, y);
}

// This function must be very fast, and may be called from very
// low level (e.g. tcmalloc). Hence I avoid things like
// GoogleOnceInit() and ::operator new.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
int GetCPU() {
  unsigned cpu;
  int ret_code = (*VDSOSupport::getcpu_fn_)(&cpu, nullptr, nullptr);
  return ret_code == 0 ? cpu : ret_code;
}
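// Illustrative call (a sketch; the surrounding code is hypothetical):
//
//   int cpu = absl::debugging_internal::GetCPU();
//   if (cpu >= 0) {
//     // Running on CPU number 'cpu'; a negative value indicates failure.
//   }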

// We need to make sure VDSOSupport::Init() is called before
// InitGoogle() does any setuid or chroot calls. If VDSOSupport
// is used in any global constructor, this will happen, since
// VDSOSupport's constructor calls Init. But if not, we need to
// ensure it here, with a global constructor of our own. This
// is an allowed exception to the normal rule against non-trivial
// global constructors.
static class VDSOInitHelper {
 public:
  VDSOInitHelper() { VDSOSupport::Init(); }
} vdso_init_helper;

}  // namespace debugging_internal
}  // namespace absl

#endif  // ABSL_HAVE_VDSO_SUPPORT