// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//
// File: VirtualCallStub.CPP
//
// This file contains the virtual call stub manager and caches
//
// ============================================================================

17#include "common.h"
18#include "array.h"
19#ifdef FEATURE_PREJIT
20#include "compile.h"
21#endif
22
23#ifdef FEATURE_PERFMAP
24#include "perfmap.h"
25#endif
26
27#ifndef DACCESS_COMPILE
28
29//@TODO: make these conditional on whether logs are being produced
30//instrumentation counters
31UINT32 g_site_counter = 0; //# of call sites
32UINT32 g_site_write = 0; //# of call site backpatch writes
33UINT32 g_site_write_poly = 0; //# of call site backpatch writes to point to resolve stubs
34UINT32 g_site_write_mono = 0; //# of call site backpatch writes to point to dispatch stubs
35
36UINT32 g_stub_lookup_counter = 0; //# of lookup stubs
37UINT32 g_stub_mono_counter = 0; //# of dispatch stubs
38UINT32 g_stub_poly_counter = 0; //# of resolve stubs
39UINT32 g_stub_vtable_counter = 0; //# of vtable call stubs
40UINT32 g_stub_space = 0; //# of bytes of stubs
41
42UINT32 g_reclaim_counter = 0; //# of times a ReclaimAll was performed
43
44UINT32 g_worker_call = 0; //# of calls into ResolveWorker
45UINT32 g_worker_call_no_patch = 0;
46UINT32 g_worker_collide_to_mono = 0; //# of times we converted a poly stub to a mono stub instead of writing the cache entry
47
48UINT32 g_external_call = 0; //# of calls into GetTarget(token, pMT)
49UINT32 g_external_call_no_patch = 0;
50
51UINT32 g_insert_cache_external = 0; //# of times Insert was called for IK_EXTERNAL
52UINT32 g_insert_cache_shared = 0; //# of times Insert was called for IK_SHARED
53UINT32 g_insert_cache_dispatch = 0; //# of times Insert was called for IK_DISPATCH
54UINT32 g_insert_cache_resolve = 0; //# of times Insert was called for IK_RESOLVE
55UINT32 g_insert_cache_hit = 0; //# of times Insert found an empty cache entry
56UINT32 g_insert_cache_miss = 0; //# of times Insert already had a matching cache entry
57UINT32 g_insert_cache_collide = 0; //# of times Insert found a used cache entry
58UINT32 g_insert_cache_write = 0; //# of times Insert wrote a cache entry
59
60UINT32 g_cache_entry_counter = 0; //# of cache structs
61UINT32 g_cache_entry_space = 0; //# of bytes used by cache lookup structs
62
63UINT32 g_call_lookup_counter = 0; //# of times lookup stubs entered
64
UINT32 g_mono_call_counter = 0; //# of times dispatch stubs entered
66UINT32 g_mono_miss_counter = 0; //# of times expected MT did not match actual MT (dispatch stubs)
67
68UINT32 g_poly_call_counter = 0; //# of times resolve stubs entered
69UINT32 g_poly_miss_counter = 0; //# of times cache missed (resolve stub)
70
71UINT32 g_chained_lookup_call_counter = 0; //# of hits in a chained lookup
72UINT32 g_chained_lookup_miss_counter = 0; //# of misses in a chained lookup
73
74UINT32 g_chained_lookup_external_call_counter = 0; //# of hits in an external chained lookup
75UINT32 g_chained_lookup_external_miss_counter = 0; //# of misses in an external chained lookup
76
77UINT32 g_chained_entry_promoted = 0; //# of times a cache entry is promoted to the start of the chain
78
79UINT32 g_bucket_space = 0; //# of bytes in caches and tables, not including the stubs themselves
80UINT32 g_bucket_space_dead = 0; //# of bytes of abandoned buckets not yet recycled
81
82#endif // !DACCESS_COMPILE
83
84// This is the number of times a successful chain lookup will occur before the
85// entry is promoted to the front of the chain. This is declared as extern because
86// the default value (CALL_STUB_CACHE_INITIAL_SUCCESS_COUNT) is defined in the header.
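// (The split declaration below is presumably needed because the ARM64 assembly helpers reference
// this counter directly by its unmangled name, hence the C linkage on that target.)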
87#ifdef _TARGET_ARM64_
88extern "C" size_t g_dispatch_cache_chain_success_counter;
89#else
90extern size_t g_dispatch_cache_chain_success_counter;
91#endif
92
93#define DECLARE_DATA
94#include "virtualcallstub.h"
95#undef DECLARE_DATA
96#include "profilepriv.h"
97#include "contractimpl.h"
98
99SPTR_IMPL_INIT(VirtualCallStubManagerManager, VirtualCallStubManagerManager, g_pManager, NULL);
100
101#ifndef DACCESS_COMPILE
102
103#ifdef STUB_LOGGING
104UINT32 STUB_MISS_COUNT_VALUE = 100;
105UINT32 STUB_COLLIDE_WRITE_PCT = 100;
106UINT32 STUB_COLLIDE_MONO_PCT = 0;
107#endif // STUB_LOGGING
108
109FastTable* BucketTable::dead = NULL; //linked list of the abandoned buckets
110
111DispatchCache *g_resolveCache = NULL; //cache of dispatch stubs for in line lookup by resolve stubs.
112
113size_t g_dispatch_cache_chain_success_counter = CALL_STUB_CACHE_INITIAL_SUCCESS_COUNT;
114
115#ifdef STUB_LOGGING
116UINT32 g_resetCacheCounter;
117UINT32 g_resetCacheIncr;
118UINT32 g_dumpLogCounter;
119UINT32 g_dumpLogIncr;
120#endif // STUB_LOGGING
121
//@TODO: use the existing logging mechanisms. For now we write to a file.
123HANDLE g_hStubLogFile;
124
125void VirtualCallStubManager::StartupLogging()
126{
127 CONTRACTL
128 {
129 NOTHROW;
130 GC_TRIGGERS;
131 FORBID_FAULT;
132 }
133 CONTRACTL_END
134
135 GCX_PREEMP();
136
137 EX_TRY
138 {
        FAULT_NOT_FATAL(); // We handle file creation problems locally
140 SString str;
141 str.Printf(W("StubLog_%d.log"), GetCurrentProcessId());
142 g_hStubLogFile = WszCreateFile (str.GetUnicode(),
143 GENERIC_WRITE,
144 0,
145 0,
146 CREATE_ALWAYS,
147 FILE_ATTRIBUTE_NORMAL,
148 0);
149 }
150 EX_CATCH
151 {
152 }
153 EX_END_CATCH(SwallowAllExceptions)
154
155 if (g_hStubLogFile == INVALID_HANDLE_VALUE) {
156 g_hStubLogFile = NULL;
157 }
158}
159
160#define OUTPUT_FORMAT_INT "\t%-30s %d\r\n"
161#define OUTPUT_FORMAT_PCT "\t%-30s %#5.2f%%\r\n"
162#define OUTPUT_FORMAT_INT_PCT "\t%-30s %5d (%#5.2f%%)\r\n"
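// For illustration, OUTPUT_FORMAT_INT with ("site_counter", 42) produces a log line of the form
//      "\tsite_counter                   42\r\n"
// while OUTPUT_FORMAT_INT_PCT with ("insert_cache_hit", 7, 12.5) produces something like
//      "\tinsert_cache_hit                   7 (12.50%)\r\n"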
163
164void VirtualCallStubManager::LoggingDump()
165{
166 CONTRACTL
167 {
168 NOTHROW;
169 GC_TRIGGERS;
170 FORBID_FAULT;
171 }
172 CONTRACTL_END
173
174 VirtualCallStubManagerIterator it =
175 VirtualCallStubManagerManager::GlobalManager()->IterateVirtualCallStubManagers();
176
177 while (it.Next())
178 {
179 it.Current()->LogStats();
180 }
181
182 g_resolveCache->LogStats();
183
184 // Temp space to use for formatting the output.
185 static const int FMT_STR_SIZE = 160;
186 char szPrintStr[FMT_STR_SIZE];
187 DWORD dwWriteByte;
188
189 if(g_hStubLogFile)
190 {
191#ifdef STUB_LOGGING
192 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\nstub tuning parameters\r\n");
193 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
194
195 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\t%-30s %3d (0x%02x)\r\n", "STUB_MISS_COUNT_VALUE",
196 STUB_MISS_COUNT_VALUE, STUB_MISS_COUNT_VALUE);
197 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
198 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\t%-30s %3d%% (0x%02x)\r\n", "STUB_COLLIDE_WRITE_PCT",
199 STUB_COLLIDE_WRITE_PCT, STUB_COLLIDE_WRITE_PCT);
200 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
201 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\t%-30s %3d%% (0x%02x)\r\n", "STUB_COLLIDE_MONO_PCT",
202 STUB_COLLIDE_MONO_PCT, STUB_COLLIDE_MONO_PCT);
203 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
        sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\t%-30s %3d (0x%02x)\r\n", "DumpLogCounter",
                g_dumpLogCounter, g_dumpLogCounter);
        WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
        sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\t%-30s %3d (0x%02x)\r\n", "DumpLogIncr",
                g_dumpLogIncr, g_dumpLogIncr);
        WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
        sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\t%-30s %3d (0x%02x)\r\n", "ResetCacheCounter",
                g_resetCacheCounter, g_resetCacheCounter);
        WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
        sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\t%-30s %3d (0x%02x)\r\n", "ResetCacheIncr",
                g_resetCacheIncr, g_resetCacheIncr);
        WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
216#endif // STUB_LOGGING
217
218 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\nsite data\r\n");
219 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
220
221 //output counters
222 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "site_counter", g_site_counter);
223 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
224 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "site_write", g_site_write);
225 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
226 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "site_write_mono", g_site_write_mono);
227 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
228 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "site_write_poly", g_site_write_poly);
229 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
230
231 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\n%-30s %d\r\n", "reclaim_counter", g_reclaim_counter);
232 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
233
234 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\nstub data\r\n");
235 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
236
237 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "stub_lookup_counter", g_stub_lookup_counter);
238 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
239 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "stub_mono_counter", g_stub_mono_counter);
240 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
241 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "stub_poly_counter", g_stub_poly_counter);
242 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
243 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "stub_vtable_counter", g_stub_vtable_counter);
244 WriteFile(g_hStubLogFile, szPrintStr, (DWORD)strlen(szPrintStr), &dwWriteByte, NULL);
245 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "stub_space", g_stub_space);
246 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
247
248#ifdef STUB_LOGGING
249
250 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\nlookup stub data\r\n");
251 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
252
253 UINT32 total_calls = g_mono_call_counter + g_poly_call_counter;
254
255 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "lookup_call_counter", g_call_lookup_counter);
256 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
257
258 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\n%-30s %d\r\n", "total stub dispatch calls", total_calls);
259 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
260
261 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\n%-30s %#5.2f%%\r\n", "mono stub data",
262 100.0 * double(g_mono_call_counter)/double(total_calls));
263 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
264
265 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "mono_call_counter", g_mono_call_counter);
266 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
267 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "mono_miss_counter", g_mono_miss_counter);
268 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
269 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_PCT, "miss percent",
270 100.0 * double(g_mono_miss_counter)/double(g_mono_call_counter));
271 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
272
273 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\n%-30s %#5.2f%%\r\n", "poly stub data",
274 100.0 * double(g_poly_call_counter)/double(total_calls));
275 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
276
277 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "poly_call_counter", g_poly_call_counter);
278 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
279 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "poly_miss_counter", g_poly_miss_counter);
280 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
281 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_PCT, "miss percent",
282 100.0 * double(g_poly_miss_counter)/double(g_poly_call_counter));
283 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
284#endif // STUB_LOGGING
285
286#ifdef CHAIN_LOOKUP
287 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\nchain lookup data\r\n");
288 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
289
290#ifdef STUB_LOGGING
291 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "chained_lookup_call_counter", g_chained_lookup_call_counter);
292 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
293 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "chained_lookup_miss_counter", g_chained_lookup_miss_counter);
294 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
295 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_PCT, "miss percent",
296 100.0 * double(g_chained_lookup_miss_counter)/double(g_chained_lookup_call_counter));
297 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
298
299 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "chained_lookup_external_call_counter", g_chained_lookup_external_call_counter);
300 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
301 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "chained_lookup_external_miss_counter", g_chained_lookup_external_miss_counter);
302 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
303 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_PCT, "miss percent",
304 100.0 * double(g_chained_lookup_external_miss_counter)/double(g_chained_lookup_external_call_counter));
305 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
306#endif // STUB_LOGGING
307 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "chained_entry_promoted", g_chained_entry_promoted);
308 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
309#endif // CHAIN_LOOKUP
310
311#ifdef STUB_LOGGING
312 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\n%-30s %#5.2f%%\r\n", "worker (slow resolver) data",
313 100.0 * double(g_worker_call)/double(total_calls));
314#else // !STUB_LOGGING
315 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\nworker (slow resolver) data\r\n");
316#endif // !STUB_LOGGING
317 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
318
319 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "worker_call", g_worker_call);
320 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
321 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "worker_call_no_patch", g_worker_call_no_patch);
322 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
323 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "external_call", g_external_call);
324 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
325 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "external_call_no_patch", g_external_call_no_patch);
326 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
327 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "worker_collide_to_mono", g_worker_collide_to_mono);
328 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
329
330 UINT32 total_inserts = g_insert_cache_external
331 + g_insert_cache_shared
332 + g_insert_cache_dispatch
333 + g_insert_cache_resolve;
334
335 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\n%-30s %d\r\n", "insert cache data", total_inserts);
336 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
337
338 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_external", g_insert_cache_external,
339 100.0 * double(g_insert_cache_external)/double(total_inserts));
340 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
341 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_shared", g_insert_cache_shared,
342 100.0 * double(g_insert_cache_shared)/double(total_inserts));
343 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
344 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_dispatch", g_insert_cache_dispatch,
345 100.0 * double(g_insert_cache_dispatch)/double(total_inserts));
346 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
347 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_resolve", g_insert_cache_resolve,
348 100.0 * double(g_insert_cache_resolve)/double(total_inserts));
349 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
350 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_hit", g_insert_cache_hit,
351 100.0 * double(g_insert_cache_hit)/double(total_inserts));
352 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
353 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_miss", g_insert_cache_miss,
354 100.0 * double(g_insert_cache_miss)/double(total_inserts));
355 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
356 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_collide", g_insert_cache_collide,
357 100.0 * double(g_insert_cache_collide)/double(total_inserts));
358 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
359 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT_PCT, "insert_cache_write", g_insert_cache_write,
360 100.0 * double(g_insert_cache_write)/double(total_inserts));
361 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
362
363 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\ncache data\r\n");
364 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
365
366 size_t total, used;
367 g_resolveCache->GetLoadFactor(&total, &used);
368
369 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "cache_entry_used", used);
370 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
371 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "cache_entry_counter", g_cache_entry_counter);
372 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
373 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "cache_entry_space", g_cache_entry_space);
374 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
375
376 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\nstub hash table data\r\n");
377 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
378
379 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "bucket_space", g_bucket_space);
380 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
381 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "bucket_space_dead", g_bucket_space_dead);
382 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
383
384 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\ncache_load:\t%d used, %d total, utilization %#5.2f%%\r\n",
385 used, total, 100.0 * double(used) / double(total));
386 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
387
388#ifdef STUB_LOGGING
389 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\ncache entry write counts\r\n");
390 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
391 DispatchCache::CacheEntryData *rgCacheData = g_resolveCache->cacheData;
392 for (UINT16 i = 0; i < CALL_STUB_CACHE_SIZE; i++)
393 {
394 sprintf_s(szPrintStr, COUNTOF(szPrintStr), " %4d", rgCacheData[i]);
395 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
396 if (i % 16 == 15)
397 {
398 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\n");
399 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
400 }
401 }
402 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\n");
403 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
404#endif // STUB_LOGGING
405
406#if 0
407 for (unsigned i = 0; i < ContractImplMap::max_delta_count; i++)
408 {
409 if (ContractImplMap::deltasDescs[i] != 0)
410 {
411 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "deltasDescs[%d]\t%d\r\n", i, ContractImplMap::deltasDescs[i]);
412 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
413 }
414 }
415 for (unsigned i = 0; i < ContractImplMap::max_delta_count; i++)
416 {
417 if (ContractImplMap::deltasSlots[i] != 0)
418 {
419 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "deltasSlots[%d]\t%d\r\n", i, ContractImplMap::deltasSlots[i]);
420 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
421 }
422 }
        sprintf_s(szPrintStr, COUNTOF(szPrintStr), "count of maps:\t%d\r\n", ContractImplMap::countMaps);
424 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
425 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "count of interfaces:\t%d\r\n", ContractImplMap::countInterfaces);
426 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
427 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "count of deltas:\t%d\r\n", ContractImplMap::countDelta);
428 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
429 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "total delta for descs:\t%d\r\n", ContractImplMap::totalDeltaDescs);
430 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
431 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "total delta for slots:\t%d\r\n", ContractImplMap::totalDeltaSlots);
432 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
433
434#endif // 0
435 }
436}
437
438void VirtualCallStubManager::FinishLogging()
439{
440 LoggingDump();
441
442 if(g_hStubLogFile)
443 {
444 CloseHandle(g_hStubLogFile);
445 }
446 g_hStubLogFile = NULL;
447}
448
449void VirtualCallStubManager::ResetCache()
450{
451 CONTRACTL
452 {
453 NOTHROW;
454 GC_TRIGGERS;
455 FORBID_FAULT;
456 }
457 CONTRACTL_END
458
459 g_resolveCache->LogStats();
460
461 g_insert_cache_external = 0;
462 g_insert_cache_shared = 0;
463 g_insert_cache_dispatch = 0;
464 g_insert_cache_resolve = 0;
465 g_insert_cache_hit = 0;
466 g_insert_cache_miss = 0;
467 g_insert_cache_collide = 0;
468 g_insert_cache_write = 0;
469
    // Walk the resolve cache and unlink every entry, leaving the cache empty.
473 DispatchCache::Iterator it(g_resolveCache);
474 while (it.IsValid())
475 {
476 it.UnlinkEntry();
477 }
478
479}
480
481void VirtualCallStubManager::Init(BaseDomain *pDomain, LoaderAllocator *pLoaderAllocator)
482{
483 CONTRACTL {
484 THROWS;
485 GC_TRIGGERS;
486 PRECONDITION(CheckPointer(pDomain));
487 INJECT_FAULT(COMPlusThrowOM(););
488 } CONTRACTL_END;
489
490 // Record the parent domain
491 parentDomain = pDomain;
492 isCollectible = !!pLoaderAllocator->IsCollectible();
493
494 //
495 // Init critical sections
496 //
497
498 m_indCellLock.Init(CrstVSDIndirectionCellLock, CRST_UNSAFE_ANYMODE);
499
500 //
501 // Now allocate all BucketTables
502 //
503
504 NewHolder<BucketTable> resolvers_holder(new BucketTable(CALL_STUB_MIN_BUCKETS));
505 NewHolder<BucketTable> dispatchers_holder(new BucketTable(CALL_STUB_MIN_BUCKETS*2));
506 NewHolder<BucketTable> lookups_holder(new BucketTable(CALL_STUB_MIN_BUCKETS));
507 NewHolder<BucketTable> vtableCallers_holder(new BucketTable(CALL_STUB_MIN_BUCKETS));
508 NewHolder<BucketTable> cache_entries_holder(new BucketTable(CALL_STUB_MIN_BUCKETS));
509
510 //
511 // Now allocate our LoaderHeaps
512 //
513
    //
    // First do some calculation to determine how many pages we
    // will need to commit and reserve for each of our loader heaps
    //
518 DWORD indcell_heap_reserve_size;
519 DWORD indcell_heap_commit_size;
520 DWORD cache_entry_heap_reserve_size;
521 DWORD cache_entry_heap_commit_size;
522 DWORD lookup_heap_reserve_size;
523 DWORD lookup_heap_commit_size;
524 DWORD dispatch_heap_reserve_size;
525 DWORD dispatch_heap_commit_size;
526 DWORD resolve_heap_reserve_size;
527 DWORD resolve_heap_commit_size;
528 DWORD vtable_heap_reserve_size;
529 DWORD vtable_heap_commit_size;
530
    //
    // Set up an expected number of items to commit and reserve
    //
    // The commit number is not that important as we always commit at least one page worth of items.
    // The reserve number should be high enough to cover a typical large application,
    // in order to minimize the fragmentation of our rangelists.
    //
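    // As a rough worked example (assuming a 64-bit process and 4 KB OS pages): the default-domain
    // figure of 2000 reserved indirection cells below becomes 2000 * sizeof(void *) = 16,000 bytes
    // once converted to a byte count, and the align-up further down rounds that to 16 KB, i.e.
    // four whole pages of reserve for the indirection cell heap.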
538
539 if (parentDomain->IsDefaultDomain())
540 {
541 indcell_heap_commit_size = 16; indcell_heap_reserve_size = 2000;
542 cache_entry_heap_commit_size = 16; cache_entry_heap_reserve_size = 800;
543
544 lookup_heap_commit_size = 24; lookup_heap_reserve_size = 250;
545 dispatch_heap_commit_size = 24; dispatch_heap_reserve_size = 600;
546 resolve_heap_commit_size = 24; resolve_heap_reserve_size = 300;
547 vtable_heap_commit_size = 24; vtable_heap_reserve_size = 600;
548 }
549 else if (parentDomain->IsSharedDomain())
550 {
551 indcell_heap_commit_size = 16; indcell_heap_reserve_size = 100;
552#ifdef _WIN64
553 indcell_heap_reserve_size = 2000;
554#endif
555 cache_entry_heap_commit_size = 16; cache_entry_heap_reserve_size = 500;
556
557 lookup_heap_commit_size = 24; lookup_heap_reserve_size = 200;
558 dispatch_heap_commit_size = 24; dispatch_heap_reserve_size = 450;
559 resolve_heap_commit_size = 24; resolve_heap_reserve_size = 200;
560 vtable_heap_commit_size = 24; vtable_heap_reserve_size = 450;
561 }
562 else
563 {
564 indcell_heap_commit_size = 8; indcell_heap_reserve_size = 8;
565 cache_entry_heap_commit_size = 8; cache_entry_heap_reserve_size = 8;
566
567 lookup_heap_commit_size = 8; lookup_heap_reserve_size = 8;
568 dispatch_heap_commit_size = 8; dispatch_heap_reserve_size = 8;
569 resolve_heap_commit_size = 8; resolve_heap_reserve_size = 8;
570 vtable_heap_commit_size = 8; vtable_heap_reserve_size = 8;
571 }
572
573#ifdef _WIN64
574 // If we're on 64-bit, there's a ton of address space, so reserve more space to
575 // try to avoid getting into the situation where the resolve heap is more than
576 // a rel32 jump away from the dispatch heap, since this will cause us to produce
577 // larger dispatch stubs on AMD64.
578 dispatch_heap_reserve_size *= 10;
579 resolve_heap_reserve_size *= 10;
580#endif
581
582 //
583 // Convert the number of items into a size in bytes to commit and reserve
584 //
585 indcell_heap_reserve_size *= sizeof(void *);
586 indcell_heap_commit_size *= sizeof(void *);
587
588 cache_entry_heap_reserve_size *= sizeof(ResolveCacheElem);
589 cache_entry_heap_commit_size *= sizeof(ResolveCacheElem);
590
591 lookup_heap_reserve_size *= sizeof(LookupHolder);
592 lookup_heap_commit_size *= sizeof(LookupHolder);
593
594 DWORD dispatchHolderSize = sizeof(DispatchHolder);
595#ifdef _TARGET_AMD64_
596 dispatchHolderSize = static_cast<DWORD>(DispatchHolder::GetHolderSize(DispatchStub::e_TYPE_SHORT));
597#endif
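    // Note: on AMD64 the per-item estimate uses the short form of the dispatch stub; any stubs that
    // end up needing the larger variant (see the rel32 discussion above) will consume more space
    // than estimated, so this reservation is only a best-effort sizing.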
598
599 dispatch_heap_reserve_size *= dispatchHolderSize;
600 dispatch_heap_commit_size *= dispatchHolderSize;
601
602 resolve_heap_reserve_size *= sizeof(ResolveHolder);
603 resolve_heap_commit_size *= sizeof(ResolveHolder);
604
605 vtable_heap_reserve_size *= static_cast<DWORD>(VTableCallHolder::GetHolderSize(0));
606 vtable_heap_commit_size *= static_cast<DWORD>(VTableCallHolder::GetHolderSize(0));
607
608 //
609 // Align up all of the commit and reserve sizes
610 //
611 indcell_heap_reserve_size = (DWORD) ALIGN_UP(indcell_heap_reserve_size, GetOsPageSize());
612 indcell_heap_commit_size = (DWORD) ALIGN_UP(indcell_heap_commit_size, GetOsPageSize());
613
614 cache_entry_heap_reserve_size = (DWORD) ALIGN_UP(cache_entry_heap_reserve_size, GetOsPageSize());
615 cache_entry_heap_commit_size = (DWORD) ALIGN_UP(cache_entry_heap_commit_size, GetOsPageSize());
616
617 lookup_heap_reserve_size = (DWORD) ALIGN_UP(lookup_heap_reserve_size, GetOsPageSize());
618 lookup_heap_commit_size = (DWORD) ALIGN_UP(lookup_heap_commit_size, GetOsPageSize());
619
620 dispatch_heap_reserve_size = (DWORD) ALIGN_UP(dispatch_heap_reserve_size, GetOsPageSize());
621 dispatch_heap_commit_size = (DWORD) ALIGN_UP(dispatch_heap_commit_size, GetOsPageSize());
622
623 resolve_heap_reserve_size = (DWORD) ALIGN_UP(resolve_heap_reserve_size, GetOsPageSize());
624 resolve_heap_commit_size = (DWORD) ALIGN_UP(resolve_heap_commit_size, GetOsPageSize());
625
626 vtable_heap_reserve_size = (DWORD) ALIGN_UP(vtable_heap_reserve_size, GetOsPageSize());
627 vtable_heap_commit_size = (DWORD) ALIGN_UP(vtable_heap_commit_size, GetOsPageSize());
628
629 BYTE * initReservedMem = NULL;
630
631 if (!isCollectible)
632 {
633 DWORD dwTotalReserveMemSizeCalc = indcell_heap_reserve_size +
634 cache_entry_heap_reserve_size +
635 lookup_heap_reserve_size +
636 dispatch_heap_reserve_size +
637 resolve_heap_reserve_size +
638 vtable_heap_reserve_size;
639
640 DWORD dwTotalReserveMemSize = (DWORD) ALIGN_UP(dwTotalReserveMemSizeCalc, VIRTUAL_ALLOC_RESERVE_GRANULARITY);
641
    // If rounding the total reservation up left extra reserved memory, hand it out to the heaps so it is not wasted.
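    // For example (assuming 4 KB pages): if the granularity round-up added 24 KB of slack, that is
    // six wasted pages, so each of the six heaps picks up one extra page and nothing is left over.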
643 {
644 DWORD dwWastedReserveMemSize = dwTotalReserveMemSize - dwTotalReserveMemSizeCalc;
645 if (dwWastedReserveMemSize != 0)
646 {
647 DWORD cWastedPages = dwWastedReserveMemSize / GetOsPageSize();
648 DWORD cPagesPerHeap = cWastedPages / 6;
649 DWORD cPagesRemainder = cWastedPages % 6; // We'll throw this at the resolve heap
650
651 indcell_heap_reserve_size += cPagesPerHeap * GetOsPageSize();
652 cache_entry_heap_reserve_size += cPagesPerHeap * GetOsPageSize();
653 lookup_heap_reserve_size += cPagesPerHeap * GetOsPageSize();
654 dispatch_heap_reserve_size += cPagesPerHeap * GetOsPageSize();
655 vtable_heap_reserve_size += cPagesPerHeap * GetOsPageSize();
656 resolve_heap_reserve_size += cPagesPerHeap * GetOsPageSize();
657 resolve_heap_reserve_size += cPagesRemainder * GetOsPageSize();
658 }
659
660 CONSISTENCY_CHECK((indcell_heap_reserve_size +
661 cache_entry_heap_reserve_size +
662 lookup_heap_reserve_size +
663 dispatch_heap_reserve_size +
664 resolve_heap_reserve_size +
665 vtable_heap_reserve_size) ==
666 dwTotalReserveMemSize);
667 }
668
669 initReservedMem = ClrVirtualAllocExecutable (dwTotalReserveMemSize, MEM_RESERVE, PAGE_NOACCESS);
670
671 m_initialReservedMemForHeaps = (BYTE *) initReservedMem;
672
673 if (initReservedMem == NULL)
674 COMPlusThrowOM();
675 }
676 else
677 {
678 indcell_heap_reserve_size = GetOsPageSize();
679 indcell_heap_commit_size = GetOsPageSize();
680
681 cache_entry_heap_reserve_size = GetOsPageSize();
682 cache_entry_heap_commit_size = GetOsPageSize();
683
684 lookup_heap_reserve_size = GetOsPageSize();
685 lookup_heap_commit_size = GetOsPageSize();
686
687 dispatch_heap_reserve_size = GetOsPageSize();
688 dispatch_heap_commit_size = GetOsPageSize();
689
690 resolve_heap_reserve_size = GetOsPageSize();
691 resolve_heap_commit_size = GetOsPageSize();
692
693 // Heap for the collectible case is carefully tuned to sum up to 16 pages. Today, we only use the
694 // vtable jump stubs in the R2R scenario, which is unlikely to be loaded in the collectible context,
695 // so we'll keep the heap numbers at zero for now. If we ever use vtable stubs in the collectible
696 // scenario, we'll just allocate the memory on demand.
697 vtable_heap_reserve_size = 0;
698 vtable_heap_commit_size = 0;
699
700#ifdef _DEBUG
701 DWORD dwTotalReserveMemSizeCalc = indcell_heap_reserve_size +
702 cache_entry_heap_reserve_size +
703 lookup_heap_reserve_size +
704 dispatch_heap_reserve_size +
705 resolve_heap_reserve_size +
706 vtable_heap_reserve_size;
707#endif
708
709 DWORD dwActualVSDSize = 0;
710
711 initReservedMem = pLoaderAllocator->GetVSDHeapInitialBlock(&dwActualVSDSize);
712 _ASSERTE(dwActualVSDSize == dwTotalReserveMemSizeCalc);
713
714 m_initialReservedMemForHeaps = (BYTE *) initReservedMem;
715
716 if (initReservedMem == NULL)
717 COMPlusThrowOM();
718 }
719
720 // Hot memory, Writable, No-Execute, infrequent writes
721 NewHolder<LoaderHeap> indcell_heap_holder(
722 new LoaderHeap(indcell_heap_reserve_size, indcell_heap_commit_size,
723 initReservedMem, indcell_heap_reserve_size,
724#ifdef ENABLE_PERF_COUNTERS
725 &(GetPerfCounters().m_Loading.cbLoaderHeapSize),
726#else
727 NULL,
728#endif
729 NULL, FALSE));
730
731 initReservedMem += indcell_heap_reserve_size;
732
733 // Hot memory, Writable, No-Execute, infrequent writes
734 NewHolder<LoaderHeap> cache_entry_heap_holder(
735 new LoaderHeap(cache_entry_heap_reserve_size, cache_entry_heap_commit_size,
736 initReservedMem, cache_entry_heap_reserve_size,
737#ifdef ENABLE_PERF_COUNTERS
738 &(GetPerfCounters().m_Loading.cbLoaderHeapSize),
739#else
740 NULL,
741#endif
742 &cache_entry_rangeList, FALSE));
743
744 initReservedMem += cache_entry_heap_reserve_size;
745
746 // Warm memory, Writable, Execute, write exactly once
747 NewHolder<LoaderHeap> lookup_heap_holder(
748 new LoaderHeap(lookup_heap_reserve_size, lookup_heap_commit_size,
749 initReservedMem, lookup_heap_reserve_size,
750#ifdef ENABLE_PERF_COUNTERS
751 &(GetPerfCounters().m_Loading.cbLoaderHeapSize),
752#else
753 NULL,
754#endif
755 &lookup_rangeList, TRUE));
756
757 initReservedMem += lookup_heap_reserve_size;
758
759 // Hot memory, Writable, Execute, write exactly once
760 NewHolder<LoaderHeap> dispatch_heap_holder(
761 new LoaderHeap(dispatch_heap_reserve_size, dispatch_heap_commit_size,
762 initReservedMem, dispatch_heap_reserve_size,
763#ifdef ENABLE_PERF_COUNTERS
764 &(GetPerfCounters().m_Loading.cbLoaderHeapSize),
765#else
766 NULL,
767#endif
768 &dispatch_rangeList, TRUE));
769
770 initReservedMem += dispatch_heap_reserve_size;
771
772 // Hot memory, Writable, Execute, write exactly once
773 NewHolder<LoaderHeap> resolve_heap_holder(
774 new LoaderHeap(resolve_heap_reserve_size, resolve_heap_commit_size,
775 initReservedMem, resolve_heap_reserve_size,
776#ifdef ENABLE_PERF_COUNTERS
777 &(GetPerfCounters().m_Loading.cbLoaderHeapSize),
778#else
779 NULL,
780#endif
781 &resolve_rangeList, TRUE));
782
783 initReservedMem += resolve_heap_reserve_size;
784
785 // Hot memory, Writable, Execute, write exactly once
786 NewHolder<LoaderHeap> vtable_heap_holder(
787 new LoaderHeap(vtable_heap_reserve_size, vtable_heap_commit_size,
788 initReservedMem, vtable_heap_reserve_size,
789#ifdef ENABLE_PERF_COUNTERS
790 &(GetPerfCounters().m_Loading.cbLoaderHeapSize),
791#else
792 NULL,
793#endif
794 &vtable_rangeList, TRUE));
795
796 initReservedMem += vtable_heap_reserve_size;
797
798 // Allocate the initial counter block
799 NewHolder<counter_block> m_counters_holder(new counter_block);
800
801 //
802 // On success of every allocation, assign the objects and suppress the release
803 //
804
805 indcell_heap = indcell_heap_holder; indcell_heap_holder.SuppressRelease();
806 lookup_heap = lookup_heap_holder; lookup_heap_holder.SuppressRelease();
807 dispatch_heap = dispatch_heap_holder; dispatch_heap_holder.SuppressRelease();
808 resolve_heap = resolve_heap_holder; resolve_heap_holder.SuppressRelease();
809 vtable_heap = vtable_heap_holder; vtable_heap_holder.SuppressRelease();
810 cache_entry_heap = cache_entry_heap_holder; cache_entry_heap_holder.SuppressRelease();
811
812 resolvers = resolvers_holder; resolvers_holder.SuppressRelease();
813 dispatchers = dispatchers_holder; dispatchers_holder.SuppressRelease();
814 lookups = lookups_holder; lookups_holder.SuppressRelease();
815 vtableCallers = vtableCallers_holder; vtableCallers_holder.SuppressRelease();
816 cache_entries = cache_entries_holder; cache_entries_holder.SuppressRelease();
817
818 m_counters = m_counters_holder; m_counters_holder.SuppressRelease();
819
820 // Create the initial failure counter block
821 m_counters->next = NULL;
822 m_counters->used = 0;
823 m_cur_counter_block = m_counters;
824
825 m_cur_counter_block_for_reclaim = m_counters;
826 m_cur_counter_block_for_reclaim_index = 0;
827
828 // Keep track of all of our managers
829 VirtualCallStubManagerManager::GlobalManager()->AddStubManager(this);
830}
831
832void VirtualCallStubManager::Uninit()
833{
834 WRAPPER_NO_CONTRACT;
835
836 if (isCollectible)
837 {
838 parentDomain->GetCollectibleVSDRanges()->RemoveRanges(this);
839 }
840
    // Remove ourselves from the global manager's list of stub managers
842 VirtualCallStubManagerManager::GlobalManager()->RemoveStubManager(this);
843}
844
845VirtualCallStubManager::~VirtualCallStubManager()
846{
847 CONTRACTL {
848 NOTHROW;
849 GC_NOTRIGGER;
850 FORBID_FAULT;
851 } CONTRACTL_END;
852
853 LogStats();
854
855 // Go through each cache entry and if the cache element there is in
856 // the cache entry heap of the manager being deleted, then we just
857 // set the cache entry to empty.
858 DispatchCache::Iterator it(g_resolveCache);
859 while (it.IsValid())
860 {
861 // Using UnlinkEntry performs an implicit call to Next (see comment for UnlinkEntry).
862 // Thus, we need to avoid calling Next when we delete an entry so
863 // that we don't accidentally skip entries.
864 while (it.IsValid() && cache_entry_rangeList.IsInRange((TADDR)it.Entry()))
865 {
866 it.UnlinkEntry();
867 }
868 it.Next();
869 }
870
871 if (indcell_heap) { delete indcell_heap; indcell_heap = NULL;}
872 if (lookup_heap) { delete lookup_heap; lookup_heap = NULL;}
873 if (dispatch_heap) { delete dispatch_heap; dispatch_heap = NULL;}
874 if (resolve_heap) { delete resolve_heap; resolve_heap = NULL;}
875 if (vtable_heap) { delete vtable_heap; vtable_heap = NULL;}
876 if (cache_entry_heap) { delete cache_entry_heap; cache_entry_heap = NULL;}
877
878 if (resolvers) { delete resolvers; resolvers = NULL;}
879 if (dispatchers) { delete dispatchers; dispatchers = NULL;}
880 if (lookups) { delete lookups; lookups = NULL;}
881 if (vtableCallers) { delete vtableCallers; vtableCallers = NULL;}
882 if (cache_entries) { delete cache_entries; cache_entries = NULL;}
883
884 // Now get rid of the memory taken by the counter_blocks
885 while (m_counters != NULL)
886 {
887 counter_block *del = m_counters;
888 m_counters = m_counters->next;
889 delete del;
890 }
891
892 // This was the block reserved by Init for the heaps.
893 // For the collectible case, the VSD logic does not allocate the memory.
894 if (m_initialReservedMemForHeaps && !isCollectible)
895 ClrVirtualFree (m_initialReservedMemForHeaps, 0, MEM_RELEASE);
896
897 // Free critical section
898 m_indCellLock.Destroy();
899}
900
901// Initialize static structures, and start up logging if necessary
902void VirtualCallStubManager::InitStatic()
903{
904 STANDARD_VM_CONTRACT;
905
906#ifdef STUB_LOGGING
907 // Note if you change these values using environment variables then you must use hex values :-(
908 STUB_MISS_COUNT_VALUE = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubMissCount);
909 STUB_COLLIDE_WRITE_PCT = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubCollideWritePct);
910 STUB_COLLIDE_MONO_PCT = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubCollideMonoPct);
911 g_dumpLogCounter = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubDumpLogCounter);
912 g_dumpLogIncr = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubDumpLogIncr);
913 g_resetCacheCounter = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubResetCacheCounter);
914 g_resetCacheIncr = (INT32) CLRConfig::GetConfigValue(CLRConfig::INTERNAL_VirtualCallStubResetCacheIncr);
915#endif // STUB_LOGGING
916
917#ifndef STUB_DISPATCH_PORTABLE
918 DispatchHolder::InitializeStatic();
919 ResolveHolder::InitializeStatic();
920#endif // !STUB_DISPATCH_PORTABLE
921 LookupHolder::InitializeStatic();
922
923 g_resolveCache = new DispatchCache();
924
925 if(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_VirtualCallStubLogging))
926 StartupLogging();
927
928 VirtualCallStubManagerManager::InitStatic();
929}
930
931// Static shutdown code.
932// At the moment, this doesn't do anything more than log statistics.
933void VirtualCallStubManager::UninitStatic()
934{
935 CONTRACTL
936 {
937 NOTHROW;
938 GC_TRIGGERS;
939 FORBID_FAULT;
940 }
941 CONTRACTL_END
942
943 if (g_hStubLogFile != NULL)
944 {
945 VirtualCallStubManagerIterator it =
946 VirtualCallStubManagerManager::GlobalManager()->IterateVirtualCallStubManagers();
947 while (it.Next())
948 {
949 it.Current()->LogStats();
950 }
951
952 g_resolveCache->LogStats();
953
954 FinishLogging();
955 }
956}
957
/* Reclaim/rearrange any structures that can only be done during a GC sync point,
i.e. need to be serialized and non-concurrent. */
960void VirtualCallStubManager::ReclaimAll()
961{
962 STATIC_CONTRACT_NOTHROW;
963 STATIC_CONTRACT_GC_NOTRIGGER;
964 STATIC_CONTRACT_FORBID_FAULT;
965
    /* @todo: if/when app domain unloading is supported,
    and when we have app domain specific stub heaps, we can complete the unloading
    of an app domain stub heap at this point, and patch any existing stubs that are
    not being unloaded so that they no longer refer to any of the unloaded app domain's code or types
    */
971
972 //reclaim space of abandoned buckets
973 BucketTable::Reclaim();
974
975 VirtualCallStubManagerIterator it =
976 VirtualCallStubManagerManager::GlobalManager()->IterateVirtualCallStubManagers();
977 while (it.Next())
978 {
979 it.Current()->Reclaim();
980 }
981
982 g_reclaim_counter++;
983}
984
/* Reclaim/rearrange any structures that can only be done during a GC sync point,
i.e. need to be serialized and non-concurrent. */
987void VirtualCallStubManager::Reclaim()
988{
989 LIMITED_METHOD_CONTRACT;
990
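    // Each call handles at most 16 counters from the current reclaim block, adding back roughly a
    // tenth of the initial miss budget (STUB_MISS_COUNT_VALUE/10 + 1) to each one; the intent appears
    // to be to let drained failure counters recover gradually across successive GC sync points.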
991 UINT32 limit = min(counter_block::MAX_COUNTER_ENTRIES,
992 m_cur_counter_block_for_reclaim->used);
993 limit = min(m_cur_counter_block_for_reclaim_index + 16, limit);
994
995 for (UINT32 i = m_cur_counter_block_for_reclaim_index; i < limit; i++)
996 {
997 m_cur_counter_block_for_reclaim->block[i] += (STUB_MISS_COUNT_VALUE/10)+1;
998 }
999
1000 // Increment the index by the number we processed
1001 m_cur_counter_block_for_reclaim_index = limit;
1002
1003 // If we ran to the end of the block, go to the next
1004 if (m_cur_counter_block_for_reclaim_index == m_cur_counter_block->used)
1005 {
1006 m_cur_counter_block_for_reclaim = m_cur_counter_block_for_reclaim->next;
1007 m_cur_counter_block_for_reclaim_index = 0;
1008
1009 // If this was the last block in the chain, go back to the beginning
1010 if (m_cur_counter_block_for_reclaim == NULL)
1011 m_cur_counter_block_for_reclaim = m_counters;
1012 }
1013}
1014
1015#endif // !DACCESS_COMPILE
1016
1017//----------------------------------------------------------------------------
1018/* static */
1019VirtualCallStubManager *VirtualCallStubManager::FindStubManager(PCODE stubAddress, StubKind* wbStubKind, BOOL usePredictStubKind)
1020{
1021 CONTRACTL {
1022 NOTHROW;
1023 GC_NOTRIGGER;
1024 FORBID_FAULT;
1025 SO_TOLERANT;
1026 } CONTRACTL_END
1027
1028#ifndef DACCESS_COMPILE
1029 VirtualCallStubManager *pCur;
1030 StubKind kind;
1031
1032 //
1033 // See if we are managed by the current domain
1034 //
1035 AppDomain *pDomain = GetThread()->GetDomain();
1036 pCur = pDomain->GetLoaderAllocator()->GetVirtualCallStubManager();
1037 // For the following call stack:
1038 // SimpleRWLock::TryEnterRead
1039 // SimpleRWLock::EnterRead
1040 // LockedRangeList::IsInRangeWorker
1041 // VirtualCallStubManager::isDispatchingStub
1042 //
1043 CONTRACT_VIOLATION(SOToleranceViolation);
1044 kind = pCur->getStubKind(stubAddress, usePredictStubKind);
1045 if (kind != SK_UNKNOWN)
1046 {
1047 if (wbStubKind)
1048 *wbStubKind = kind;
1049 return pCur;
1050 }
1051
1052 //
1053 // See if we are managed by a collectible loader allocator
1054 //
1055 if (pDomain->GetCollectibleVSDRanges()->IsInRange(stubAddress, reinterpret_cast<TADDR *>(&pCur)))
1056 {
1057 _ASSERTE(pCur != NULL);
1058
1059 kind = pCur->getStubKind(stubAddress, usePredictStubKind);
1060 if (kind != SK_UNKNOWN)
1061 {
1062 if (wbStubKind)
1063 *wbStubKind = kind;
1064 return pCur;
1065 }
1066 }
1067
1068 if (wbStubKind)
1069 *wbStubKind = SK_UNKNOWN;
1070
1071#else // DACCESS_COMPILE
1072 _ASSERTE(!"DACCESS Not implemented.");
1073#endif // DACCESS_COMPILE
1074
1075 return NULL;
1076}
1077
1078/* for use by debugger.
1079*/
1080BOOL VirtualCallStubManager::CheckIsStub_Internal(PCODE stubStartAddress)
1081{
1082 STATIC_CONTRACT_NOTHROW;
1083 STATIC_CONTRACT_GC_NOTRIGGER;
1084 STATIC_CONTRACT_FORBID_FAULT;
1085 SUPPORTS_DAC;
1086
1087 BOOL fIsOwner = isStub(stubStartAddress);
1088
1089#if defined(_TARGET_X86_) && defined(FEATURE_PREJIT)
1090 if (!fIsOwner && parentDomain->IsDefaultDomain())
1091 {
1092 fIsOwner = (stubStartAddress == GetEEFuncEntryPoint(StubDispatchFixupStub));
1093 }
1094#endif // defined(_TARGET_X86_) && defined(FEATURE_PREJIT)
1095
1096 return fIsOwner;
1097}
1098
1099/* for use by debugger.
1100*/
1101
1102extern "C" void STDCALL StubDispatchFixupPatchLabel();
1103
1104BOOL VirtualCallStubManager::DoTraceStub(PCODE stubStartAddress, TraceDestination *trace)
1105{
1106 LIMITED_METHOD_CONTRACT;
1107
1108 LOG((LF_CORDB, LL_EVERYTHING, "VirtualCallStubManager::DoTraceStub called\n"));
1109
1110 _ASSERTE(CheckIsStub_Internal(stubStartAddress));
1111
1112#ifdef FEATURE_PREJIT
1113 if (stubStartAddress == GetEEFuncEntryPoint(StubDispatchFixupStub))
1114 {
1115 trace->InitForManagerPush(GetEEFuncEntryPoint(StubDispatchFixupPatchLabel), this);
1116 return TRUE;
1117 }
1118#endif
1119
    // @workaround: We really need the context to figure out where we're going, so
    // do a TRACE_MGR_PUSH so that TraceManager gets called and can use
    // the provided context to determine the target.
1123 trace->InitForManagerPush(stubStartAddress, this);
1124 return TRUE;
1125}
1126
1127//----------------------------------------------------------------------------
1128BOOL VirtualCallStubManager::TraceManager(Thread *thread,
1129 TraceDestination *trace,
1130 T_CONTEXT *pContext,
1131 BYTE **pRetAddr)
1132{
1133 CONTRACTL
1134 {
1135 THROWS;
1136 GC_TRIGGERS;
1137 INJECT_FAULT(COMPlusThrowOM(););
1138 }
1139 CONTRACTL_END
1140
1141#ifdef FEATURE_PREJIT
1142 // This is the case for the lazy slot fixup
1143 if (GetIP(pContext) == GetEEFuncEntryPoint(StubDispatchFixupPatchLabel)) {
1144
1145 *pRetAddr = (BYTE *)StubManagerHelpers::GetReturnAddress(pContext);
1146
1147 // The destination for the virtual invocation
1148 return StubManager::TraceStub(StubManagerHelpers::GetTailCallTarget(pContext), trace);
1149 }
1150#endif // FEATURE_PREJIT
1151
1152 TADDR pStub = GetIP(pContext);
1153
1154 // The return address should be on the top of the stack
1155 *pRetAddr = (BYTE *)StubManagerHelpers::GetReturnAddress(pContext);
1156
1157 // Get the token from the stub
1158 CONSISTENCY_CHECK(isStub(pStub));
1159 size_t token = GetTokenFromStub(pStub);
1160
    // Get the 'this' object from its argument register (ECX on x86)
1162 Object *pObj = StubManagerHelpers::GetThisPtr(pContext);
1163
1164 // Call common trace code.
1165 return (TraceResolver(pObj, token, trace));
1166}
1167
1168#ifndef DACCESS_COMPILE
1169
1170PCODE VirtualCallStubManager::GetCallStub(TypeHandle ownerType, MethodDesc *pMD)
1171{
1172 CONTRACTL {
1173 THROWS;
1174 GC_TRIGGERS;
1175 MODE_ANY;
1176 PRECONDITION(CheckPointer(pMD));
1177 PRECONDITION(!pMD->IsInterface() || ownerType.GetMethodTable()->HasSameTypeDefAs(pMD->GetMethodTable()));
1178 INJECT_FAULT(COMPlusThrowOM(););
1179 } CONTRACTL_END;
1180
1181 return GetCallStub(ownerType, pMD->GetSlot());
1182}
1183
1184//find or create a stub
1185PCODE VirtualCallStubManager::GetCallStub(TypeHandle ownerType, DWORD slot)
1186{
1187 CONTRACT (PCODE) {
1188 THROWS;
1189 GC_TRIGGERS;
1190 MODE_ANY;
1191 INJECT_FAULT(COMPlusThrowOM(););
1192 POSTCONDITION(RETVAL != NULL);
1193 } CONTRACT_END;
1194
1195 GCX_COOP(); // This is necessary for BucketTable synchronization
1196
1197 MethodTable * pMT = ownerType.GetMethodTable();
1198
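    // For interface dispatch the token must carry both the interface's type ID and the slot number;
    // for ordinary virtual dispatch the slot alone identifies the target, so a plain slot token suffices.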
1199 DispatchToken token;
1200 if (pMT->IsInterface())
1201 token = pMT->GetLoaderAllocator()->GetDispatchToken(pMT->GetTypeID(), slot);
1202 else
1203 token = DispatchToken::CreateDispatchToken(slot);
1204
    //get a stub from lookups, make one if necessary
1206 PCODE stub = CALL_STUB_EMPTY_ENTRY;
1207 PCODE addrOfResolver = GetEEFuncEntryPoint(ResolveWorkerAsmStub);
1208
1209 LookupEntry entryL;
1210 Prober probeL(&entryL);
1211 if (lookups->SetUpProber(token.To_SIZE_T(), 0, &probeL))
1212 {
1213 if ((stub = (PCODE)(lookups->Find(&probeL))) == CALL_STUB_EMPTY_ENTRY)
1214 {
1215 LookupHolder *pLookupHolder = GenerateLookupStub(addrOfResolver, token.To_SIZE_T());
1216 stub = (PCODE) (lookups->Add((size_t)(pLookupHolder->stub()->entryPoint()), &probeL));
1217 }
1218 }
1219
1220 _ASSERTE(stub != CALL_STUB_EMPTY_ENTRY);
1221 stats.site_counter++;
1222
1223 RETURN (stub);
1224}
1225
1226PCODE VirtualCallStubManager::GetVTableCallStub(DWORD slot)
1227{
1228 CONTRACT(PCODE) {
1229 THROWS;
1230 GC_TRIGGERS;
1231 MODE_ANY;
1232 INJECT_FAULT(COMPlusThrowOM(););
1233 PRECONDITION(!MethodTable::VTableIndir_t::isRelative /* Not yet supported */);
1234 POSTCONDITION(RETVAL != NULL);
1235 } CONTRACT_END;
1236
1237 GCX_COOP(); // This is necessary for BucketTable synchronization
1238
1239 PCODE stub = CALL_STUB_EMPTY_ENTRY;
1240
1241 VTableCallEntry entry;
1242 Prober probe(&entry);
1243 if (vtableCallers->SetUpProber(DispatchToken::CreateDispatchToken(slot).To_SIZE_T(), 0, &probe))
1244 {
1245 if ((stub = (PCODE)(vtableCallers->Find(&probe))) == CALL_STUB_EMPTY_ENTRY)
1246 {
1247 VTableCallHolder *pHolder = GenerateVTableCallStub(slot);
1248 stub = (PCODE)(vtableCallers->Add((size_t)(pHolder->stub()->entryPoint()), &probe));
1249 }
1250 }
1251
1252 _ASSERTE(stub != CALL_STUB_EMPTY_ENTRY);
1253 RETURN(stub);
1254}
1255
1256VTableCallHolder* VirtualCallStubManager::GenerateVTableCallStub(DWORD slot)
1257{
1258 CONTRACT(VTableCallHolder*) {
1259 THROWS;
1260 GC_TRIGGERS;
1261 MODE_ANY;
1262 INJECT_FAULT(COMPlusThrowOM(););
1263 PRECONDITION(!MethodTable::VTableIndir_t::isRelative /* Not yet supported */);
1264 POSTCONDITION(RETVAL != NULL);
1265 } CONTRACT_END;
1266
1267 //allocate from the requisite heap and copy the template over it.
1268 VTableCallHolder * pHolder = (VTableCallHolder*)(void*)vtable_heap->AllocAlignedMem(VTableCallHolder::GetHolderSize(slot), CODE_SIZE_ALIGN);
1269
1270 pHolder->Initialize(slot);
1271 ClrFlushInstructionCache(pHolder->stub(), pHolder->stub()->size());
1272
1273 AddToCollectibleVSDRangeList(pHolder);
1274
1275 //incr our counters
1276 stats.stub_vtable_counter++;
1277 stats.stub_space += (UINT32)pHolder->stub()->size();
1278 LOG((LF_STUBS, LL_INFO10000, "GenerateVTableCallStub for slot " FMT_ADDR "at" FMT_ADDR "\n",
1279 DBG_ADDR(slot), DBG_ADDR(pHolder->stub())));
1280
1281#ifdef FEATURE_PERFMAP
1282 PerfMap::LogStubs(__FUNCTION__, "GenerateVTableCallStub", (PCODE)pHolder->stub(), pHolder->stub()->size());
1283#endif
1284
1285 RETURN(pHolder);
1286}
1287
1288#ifdef FEATURE_PREJIT
1289extern "C" PCODE STDCALL StubDispatchFixupWorker(TransitionBlock * pTransitionBlock,
1290 TADDR siteAddrForRegisterIndirect,
1291 DWORD sectionIndex,
1292 Module * pModule)
1293{
1294 CONTRACTL {
1295 THROWS;
1296 GC_TRIGGERS;
1297 MODE_COOPERATIVE;
1298 ENTRY_POINT;
1299 } CONTRACTL_END;
1300
1301 PCODE pTarget = NULL;
1302
1303 MAKE_CURRENT_THREAD_AVAILABLE();
1304
1305#ifdef _DEBUG
1306 Thread::ObjectRefFlush(CURRENT_THREAD);
1307#endif
1308
1309 FrameWithCookie<StubDispatchFrame> frame(pTransitionBlock);
1310 StubDispatchFrame * pSDFrame = &frame;
1311
1312 PCODE returnAddress = pSDFrame->GetUnadjustedReturnAddress();
1313
1314 StubCallSite callSite(siteAddrForRegisterIndirect, returnAddress);
1315
1316 TADDR pIndirectCell = (TADDR)callSite.GetIndirectCell();
1317
1318 // FUTURE: Consider always passing in module and section index to avoid the lookups
1319 if (pModule == NULL)
1320 {
1321 pModule = ExecutionManager::FindZapModule(pIndirectCell);
1322 sectionIndex = (DWORD)-1;
1323 }
1324 _ASSERTE(pModule != NULL);
1325
1326 pSDFrame->SetCallSite(pModule, pIndirectCell);
1327
1328 pSDFrame->Push(CURRENT_THREAD);
1329 INSTALL_MANAGED_EXCEPTION_DISPATCHER;
1330 INSTALL_UNWIND_AND_CONTINUE_HANDLER;
1331
1332 PEImageLayout *pNativeImage = pModule->GetNativeOrReadyToRunImage();
1333
1334 DWORD rva = pNativeImage->GetDataRva(pIndirectCell);
1335
1336 PTR_CORCOMPILE_IMPORT_SECTION pImportSection;
1337 if (sectionIndex != (DWORD) -1)
1338 {
1339 pImportSection = pModule->GetImportSectionFromIndex(sectionIndex);
1340 _ASSERTE(pImportSection == pModule->GetImportSectionForRVA(rva));
1341 }
1342 else
1343 {
1344 pImportSection = pModule->GetImportSectionForRVA(rva);
1345 }
1346 _ASSERTE(pImportSection != NULL);
1347
1348 _ASSERTE(pImportSection->EntrySize == sizeof(TADDR));
1349
1350 COUNT_T index = (rva - VAL32(pImportSection->Section.VirtualAddress)) / sizeof(TADDR);
1351
1352 // Get the stub manager for this module
1353 VirtualCallStubManager *pMgr = pModule->GetLoaderAllocator()->GetVirtualCallStubManager();
1354
1355 // Force a GC on every jit if the stress level is high enough
1356 GCStress<cfg_any>::MaybeTrigger();
1357
1358 // Get the data section
1359 PTR_DWORD pSignatures = dac_cast<PTR_DWORD>(pNativeImage->GetRvaData(pImportSection->Signatures));
1360
1361 PCCOR_SIGNATURE pBlob = (BYTE *)pNativeImage->GetRvaData(pSignatures[index]);
1362
1363 BYTE kind = *pBlob++;
1364
1365 Module * pInfoModule = pModule;
1366 if (kind & ENCODE_MODULE_OVERRIDE)
1367 {
1368 DWORD moduleIndex = CorSigUncompressData(pBlob);
1369 pInfoModule = pModule->GetModuleFromIndex(moduleIndex);
1370 kind &= ~ENCODE_MODULE_OVERRIDE;
1371 }
1372 _ASSERTE(kind == ENCODE_VIRTUAL_ENTRY_SLOT);
1373
1374 DWORD slot = CorSigUncompressData(pBlob);
1375
1376 TypeHandle ownerType = ZapSig::DecodeType(pModule, pInfoModule, pBlob);
1377
1378 MethodTable * pMT = ownerType.GetMethodTable();
1379
1380 DispatchToken token;
1381 if (pMT->IsInterface())
1382 token = pMT->GetLoaderAllocator()->GetDispatchToken(pMT->GetTypeID(), slot);
1383 else
1384 token = DispatchToken::CreateDispatchToken(slot);
1385
1386 OBJECTREF *protectedObj = pSDFrame->GetThisPtr();
1387 _ASSERTE(protectedObj != NULL);
1388 if (*protectedObj == NULL) {
1389 COMPlusThrow(kNullReferenceException);
1390 }
1391
1392 pTarget = pMgr->ResolveWorker(&callSite, protectedObj, token, VirtualCallStubManager::SK_LOOKUP);
1393 _ASSERTE(pTarget != NULL);
1394
1395 // Ready to return
1396
1397 UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
1398 UNINSTALL_MANAGED_EXCEPTION_DISPATCHER;
1399 pSDFrame->Pop(CURRENT_THREAD);
1400
1401 return pTarget;
1402}
1403#endif // FEATURE_PREJIT
1404
1405//+----------------------------------------------------------------------------
1406//
1407// Method: VirtualCallStubManager::GenerateStubIndirection
1408//
1409 // Synopsis: This method allocates an indirection cell for use by virtual stub dispatch (currently
1410 // only implemented for interface calls).
1411 // For normal methods: the indirection cell allocated will never be freed until app domain unload.
1412 // For dynamic methods: we recycle the indirection cells when a dynamic method is collected. To
1413 // do that we keep all the recycled indirection cells in a linked list: m_RecycledIndCellList. When
1414 // a dynamic method needs an indirection cell it allocates one from m_RecycledIndCellList. Each
1415 // dynamic method keeps track of all the indirection cells it uses and adds them back to
1416 // m_RecycledIndCellList when it is finalized.
1417//
1418//+----------------------------------------------------------------------------
1419BYTE *VirtualCallStubManager::GenerateStubIndirection(PCODE target, BOOL fUseRecycledCell /* = FALSE*/ )
1420{
1421 CONTRACT (BYTE*) {
1422 THROWS;
1423 GC_TRIGGERS;
1424 INJECT_FAULT(COMPlusThrowOM(););
1425 PRECONDITION(target != NULL);
1426 POSTCONDITION(CheckPointer(RETVAL));
1427 } CONTRACT_END;
1428
1429 _ASSERTE(isStub(target));
1430
1431 CrstHolder lh(&m_indCellLock);
1432
1433 // The indirection cell to hold the pointer to the stub
1434 BYTE * ret = NULL;
1435 UINT32 cellsPerBlock = INDCELLS_PER_BLOCK;
1436
1437 // First try the recycled indirection cell list for Dynamic methods
1438 if (fUseRecycledCell)
1439 ret = GetOneRecycledIndCell();
1440
1441 // Try the free indirection cell list
1442 if (!ret)
1443 ret = GetOneFreeIndCell();
1444
1445 // Allocate from loader heap
1446 if (!ret)
1447 {
1448 // Free list is empty, allocate a block of indcells from indcell_heap and insert it into the free list.
1449 BYTE ** pBlock = (BYTE **) (void *) indcell_heap->AllocMem(S_SIZE_T(cellsPerBlock) * S_SIZE_T(sizeof(BYTE *)));
1450
1451 // return the first cell in the block and add the rest to the free list
1452 ret = (BYTE *)pBlock;
1453
1454 // link all the cells together
1455 // we don't need to null terminate the linked list, InsertIntoFreeIndCellList will do it.
1456 for (UINT32 i = 1; i < cellsPerBlock - 1; ++i)
1457 {
1458 pBlock[i] = (BYTE *)&(pBlock[i+1]);
1459 }
1460
1461 // insert the list into the free indcell list.
1462 InsertIntoFreeIndCellList((BYTE *)&pBlock[1], (BYTE*)&pBlock[cellsPerBlock - 1]);
1463 }
1464
1465 *((PCODE *)ret) = target;
1466 RETURN ret;
1467}
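//-----------------------------------------------------------------------------
// Illustration only (not compiled): the block-carving pattern used above,
// sketched with plain C++ in place of indcell_heap and the Crst lock, and a
// hypothetical block size N. Cell 0 is handed to the caller; cells 1..N-1 are
// threaded into a singly linked free list, leaving the final link for
// InsertIntoFreeIndCellList to terminate.
//
//     BYTE ** pBlock = (BYTE **) malloc(N * sizeof(BYTE *)); // stand-in for the loader heap
//     BYTE *  ret    = (BYTE *) pBlock;                      // cell 0 goes to the caller
//     for (UINT32 i = 1; i < N - 1; ++i)
//         pBlock[i] = (BYTE *) &pBlock[i + 1];               // cell i points at cell i+1
//     // push &pBlock[1] .. &pBlock[N-1] onto the free list (which NULL-terminates the chain)
//-----------------------------------------------------------------------------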
1468
1469ResolveCacheElem *VirtualCallStubManager::GetResolveCacheElem(void *pMT,
1470 size_t token,
1471 void *target)
1472{
1473 CONTRACTL
1474 {
1475 THROWS;
1476 GC_TRIGGERS;
1477 MODE_COOPERATIVE;
1478 INJECT_FAULT(COMPlusThrowOM(););
1479 }
1480 CONTRACTL_END
1481
1482 // Get a cache entry elem, or make one if necessary
1483 ResolveCacheElem* elem = NULL;
1484 ResolveCacheEntry entryRC;
1485 Prober probeRC(&entryRC);
1486 if (cache_entries->SetUpProber(token, (size_t) pMT, &probeRC))
1487 {
1488 elem = (ResolveCacheElem*) (cache_entries->Find(&probeRC));
1489 if (elem == CALL_STUB_EMPTY_ENTRY)
1490 {
1491 elem = GenerateResolveCacheElem(target, pMT, token);
1492 elem = (ResolveCacheElem*) (cache_entries->Add((size_t) elem, &probeRC));
1493 }
1494 }
1495 _ASSERTE(elem && (elem != CALL_STUB_EMPTY_ENTRY));
1496 return elem;
1497}
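//-----------------------------------------------------------------------------
// Illustration only (not compiled): the find-or-create shape above, sketched
// with a std::map keyed on <token, MethodTable*> as a stand-in for the
// prober-based hash table (the real table takes no locks and entries are
// never removed).
//
//     auto key = std::make_pair(token, (size_t) pMT);
//     auto it  = cache.find(key);
//     if (it == cache.end())
//         it = cache.emplace(key, GenerateResolveCacheElem(target, pMT, token)).first;
//     return it->second;
//-----------------------------------------------------------------------------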
1498
1499#endif // !DACCESS_COMPILE
1500
1501size_t VirtualCallStubManager::GetTokenFromStub(PCODE stub)
1502{
1503 CONTRACTL
1504 {
1505 NOTHROW;
1506 GC_NOTRIGGER;
1507 FORBID_FAULT;
1508 }
1509 CONTRACTL_END
1510
1511 _ASSERTE(stub != NULL);
1512 StubKind stubKind = SK_UNKNOWN;
1513 VirtualCallStubManager * pMgr = FindStubManager(stub, &stubKind);
1514
1515 return GetTokenFromStubQuick(pMgr, stub, stubKind);
1516}
1517
1518size_t VirtualCallStubManager::GetTokenFromStubQuick(VirtualCallStubManager * pMgr, PCODE stub, StubKind kind)
1519{
1520 CONTRACTL
1521 {
1522 NOTHROW;
1523 GC_NOTRIGGER;
1524 FORBID_FAULT;
1525 }
1526 CONTRACTL_END
1527
1528 _ASSERTE(pMgr != NULL);
1529 _ASSERTE(stub != NULL);
1530 _ASSERTE(kind != SK_UNKNOWN);
1531
1532#ifndef DACCESS_COMPILE
1533
1534 if (kind == SK_DISPATCH)
1535 {
1536 _ASSERTE(pMgr->isDispatchingStub(stub));
1537 DispatchStub * dispatchStub = (DispatchStub *) PCODEToPINSTR(stub);
1538 ResolveHolder * resolveHolder = ResolveHolder::FromFailEntry(dispatchStub->failTarget());
1539 _ASSERTE(pMgr->isResolvingStub(resolveHolder->stub()->resolveEntryPoint()));
1540 return resolveHolder->stub()->token();
1541 }
1542 else if (kind == SK_RESOLVE)
1543 {
1544 _ASSERTE(pMgr->isResolvingStub(stub));
1545 ResolveHolder * resolveHolder = ResolveHolder::FromResolveEntry(stub);
1546 return resolveHolder->stub()->token();
1547 }
1548 else if (kind == SK_LOOKUP)
1549 {
1550 _ASSERTE(pMgr->isLookupStub(stub));
1551 LookupHolder * lookupHolder = LookupHolder::FromLookupEntry(stub);
1552 return lookupHolder->stub()->token();
1553 }
1554 else if (kind == SK_VTABLECALL)
1555 {
1556 _ASSERTE(pMgr->isVTableCallStub(stub));
1557 VTableCallStub * vtableStub = (VTableCallStub *)PCODEToPINSTR(stub);
1558 return vtableStub->token();
1559 }
1560
1561 _ASSERTE(!"Should not get here.");
1562
1563#else // DACCESS_COMPILE
1564
1565 DacNotImpl();
1566
1567#endif // DACCESS_COMPILE
1568
1569 return 0;
1570}
1571
1572#ifndef DACCESS_COMPILE
1573
1574#ifdef CHAIN_LOOKUP
1575ResolveCacheElem* __fastcall VirtualCallStubManager::PromoteChainEntry(ResolveCacheElem *pElem)
1576{
1577 CONTRACTL {
1578 NOTHROW;
1579 GC_NOTRIGGER;
1580 FORBID_FAULT;
1581 SO_TOLERANT;
1582 PRECONDITION(CheckPointer(pElem));
1583 } CONTRACTL_END;
1584
1585 // @todo - Remove this when we have a probe that generates a hard SO.
1586 CONTRACT_VIOLATION(SOToleranceViolation);
1587 g_resolveCache->PromoteChainEntry(pElem);
1588 return pElem;
1589}
1590#endif // CHAIN_LOOKUP
1591
1592/* Resolve to a method and return its address or NULL if there is none.
1593 Our return value is the target address that control should continue to. Our caller will
1594 enter the target address as if a direct call with the original stack frame had been made from
1595 the actual call site. Hence our strategy is to either return a target address
1596 of the actual method implementation, or the prestub if we cannot find the actual implementation.
1597 If we are returning a real method address, we may patch the original call site to point to a
1598 dispatching stub before returning. Note, if we encounter a method that hasn't been jitted
1599 yet, we will return the prestub, which should cause it to be jitted and we will
1600 be able to build the dispatching stub on a later call through the call site. If we encounter
1601 any other kind of problem, rather than throwing an exception, we will also return the
1602 prestub, unless we are unable to find the method at all, in which case we return NULL.
1603 */
1604PCODE VSD_ResolveWorker(TransitionBlock * pTransitionBlock,
1605 TADDR siteAddrForRegisterIndirect,
1606 size_t token
1607#ifndef _TARGET_X86_
1608 , UINT_PTR flags
1609#endif
1610 )
1611{
1612 CONTRACTL {
1613 THROWS;
1614 GC_TRIGGERS;
1615 INJECT_FAULT(COMPlusThrowOM(););
1616 PRECONDITION(CheckPointer(pTransitionBlock));
1617 MODE_COOPERATIVE;
1618 SO_TOLERANT;
1619 } CONTRACTL_END;
1620
1621 MAKE_CURRENT_THREAD_AVAILABLE();
1622
1623#ifdef _DEBUG
1624 Thread::ObjectRefFlush(CURRENT_THREAD);
1625#endif
1626
1627 FrameWithCookie<StubDispatchFrame> frame(pTransitionBlock);
1628 StubDispatchFrame * pSDFrame = &frame;
1629
1630 PCODE returnAddress = pSDFrame->GetUnadjustedReturnAddress();
1631
1632 StubCallSite callSite(siteAddrForRegisterIndirect, returnAddress);
1633
1634 OBJECTREF *protectedObj = pSDFrame->GetThisPtr();
1635 _ASSERTE(protectedObj != NULL);
1636 OBJECTREF pObj = *protectedObj;
1637
1638 PCODE target = NULL;
1639
1640 if (pObj == NULL) {
1641 pSDFrame->SetForNullReferenceException();
1642 pSDFrame->Push(CURRENT_THREAD);
1643 INSTALL_MANAGED_EXCEPTION_DISPATCHER;
1644 INSTALL_UNWIND_AND_CONTINUE_HANDLER;
1645 COMPlusThrow(kNullReferenceException);
1646 UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
1647 UNINSTALL_MANAGED_EXCEPTION_DISPATCHER;
1648 _ASSERTE(!"Throw returned");
1649 }
1650
1651#ifndef _TARGET_X86_
1652 if (flags & SDF_ResolvePromoteChain)
1653 {
1654 BEGIN_SO_INTOLERANT_CODE(CURRENT_THREAD);
1655
1656 ResolveCacheElem * pElem = (ResolveCacheElem *)token;
1657 g_resolveCache->PromoteChainEntry(pElem);
1658 target = (PCODE) pElem->target;
1659
1660 // Have we failed the dispatch stub too many times?
1661 if (flags & SDF_ResolveBackPatch)
1662 {
1663 PCODE stubAddr = callSite.GetSiteTarget();
1664 VirtualCallStubManager * pMgr = VirtualCallStubManager::FindStubManager(stubAddr);
1665 pMgr->BackPatchWorker(&callSite);
1666 }
1667
1668 END_SO_INTOLERANT_CODE;
1669
1670 return target;
1671 }
1672#endif
1673
1674 pSDFrame->SetCallSite(NULL, (TADDR)callSite.GetIndirectCell());
1675
1676 DispatchToken representativeToken(token);
1677 MethodTable * pRepresentativeMT = pObj->GetMethodTable();
1678 if (representativeToken.IsTypedToken())
1679 {
1680 pRepresentativeMT = CURRENT_THREAD->GetDomain()->LookupType(representativeToken.GetTypeID());
1681 CONSISTENCY_CHECK(CheckPointer(pRepresentativeMT));
1682 }
1683
1684 pSDFrame->SetRepresentativeSlot(pRepresentativeMT, representativeToken.GetSlotNumber());
1685 pSDFrame->Push(CURRENT_THREAD);
1686 INSTALL_MANAGED_EXCEPTION_DISPATCHER;
1687 INSTALL_UNWIND_AND_CONTINUE_HANDLER;
1688
1689 // For virtual delegates, m_siteAddr is a field of a managed object.
1690 // Thus we have to report it as an interior pointer
1691 // so that it is updated during a GC.
1692 GCPROTECT_BEGININTERIOR( *(callSite.GetIndirectCellAddress()) );
1693
1694 GCStress<vsd_on_resolve>::MaybeTriggerAndProtect(pObj);
1695
1696 PCODE callSiteTarget = callSite.GetSiteTarget();
1697 CONSISTENCY_CHECK(callSiteTarget != NULL);
1698
1699 VirtualCallStubManager::StubKind stubKind = VirtualCallStubManager::SK_UNKNOWN;
1700 VirtualCallStubManager *pMgr = VirtualCallStubManager::FindStubManager(callSiteTarget, &stubKind);
1701 PREFIX_ASSUME(pMgr != NULL);
1702
1703#ifndef _TARGET_X86_
1704 // Have we failed the dispatch stub too many times?
1705 if (flags & SDF_ResolveBackPatch)
1706 {
1707 pMgr->BackPatchWorker(&callSite);
1708 }
1709#endif
1710
1711 target = pMgr->ResolveWorker(&callSite, protectedObj, token, stubKind);
1712
1713 GCPROTECT_END();
1714
1715 UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
1716 UNINSTALL_MANAGED_EXCEPTION_DISPATCHER;
1717 pSDFrame->Pop(CURRENT_THREAD);
1718
1719 return target;
1720}
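//-----------------------------------------------------------------------------
// Illustration only: the call-site life cycle that the resolve worker drives,
// written as pseudo-C++. The helpers named here are hypothetical; the real
// checks live in the hand-written lookup/dispatch/resolve stubs.
//
//     PCODE InvokeVirtual(Object * pObj)
//     {
//         // dispatch stub: one inlined MethodTable check (monomorphic site)
//         if (SiteIsDispatchStub() && pObj->GetMethodTable() == expectedMT)
//             return target;
//         // resolve stub: hashed lookup in g_resolveCache (polymorphic site)
//         if (SiteIsResolveStub() && CacheHit(token, pObj->GetMethodTable()))
//             return cachedTarget;
//         // lookup stub, or a miss in either stub: fall into VSD_ResolveWorker
//         // above, which resolves the token, emits/patches stubs, and returns
//         return VSD_ResolveWorker(...);
//     }
//-----------------------------------------------------------------------------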
1721
1722void VirtualCallStubManager::BackPatchWorkerStatic(PCODE returnAddress, TADDR siteAddrForRegisterIndirect)
1723{
1724 CONTRACTL {
1725 NOTHROW;
1726 GC_NOTRIGGER;
1727 FORBID_FAULT;
1728 ENTRY_POINT;
1729 PRECONDITION(returnAddress != NULL);
1730 } CONTRACTL_END
1731
1732 BEGIN_ENTRYPOINT_VOIDRET;
1733
1734 StubCallSite callSite(siteAddrForRegisterIndirect, returnAddress);
1735
1736 PCODE callSiteTarget = callSite.GetSiteTarget();
1737 CONSISTENCY_CHECK(callSiteTarget != NULL);
1738
1739 VirtualCallStubManager *pMgr = VirtualCallStubManager::FindStubManager(callSiteTarget);
1740 PREFIX_ASSUME(pMgr != NULL);
1741
1742 pMgr->BackPatchWorker(&callSite);
1743
1744 END_ENTRYPOINT_VOIDRET;
1745}
1746
1747#if defined(_TARGET_X86_) && defined(FEATURE_PAL)
1748void BackPatchWorkerStaticStub(PCODE returnAddr, TADDR siteAddrForRegisterIndirect)
1749{
1750 VirtualCallStubManager::BackPatchWorkerStatic(returnAddr, siteAddrForRegisterIndirect);
1751}
1752#endif
1753
1754PCODE VirtualCallStubManager::ResolveWorker(StubCallSite* pCallSite,
1755 OBJECTREF *protectedObj,
1756 DispatchToken token,
1757 StubKind stubKind)
1758{
1759 CONTRACTL {
1760 THROWS;
1761 GC_TRIGGERS;
1762 MODE_COOPERATIVE;
1763 INJECT_FAULT(COMPlusThrowOM(););
1764 PRECONDITION(protectedObj != NULL);
1765 PRECONDITION(*protectedObj != NULL);
1766 PRECONDITION(IsProtectedByGCFrame(protectedObj));
1767 } CONTRACTL_END;
1768
1769 MethodTable* objectType = (*protectedObj)->GetMethodTable();
1770 CONSISTENCY_CHECK(CheckPointer(objectType));
1771
1772#ifdef STUB_LOGGING
1773 if (g_dumpLogCounter != 0)
1774 {
1775 UINT32 total_calls = g_mono_call_counter + g_poly_call_counter;
1776
1777 if (total_calls > g_dumpLogCounter)
1778 {
1779 VirtualCallStubManager::LoggingDump();
1780 if (g_dumpLogIncr == 0)
1781 g_dumpLogCounter = 0;
1782 else
1783 g_dumpLogCounter += g_dumpLogIncr;
1784 }
1785 }
1786
1787 if (g_resetCacheCounter != 0)
1788 {
1789 UINT32 total_calls = g_mono_call_counter + g_poly_call_counter;
1790
1791 if (total_calls > g_resetCacheCounter)
1792 {
1793 VirtualCallStubManager::ResetCache();
1794 if (g_resetCacheIncr == 0)
1795 g_resetCacheCounter = 0;
1796 else
1797 g_resetCacheCounter += g_resetCacheIncr;
1798 }
1799 }
1800#endif // STUB_LOGGING
1801
1802 //////////////////////////////////////////////////////////////
1803 // Get the managers associated with the callee
1804
1805 VirtualCallStubManager *pCalleeMgr = NULL; // Only set if the caller is shared, NULL otherwise
1806
1807 BOOL bCallToShorterLivedTarget = FALSE;
1808
1809 // We care about the following cases:
1810 // Call from shared domain -> domain-specific target (collectible or not)
1811 // Call from any site -> collectible target
1812 if (parentDomain->IsSharedDomain())
1813 {
1814 // The callee's manager
1815 pCalleeMgr = objectType->GetLoaderAllocator()->GetVirtualCallStubManager();
1816 // We already know that we are the shared manager, so we can just see if the callee has the same manager
1817 bCallToShorterLivedTarget = (pCalleeMgr != this);
1818 }
1819 else if (objectType->GetLoaderAllocator()->IsCollectible())
1820 {
1821 // The callee's manager
1822 pCalleeMgr = objectType->GetLoaderAllocator()->GetVirtualCallStubManager();
1823 if (pCalleeMgr != this)
1824 {
1825 bCallToShorterLivedTarget = TRUE;
1826 }
1827 else
1828 {
1829 pCalleeMgr = NULL;
1830 }
1831 }
1832
1833 stats.worker_call++;
1834
1835 LOG((LF_STUBS, LL_INFO100000, "ResolveWorker from %sStub, token" FMT_ADDR "object's MT" FMT_ADDR "ind-cell" FMT_ADDR "call-site" FMT_ADDR "%s\n",
1836 (stubKind == SK_DISPATCH) ? "Dispatch" : (stubKind == SK_RESOLVE) ? "Resolve" : (stubKind == SK_LOOKUP) ? "Lookup" : "Unknown",
1837 DBG_ADDR(token.To_SIZE_T()), DBG_ADDR(objectType), DBG_ADDR(pCallSite->GetIndirectCell()), DBG_ADDR(pCallSite->GetReturnAddress()),
1838 bCallToShorterLivedTarget ? "bCallToShorterLivedTarget" : "" ));
1839
1840 PCODE stub = CALL_STUB_EMPTY_ENTRY;
1841 PCODE target = NULL;
1842 BOOL patch = FALSE;
1843
1844 // This code can throw an OOM, but we do not want to fail in this case because
1845 // we must always successfully determine the target of a virtual call so that
1846 // CERs can work (there are a couple of exceptions to this involving generics).
1847 // Since the code below is just trying to see if a stub representing the current
1848 // type and token exists, it is not strictly necessary in determining the target.
1849 // We will treat the case of an OOM the same as the case of not finding an entry
1850 // in the hash tables and will continue on to the slow resolve case, which is
1851 // guaranteed not to fail outside of a couple of generics-specific cases.
1852 EX_TRY
1853 {
1854 /////////////////////////////////////////////////////////////////////////////
1855 // First see if we can find a dispatcher stub for this token and type. If a
1856 // match is found, use the target stored in the entry.
1857 {
1858 DispatchEntry entryD;
1859 Prober probeD(&entryD);
1860 if (dispatchers->SetUpProber(token.To_SIZE_T(), (size_t) objectType, &probeD))
1861 {
1862 stub = (PCODE) dispatchers->Find(&probeD);
1863 if (stub != CALL_STUB_EMPTY_ENTRY)
1864 {
1865 target = (PCODE)entryD.Target();
1866 patch = TRUE;
1867 }
1868 }
1869 }
1870
1871 /////////////////////////////////////////////////////////////////////////////////////
1872 // Second see if we can find a ResolveCacheElem for this token and type.
1873 // If a match is found, use the target stored in the entry.
1874 if (target == NULL)
1875 {
1876 ResolveCacheElem * elem = NULL;
1877 ResolveCacheEntry entryRC;
1878 Prober probeRC(&entryRC);
1879 if (cache_entries->SetUpProber(token.To_SIZE_T(), (size_t) objectType, &probeRC))
1880 {
1881 elem = (ResolveCacheElem *)(cache_entries->Find(&probeRC));
1882 if (elem != CALL_STUB_EMPTY_ENTRY)
1883 {
1884 target = (PCODE)entryRC.Target();
1885 patch = TRUE;
1886 }
1887 }
1888 }
1889 }
1890 EX_CATCH
1891 {
1892 }
1893 EX_END_CATCH (SwallowAllExceptions);
1894
1895 /////////////////////////////////////////////////////////////////////////////////////
1896 // If we failed to find a target in either the resolver or cache entry hash tables,
1897 // we need to perform a full resolution of the token and type.
1898 //@TODO: Would be nice to add assertion code to ensure we only ever call Resolver once per <token,type>.
1899 if (target == NULL)
1900 {
1901 CONSISTENCY_CHECK(stub == CALL_STUB_EMPTY_ENTRY);
1902 patch = Resolver(objectType, token, protectedObj, &target, TRUE /* throwOnConflict */);
1903
1904#if defined(_DEBUG)
1905 if ( !objectType->IsComObjectType() &&
1906 !objectType->IsICastable())
1907 {
1908 CONSISTENCY_CHECK(!MethodTable::GetMethodDescForSlotAddress(target)->IsGenericMethodDefinition());
1909 }
1910#endif // _DEBUG
1911 }
1912
1913 CONSISTENCY_CHECK(target != NULL);
1914
1915 // Now that we've successfully determined the target, we will wrap the remaining logic in a giant
1916 // TRY/CATCH statement because it is there purely to emit stubs and cache entries. In the event
1917 // that emitting stub or cache entries throws an exception (for example, because of OOM), we should
1918 // not fail to perform the required dispatch. This is all because the basic assumption of
1919 // Constrained Execution Regions (CERs) is that all virtual method calls can be made without
1920 // failure.
1921 //
1922 // NOTE: The THROWS contract for this method does not change, because there are still a few special
1923 // cases involving generics that can throw when trying to determine the target method. These cases
1924 // are exceptional and will be documented as unsupported for CERs.
1925 //
1926 // NOTE: We do not try to keep track of the memory that has been allocated throughout this process
1927 // just so we can revert the memory should things fail. This is because we add the elements to the
1928 // hash tables and they can be reused later on. Additionally, the hash tables are unlocked, so we could
1929 // never remove the elements anyway.
1930 EX_TRY
1931 {
1932 // If we're the shared domain, we can't burn a dispatch stub to the target
1933 // if that target is outside the shared domain (through virtuals
1934 // originating in the shared domain but overridden by a non-shared type and
1935 // called on a collection, like HashTable would call GetHashCode on an
1936 arbitrary object in its collection). Dispatch stubs would be hard to clean,
1937 // but resolve stubs are easy to clean because we just clean the cache.
1938 //@TODO: Figure out how to track these indirection cells so that in the
1939 //@TODO: future we can create dispatch stubs for this case.
1940 BOOL bCreateDispatchStub = !bCallToShorterLivedTarget;
1941
1942 DispatchCache::InsertKind insertKind = DispatchCache::IK_NONE;
1943
1944 if (target != NULL)
1945 {
1946 if (patch)
1947 {
1948 // NOTE: This means that we are sharing dispatch stubs among callsites. If we decide we don't want
1949 // to do this in the future, just remove this condition
1950 if (stub == CALL_STUB_EMPTY_ENTRY)
1951 {
1952 // We have a target but not the dispatcher stub; let's build it.
1953 // First we need a failure target (the resolver stub).
1954 ResolveHolder *pResolveHolder = NULL;
1955 ResolveEntry entryR;
1956 Prober probeR(&entryR);
1957 PCODE pBackPatchFcn;
1958 PCODE pResolverFcn;
1959
1960#ifdef _TARGET_X86_
1961 // Only X86 implementation needs a BackPatch function
1962 pBackPatchFcn = (PCODE) GetEEFuncEntryPoint(BackPatchWorkerAsmStub);
1963#else // !_TARGET_X86_
1964 pBackPatchFcn = NULL;
1965#endif // !_TARGET_X86_
1966
1967#ifdef CHAIN_LOOKUP
1968 pResolverFcn = (PCODE) GetEEFuncEntryPoint(ResolveWorkerChainLookupAsmStub);
1969#else // CHAIN_LOOKUP
1970 // Use the slow resolver
1971 pResolverFcn = (PCODE) GetEEFuncEntryPoint(ResolveWorkerAsmStub);
1972#endif
1973
1974 // First see if we've already created a resolve stub for this token
1975 if (resolvers->SetUpProber(token.To_SIZE_T(), 0, &probeR))
1976 {
1977 // Find the right resolver, make it if necessary
1978 PCODE addrOfResolver = (PCODE)(resolvers->Find(&probeR));
1979 if (addrOfResolver == CALL_STUB_EMPTY_ENTRY)
1980 {
1981 pResolveHolder = GenerateResolveStub(pResolverFcn,
1982 pBackPatchFcn,
1983 token.To_SIZE_T());
1984
1985 // Add the resolve entrypoint into the cache.
1986 //@TODO: Can we store a pointer to the holder rather than the entrypoint?
1987 resolvers->Add((size_t)(pResolveHolder->stub()->resolveEntryPoint()), &probeR);
1988 }
1989 else
1990 {
1991 pResolveHolder = ResolveHolder::FromResolveEntry(addrOfResolver);
1992 }
1993 CONSISTENCY_CHECK(CheckPointer(pResolveHolder));
1994 stub = pResolveHolder->stub()->resolveEntryPoint();
1995 CONSISTENCY_CHECK(stub != NULL);
1996 }
1997
1998 // Only create a dispatch stub if:
1999 // 1. We successfully created or found a resolve stub.
2000 // 2. We are not blocked from creating a dispatch stub.
2001 // 3. The call site is currently wired to a lookup stub. If the call site is wired
2002 // to anything else, then we're never going to use the dispatch stub so there's
2003 // no use in creating it.
2004 if (pResolveHolder != NULL && stubKind == SK_LOOKUP)
2005 {
2006 DispatchEntry entryD;
2007 Prober probeD(&entryD);
2008 if (bCreateDispatchStub &&
2009 dispatchers->SetUpProber(token.To_SIZE_T(), (size_t) objectType, &probeD))
2010 {
2011 // We are allowed to create a reusable dispatch stub for all assemblies;
2012 // this allows us to optimize the call interception case the same way.
2013 DispatchHolder *pDispatchHolder = NULL;
2014 PCODE addrOfDispatch = (PCODE)(dispatchers->Find(&probeD));
2015 if (addrOfDispatch == CALL_STUB_EMPTY_ENTRY)
2016 {
2017 PCODE addrOfFail = pResolveHolder->stub()->failEntryPoint();
2018 pDispatchHolder = GenerateDispatchStub(
2019 target, addrOfFail, objectType, token.To_SIZE_T());
2020 dispatchers->Add((size_t)(pDispatchHolder->stub()->entryPoint()), &probeD);
2021 }
2022 else
2023 {
2024 pDispatchHolder = DispatchHolder::FromDispatchEntry(addrOfDispatch);
2025 }
2026
2027 // Now assign the entrypoint to stub
2028 CONSISTENCY_CHECK(CheckPointer(pDispatchHolder));
2029 stub = pDispatchHolder->stub()->entryPoint();
2030 CONSISTENCY_CHECK(stub != NULL);
2031 }
2032 else
2033 {
2034 insertKind = DispatchCache::IK_SHARED;
2035 }
2036 }
2037 }
2038 }
2039 else
2040 {
2041 stats.worker_call_no_patch++;
2042 }
2043 }
2044
2045 // When we get here, target is where control should go,
2046 // and patch is TRUE, telling us that we may have to back-patch the call site with the stub.
2047 if (stub != CALL_STUB_EMPTY_ENTRY)
2048 {
2049 _ASSERTE(patch);
2050
2051 // If we get here and have a dispatching stub in hand, it probably means
2052 // that the cache used by the resolve stubs (g_resolveCache) does not have this stub,
2053 // so insert it.
2054 //
2055 // We only insert into the cache if we have a ResolveStub or we have a DispatchStub
2056 // that missed, since we want to keep the resolve cache empty of unused entries.
2057 // If later the dispatch stub fails (because of another type at the call site),
2058 // we'll insert the new value into the cache for the next time.
2059 // Note that if we decide to skip creating a DispatchStub because we are calling
2060 // from a shared to an unshared domain, then we will also insert into the cache.
2061
2062 if (insertKind == DispatchCache::IK_NONE)
2063 {
2064 if (stubKind == SK_DISPATCH)
2065 {
2066 insertKind = DispatchCache::IK_DISPATCH;
2067 }
2068 else if (stubKind == SK_RESOLVE)
2069 {
2070 insertKind = DispatchCache::IK_RESOLVE;
2071 }
2072 }
2073
2074 if (insertKind != DispatchCache::IK_NONE)
2075 {
2076 // Because the TransparentProxy MT is process-global, we cannot cache targets for
2077 // unshared interfaces because there is the possibility of caching a
2078 // <token, TPMT, target> entry where target is in AD1, and then matching against
2079 // this entry from AD2 which happens to be using the same token, perhaps for a
2080 // completely different interface.
2081 }
2082
2083 if (insertKind != DispatchCache::IK_NONE)
2084 {
2085 VirtualCallStubManager * pMgrForCacheElem = this;
2086
2087 // If we're calling from shared to unshared, make sure the cache element is
2088 // allocated in the unshared manager so that when the unshared code unloads
2089 // the cache element is unloaded.
2090 if (bCallToShorterLivedTarget)
2091 {
2092 _ASSERTE(pCalleeMgr != NULL);
2093 pMgrForCacheElem = pCalleeMgr;
2094 }
2095
2096 // Find or create a new ResolveCacheElem
2097 ResolveCacheElem *e = pMgrForCacheElem->GetResolveCacheElem(objectType, token.To_SIZE_T(), (void *)target);
2098
2099 // Try to insert this entry into the resolver cache table
2100 // When we get a collision we may decide not to insert this element
2101 // and Insert will return FALSE if we decided not to add the entry
2102#ifdef STUB_LOGGING
2103 BOOL didInsert =
2104#endif
2105 g_resolveCache->Insert(e, insertKind);
2106
2107#ifdef STUB_LOGGING
2108 if ((STUB_COLLIDE_MONO_PCT > 0) && !didInsert && (stubKind == SK_RESOLVE))
2109 {
2110 // If we decided not to perform the insert and we came in with a resolve stub
2111 // then we currently have a polymorphic callsite, so we flip a coin to decide
2112 // whether to convert this callsite back into a dispatch stub (monomorphic callsite).
2113
2114 if (!bCallToShorterLivedTarget && bCreateDispatchStub)
2115 {
2116 // We are allowed to create a reusable dispatch stub for all assemblies;
2117 // this allows us to optimize the call interception case the same way.
2118
2119 UINT32 coin = UINT32(GetRandomInt(100));
2120
2121 if (coin < STUB_COLLIDE_MONO_PCT)
2122 {
2123 DispatchEntry entryD;
2124 Prober probeD(&entryD);
2125 if (dispatchers->SetUpProber(token.To_SIZE_T(), (size_t) objectType, &probeD))
2126 {
2127 DispatchHolder *pDispatchHolder = NULL;
2128 PCODE addrOfDispatch = (PCODE)(dispatchers->Find(&probeD));
2129 if (addrOfDispatch == CALL_STUB_EMPTY_ENTRY)
2130 {
2131 // It is possible that we never created this monomorphic dispatch stub
2132 // so we may have to create it now
2133 ResolveHolder* pResolveHolder = ResolveHolder::FromResolveEntry(pCallSite->GetSiteTarget());
2134 PCODE addrOfFail = pResolveHolder->stub()->failEntryPoint();
2135 pDispatchHolder = GenerateDispatchStub(
2136 target, addrOfFail, objectType, token.To_SIZE_T());
2137 dispatchers->Add((size_t)(pDispatchHolder->stub()->entryPoint()), &probeD);
2138 }
2139 else
2140 {
2141 pDispatchHolder = DispatchHolder::FromDispatchEntry(addrOfDispatch);
2142 }
2143
2144 // Increment the number of times we changed a cache collision into a mono stub.
2145 stats.worker_collide_to_mono++;
2146
2147 // Now assign the entrypoint to stub
2148 CONSISTENCY_CHECK(pDispatchHolder != NULL);
2149 stub = pDispatchHolder->stub()->entryPoint();
2150 CONSISTENCY_CHECK(stub != NULL);
2151 }
2152 }
2153 }
2154 }
2155#endif // STUB_LOGGING
2156 }
2157
2158 if (stubKind == SK_LOOKUP)
2159 {
2160 BackPatchSite(pCallSite, (PCODE)stub);
2161 }
2162 }
2163 }
2164 EX_CATCH
2165 {
2166 }
2167 EX_END_CATCH (SwallowAllExceptions);
2168
2169 // Target can be NULL only if we can't resolve to an address
2170 _ASSERTE(target != NULL);
2171
2172 return target;
2173}
2174
2175/*
2176Resolve the token in the context of the method table, and set the target to point to
2177 the address that we should go to in order to reach the implementation. Return a boolean indicating
2178 whether this is a permanent choice or a temporary one. For example, if the code has
2179not been jitted yet, return FALSE and set the target to the prestub. If the target is set to NULL,
2180it means that the token is not resolvable.
2181*/
2182BOOL
2183VirtualCallStubManager::Resolver(
2184 MethodTable * pMT,
2185 DispatchToken token,
2186 OBJECTREF * protectedObj, // this one can actually be NULL; consider using pMT if you don't need the object itself
2187 PCODE * ppTarget,
2188 BOOL throwOnConflict)
2189{
2190 CONTRACTL {
2191 THROWS;
2192 GC_TRIGGERS;
2193 PRECONDITION(CheckPointer(pMT));
2194 PRECONDITION(TypeHandle(pMT).CheckFullyLoaded());
2195 } CONTRACTL_END;
2196
2197#ifdef _DEBUG
2198 MethodTable * dbg_pTokenMT = pMT;
2199 MethodDesc * dbg_pTokenMD = NULL;
2200 if (token.IsTypedToken())
2201 {
2202 dbg_pTokenMT = GetThread()->GetDomain()->LookupType(token.GetTypeID());
2203 dbg_pTokenMD = dbg_pTokenMT->FindDispatchSlot(token.GetSlotNumber(), throwOnConflict).GetMethodDesc();
2204 }
2205#endif // _DEBUG
2206
2207 g_IBCLogger.LogMethodTableAccess(pMT);
2208
2209 // NOTE: CERs are not hardened against transparent proxy types,
2210 // so no need to worry about throwing an exception from here.
2211
2212 LOG((LF_LOADER, LL_INFO10000, "SD: VCSM::Resolver: (start) looking up %s method in %s\n",
2213 token.IsThisToken() ? "this" : "interface",
2214 pMT->GetClass()->GetDebugClassName()));
2215
2216 MethodDesc * pMD = NULL;
2217 BOOL fShouldPatch = FALSE;
2218 DispatchSlot implSlot(pMT->FindDispatchSlot(token, throwOnConflict));
2219
2220 // If we found a target, then just figure out if we're allowed to create a stub around
2221 // this target and backpatch the callsite.
2222 if (!implSlot.IsNull())
2223 {
2224 g_IBCLogger.LogDispatchTableSlotAccess(&implSlot);
2225#if defined(LOGGING) || defined(_DEBUG)
2226 {
2227 pMD = implSlot.GetMethodDesc();
2228 if (pMD != NULL)
2229 {
2230 // Make sure we aren't crossing app domain boundaries
2231 CONSISTENCY_CHECK(GetAppDomain()->CheckValidModule(pMD->GetModule()));
2232#ifdef LOGGING
2233 WORD slot = pMD->GetSlot();
2234 BOOL fIsOverriddenMethod =
2235 (pMT->GetNumParentVirtuals() <= slot && slot < pMT->GetNumVirtuals());
2236 LOG((LF_LOADER, LL_INFO10000, "SD: VCSM::Resolver: (end) looked up %s %s method %s::%s\n",
2237 fIsOverriddenMethod ? "overridden" : "newslot",
2238 token.IsThisToken() ? "this" : "interface",
2239 pMT->GetClass()->GetDebugClassName(),
2240 pMD->GetName()));
2241#endif // LOGGING
2242 }
2243 }
2244#endif // defined(LOGGING) || defined(_DEBUG)
2245
2246 BOOL fSlotCallsPrestub = DoesSlotCallPrestub(implSlot.GetTarget());
2247 if (!fSlotCallsPrestub)
2248 {
2249 // Skip fixup precode jump for better perf
2250 PCODE pDirectTarget = Precode::TryToSkipFixupPrecode(implSlot.GetTarget());
2251 if (pDirectTarget != NULL)
2252 implSlot = DispatchSlot(pDirectTarget);
2253
2254 // Only patch to a target if it's not going to call the prestub.
2255 fShouldPatch = TRUE;
2256 }
2257 else
2258 {
2259 // Getting the MethodDesc is very expensive,
2260 // so only call this when we are calling the prestub
2261 pMD = implSlot.GetMethodDesc();
2262
2263 if (pMD == NULL)
2264 {
2265 // pMD can be NULL when another thread raced in and patched the Method Entry Point
2266 // so that it no longer points at the prestub
2267 // In such a case DoesSlotCallPrestub will now return FALSE
2268 CONSISTENCY_CHECK(!DoesSlotCallPrestub(implSlot.GetTarget()));
2269 fSlotCallsPrestub = FALSE;
2270 }
2271
2272 if (!fSlotCallsPrestub)
2273 {
2274 // Only patch to a target if it's not going to call the prestub.
2275 fShouldPatch = TRUE;
2276 }
2277 else
2278 {
2279 CONSISTENCY_CHECK(CheckPointer(pMD));
2280 if (pMD->IsGenericMethodDefinition())
2281 {
2282 //@GENERICS: Currently, generic virtual methods are called only through JIT_VirtualFunctionPointer
2283 // and so we could never have a virtual call stub at a call site for a generic virtual.
2284 // As such, we're assuming the only callers to Resolver are calls to GetTarget caused
2285 // indirectly by JIT_VirtualFunctionPointer. So, we return TRUE for patching so that
2286 // we can cache the result in GetTarget and we don't have to perform the full resolve
2287 // every time. If the way we call generic virtual methods changes, this will also need
2288 // to change.
2289 fShouldPatch = TRUE;
2290 }
2291 else
2292 {
2293 g_IBCLogger.LogMethodDescAccess(pMD);
2294 }
2295 }
2296 }
2297 }
2298#ifdef FEATURE_COMINTEROP
2299 else if (pMT->IsComObjectType() && IsInterfaceToken(token))
2300 {
2301 MethodTable * pItfMT = GetTypeFromToken(token);
2302 implSlot = pItfMT->FindDispatchSlot(token.GetSlotNumber(), throwOnConflict);
2303
2304 if (pItfMT->HasInstantiation())
2305 {
2306 DispatchSlot ds(implSlot);
2307 MethodDesc * pTargetMD = ds.GetMethodDesc();
2308 if (!pTargetMD->HasMethodInstantiation())
2309 {
2310 _ASSERTE(pItfMT->IsProjectedFromWinRT() || pItfMT->IsWinRTRedirectedInterface(TypeHandle::Interop_ManagedToNative));
2311
2312 MethodDesc *pInstMD = MethodDesc::FindOrCreateAssociatedMethodDesc(
2313 pTargetMD,
2314 pItfMT,
2315 FALSE, // forceBoxedEntryPoint
2316 Instantiation(), // methodInst
2317 FALSE, // allowInstParam
2318 TRUE); // forceRemotableMethod
2319
2320 _ASSERTE(pInstMD->IsComPlusCall() || pInstMD->IsGenericComPlusCall());
2321
2322 *ppTarget = pInstMD->GetStableEntryPoint();
2323 return TRUE;
2324 }
2325 }
2326
2327 fShouldPatch = TRUE;
2328 }
2329#endif // FEATURE_COMINTEROP
2330#ifdef FEATURE_ICASTABLE
2331 else if (pMT->IsICastable() && protectedObj != NULL && *protectedObj != NULL)
2332 {
2333 GCStress<cfg_any>::MaybeTrigger();
2334
2335 // In case of ICastable, instead of trying to find method implementation in the real object type
2336 // we call pObj.GetValueInternal() and call Resolver() again with whatever type it returns.
2337 // It allows objects that implement ICastable to mimic behavior of other types.
2338 MethodTable * pTokenMT = GetTypeFromToken(token);
2339
2340 // Make call to ICastableHelpers.GetImplType(this, interfaceTypeObj)
2341 PREPARE_NONVIRTUAL_CALLSITE(METHOD__ICASTABLEHELPERS__GETIMPLTYPE);
2342
2343 OBJECTREF tokenManagedType = pTokenMT->GetManagedClassObject(); //GC triggers
2344
2345 DECLARE_ARGHOLDER_ARRAY(args, 2);
2346 args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(*protectedObj);
2347 args[ARGNUM_1] = OBJECTREF_TO_ARGHOLDER(tokenManagedType);
2348
2349 OBJECTREF impTypeObj = NULL;
2350 CALL_MANAGED_METHOD_RETREF(impTypeObj, OBJECTREF, args);
2351
2352 INDEBUG(tokenManagedType = NULL); //tokenManagedType wasn't protected during the call
2353 if (impTypeObj == NULL) // GetImplType returns default(RuntimeTypeHandle)
2354 {
2355 COMPlusThrow(kEntryPointNotFoundException);
2356 }
2357
2358 ReflectClassBaseObject* resultTypeObj = ((ReflectClassBaseObject*)OBJECTREFToObject(impTypeObj));
2359 TypeHandle resultTypeHnd = resultTypeObj->GetType();
2360 MethodTable *pResultMT = resultTypeHnd.GetMethodTable();
2361
2362 return Resolver(pResultMT, token, protectedObj, ppTarget, throwOnConflict);
2363 }
2364#endif // FEATURE_ICASTABLE
2365
2366 if (implSlot.IsNull())
2367 {
2368 MethodTable * pTokenMT = NULL;
2369 MethodDesc * pTokenMD = NULL;
2370 if (token.IsTypedToken())
2371 {
2372 pTokenMT = GetThread()->GetDomain()->LookupType(token.GetTypeID());
2373 pTokenMD = pTokenMT->FindDispatchSlot(token.GetSlotNumber(), throwOnConflict).GetMethodDesc();
2374 }
2375
2376#ifdef FEATURE_COMINTEROP
2377 if ((pTokenMT != NULL) && (pTokenMT->GetClass()->IsEquivalentType()))
2378 {
2379 SString methodName;
2380 DefineFullyQualifiedNameForClassW();
2381 pTokenMD->GetFullMethodInfo(methodName);
2382
2383 COMPlusThrowHR(COR_E_MISSINGMETHOD, COR_E_MISSINGMETHOD, GetFullyQualifiedNameForClassNestedAwareW(pMT), methodName.GetUnicode());
2384 }
2385 else
2386#endif // FEATURE_COMINTEROP
2387 if (!throwOnConflict)
2388 {
2389 // Assume we got null because there was a default interface method conflict
2390 *ppTarget = NULL;
2391 return FALSE;
2392 }
2393 else
2394 {
2395 // Method not found, and this should never happen for anything but equivalent types
2396 CONSISTENCY_CHECK(!implSlot.IsNull() && "Valid method implementation was not found.");
2397 COMPlusThrow(kEntryPointNotFoundException);
2398 }
2399 }
2400
2401 *ppTarget = implSlot.GetTarget();
2402
2403 return fShouldPatch;
2404} // VirtualCallStubManager::Resolver
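//-----------------------------------------------------------------------------
// Illustration only: how callers are expected to treat Resolver's outputs
// (compare ResolveWorker above and GetTarget below).
//
//     PCODE target = NULL;
//     BOOL fPermanent = Resolver(pMT, token, protectedObj, &target, TRUE);
//     if (fPermanent)
//         /* safe to burn target into a dispatch stub or cache entry */ ;
//     else
//         /* target is the prestub: call it, but do not cache or patch */ ;
//-----------------------------------------------------------------------------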
2405
2406#endif // !DACCESS_COMPILE
2407
2408//----------------------------------------------------------------------------
2409// Given a contract, return true if the contract represents a slot on the target.
2410BOOL VirtualCallStubManager::IsClassToken(DispatchToken token)
2411{
2412 CONTRACT (BOOL) {
2413 NOTHROW;
2414 GC_NOTRIGGER;
2415 } CONTRACT_END;
2416 RETURN (token.IsThisToken());
2417}
2418
2419//----------------------------------------------------------------------------
2420// Given a contract, return true if the contract represents an interface, false if just a slot.
2421BOOL VirtualCallStubManager::IsInterfaceToken(DispatchToken token)
2422{
2423 CONTRACT (BOOL) {
2424 NOTHROW;
2425 GC_NOTRIGGER;
2426 } CONTRACT_END;
2427 BOOL ret = token.IsTypedToken();
2428 // For now, only interfaces have typed dispatch tokens.
2429 CONSISTENCY_CHECK(!ret || CheckPointer(GetThread()->GetDomain()->LookupType(token.GetTypeID())));
2430 CONSISTENCY_CHECK(!ret || GetThread()->GetDomain()->LookupType(token.GetTypeID())->IsInterface());
2431 RETURN (ret);
2432}
2433
2434#ifndef DACCESS_COMPILE
2435
2436//----------------------------------------------------------------------------
2437MethodDesc *
2438VirtualCallStubManager::GetRepresentativeMethodDescFromToken(
2439 DispatchToken token,
2440 MethodTable * pMT)
2441{
2442 CONTRACT (MethodDesc *) {
2443 NOTHROW;
2444 GC_NOTRIGGER;
2445 MODE_COOPERATIVE;
2446 PRECONDITION(CheckPointer(pMT));
2447 POSTCONDITION(CheckPointer(RETVAL));
2448 SO_TOLERANT;
2449 } CONTRACT_END;
2450
2451 // This is called when trying to create a HelperMethodFrame, which means there are
2452 // potentially managed references on the stack that are not yet protected.
2453 GCX_FORBID();
2454
2455 if (token.IsTypedToken())
2456 {
2457 pMT = GetThread()->GetDomain()->LookupType(token.GetTypeID());
2458 CONSISTENCY_CHECK(CheckPointer(pMT));
2459 token = DispatchToken::CreateDispatchToken(token.GetSlotNumber());
2460 }
2461 CONSISTENCY_CHECK(token.IsThisToken());
2462 RETURN (pMT->GetMethodDescForSlot(token.GetSlotNumber()));
2463}
2464
2465//----------------------------------------------------------------------------
2466MethodTable *VirtualCallStubManager::GetTypeFromToken(DispatchToken token)
2467{
2468 CONTRACTL {
2469 NOTHROW;
2470 WRAPPER(GC_TRIGGERS);
2471 } CONTRACTL_END;
2472 MethodTable *pMT = GetThread()->GetDomain()->LookupType(token.GetTypeID());
2473 _ASSERTE(pMT != NULL);
2474 _ASSERTE(pMT->LookupTypeID() == token.GetTypeID());
2475 return pMT;
2476}
2477
2478#endif // !DACCESS_COMPILE
2479
2480//----------------------------------------------------------------------------
2481MethodDesc *VirtualCallStubManager::GetInterfaceMethodDescFromToken(DispatchToken token)
2482{
2483 CONTRACTL {
2484 NOTHROW;
2485 WRAPPER(GC_TRIGGERS);
2486 PRECONDITION(IsInterfaceToken(token));
2487 } CONTRACTL_END;
2488
2489#ifndef DACCESS_COMPILE
2490
2491 MethodTable * pMT = GetTypeFromToken(token);
2492 PREFIX_ASSUME(pMT != NULL);
2493 CONSISTENCY_CHECK(CheckPointer(pMT));
2494 return pMT->GetMethodDescForSlot(token.GetSlotNumber());
2495
2496#else // DACCESS_COMPILE
2497
2498 DacNotImpl();
2499 return NULL;
2500
2501#endif // DACCESS_COMPILE
2502}
2503
2504#ifndef DACCESS_COMPILE
2505
2506//----------------------------------------------------------------------------
2507// This will check to see if a match is in the cache.
2508// Returns the target on success, otherwise NULL.
2509PCODE VirtualCallStubManager::CacheLookup(size_t token, UINT16 tokenHash, MethodTable *pMT)
2510{
2511 CONTRACTL {
2512 NOTHROW;
2513 GC_NOTRIGGER;
2514 SO_TOLERANT;
2515 PRECONDITION(CheckPointer(pMT));
2516 } CONTRACTL_END
2517
2518 // Now look in the cache for a match
2519 ResolveCacheElem *pElem = g_resolveCache->Lookup(token, tokenHash, pMT);
2520
2521 // If the element matches, return the target - we're done!
2522 return (PCODE)(pElem != NULL ? pElem->target : NULL);
2523}
2524
2525
2526//----------------------------------------------------------------------------
2527/* static */
2528PCODE
2529VirtualCallStubManager::GetTarget(
2530 DispatchToken token,
2531 MethodTable * pMT,
2532 BOOL throwOnConflict)
2533{
2534 CONTRACTL {
2535 THROWS;
2536 GC_TRIGGERS;
2537 INJECT_FAULT(COMPlusThrowOM(););
2538 PRECONDITION(CheckPointer(pMT));
2539 } CONTRACTL_END
2540
2541 g_external_call++;
2542
2543 if (token.IsThisToken())
2544 {
2545 return pMT->GetRestoredSlot(token.GetSlotNumber());
2546 }
2547
2548 GCX_COOP(); // This is necessary for BucketTable synchronization
2549
2550 PCODE target = NULL;
2551
2552#ifndef STUB_DISPATCH_PORTABLE
2553 target = CacheLookup(token.To_SIZE_T(), DispatchCache::INVALID_HASH, pMT);
2554 if (target != NULL)
2555 return target;
2556#endif // !STUB_DISPATCH_PORTABLE
2557
2558 // No match, now do full resolve
2559 BOOL fPatch;
2560
2561 // TODO: passing NULL as protectedObj here can lead to incorrect behavior for ICastable objects
2562 // We need to review if this is the case and refactor this code if we want ICastable to become officially supported
2563 fPatch = Resolver(pMT, token, NULL, &target, throwOnConflict);
2564 _ASSERTE(!throwOnConflict || target != NULL);
2565
2566#ifndef STUB_DISPATCH_PORTABLE
2567 if (fPatch)
2568 {
2569 ResolveCacheElem *pCacheElem = pMT->GetLoaderAllocator()->GetVirtualCallStubManager()->
2570 GetResolveCacheElem(pMT, token.To_SIZE_T(), (BYTE *)target);
2571
2572 if (pCacheElem)
2573 {
2574 if (!g_resolveCache->Insert(pCacheElem, DispatchCache::IK_EXTERNAL))
2575 {
2576 // We decided not to perform the insert
2577 }
2578 }
2579 }
2580 else
2581 {
2582 g_external_call_no_patch++;
2583 }
2584#endif // !STUB_DISPATCH_PORTABLE
2585
2586 return target;
2587}
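//-----------------------------------------------------------------------------
// Illustration only (hypothetical caller): resolving a slot for a known
// MethodTable without going through a call-site stub. The token here is
// assumed to have been produced elsewhere (e.g. via GetTokenFromStub).
//
//     PCODE addr = VirtualCallStubManager::GetTarget(token, pObjectMT, TRUE /* throwOnConflict */);
//     // addr is either jitted code or the prestub; GetTarget itself inserts a
//     // cache entry only when Resolver reported a patchable (permanent) answer.
//-----------------------------------------------------------------------------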
2588
2589#endif // !DACCESS_COMPILE
2590
2591//----------------------------------------------------------------------------
2592/*
2593Resolve the token in the context of the method table, and set the target to point to
2594 the address that we should go to in order to reach the implementation. Return a boolean indicating
2595 whether this is a permanent choice or a temporary one. For example, if the code has
2596not been jitted yet, return FALSE and set the target to the prestub. If the target is set to NULL,
2597it means that the token is not resolvable.
2598*/
2599BOOL
2600VirtualCallStubManager::TraceResolver(
2601 Object * pObj,
2602 DispatchToken token,
2603 TraceDestination * trace)
2604{
2605 CONTRACTL {
2606 THROWS;
2607 GC_TRIGGERS;
2608 PRECONDITION(CheckPointer(pObj, NULL_OK));
2609 INJECT_FAULT(COMPlusThrowOM(););
2610 } CONTRACTL_END
2611
2612 // If someone is trying to step into a stub dispatch call on a null object,
2613 // just say that we can't trace this call; we'll end up throwing
2614 // a null ref exception.
2615 if (pObj == NULL)
2616 {
2617 return FALSE;
2618 }
2619
2620 MethodTable *pMT = pObj->GetMethodTable();
2621 CONSISTENCY_CHECK(CheckPointer(pMT));
2622
2623
2624 DispatchSlot slot(pMT->FindDispatchSlot(token, TRUE /* throwOnConflict */));
2625
2626 if (slot.IsNull() && IsInterfaceToken(token) && pMT->IsComObjectType())
2627 {
2628 MethodDesc * pItfMD = GetInterfaceMethodDescFromToken(token);
2629 CONSISTENCY_CHECK(pItfMD->GetMethodTable()->GetSlot(pItfMD->GetSlot()) == pItfMD->GetMethodEntryPoint());
2630 slot = pItfMD->GetMethodTable()->FindDispatchSlot(pItfMD->GetSlot(), TRUE /* throwOnConflict */);
2631 }
2632
2633 return (StubManager::TraceStub(slot.GetTarget(), trace));
2634}
2635
2636#ifndef DACCESS_COMPILE
2637
2638//----------------------------------------------------------------------------
2639/* Change the call site. It is failing the expected MT test in the dispatcher stub
2640too often.
2641*/
2642void VirtualCallStubManager::BackPatchWorker(StubCallSite* pCallSite)
2643{
2644 CONTRACTL {
2645 NOTHROW;
2646 GC_NOTRIGGER;
2647 FORBID_FAULT;
2648 } CONTRACTL_END
2649
2650 PCODE callSiteTarget = pCallSite->GetSiteTarget();
2651
2652 if (isDispatchingStub(callSiteTarget))
2653 {
2654 DispatchHolder * dispatchHolder = DispatchHolder::FromDispatchEntry(callSiteTarget);
2655 DispatchStub * dispatchStub = dispatchHolder->stub();
2656
2657 //yes, patch it to point to the resolve stub
2658 // We can ignore the races since we now know that the call site does go through our
2659 // stub mechanisms; hence no matter who wins the race, we are correct.
2660 //We find the correct resolve stub by following the failure path in the dispatcher stub itself
2661 PCODE failEntry = dispatchStub->failTarget();
2662 ResolveStub* resolveStub = ResolveHolder::FromFailEntry(failEntry)->stub();
2663 PCODE resolveEntry = resolveStub->resolveEntryPoint();
2664 BackPatchSite(pCallSite, resolveEntry);
2665
2666 LOG((LF_STUBS, LL_INFO10000, "BackPatchWorker call-site" FMT_ADDR "dispatchStub" FMT_ADDR "\n",
2667 DBG_ADDR(pCallSite->GetReturnAddress()), DBG_ADDR(dispatchHolder->stub())));
2668
2669 // Add back the default miss count to the counter being used by this resolve stub.
2670 // Since resolve stubs are shared among many dispatch stubs, each dispatch stub
2671 // that fails decrements the shared counter, and the dispatch stub that trips the
2672 // counter gets converted into a polymorphic site.
2673 INT32* counter = resolveStub->pCounter();
2674 *counter += STUB_MISS_COUNT_VALUE;
2675 }
2676}
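//-----------------------------------------------------------------------------
// Illustration only: the shared miss-counter protocol, roughly, assuming a
// fresh counter value of STUB_MISS_COUNT_VALUE.
//
//     resolve stub fail piece:  --(*pCounter);
//                               if (*pCounter < 0) call the back-patch worker
//     BackPatchWorker (above):  re-point the call site at the resolve stub's
//                               resolve entry and refill the shared counter:
//                               *pCounter += STUB_MISS_COUNT_VALUE;
//                               // other dispatch stubs sharing this resolver
//                               // keep drawing from the refilled counter
//-----------------------------------------------------------------------------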
2677
2678//----------------------------------------------------------------------------
2679 /* Consider changing the call site to point to the given stub; if appropriate, do it.
2680*/
2681void VirtualCallStubManager::BackPatchSite(StubCallSite* pCallSite, PCODE stub)
2682{
2683 CONTRACTL {
2684 NOTHROW;
2685 GC_NOTRIGGER;
2686 FORBID_FAULT;
2687 PRECONDITION(stub != NULL);
2688 PRECONDITION(CheckPointer(pCallSite));
2689 PRECONDITION(pCallSite->GetSiteTarget() != NULL);
2690 } CONTRACTL_END
2691
2692 PCODE patch = stub;
2693
2694 // This will take care of the prejit case and find the actual patch site
2695 PCODE prior = pCallSite->GetSiteTarget();
2696
2697 // Is this really going to change anything? If not, don't do it.
2698 if (prior == patch)
2699 return;
2700
2701 //we only want to do the following transitions for right now:
2702 // prior new
2703 // lookup dispatching or resolving
2704 // dispatching resolving
2705 if (isResolvingStub(prior))
2706 return;
2707
2708 if(isDispatchingStub(stub))
2709 {
2710 if(isDispatchingStub(prior))
2711 {
2712 return;
2713 }
2714 else
2715 {
2716 stats.site_write_mono++;
2717 }
2718 }
2719 else
2720 {
2721 stats.site_write_poly++;
2722 }
2723
2724 //patch the call site
2725 pCallSite->SetSiteTarget(patch);
2726
2727 stats.site_write++;
2728}
2729
2730//----------------------------------------------------------------------------
2731void StubCallSite::SetSiteTarget(PCODE newTarget)
2732{
2733 WRAPPER_NO_CONTRACT;
2734 PTR_PCODE pCell = GetIndirectCell();
2735 if (EnsureWritablePagesNoThrow(pCell, sizeof(PCODE)))
2736 *pCell = newTarget;
2737}
2738
2739//----------------------------------------------------------------------------
2740/* Generate a dispatcher stub, pMTExpected is the method table to burn in the stub, and the two addrOf's
2741are the addresses the stub is to transfer to depending on the test with pMTExpected
2742*/
2743DispatchHolder *VirtualCallStubManager::GenerateDispatchStub(PCODE addrOfCode,
2744 PCODE addrOfFail,
2745 void * pMTExpected,
2746 size_t dispatchToken)
2747{
2748 CONTRACT (DispatchHolder*) {
2749 THROWS;
2750 GC_TRIGGERS;
2751 INJECT_FAULT(COMPlusThrowOM(););
2752 PRECONDITION(addrOfCode != NULL);
2753 PRECONDITION(addrOfFail != NULL);
2754 PRECONDITION(CheckPointer(pMTExpected));
2755 POSTCONDITION(CheckPointer(RETVAL));
2756 } CONTRACT_END;
2757
2758 size_t dispatchHolderSize = sizeof(DispatchHolder);
2759
2760#ifdef _TARGET_AMD64_
2761 // See comment around m_fShouldAllocateLongJumpDispatchStubs for explanation.
2762 if (m_fShouldAllocateLongJumpDispatchStubs
2763 INDEBUG(|| g_pConfig->ShouldGenerateLongJumpDispatchStub()))
2764 {
2765 RETURN GenerateDispatchStubLong(addrOfCode,
2766 addrOfFail,
2767 pMTExpected,
2768 dispatchToken);
2769 }
2770
2771 dispatchHolderSize = DispatchHolder::GetHolderSize(DispatchStub::e_TYPE_SHORT);
2772#endif
2773
2774 //allocate from the requisite heap and copy the template over it.
2775 DispatchHolder * holder = (DispatchHolder*) (void*)
2776 dispatch_heap->AllocAlignedMem(dispatchHolderSize, CODE_SIZE_ALIGN);
2777
2778#ifdef _TARGET_AMD64_
2779 if (!DispatchHolder::CanShortJumpDispatchStubReachFailTarget(addrOfFail, (LPCBYTE)holder))
2780 {
2781 m_fShouldAllocateLongJumpDispatchStubs = TRUE;
2782 RETURN GenerateDispatchStub(addrOfCode, addrOfFail, pMTExpected, dispatchToken);
2783 }
2784#endif
2785
2786 holder->Initialize(addrOfCode,
2787 addrOfFail,
2788 (size_t)pMTExpected
2789#ifdef _TARGET_AMD64_
2790 , DispatchStub::e_TYPE_SHORT
2791#endif
2792 );
2793
2794 ClrFlushInstructionCache(holder->stub(), holder->stub()->size());
2795
2796 AddToCollectibleVSDRangeList(holder);
2797
2798 //incr our counters
2799 stats.stub_mono_counter++;
2800 stats.stub_space += (UINT32)dispatchHolderSize;
2801 LOG((LF_STUBS, LL_INFO10000, "GenerateDispatchStub for token" FMT_ADDR "and pMT" FMT_ADDR "at" FMT_ADDR "\n",
2802 DBG_ADDR(dispatchToken), DBG_ADDR(pMTExpected), DBG_ADDR(holder->stub())));
2803
2804#ifdef FEATURE_PERFMAP
2805 PerfMap::LogStubs(__FUNCTION__, "GenerateDispatchStub", (PCODE)holder->stub(), holder->stub()->size());
2806#endif
2807
2808 RETURN (holder);
2809}
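//-----------------------------------------------------------------------------
// Illustration only: what the emitted dispatch stub does, expressed as C-like
// pseudocode (the real stub is hand-written per-architecture code; see the
// DispatchStub implementation for the exact sequence).
//
//     if (pObj->m_pMethTab == pMTExpected)
//         goto addrOfCode;     // fast path: jump straight to the target method
//     else
//         goto addrOfFail;     // miss: fall into the resolve stub's fail entry
//-----------------------------------------------------------------------------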
2810
2811#ifdef _TARGET_AMD64_
2812//----------------------------------------------------------------------------
2813/* Generate a dispatcher stub, pMTExpected is the method table to burn in the stub, and the two addrOf's
2814are the addresses the stub is to transfer to depending on the test with pMTExpected
2815*/
2816DispatchHolder *VirtualCallStubManager::GenerateDispatchStubLong(PCODE addrOfCode,
2817 PCODE addrOfFail,
2818 void * pMTExpected,
2819 size_t dispatchToken)
2820{
2821 CONTRACT (DispatchHolder*) {
2822 THROWS;
2823 GC_TRIGGERS;
2824 INJECT_FAULT(COMPlusThrowOM(););
2825 PRECONDITION(addrOfCode != NULL);
2826 PRECONDITION(addrOfFail != NULL);
2827 PRECONDITION(CheckPointer(pMTExpected));
2828 POSTCONDITION(CheckPointer(RETVAL));
2829 } CONTRACT_END;
2830
2831 //allocate from the requisite heap and copy the template over it.
2832 DispatchHolder * holder = (DispatchHolder*) (void*)
2833 dispatch_heap->AllocAlignedMem(DispatchHolder::GetHolderSize(DispatchStub::e_TYPE_LONG), CODE_SIZE_ALIGN);
2834
2835 holder->Initialize(addrOfCode,
2836 addrOfFail,
2837 (size_t)pMTExpected,
2838 DispatchStub::e_TYPE_LONG);
2839
2840 ClrFlushInstructionCache(holder->stub(), holder->stub()->size());
2841
2842 AddToCollectibleVSDRangeList(holder);
2843
2844 //incr our counters
2845 stats.stub_mono_counter++;
2846 stats.stub_space += static_cast<UINT32>(DispatchHolder::GetHolderSize(DispatchStub::e_TYPE_LONG));
2847 LOG((LF_STUBS, LL_INFO10000, "GenerateDispatchStub for token" FMT_ADDR "and pMT" FMT_ADDR "at" FMT_ADDR "\n",
2848 DBG_ADDR(dispatchToken), DBG_ADDR(pMTExpected), DBG_ADDR(holder->stub())));
2849
2850#ifdef FEATURE_PERFMAP
2851 PerfMap::LogStubs(__FUNCTION__, "GenerateDispatchStub", (PCODE)holder->stub(), holder->stub()->size());
2852#endif
2853
2854 RETURN (holder);
2855}
2856#endif
2857
2858//----------------------------------------------------------------------------
2859/* Generate a resolve stub for the given dispatchToken.
2860addrOfResolver is where to go if the inline cache check misses
2861 addrOfPatcher is who to call if the fail piece is being called too often by dispatcher stubs
2862*/
2863ResolveHolder *VirtualCallStubManager::GenerateResolveStub(PCODE addrOfResolver,
2864 PCODE addrOfPatcher,
2865 size_t dispatchToken)
2866{
2867 CONTRACT (ResolveHolder*) {
2868 THROWS;
2869 GC_TRIGGERS;
2870 INJECT_FAULT(COMPlusThrowOM(););
2871 PRECONDITION(addrOfResolver != NULL);
2872#if defined(_TARGET_X86_)
2873 PRECONDITION(addrOfPatcher != NULL);
2874#endif
2875 POSTCONDITION(CheckPointer(RETVAL));
2876 } CONTRACT_END;
2877
2878 _ASSERTE(addrOfResolver);
2879
2880 //get a counter for the fail piece
2881
2882 UINT32 counter_index = counter_block::MAX_COUNTER_ENTRIES;
2883 counter_block *cur_block = NULL;
2884
2885 while (true)
2886 {
2887 cur_block = VolatileLoad(&m_cur_counter_block);
2888
2889 if ((cur_block != NULL) && (cur_block->used < counter_block::MAX_COUNTER_ENTRIES))
2890 {
2891 counter_index = FastInterlockIncrement((LONG*)&cur_block->used) - 1;
2892 if (counter_index < counter_block::MAX_COUNTER_ENTRIES)
2893 {
2894 // Typical case we allocate the next free counter in the block
2895 break;
2896 }
2897 }
2898
2899 // Otherwise we have to create a new counter_block to serve as the head of the m_cur_counter_block list
2900
2901 // Create the new block in the main heap
2902 counter_block *pNew = new counter_block;
2903
2904 // Initialize the new block
2905 pNew->next = cur_block;
2906 pNew->used = 0;
2907
2908 // Try to link in the new block
2909 if (InterlockedCompareExchangeT(&m_cur_counter_block, pNew, cur_block) != cur_block)
2910 {
2911 // Lost a race to add pNew as new head
2912 delete pNew;
2913 }
2914 }
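    // Illustration only (not compiled): the lock-free head-prepend above,
    // sketched with std::atomic as a stand-in for InterlockedCompareExchangeT.
    //
    //     counter_block * head = m_cur_counter_block.load();
    //     pNew->next = head;
    //     if (!m_cur_counter_block.compare_exchange_strong(head, pNew))
    //         delete pNew;   // lost the race; loop again and draw from the winner's block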
2915
2916 CONSISTENCY_CHECK(counter_index < counter_block::MAX_COUNTER_ENTRIES);
2917 CONSISTENCY_CHECK(CheckPointer(cur_block));
2918
2919 // Initialize the default miss counter for this resolve stub
2920 INT32* counterAddr = &(cur_block->block[counter_index]);
2921 *counterAddr = STUB_MISS_COUNT_VALUE;
2922
2923 //allocate from the requisite heap and copy the templates for each piece over it.
2924 ResolveHolder * holder = (ResolveHolder*) (void*)
2925 resolve_heap->AllocAlignedMem(sizeof(ResolveHolder), CODE_SIZE_ALIGN);
2926
2927 holder->Initialize(addrOfResolver, addrOfPatcher,
2928 dispatchToken, DispatchCache::HashToken(dispatchToken),
2929 g_resolveCache->GetCacheBaseAddr(), counterAddr);
2930 ClrFlushInstructionCache(holder->stub(), holder->stub()->size());
2931
2932 AddToCollectibleVSDRangeList(holder);
2933
2934 //incr our counters
2935 stats.stub_poly_counter++;
2936 stats.stub_space += sizeof(ResolveHolder)+sizeof(size_t);
2937 LOG((LF_STUBS, LL_INFO10000, "GenerateResolveStub for token" FMT_ADDR "at" FMT_ADDR "\n",
2938 DBG_ADDR(dispatchToken), DBG_ADDR(holder->stub())));
2939
2940#ifdef FEATURE_PERFMAP
2941 PerfMap::LogStubs(__FUNCTION__, "GenerateResolveStub", (PCODE)holder->stub(), holder->stub()->size());
2942#endif
2943
2944 RETURN (holder);
2945}
2946
2947//----------------------------------------------------------------------------
2948/* Generate a lookup stub for the given dispatchToken. addrOfResolver is where the stub always transfers control
2949*/
2950LookupHolder *VirtualCallStubManager::GenerateLookupStub(PCODE addrOfResolver, size_t dispatchToken)
2951{
2952 CONTRACT (LookupHolder*) {
2953 THROWS;
2954 GC_TRIGGERS;
2955 INJECT_FAULT(COMPlusThrowOM(););
2956 PRECONDITION(addrOfResolver != NULL);
2957 POSTCONDITION(CheckPointer(RETVAL));
2958 } CONTRACT_END;
2959
2960 //allocate from the requisite heap and copy the template over it.
2961 LookupHolder * holder = (LookupHolder*) (void*) lookup_heap->AllocAlignedMem(sizeof(LookupHolder), CODE_SIZE_ALIGN);
2962
2963 holder->Initialize(addrOfResolver, dispatchToken);
2964 ClrFlushInstructionCache(holder->stub(), holder->stub()->size());
2965
2966 AddToCollectibleVSDRangeList(holder);
2967
2968 //incr our counters
2969 stats.stub_lookup_counter++;
2970 stats.stub_space += sizeof(LookupHolder);
2971 LOG((LF_STUBS, LL_INFO10000, "GenerateLookupStub for token" FMT_ADDR "at" FMT_ADDR "\n",
2972 DBG_ADDR(dispatchToken), DBG_ADDR(holder->stub())));
2973
2974#ifdef FEATURE_PERFMAP
2975 PerfMap::LogStubs(__FUNCTION__, "GenerateLookupStub", (PCODE)holder->stub(), holder->stub()->size());
2976#endif
2977
2978 RETURN (holder);
2979}
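//-----------------------------------------------------------------------------
// Illustration only: the lookup stub is the simplest of the three stub kinds.
// In rough pseudocode it just materializes the token and transfers control to
// the resolver, which will patch the call site on the way back:
//
//     pass dispatchToken to the resolve worker;
//     jump addrOfResolver;    // e.g. ResolveWorkerAsmStub
//-----------------------------------------------------------------------------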
2980
2981//----------------------------------------------------------------------------
2982/* Generate a cache entry
2983*/
2984ResolveCacheElem *VirtualCallStubManager::GenerateResolveCacheElem(void *addrOfCode,
2985 void *pMTExpected,
2986 size_t token)
2987{
2988 CONTRACTL
2989 {
2990 THROWS;
2991 GC_TRIGGERS;
2992 INJECT_FAULT(COMPlusThrowOM(););
2993 }
2994 CONTRACTL_END
2995
2996 CONSISTENCY_CHECK(CheckPointer(pMTExpected));
2997
2998 //allocate from the requisite heap and set the appropriate fields
2999 ResolveCacheElem *e = (ResolveCacheElem*) (void*)
3000 cache_entry_heap->AllocAlignedMem(sizeof(ResolveCacheElem), CODE_SIZE_ALIGN);
3001
3002 e->pMT = pMTExpected;
3003 e->token = token;
3004 e->target = addrOfCode;
3005
3006 e->pNext = NULL;
3007
3008 //incr our counters
3009 stats.cache_entry_counter++;
3010 stats.cache_entry_space += sizeof(ResolveCacheElem);
3011
3012 return e;
3013}
3014
3015//------------------------------------------------------------------
3016// Adds the stub manager to our linked list of virtual stub managers
3017 // and adds it to the global list.
3018//------------------------------------------------------------------
3019void VirtualCallStubManagerManager::AddStubManager(VirtualCallStubManager *pMgr)
3020{
3021 WRAPPER_NO_CONTRACT;
3022
3023 SimpleWriteLockHolder lh(&m_RWLock);
3024
3025 pMgr->m_pNext = m_pManagers;
3026 m_pManagers = pMgr;
3027
3028 STRESS_LOG2(LF_CORDB | LF_CLASSLOADER, LL_INFO100,
3029 "VirtualCallStubManagerManager::AddStubManager - 0x%p (vptr 0x%p)\n", pMgr, (*(PVOID*)pMgr));
3030}
3031
3032//------------------------------------------------------------------
3033// Removes the stub manager from our linked list of virtual stub
// managers and from the global list.
3035//------------------------------------------------------------------
3036void VirtualCallStubManagerManager::RemoveStubManager(VirtualCallStubManager *pMgr)
3037{
3038 CONTRACTL
3039 {
3040 NOTHROW;
3041 GC_NOTRIGGER;
3042 MODE_ANY;
3043 CAN_TAKE_LOCK;
3044 }
3045 CONTRACTL_END;
3046
3047 SimpleWriteLockHolder lh(&m_RWLock);
3048
3049 // Remove this manager from our list.
3050 for (VirtualCallStubManager **pCur = &m_pManagers;
3051 *pCur != NULL;
3052 pCur = &((*pCur)->m_pNext))
3053 {
        if (*pCur == pMgr)
        {
            *pCur = (*pCur)->m_pNext;
            // A manager is registered at most once, so we can stop here. Breaking also
            // avoids advancing the loop cursor through the slot we just rewrote, which
            // may now be NULL.
            break;
        }
3056 }
3057
3058 // Make sure we don't have a residual pointer left over.
3059 m_pCacheElem = NULL;
3060
3061 STRESS_LOG1(LF_CORDB | LF_CLASSLOADER, LL_INFO100,
3062 "VirtualCallStubManagerManager::RemoveStubManager - 0x%p\n", pMgr);
3063}
3064
3065//------------------------------------------------------------------
3066// Logs stub usage statistics
3067//------------------------------------------------------------------
3068void VirtualCallStubManager::LogStats()
3069{
3070 STATIC_CONTRACT_NOTHROW;
3071 STATIC_CONTRACT_GC_NOTRIGGER;
3072 STATIC_CONTRACT_FORBID_FAULT;
3073
    // Our Init routine assigns all fields atomically so testing one field should suffice to
    // test whether the Init succeeded.
3076 if (!resolvers)
3077 {
3078 return;
3079 }
3080
3081 BOOL isShared = parentDomain->IsSharedDomain();
3082 BOOL isDefault = parentDomain->IsDefaultDomain();
3083
3084 // Temp space to use for formatting the output.
3085 static const int FMT_STR_SIZE = 160;
3086 char szPrintStr[FMT_STR_SIZE];
3087 DWORD dwWriteByte;
3088
3089 if (g_hStubLogFile && (stats.site_write != 0))
3090 {
3091 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\nStats for %s Manager\r\n", isShared ? "the Shared" :
3092 isDefault ? "the Default" : "an Unshared");
3093 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
3094
3095 //output counters
3096 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "site_counter", stats.site_counter);
3097 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
3098 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "site_write", stats.site_write);
3099 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
3100 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "site_write_mono", stats.site_write_mono);
3101 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
3102 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "site_write_poly", stats.site_write_poly);
3103 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
3104
3105 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\nstub data\r\n");
3106 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
3107
3108 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "stub_lookup_counter", stats.stub_lookup_counter);
3109 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
3110 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "stub_mono_counter", stats.stub_mono_counter);
3111 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
3112 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "stub_poly_counter", stats.stub_poly_counter);
3113 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
3114 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "stub_space", stats.stub_space);
3115 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
3116
3117 size_t total, used;
3118 g_resolveCache->GetLoadFactor(&total, &used);
3119
3120 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "cache_entry_used", used);
3121 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
3122 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "cache_entry_counter", stats.cache_entry_counter);
3123 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
3124 sprintf_s(szPrintStr, COUNTOF(szPrintStr), OUTPUT_FORMAT_INT, "cache_entry_space", stats.cache_entry_space);
3125 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
3126
3127 sprintf_s(szPrintStr, COUNTOF(szPrintStr), "\r\ncache_load:\t%d used, %d total, utilization %#5.2f%%\r\n",
3128 used, total, 100.0 * double(used) / double(total));
3129 WriteFile (g_hStubLogFile, szPrintStr, (DWORD) strlen(szPrintStr), &dwWriteByte, NULL);
3130 }
3131
3132 resolvers->LogStats();
3133 dispatchers->LogStats();
3134 lookups->LogStats();
3135 vtableCallers->LogStats();
3136 cache_entries->LogStats();
3137
3138 g_site_counter += stats.site_counter;
3139 g_stub_lookup_counter += stats.stub_lookup_counter;
3140 g_stub_poly_counter += stats.stub_poly_counter;
3141 g_stub_mono_counter += stats.stub_mono_counter;
3142 g_stub_vtable_counter += stats.stub_vtable_counter;
3143 g_site_write += stats.site_write;
3144 g_site_write_poly += stats.site_write_poly;
3145 g_site_write_mono += stats.site_write_mono;
3146 g_worker_call += stats.worker_call;
3147 g_worker_call_no_patch += stats.worker_call_no_patch;
3148 g_worker_collide_to_mono += stats.worker_collide_to_mono;
3149 g_stub_space += stats.stub_space;
3150 g_cache_entry_counter += stats.cache_entry_counter;
3151 g_cache_entry_space += stats.cache_entry_space;
3152
3153 stats.site_counter = 0;
3154 stats.stub_lookup_counter = 0;
3155 stats.stub_poly_counter = 0;
3156 stats.stub_mono_counter = 0;
3157 stats.stub_vtable_counter = 0;
3158 stats.site_write = 0;
3159 stats.site_write_poly = 0;
3160 stats.site_write_mono = 0;
3161 stats.worker_call = 0;
3162 stats.worker_call_no_patch = 0;
3163 stats.worker_collide_to_mono = 0;
3164 stats.stub_space = 0;
3165 stats.cache_entry_counter = 0;
3166 stats.cache_entry_space = 0;
3167}
3168
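// Initialize this prober to search the given table for the pair (key1, key2).
// We cache the table's base and mask and compute the starting probe index via
// FormHash(); a prober must be re-initialized before it is reused for a new search.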
3169void Prober::InitProber(size_t key1, size_t key2, size_t* table)
3170{
3171 CONTRACTL {
3172 NOTHROW;
3173 GC_NOTRIGGER;
3174 FORBID_FAULT;
3175 } CONTRACTL_END
3176
3177 _ASSERTE(table);
3178
3179 keyA = key1;
3180 keyB = key2;
3181 base = &table[CALL_STUB_FIRST_INDEX];
3182 mask = table[CALL_STUB_MASK_INDEX];
3183 FormHash();
3184}
3185
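// Walk the probe sequence looking for an entry whose keys match (keyA, keyB)
// according to the associated comparer. Returns the matching entry, or
// CALL_STUB_EMPTY_ENTRY if we hit an empty slot or exhaust the probe sequence.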
3186size_t Prober::Find()
3187{
3188 CONTRACTL {
3189 NOTHROW;
3190 GC_NOTRIGGER;
3191 FORBID_FAULT;
3192 } CONTRACTL_END
3193
3194 size_t entry;
3195 //if this prober has already visited every slot, there is nothing more to look at.
3196 //note, this means that if a prober is going to be reused, the FormHash() function
3197 //needs to be called to reset it.
3198 if (NoMore())
3199 return CALL_STUB_EMPTY_ENTRY;
3200 do
3201 {
3202 entry = Read();
3203
3204 //if we hit an empty entry, it means it cannot be in the table
3205 if(entry==CALL_STUB_EMPTY_ENTRY)
3206 {
3207 return CALL_STUB_EMPTY_ENTRY;
3208 }
3209
3210 //we have a real entry, see if it is the one we want using our comparer
3211 comparer->SetContents(entry);
3212 if (comparer->Equals(keyA, keyB))
3213 {
3214 return entry;
3215 }
3216 } while(Next()); //Next() returns false when we have visited every slot
3217 return CALL_STUB_EMPTY_ENTRY;
3218}
3219
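// Try to add newEntry at the first empty slot along the probe sequence.
// Returns newEntry if we wrote it, the pre-existing entry if an equal entry
// was already in the table, or CALL_STUB_EMPTY_ENTRY if every slot was visited
// without finding room.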
3220size_t Prober::Add(size_t newEntry)
3221{
3222 CONTRACTL {
3223 NOTHROW;
3224 GC_NOTRIGGER;
3225 FORBID_FAULT;
3226 } CONTRACTL_END
3227
3228 size_t entry;
3229 //if we have visited every slot then there is no room in the table to add this new entry
3230 if (NoMore())
3231 return CALL_STUB_EMPTY_ENTRY;
3232
3233 do
3234 {
3235 entry = Read();
3236 if (entry==CALL_STUB_EMPTY_ENTRY)
3237 {
3238 //it's not in the table and we have the correct empty slot in hand
3239 //in which to add it.
3240 //try and grab it, if we succeed we break out to add the entry
            //if we fail, it means a racer swooped in and wrote into
3242 //this slot, so we will just keep looking
3243 if (GrabEntry(newEntry))
3244 {
3245 break;
3246 }
3247
3248 // We didn't grab this entry, so keep trying.
3249 continue;
3250 }
3251 //check if this entry is already in the table, if so we are done
3252 comparer->SetContents(entry);
3253 if (comparer->Equals(keyA, keyB))
3254 {
3255 return entry;
3256 }
3257 } while(Next()); //Next() returns false when we have visited every slot
3258
3259 //if we have visited every slot then there is no room in the table to add this new entry
3260 if (NoMore())
3261 return CALL_STUB_EMPTY_ENTRY;
3262
3263 CONSISTENCY_CHECK(Read() == newEntry);
3264 return newEntry;
3265}
3266
3267/*Atomically grab an entry, if it is empty, so we can write in it.
3268@TODO: It is not clear if this routine is actually necessary and/or if the
3269interlocked compare exchange is necessary as opposed to just a read write with racing allowed.
3270If we didn't have it, all that would happen is potentially more duplicates or
3271dropped entries, and we are supposed to run correctly even if they
3272happen. So in a sense this is a perf optimization, whose value has
3273not been measured, i.e. it might be faster without it.
3274*/
3275BOOL Prober::GrabEntry(size_t entryValue)
3276{
3277 LIMITED_METHOD_CONTRACT;
3278
3279 return FastInterlockCompareExchangePointer(&base[index],
3280 entryValue, static_cast<size_t>(CALL_STUB_EMPTY_ENTRY)) == CALL_STUB_EMPTY_ENTRY;
3281}
3282
3283inline void FastTable::IncrementCount()
3284{
3285 LIMITED_METHOD_CONTRACT;
3286
3287 // This MUST be an interlocked increment, since BucketTable::GetMoreSpace relies on
3288 // the return value of FastTable::isFull to tell it whether or not to continue with
3289 // trying to allocate a new FastTable. If two threads race and try to increment this
3290 // at the same time and one increment is lost, then the size will be inaccurate and
3291 // BucketTable::GetMoreSpace will never succeed, resulting in an infinite loop trying
3292 // to add a new entry.
3293 FastInterlockIncrement((LONG *)&contents[CALL_STUB_COUNT_INDEX]);
3294}
3295
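// Add an entry via the supplied prober. The count is incremented only when the
// prober reports that our entry is the one that was actually written, so losing
// a race or finding a duplicate does not inflate the size consulted by isFull().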
3296size_t FastTable::Add(size_t entry, Prober* probe)
3297{
3298 CONTRACTL {
3299 NOTHROW;
3300 GC_NOTRIGGER;
3301 FORBID_FAULT;
3302 } CONTRACTL_END
3303
3304 size_t result = probe->Add(entry);
3305 if (result == entry) IncrementCount();
3306 return result;
3307}
3308
3309size_t FastTable::Find(Prober* probe)
3310{
3311 WRAPPER_NO_CONTRACT;
3312
3313 return probe->Find();
3314}
3315
3316/*Increase the size of the bucket referenced by the prober p and copy the existing members into it.
3317Since duplicates and lost entries are okay, we can build the larger table
3318and then try to swap it in. If it turns out that somebody else is racing us,
3319the worst that will happen is we drop a few entries on the floor, which is okay.
3320If by chance we swap out a table that somebody else is inserting an entry into, that
3321is okay too, just another dropped entry. If we detect dups, we just drop them on
3322the floor. */
3323BOOL BucketTable::GetMoreSpace(const Prober* p)
3324{
3325 CONTRACTL {
3326 THROWS;
3327 GC_TRIGGERS;
3328 MODE_COOPERATIVE; // This is necessary for synchronization with BucketTable::Reclaim
3329 INJECT_FAULT(COMPlusThrowOM(););
3330 } CONTRACTL_END;
3331
3332 //get ahold of the current bucket
3333 Prober probe(p->comparer);
3334 size_t index = ComputeBucketIndex(p->keyA, p->keyB);
3335
3336 FastTable* oldBucket = (FastTable*) Read(index);
3337
3338 if (!oldBucket->isFull())
3339 {
3340 return TRUE;
3341 }
3342 //make a larger bucket
3343 size_t numEntries;
3344 if (oldBucket->tableSize() == CALL_STUB_MIN_ENTRIES)
3345 {
3346 numEntries = CALL_STUB_SECONDARY_ENTRIES;
3347 }
3348 else
3349 {
3350 numEntries = oldBucket->tableSize()*CALL_STUB_GROWTH_FACTOR;
3351 }
3352
3353 FastTable* newBucket = FastTable::MakeTable(numEntries);
3354
3355 //copy via insertion from the old to the new bucket
3356 size_t* limit = &oldBucket->contents[(oldBucket->tableSize())+CALL_STUB_FIRST_INDEX];
3357 size_t* e;
3358 for (e = &oldBucket->contents[CALL_STUB_FIRST_INDEX]; e<limit; e++)
3359 {
3360 size_t moved = *e;
3361 if (moved == CALL_STUB_EMPTY_ENTRY)
3362 {
3363 continue;
3364 }
3365 probe.comparer->SetContents(moved);
3366 probe.InitProber(probe.comparer->KeyA(), probe.comparer->KeyB(), &newBucket->contents[0]);
3367 //if the new bucket fills up, give up (this should never happen I think)
3368 if (newBucket->Add(moved, &probe) == CALL_STUB_EMPTY_ENTRY)
3369 {
3370 _ASSERTE(!"This should never happen");
3371 return FALSE;
3372 }
3373 }
3374
3375 // Doing an interlocked exchange here ensures that if someone has raced and beaten us to
3376 // replacing the entry, then we will just put the new bucket we just created in the
3377 // dead list instead of risking a race condition which would put a duplicate of the old
3378 // bucket in the dead list (and even possibly cause a cyclic list).
3379 if (FastInterlockCompareExchangePointer(reinterpret_cast<FastTable * volatile *>(&buckets[index]), newBucket, oldBucket) != oldBucket)
3380 oldBucket = newBucket;
3381
3382 // Link the old onto the "to be reclaimed" list.
3383 // Use the dead link field of the abandoned buckets to form the list
3384 FastTable* list;
3385 do {
3386 list = VolatileLoad(&dead);
3387 oldBucket->contents[CALL_STUB_DEAD_LINK] = (size_t) list;
3388 } while (FastInterlockCompareExchangePointer(&dead, oldBucket, list) != list);
3389
3390#ifdef _DEBUG
3391 {
3392 // Validate correctness of the list
3393 FastTable *curr = oldBucket;
3394 while (curr)
3395 {
3396 FastTable *next = (FastTable *) curr->contents[CALL_STUB_DEAD_LINK];
3397 size_t i = 0;
3398 while (next)
3399 {
3400 next = (FastTable *) next->contents[CALL_STUB_DEAD_LINK];
3401 _ASSERTE(curr != next); // Make sure we don't have duplicates
3402 _ASSERTE(i++ < SIZE_T_MAX/4); // This just makes sure we don't have a cycle
3403 }
3404 curr = next;
3405 }
3406 }
3407#endif // _DEBUG
3408
3409 //update our counters
3410 stats.bucket_space_dead += UINT32((oldBucket->tableSize()+CALL_STUB_FIRST_INDEX)*sizeof(void*));
3411 stats.bucket_space -= UINT32((oldBucket->tableSize()+CALL_STUB_FIRST_INDEX)*sizeof(void*));
3412 stats.bucket_space += UINT32((newBucket->tableSize()+CALL_STUB_FIRST_INDEX)*sizeof(void*));
3413 return TRUE;
3414}
3415
3416void BucketTable::Reclaim()
3417{
3418
3419 CONTRACTL
3420 {
3421 NOTHROW;
3422 GC_NOTRIGGER;
3423 FORBID_FAULT;
3424 }
3425 CONTRACTL_END
3426
3427 //reclaim the dead (abandoned) buckets on the dead list
3428 // The key issue is to not reclaim the list if any thread is in a stub or
3429 // if any thread is accessing (read or write) the cache tables. So we will declare
3430 // those points to be non-gc safe points, and reclaim when the gc syncs the threads
3431 //@TODO: add an assert to ensure we are at a gc safe point
3432 FastTable* list = dead;
3433
3434 //see if there is anything to do.
3435 //We ignore the race, since we will just pick them up on the next go around
3436 if (list == NULL) return;
3437
3438 //Try and grab the list exclusively, if we fail, it means that either somebody
    //else grabbed it, or something got added. In either case we just give up and assume
3440 //we will catch it on the next go around.
3441 //we use an interlock here in case we are called during shutdown not at a gc safe point
3442 //in which case the race is between several threads wanting to reclaim.
    //We are assuming that actually having to do anything is rare
3444 //so that the interlocked overhead is acceptable. If this is not true, then
3445 //we need to examine exactly how and when we may be called during shutdown.
3446 if (FastInterlockCompareExchangePointer(&dead, NULL, list) != list)
3447 return;
3448
3449#ifdef _DEBUG
3450 // Validate correctness of the list
3451 FastTable *curr = list;
3452 while (curr)
3453 {
3454 FastTable *next = (FastTable *) curr->contents[CALL_STUB_DEAD_LINK];
3455 size_t i = 0;
3456 while (next)
3457 {
3458 next = (FastTable *) next->contents[CALL_STUB_DEAD_LINK];
3459 _ASSERTE(curr != next); // Make sure we don't have duplicates
3460 _ASSERTE(i++ < SIZE_T_MAX/4); // This just makes sure we don't have a cycle
3461 }
3462 curr = next;
3463 }
3464#endif // _DEBUG
3465
3466 //we now have the list off by ourself, so we can just walk and cleanup
3467 while (list)
3468 {
3469 size_t next = list->contents[CALL_STUB_DEAD_LINK];
3470 delete [] (size_t*)list;
3471 list = (FastTable*) next;
3472 }
3473}
3474
3475//
3476// When using SetUpProber the proper values to use for keyA, keyB are:
3477//
3478// KeyA KeyB
3479//-------------------------------------------------------
3480// lookups token the stub calling convention
3481// dispatchers token the expected MT
3482// resolver token the stub calling convention
3483// cache_entries token the expected method table
3484// vtableCallers token unused (zero)
3485//
3486BOOL BucketTable::SetUpProber(size_t keyA, size_t keyB, Prober *prober)
3487{
3488 CONTRACTL {
3489 THROWS;
3490 GC_TRIGGERS;
3491 MODE_COOPERATIVE; // This is necessary for synchronization with BucketTable::Reclaim
3492 INJECT_FAULT(COMPlusThrowOM(););
3493 } CONTRACTL_END;
3494
3495 // The buckets[index] table starts off initialized to all CALL_STUB_EMPTY_ENTRY
3496 // and we should write each buckets[index] exactly once. However in a multi-proc
3497 // scenario each processor could see old memory values that would cause us to
3498 // leak memory.
3499 //
    // Since this is a fairly hot code path and it is very rare for buckets[index]
3501 // to be CALL_STUB_EMPTY_ENTRY, we can first try a non-volatile read and then
3502 // if it looks like we need to create a new FastTable we double check by doing
3503 // a volatile read.
3504 //
3505 // Note that BucketTable::GetMoreSpace also updates buckets[index] when the FastTable
3506 // grows to 90% full. (CALL_STUB_LOAD_FACTOR is 90%)
3507
3508 size_t index = ComputeBucketIndex(keyA, keyB);
3509 size_t bucket = buckets[index]; // non-volatile read
3510 if (bucket==CALL_STUB_EMPTY_ENTRY)
3511 {
3512 bucket = Read(index); // volatile read
3513 }
3514
3515 if (bucket==CALL_STUB_EMPTY_ENTRY)
3516 {
3517 FastTable* newBucket = FastTable::MakeTable(CALL_STUB_MIN_ENTRIES);
3518
3519 // Doing an interlocked exchange here ensures that if someone has raced and beaten us to
3520 // replacing the entry, then we will free the new bucket we just created.
3521 bucket = FastInterlockCompareExchangePointer(&buckets[index], reinterpret_cast<size_t>(newBucket), static_cast<size_t>(CALL_STUB_EMPTY_ENTRY));
3522 if (bucket == CALL_STUB_EMPTY_ENTRY)
3523 {
            // We successfully wrote newBucket into buckets[index], overwriting the CALL_STUB_EMPTY_ENTRY value
3525 stats.bucket_space += UINT32((newBucket->tableSize()+CALL_STUB_FIRST_INDEX)*sizeof(void*));
3526 bucket = (size_t) newBucket;
3527 }
3528 else
3529 {
3530 // Someone else wrote buckets[index] before us
3531 // and bucket contains the value that they wrote
3532 // We must free the memory that we allocated
3533 // and we will use the value that someone else wrote
3534 delete newBucket;
3535 newBucket = (FastTable*) bucket;
3536 }
3537 }
3538
3539 return ((FastTable*)(bucket))->SetUpProber(keyA, keyB, prober);
3540}
3541
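// Add an entry to the bucket that the prober was set up for (callers are expected
// to have called SetUpProber with the same keys first). If the underlying FastTable
// is full we grow it via GetMoreSpace, re-run SetUpProber against the new table,
// and retry; CALL_STUB_EMPTY_ENTRY is returned only if growing fails.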
3542size_t BucketTable::Add(size_t entry, Prober* probe)
3543{
3544 CONTRACTL {
3545 THROWS;
3546 GC_TRIGGERS;
3547 MODE_COOPERATIVE; // This is necessary for synchronization with BucketTable::Reclaim
3548 INJECT_FAULT(COMPlusThrowOM(););
3549 } CONTRACTL_END
3550
3551 FastTable* table = (FastTable*)(probe->items());
3552 size_t result = table->Add(entry,probe);
3553 if (result != CALL_STUB_EMPTY_ENTRY)
3554 {
3555 return result;
3556 }
    //we must have missed count(s) and the table is now full, so let's
3558 //grow and retry (this should be rare)
3559 if (!GetMoreSpace(probe)) return CALL_STUB_EMPTY_ENTRY;
3560 if (!SetUpProber(probe->keyA, probe->keyB, probe)) return CALL_STUB_EMPTY_ENTRY;
3561 return Add(entry, probe); //recurse in for the retry to write the entry
3562}
3563
3564void BucketTable::LogStats()
3565{
3566 LIMITED_METHOD_CONTRACT;
3567
3568 // Update stats
3569 g_bucket_space += stats.bucket_space;
3570 g_bucket_space_dead += stats.bucket_space_dead;
3571
3572 stats.bucket_space = 0;
3573 stats.bucket_space_dead = 0;
3574}
3575
3576DispatchCache::DispatchCache()
3577#ifdef CHAIN_LOOKUP
3578 : m_writeLock(CrstStubDispatchCache, CRST_UNSAFE_ANYMODE)
3579#endif
3580{
3581 CONTRACTL
3582 {
3583 THROWS;
3584 GC_TRIGGERS;
3585 INJECT_FAULT(COMPlusThrowOM());
3586 }
3587 CONTRACTL_END
3588
3589 //initialize the cache to be empty, i.e. all slots point to the empty entry
3590 ResolveCacheElem* e = new ResolveCacheElem();
3591 e->pMT = (void *) (-1); //force all method tables to be misses
3592 e->pNext = NULL; // null terminate the chain for the empty entry
3593 empty = e;
    for (int i = 0; i < CALL_STUB_CACHE_SIZE; i++)
3595 ClearCacheEntry(i);
3596
3597 // Initialize statistics
3598 memset(&stats, 0, sizeof(stats));
3599#ifdef STUB_LOGGING
3600 memset(&cacheData, 0, sizeof(cacheData));
3601#endif
3602}
3603
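// Look up the cache entry for the pair (token, mt). Callers may pass a precomputed
// tokenHash, or INVALID_HASH to have it computed here. With CHAIN_LOOKUP defined the
// entire chain hanging off the hashed bucket is searched; returns NULL on a miss.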
3604ResolveCacheElem* DispatchCache::Lookup(size_t token, UINT16 tokenHash, void* mt)
3605{
3606 WRAPPER_NO_CONTRACT;
3607 if (tokenHash == INVALID_HASH)
3608 tokenHash = HashToken(token);
3609 UINT16 idx = HashMT(tokenHash, mt);
3610 ResolveCacheElem *pCurElem = GetCacheEntry(idx);
3611
3612#if defined(STUB_LOGGING) && defined(CHAIN_LOOKUP)
3613 BOOL chainedLookup = FALSE;
3614#endif
    // No need to conditionalize on CHAIN_LOOKUP: when CHAIN_LOOKUP is undefined this
    // loop runs at most once, because each bucket only ever holds a single element
    // (a chain of length 1).
3618 while (pCurElem != empty) {
3619 if (pCurElem->Equals(token, mt)) {
3620 return pCurElem;
3621 }
3622#if defined(STUB_LOGGING) && defined(CHAIN_LOOKUP)
3623 // Only want to inc the counter once per chain search.
3624 if (pCurElem == GetCacheEntry(idx)) {
3625 chainedLookup = TRUE;
3626 g_chained_lookup_external_call_counter++;
3627 }
3628#endif // defined(STUB_LOGGING) && defined(CHAIN_LOOKUP)
3629 pCurElem = pCurElem->Next();
3630 }
3631#if defined(STUB_LOGGING) && defined(CHAIN_LOOKUP)
3632 if (chainedLookup) {
3633 g_chained_lookup_external_miss_counter++;
3634 }
3635#endif // defined(STUB_LOGGING) && defined(CHAIN_LOOKUP)
3636 return NULL; /* with chain lookup disabled this returns NULL */
3637}
3638
3639// returns true if we wrote the resolver cache entry with the new elem
3640// also returns true if the cache entry already contained elem (the miss case)
3641//
3642BOOL DispatchCache::Insert(ResolveCacheElem* elem, InsertKind insertKind)
3643{
3644 CONTRACTL {
3645 THROWS;
3646 GC_TRIGGERS;
3647 FORBID_FAULT;
3648 PRECONDITION(insertKind != IK_NONE);
3649 } CONTRACTL_END;
3650
3651#ifdef CHAIN_LOOKUP
3652 CrstHolder lh(&m_writeLock);
3653#endif
3654
3655 // Figure out what bucket this element belongs in
3656 UINT16 tokHash = HashToken(elem->token);
3657 UINT16 hash = HashMT(tokHash, elem->pMT);
3658 UINT16 idx = hash;
3659 BOOL write = FALSE;
3660 BOOL miss = FALSE;
3661 BOOL hit = FALSE;
3662 BOOL collide = FALSE;
3663
3664#ifdef _DEBUG
3665 elem->debug_hash = tokHash;
3666 elem->debug_index = idx;
3667#endif // _DEBUG
3668
3669 ResolveCacheElem* cell = GetCacheEntry(idx);
3670
3671#ifdef CHAIN_LOOKUP
3672 // There is the possibility of a race where two threads will
3673 // try to generate a ResolveCacheElem for the same tuple, and
3674 // the first thread will get the lock and insert the element
3675 // and the second thread coming in should detect this and not
3676 // re-add the element, since it is already likely at the start
3677 // of the list, and would result in the element looping to
3678 // itself.
3679 if (Lookup(elem->token, tokHash, elem->pMT))
3680#else // !CHAIN_LOOKUP
3681 if (cell == elem)
3682#endif // !CHAIN_LOOKUP
3683 {
3684 miss = TRUE;
3685 write = FALSE;
3686 }
3687 else
3688 {
3689 if (cell == empty)
3690 {
3691 hit = TRUE;
3692 write = TRUE;
3693 }
3694 }
3695 CONSISTENCY_CHECK(!(hit && miss));
3696
3697 // If we didn't have a miss or a hit then we had a collision with
3698 // a non-empty entry in our resolver cache
3699 if (!hit && !miss)
3700 {
3701 collide = TRUE;
3702
3703#ifdef CHAIN_LOOKUP
3704 // Always insert the entry into the chain
3705 write = TRUE;
3706#else // !CHAIN_LOOKUP
3707
3708 if (STUB_COLLIDE_WRITE_PCT < 100)
3709 {
3710 UINT32 coin = UINT32(GetRandomInt(100));
3711
3712 write = (coin < STUB_COLLIDE_WRITE_PCT);
3713 }
3714 else
3715 {
3716 write = TRUE;
3717 }
3718
3719#endif // !CHAIN_LOOKUP
3720 }
3721
3722 if (write)
3723 {
3724#ifdef CHAIN_LOOKUP
3725 // We create a list with the last pNext pointing at empty
3726 elem->pNext = cell;
3727#else // !CHAIN_LOOKUP
3728 elem->pNext = empty;
3729#endif // !CHAIN_LOOKUP
3730 SetCacheEntry(idx, elem);
3731 stats.insert_cache_write++;
3732 }
3733
3734 LOG((LF_STUBS, LL_INFO1000, "%8s Insert(token" FMT_ADDR "MethodTable" FMT_ADDR ") at [%03x] %7s %5s \n",
3735 (insertKind == IK_DISPATCH) ? "Dispatch" : (insertKind == IK_RESOLVE) ? "Resolve" : "External",
3736 DBG_ADDR(elem->token), DBG_ADDR(elem->pMT), hash,
3737 hit ? "HIT" : miss ? "MISS" : "COLLIDE", write ? "WRITE" : "KEEP"));
3738
3739 if (insertKind == IK_DISPATCH)
3740 stats.insert_cache_dispatch++;
3741 else if (insertKind == IK_RESOLVE)
3742 stats.insert_cache_resolve++;
3743 else if (insertKind == IK_SHARED)
3744 stats.insert_cache_shared++;
3745 else if (insertKind == IK_EXTERNAL)
3746 stats.insert_cache_external++;
3747
3748 if (hit)
3749 stats.insert_cache_hit++;
3750 else if (miss)
3751 stats.insert_cache_miss++;
3752 else if (collide)
3753 stats.insert_cache_collide++;
3754
3755 return write || miss;
3756}
3757
3758#ifdef CHAIN_LOOKUP
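// Move the given element to the head of its hash chain so that subsequent lookups
// find it first. Takes the cache write lock, and is a no-op if another thread has
// already promoted the element.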
3759void DispatchCache::PromoteChainEntry(ResolveCacheElem* elem)
3760{
3761 CONTRACTL {
3762 NOTHROW;
3763 GC_NOTRIGGER;
3764 FORBID_FAULT;
3765 } CONTRACTL_END;
3766
3767 CrstHolder lh(&m_writeLock);
3768 g_chained_entry_promoted++;
3769
3770 // Figure out what bucket this element belongs in
3771 UINT16 tokHash = HashToken(elem->token);
3772 UINT16 hash = HashMT(tokHash, elem->pMT);
3773 UINT16 idx = hash;
3774
3775 ResolveCacheElem *curElem = GetCacheEntry(idx);
3776
3777 // If someone raced in and promoted this element before us,
3778 // then we can just return. Furthermore, it would be an
3779 // error if we performed the below code, since we'd end up
3780 // with a self-referential element and an infinite loop.
3781 if (curElem == elem)
3782 {
3783 return;
3784 }
3785
    // Now loop through the chain to find the element that points to the element
    // we're promoting, so we can remove it from the chain.
3789 while (curElem->Next() != elem)
3790 {
3791 curElem = curElem->pNext;
3792 CONSISTENCY_CHECK(curElem != NULL);
3793 }
3794
3795 // Remove the element from the chain
3796 CONSISTENCY_CHECK(curElem->pNext == elem);
3797 curElem->pNext = elem->pNext;
3798
3799 // Set the promoted entry to the head of the list.
3800 elem->pNext = GetCacheEntry(idx);
3801 SetCacheEntry(idx, elem);
3802}
3803#endif // CHAIN_LOOKUP
3804
3805void DispatchCache::LogStats()
3806{
3807 LIMITED_METHOD_CONTRACT;
3808
3809 g_insert_cache_external += stats.insert_cache_external;
3810 g_insert_cache_shared += stats.insert_cache_shared;
3811 g_insert_cache_dispatch += stats.insert_cache_dispatch;
3812 g_insert_cache_resolve += stats.insert_cache_resolve;
3813 g_insert_cache_hit += stats.insert_cache_hit;
3814 g_insert_cache_miss += stats.insert_cache_miss;
3815 g_insert_cache_collide += stats.insert_cache_collide;
3816 g_insert_cache_write += stats.insert_cache_write;
3817
3818 stats.insert_cache_external = 0;
3819 stats.insert_cache_shared = 0;
3820 stats.insert_cache_dispatch = 0;
3821 stats.insert_cache_resolve = 0;
3822 stats.insert_cache_hit = 0;
3823 stats.insert_cache_miss = 0;
3824 stats.insert_cache_collide = 0;
3825 stats.insert_cache_write = 0;
3826}
3827
/* The following tables have entries with the following properties:
   1. Each entry is 12 bits wide, with 5, 6, or 7 one bits and 5, 6, or 7 zero bits.
   2. For each bit position, roughly half of the entries have that bit set.
   3. Adjacent entries, when xor-ed, differ in 5, 6, or 7 bit positions.
3832*/
3833#ifdef _WIN64
3834static const UINT16 tokenHashBits[64] =
3835#else // !_WIN64
3836static const UINT16 tokenHashBits[32] =
3837#endif // !_WIN64
3838{
3839 0xcd5, 0x8b9, 0x875, 0x439,
3840 0xbf0, 0x38d, 0xa5b, 0x6a7,
3841 0x78a, 0x9c8, 0xee2, 0x3d3,
3842 0xd94, 0x54e, 0x698, 0xa6a,
3843 0x753, 0x932, 0x4b7, 0x155,
3844 0x3a7, 0x9c8, 0x4e9, 0xe0b,
3845 0xf05, 0x994, 0x472, 0x626,
3846 0x15c, 0x3a8, 0x56e, 0xe2d,
3847
3848#ifdef _WIN64
3849 0xe3c, 0xbe2, 0x58e, 0x0f3,
3850 0x54d, 0x70f, 0xf88, 0xe2b,
3851 0x353, 0x153, 0x4a5, 0x943,
3852 0xaf2, 0x88f, 0x72e, 0x978,
3853 0xa13, 0xa0b, 0xc3c, 0xb72,
3854 0x0f7, 0x49a, 0xdd0, 0x366,
3855 0xd84, 0xba5, 0x4c5, 0x6bc,
3856 0x8ec, 0x0b9, 0x617, 0x85c,
3857#endif // _WIN64
3858};
3859
3860/*static*/ UINT16 DispatchCache::HashToken(size_t token)
3861{
3862 LIMITED_METHOD_CONTRACT;
3863
3864 UINT16 hash = 0;
3865 int index = 0;
3866
3867 // Note if you change the number of bits in CALL_STUB_CACHE_NUM_BITS
3868 // then we have to recompute the hash function
3869 // Though making the number of bits smaller should still be OK
3870 static_assert_no_msg(CALL_STUB_CACHE_NUM_BITS <= 12);
3871
3872 while (token)
3873 {
3874 if (token & 1)
3875 hash ^= tokenHashBits[index];
3876
3877 index++;
3878 token >>= 1;
3879 }
3880 _ASSERTE((hash & ~CALL_STUB_CACHE_MASK) == 0);
3881 return hash;
3882}
3883
3884/////////////////////////////////////////////////////////////////////////////////////////////
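// The iterator visits every ResolveCacheElem currently linked into the cache, bucket
// by bucket, skipping buckets that hold only the empty sentinel. It is constructed
// already positioned at the first valid entry (if any).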
3885DispatchCache::Iterator::Iterator(DispatchCache *pCache) : m_pCache(pCache), m_curBucket(-1)
3886{
3887 CONTRACTL {
3888 NOTHROW;
3889 GC_NOTRIGGER;
3890 PRECONDITION(CheckPointer(pCache));
3891 } CONTRACTL_END;
3892
3893 // Move to the first valid entry
3894 NextValidBucket();
3895}
3896
3897/////////////////////////////////////////////////////////////////////////////////////////////
3898void DispatchCache::Iterator::Next()
3899{
3900 CONTRACTL {
3901 NOTHROW;
3902 GC_NOTRIGGER;
3903 } CONTRACTL_END;
3904
3905 if (!IsValid()) {
3906 return;
3907 }
3908
3909 // Move to the next element in the chain
3910 m_ppCurElem = &((*m_ppCurElem)->pNext);
3911
3912 // If the next element was the empty sentinel entry, move to the next valid bucket.
3913 if (*m_ppCurElem == m_pCache->empty) {
3914 NextValidBucket();
3915 }
3916}
3917
3918/////////////////////////////////////////////////////////////////////////////////////////////
// This doesn't actually delete the entry; it just unlinks it from the chain.
3920// Returns the unlinked entry.
3921ResolveCacheElem *DispatchCache::Iterator::UnlinkEntry()
3922{
3923 CONTRACTL {
3924 NOTHROW;
3925 GC_NOTRIGGER;
3926 CONSISTENCY_CHECK(IsValid());
3927 } CONTRACTL_END;
3928 ResolveCacheElem *pUnlinkedEntry = *m_ppCurElem;
3929 *m_ppCurElem = (*m_ppCurElem)->pNext;
3930 pUnlinkedEntry->pNext = m_pCache->empty;
3931 // If unlinking this entry took us to the end of this bucket, need to move to the next.
3932 if (*m_ppCurElem == m_pCache->empty) {
3933 NextValidBucket();
3934 }
3935 return pUnlinkedEntry;
3936}
3937
3938/////////////////////////////////////////////////////////////////////////////////////////////
3939void DispatchCache::Iterator::NextValidBucket()
3940{
3941 CONTRACTL {
3942 NOTHROW;
3943 GC_NOTRIGGER;
3944 CONSISTENCY_CHECK(IsValid());
3945 } CONTRACTL_END;
3946
3947 // Move to the next bucket that contains a cache entry
3948 do {
3949 NextBucket();
3950 } while (IsValid() && *m_ppCurElem == m_pCache->empty);
3951}
3952
3953#endif // !DACCESS_COMPILE
3954
3955/////////////////////////////////////////////////////////////////////////////////////////////
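// Find the VirtualCallStubManager that owns stubAddress. In non-DAC builds we first
// check the one-element cache (m_pCacheElem) and then the current thread's domain;
// if those miss (or in DAC builds) we fall back to scanning the global list of
// managers. Returns NULL if no manager owns the address.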
3956VirtualCallStubManager *VirtualCallStubManagerManager::FindVirtualCallStubManager(PCODE stubAddress)
3957{
3958 CONTRACTL {
3959 NOTHROW;
3960 GC_NOTRIGGER;
3961 } CONTRACTL_END;
3962
3963 SUPPORTS_DAC;
3964
3965#ifndef DACCESS_COMPILE
3966 // Check the cached element
3967 {
3968 VirtualCallStubManager *pMgr = m_pCacheElem;
3969 if (pMgr != NULL && pMgr->CheckIsStub_Internal(stubAddress))
3970 {
3971 return pMgr;
3972 }
3973 }
3974
3975 // Check the current and shared domains.
3976 {
3977 Thread *pThread = GetThread();
3978
3979 if (pThread != NULL)
3980 {
3981 // Check the current domain
3982 {
3983 BaseDomain *pDom = pThread->GetDomain();
3984 VirtualCallStubManager *pMgr = pDom->GetLoaderAllocator()->GetVirtualCallStubManager();
3985 if (pMgr->CheckIsStub_Internal(stubAddress))
3986 {
3987 m_pCacheElem = pMgr;
3988 return pMgr;
3989 }
3990 }
3991 }
3992 }
3993#endif
3994
3995 // If both previous attempts fail, run through the list. This is likely
3996 // because the thread is a debugger thread running outside of the domain
3997 // that owns the target stub.
3998 {
3999 VirtualCallStubManagerIterator it =
4000 VirtualCallStubManagerManager::GlobalManager()->IterateVirtualCallStubManagers();
4001
4002 while (it.Next())
4003 {
4004 if (it.Current()->CheckIsStub_Internal(stubAddress))
4005 {
4006#ifndef DACCESS_COMPILE
4007 m_pCacheElem = it.Current();
4008#endif
4009 return it.Current();
4010 }
4011 }
4012 }
4013
4014 // No VirtualCallStubManager owns this address.
4015 return NULL;
4016}
4017
4018static VirtualCallStubManager * const IT_START = (VirtualCallStubManager *)(-1);
4019
4020/////////////////////////////////////////////////////////////////////////////////////////////
4021// Move to the next element. Iterators are created at
4022// start-1, so must call Next before using Current
4023BOOL VirtualCallStubManagerIterator::Next()
4024{
4025 LIMITED_METHOD_DAC_CONTRACT;
4026
4027 if (m_fIsStart)
4028 {
4029 m_fIsStart = FALSE;
4030 }
4031 else if (m_pCurMgr != NULL)
4032 {
4033 m_pCurMgr = m_pCurMgr->m_pNext;
4034 }
4035
4036 return (m_pCurMgr != NULL);
4037}
4038
4039/////////////////////////////////////////////////////////////////////////////////////////////
4040// Get the current contents of the iterator
4041VirtualCallStubManager *VirtualCallStubManagerIterator::Current()
4042{
4043 LIMITED_METHOD_DAC_CONTRACT;
4044 CONSISTENCY_CHECK(!m_fIsStart);
4045 CONSISTENCY_CHECK(CheckPointer(m_pCurMgr));
4046
4047 return m_pCurMgr;
4048}
4049
4050#ifndef DACCESS_COMPILE
4051/////////////////////////////////////////////////////////////////////////////////////////////
4052VirtualCallStubManagerManager::VirtualCallStubManagerManager()
4053 : m_pManagers(NULL),
4054 m_pCacheElem(NULL),
4055 m_RWLock(COOPERATIVE_OR_PREEMPTIVE, LOCK_TYPE_DEFAULT)
4056{
4057 LIMITED_METHOD_CONTRACT;
4058}
4059
4060/////////////////////////////////////////////////////////////////////////////////////////////
4061/* static */
4062void VirtualCallStubManagerManager::InitStatic()
4063{
4064 STANDARD_VM_CONTRACT;
4065
4066 CONSISTENCY_CHECK(g_pManager == NULL);
4067 g_pManager = new VirtualCallStubManagerManager();
4068}
4069#endif
4070
4071/////////////////////////////////////////////////////////////////////////////////////////////
4072VirtualCallStubManagerIterator VirtualCallStubManagerManager::IterateVirtualCallStubManagers()
4073{
4074 WRAPPER_NO_CONTRACT;
4075 SUPPORTS_DAC;
4076
4077 VirtualCallStubManagerIterator it(VirtualCallStubManagerManager::GlobalManager());
4078 return it;
4079}
4080
4081/////////////////////////////////////////////////////////////////////////////////////////////
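// Returns TRUE if any registered VirtualCallStubManager owns stubStartAddress.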
4082BOOL VirtualCallStubManagerManager::CheckIsStub_Internal(
4083 PCODE stubStartAddress)
4084{
4085 WRAPPER_NO_CONTRACT;
4086 SUPPORTS_DAC;
4087
4088 VirtualCallStubManager *pMgr = FindVirtualCallStubManager(stubStartAddress);
4089 return (pMgr != NULL);
4090}
4091
4092/////////////////////////////////////////////////////////////////////////////////////////////
4093BOOL VirtualCallStubManagerManager::DoTraceStub(
4094 PCODE stubStartAddress,
4095 TraceDestination *trace)
4096{
4097 WRAPPER_NO_CONTRACT;
4098
4099 // Find the owning manager. We should succeed, since presumably someone already
4100 // called CheckIsStub on us to find out that we own the address, and already
4101 // called TraceManager to initiate a trace.
4102 VirtualCallStubManager *pMgr = FindVirtualCallStubManager(stubStartAddress);
4103 CONSISTENCY_CHECK(CheckPointer(pMgr));
4104
4105 return pMgr->DoTraceStub(stubStartAddress, trace);
4106}
4107
4108#ifndef DACCESS_COMPILE
4109/////////////////////////////////////////////////////////////////////////////////////////////
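// Map a virtual-call stub entry point back to the MethodDesc it would dispatch to
// for the given MethodTable: find the owning stub manager, recover the dispatch
// token from the stub, run the resolver against pMT, and translate the resulting
// target address into a MethodDesc.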
4110MethodDesc *VirtualCallStubManagerManager::Entry2MethodDesc(
4111 PCODE stubStartAddress,
4112 MethodTable *pMT)
4113{
4114 CONTRACTL
4115 {
4116 THROWS;
4117 GC_TRIGGERS;
4118 INJECT_FAULT(COMPlusThrowOM(););
4119 }
4120 CONTRACTL_END
4121
4122 if (pMT == NULL)
4123 return NULL;
4124
4125 VirtualCallStubManager::StubKind sk;
4126
4127 // Find the owning manager.
4128 VirtualCallStubManager *pMgr = VirtualCallStubManager::FindStubManager(stubStartAddress, &sk);
4129 if (pMgr == NULL)
4130 return NULL;
4131
4132 // Do the full resolve
4133 size_t token = VirtualCallStubManager::GetTokenFromStubQuick(pMgr, stubStartAddress, sk);
4134
4135 PCODE target = NULL;
4136 // TODO: passing NULL as protectedObj here can lead to incorrect behavior for ICastable objects
4137 // We need to review if this is the case and refactor this code if we want ICastable to become officially supported
4138 VirtualCallStubManager::Resolver(pMT, token, NULL, &target, TRUE /* throwOnConflict */);
4139
4140 return pMT->GetMethodDescForSlotAddress(target);
4141}
4142#endif
4143
4144#ifdef DACCESS_COMPILE
4145void VirtualCallStubManagerManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags)
4146{
4147 SUPPORTS_DAC;
4148 WRAPPER_NO_CONTRACT;
4149 VirtualCallStubManagerIterator it = IterateVirtualCallStubManagers();
4150 while (it.Next())
4151 {
4152 it.Current()->DoEnumMemoryRegions(flags);
4153 }
4154}
4155#endif
4156
4157//----------------------------------------------------------------------------
4158BOOL VirtualCallStubManagerManager::TraceManager(
4159 Thread *thread, TraceDestination *trace,
4160 T_CONTEXT *pContext, BYTE **pRetAddr)
4161{
4162 WRAPPER_NO_CONTRACT;
4163
4164 // Find the owning manager. We should succeed, since presumably someone already
4165 // called CheckIsStub on us to find out that we own the address.
4166 VirtualCallStubManager *pMgr = FindVirtualCallStubManager(GetIP(pContext));
4167 CONSISTENCY_CHECK(CheckPointer(pMgr));
4168
4169 // Forward the call to the appropriate manager.
4170 return pMgr->TraceManager(thread, trace, pContext, pRetAddr);
4171}
4172