1 | /* |
2 | * i386 CPUID helper functions |
3 | * |
4 | * Copyright (c) 2003 Fabrice Bellard |
5 | * |
6 | * This library is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU Lesser General Public |
8 | * License as published by the Free Software Foundation; either |
9 | * version 2 of the License, or (at your option) any later version. |
10 | * |
11 | * This library is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
14 | * Lesser General Public License for more details. |
15 | * |
16 | * You should have received a copy of the GNU Lesser General Public |
17 | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
18 | */ |
19 | |
20 | #include "qemu/osdep.h" |
21 | #include "qemu/units.h" |
22 | #include "qemu/cutils.h" |
23 | #include "qemu/bitops.h" |
24 | #include "qemu/qemu-print.h" |
25 | |
26 | #include "cpu.h" |
27 | #include "exec/exec-all.h" |
28 | #include "sysemu/kvm.h" |
29 | #include "sysemu/reset.h" |
30 | #include "sysemu/hvf.h" |
31 | #include "sysemu/cpus.h" |
32 | #include "kvm_i386.h" |
33 | #include "sev_i386.h" |
34 | |
35 | #include "qemu/error-report.h" |
36 | #include "qemu/module.h" |
37 | #include "qemu/option.h" |
38 | #include "qemu/config-file.h" |
39 | #include "qapi/error.h" |
40 | #include "qapi/qapi-visit-machine.h" |
41 | #include "qapi/qapi-visit-run-state.h" |
42 | #include "qapi/qmp/qdict.h" |
43 | #include "qapi/qmp/qerror.h" |
44 | #include "qapi/visitor.h" |
45 | #include "qom/qom-qobject.h" |
46 | #include "sysemu/arch_init.h" |
47 | #include "qapi/qapi-commands-machine-target.h" |
48 | |
49 | #include "standard-headers/asm-x86/kvm_para.h" |
50 | |
51 | #include "sysemu/sysemu.h" |
52 | #include "sysemu/tcg.h" |
53 | #include "hw/qdev-properties.h" |
54 | #include "hw/i386/topology.h" |
55 | #ifndef CONFIG_USER_ONLY |
56 | #include "exec/address-spaces.h" |
57 | #include "hw/xen/xen.h" |
58 | #include "hw/i386/apic_internal.h" |
59 | #include "hw/boards.h" |
60 | #endif |
61 | |
62 | #include "disas/capstone.h" |
63 | |
/* Helpers for building CPUID[2] descriptors: */

/*
 * One entry of the CPUID leaf 2 descriptor table: the cache geometry
 * that a given one-byte descriptor value stands for.
 */
struct CPUID2CacheDescriptorInfo {
    enum CacheType type;  /* data, instruction, or unified */
    int level;            /* cache level: 1, 2 or 3 */
    int size;             /* total cache size in bytes */
    int line_size;        /* cache line size in bytes */
    int associativity;    /* number of ways */
};
73 | |
74 | /* |
75 | * Known CPUID 2 cache descriptors. |
76 | * From Intel SDM Volume 2A, CPUID instruction |
77 | */ |
78 | struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = { |
79 | [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB, |
80 | .associativity = 4, .line_size = 32, }, |
81 | [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB, |
82 | .associativity = 4, .line_size = 32, }, |
83 | [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB, |
84 | .associativity = 4, .line_size = 64, }, |
85 | [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB, |
86 | .associativity = 2, .line_size = 32, }, |
87 | [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, |
88 | .associativity = 4, .line_size = 32, }, |
89 | [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, |
90 | .associativity = 4, .line_size = 64, }, |
91 | [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB, |
92 | .associativity = 6, .line_size = 64, }, |
93 | [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, |
94 | .associativity = 2, .line_size = 64, }, |
95 | [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, |
96 | .associativity = 8, .line_size = 64, }, |
97 | /* lines per sector is not supported cpuid2_cache_descriptor(), |
98 | * so descriptors 0x22, 0x23 are not included |
99 | */ |
100 | [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, |
101 | .associativity = 16, .line_size = 64, }, |
102 | /* lines per sector is not supported cpuid2_cache_descriptor(), |
103 | * so descriptors 0x25, 0x20 are not included |
104 | */ |
105 | [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB, |
106 | .associativity = 8, .line_size = 64, }, |
107 | [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB, |
108 | .associativity = 8, .line_size = 64, }, |
109 | [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, |
110 | .associativity = 4, .line_size = 32, }, |
111 | [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, |
112 | .associativity = 4, .line_size = 32, }, |
113 | [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, |
114 | .associativity = 4, .line_size = 32, }, |
115 | [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, |
116 | .associativity = 4, .line_size = 32, }, |
117 | [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, |
118 | .associativity = 4, .line_size = 32, }, |
119 | [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, |
120 | .associativity = 4, .line_size = 64, }, |
121 | [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, |
122 | .associativity = 8, .line_size = 64, }, |
123 | [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB, |
124 | .associativity = 12, .line_size = 64, }, |
125 | /* Descriptor 0x49 depends on CPU family/model, so it is not included */ |
126 | [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, |
127 | .associativity = 12, .line_size = 64, }, |
128 | [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, |
129 | .associativity = 16, .line_size = 64, }, |
130 | [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB, |
131 | .associativity = 12, .line_size = 64, }, |
132 | [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB, |
133 | .associativity = 16, .line_size = 64, }, |
134 | [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB, |
135 | .associativity = 24, .line_size = 64, }, |
136 | [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, |
137 | .associativity = 8, .line_size = 64, }, |
138 | [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB, |
139 | .associativity = 4, .line_size = 64, }, |
140 | [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, |
141 | .associativity = 4, .line_size = 64, }, |
142 | [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB, |
143 | .associativity = 4, .line_size = 64, }, |
144 | [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, |
145 | .associativity = 4, .line_size = 64, }, |
146 | /* lines per sector is not supported cpuid2_cache_descriptor(), |
147 | * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included. |
148 | */ |
149 | [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, |
150 | .associativity = 8, .line_size = 64, }, |
151 | [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, |
152 | .associativity = 2, .line_size = 64, }, |
153 | [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, |
154 | .associativity = 8, .line_size = 64, }, |
155 | [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, |
156 | .associativity = 8, .line_size = 32, }, |
157 | [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, |
158 | .associativity = 8, .line_size = 32, }, |
159 | [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, |
160 | .associativity = 8, .line_size = 32, }, |
161 | [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, |
162 | .associativity = 8, .line_size = 32, }, |
163 | [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, |
164 | .associativity = 4, .line_size = 64, }, |
165 | [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, |
166 | .associativity = 8, .line_size = 64, }, |
167 | [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB, |
168 | .associativity = 4, .line_size = 64, }, |
169 | [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB, |
170 | .associativity = 4, .line_size = 64, }, |
171 | [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, |
172 | .associativity = 4, .line_size = 64, }, |
173 | [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB, |
174 | .associativity = 8, .line_size = 64, }, |
175 | [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, |
176 | .associativity = 8, .line_size = 64, }, |
177 | [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, |
178 | .associativity = 8, .line_size = 64, }, |
179 | [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB, |
180 | .associativity = 12, .line_size = 64, }, |
181 | [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB, |
182 | .associativity = 12, .line_size = 64, }, |
183 | [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, |
184 | .associativity = 12, .line_size = 64, }, |
185 | [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, |
186 | .associativity = 16, .line_size = 64, }, |
187 | [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, |
188 | .associativity = 16, .line_size = 64, }, |
189 | [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, |
190 | .associativity = 16, .line_size = 64, }, |
191 | [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB, |
192 | .associativity = 24, .line_size = 64, }, |
193 | [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB, |
194 | .associativity = 24, .line_size = 64, }, |
195 | [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB, |
196 | .associativity = 24, .line_size = 64, }, |
197 | }; |
198 | |
199 | /* |
200 | * "CPUID leaf 2 does not report cache descriptor information, |
201 | * use CPUID leaf 4 to query cache parameters" |
202 | */ |
203 | #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF |
204 | |
205 | /* |
206 | * Return a CPUID 2 cache descriptor for a given cache. |
207 | * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE |
208 | */ |
209 | static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache) |
210 | { |
211 | int i; |
212 | |
213 | assert(cache->size > 0); |
214 | assert(cache->level > 0); |
215 | assert(cache->line_size > 0); |
216 | assert(cache->associativity > 0); |
217 | for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) { |
218 | struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i]; |
219 | if (d->level == cache->level && d->type == cache->type && |
220 | d->size == cache->size && d->line_size == cache->line_size && |
221 | d->associativity == cache->associativity) { |
222 | return i; |
223 | } |
224 | } |
225 | |
226 | return CACHE_DESCRIPTOR_UNAVAILABLE; |
227 | } |
228 | |
/* CPUID Leaf 4 constants: */

/* EAX, bits 4:0 — cache type */
#define CACHE_TYPE_D  1
#define CACHE_TYPE_I  2
#define CACHE_TYPE_UNIFIED   3

/*
 * EAX, bits 7:5 — cache level.  The argument is parenthesized so that
 * expression arguments (e.g. a masked value) expand with the intended
 * precedence.
 */
#define CACHE_LEVEL(l)        ((l) << 5)

#define CACHE_SELF_INIT_LEVEL (1 << 8)

/* EDX: */
#define CACHE_NO_INVD_SHARING (1 << 0)
#define CACHE_INCLUSIVE       (1 << 1)
#define CACHE_COMPLEX_IDX     (1 << 2)

/* Encode CacheType for CPUID[4].EAX */
#define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
                       ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
                       ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
                       0 /* Invalid value */)
250 | |
251 | |
252 | /* Encode cache info for CPUID[4] */ |
253 | static void encode_cache_cpuid4(CPUCacheInfo *cache, |
254 | int num_apic_ids, int num_cores, |
255 | uint32_t *eax, uint32_t *ebx, |
256 | uint32_t *ecx, uint32_t *edx) |
257 | { |
258 | assert(cache->size == cache->line_size * cache->associativity * |
259 | cache->partitions * cache->sets); |
260 | |
261 | assert(num_apic_ids > 0); |
262 | *eax = CACHE_TYPE(cache->type) | |
263 | CACHE_LEVEL(cache->level) | |
264 | (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) | |
265 | ((num_cores - 1) << 26) | |
266 | ((num_apic_ids - 1) << 14); |
267 | |
268 | assert(cache->line_size > 0); |
269 | assert(cache->partitions > 0); |
270 | assert(cache->associativity > 0); |
271 | /* We don't implement fully-associative caches */ |
272 | assert(cache->associativity < cache->sets); |
273 | *ebx = (cache->line_size - 1) | |
274 | ((cache->partitions - 1) << 12) | |
275 | ((cache->associativity - 1) << 22); |
276 | |
277 | assert(cache->sets > 0); |
278 | *ecx = cache->sets - 1; |
279 | |
280 | *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) | |
281 | (cache->inclusive ? CACHE_INCLUSIVE : 0) | |
282 | (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0); |
283 | } |
284 | |
285 | /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */ |
286 | static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache) |
287 | { |
288 | assert(cache->size % 1024 == 0); |
289 | assert(cache->lines_per_tag > 0); |
290 | assert(cache->associativity > 0); |
291 | assert(cache->line_size > 0); |
292 | return ((cache->size / 1024) << 24) | (cache->associativity << 16) | |
293 | (cache->lines_per_tag << 8) | (cache->line_size); |
294 | } |
295 | |
#define ASSOC_FULL 0xFF

/*
 * AMD associativity encoding used on CPUID Leaf 0x80000006:
 * only a fixed set of way counts can be represented; anything else maps
 * to 0 (invalid).  Every use of the argument is parenthesized so that
 * expression arguments expand with the intended precedence (the argument
 * is still evaluated multiple times — pass side-effect-free expressions).
 */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a) : \
                          (a) ==   2 ? 0x2 : \
                          (a) ==   4 ? 0x4 : \
                          (a) ==   8 ? 0x6 : \
                          (a) ==  16 ? 0x8 : \
                          (a) ==  32 ? 0xA : \
                          (a) ==  48 ? 0xB : \
                          (a) ==  64 ? 0xC : \
                          (a) ==  96 ? 0xD : \
                          (a) == 128 ? 0xE : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
311 | |
312 | /* |
313 | * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX |
314 | * @l3 can be NULL. |
315 | */ |
316 | static void encode_cache_cpuid80000006(CPUCacheInfo *l2, |
317 | CPUCacheInfo *l3, |
318 | uint32_t *ecx, uint32_t *edx) |
319 | { |
320 | assert(l2->size % 1024 == 0); |
321 | assert(l2->associativity > 0); |
322 | assert(l2->lines_per_tag > 0); |
323 | assert(l2->line_size > 0); |
324 | *ecx = ((l2->size / 1024) << 16) | |
325 | (AMD_ENC_ASSOC(l2->associativity) << 12) | |
326 | (l2->lines_per_tag << 8) | (l2->line_size); |
327 | |
328 | if (l3) { |
329 | assert(l3->size % (512 * 1024) == 0); |
330 | assert(l3->associativity > 0); |
331 | assert(l3->lines_per_tag > 0); |
332 | assert(l3->line_size > 0); |
333 | *edx = ((l3->size / (512 * 1024)) << 18) | |
334 | (AMD_ENC_ASSOC(l3->associativity) << 12) | |
335 | (l3->lines_per_tag << 8) | (l3->line_size); |
336 | } else { |
337 | *edx = 0; |
338 | } |
339 | } |
340 | |
341 | /* |
342 | * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E |
343 | * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3. |
344 | * Define the constants to build the cpu topology. Right now, TOPOEXT |
345 | * feature is enabled only on EPYC. So, these constants are based on |
346 | * EPYC supported configurations. We may need to handle the cases if |
347 | * these values change in future. |
348 | */ |
349 | /* Maximum core complexes in a node */ |
350 | #define MAX_CCX 2 |
351 | /* Maximum cores in a core complex */ |
352 | #define MAX_CORES_IN_CCX 4 |
353 | /* Maximum cores in a node */ |
354 | #define MAX_CORES_IN_NODE 8 |
355 | /* Maximum nodes in a socket */ |
356 | #define MAX_NODES_PER_SOCKET 4 |
357 | |
358 | /* |
359 | * Figure out the number of nodes required to build this config. |
360 | * Max cores in a node is 8 |
361 | */ |
362 | static int nodes_in_socket(int nr_cores) |
363 | { |
364 | int nodes; |
365 | |
366 | nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE); |
367 | |
368 | /* Hardware does not support config with 3 nodes, return 4 in that case */ |
369 | return (nodes == 3) ? 4 : nodes; |
370 | } |
371 | |
372 | /* |
373 | * Decide the number of cores in a core complex with the given nr_cores using |
374 | * following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and |
375 | * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible |
376 | * L3 cache is shared across all cores in a core complex. So, this will also |
377 | * tell us how many cores are sharing the L3 cache. |
378 | */ |
379 | static int cores_in_core_complex(int nr_cores) |
380 | { |
381 | int nodes; |
382 | |
383 | /* Check if we can fit all the cores in one core complex */ |
384 | if (nr_cores <= MAX_CORES_IN_CCX) { |
385 | return nr_cores; |
386 | } |
387 | /* Get the number of nodes required to build this config */ |
388 | nodes = nodes_in_socket(nr_cores); |
389 | |
390 | /* |
391 | * Divide the cores accros all the core complexes |
392 | * Return rounded up value |
393 | */ |
394 | return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX); |
395 | } |
396 | |
/*
 * Encode cache info for CPUID[8000001D] (AMD cache topology leaf).
 * EAX: type/level/self-init plus number of sharing threads - 1 (bits 25:14);
 * EBX: line size / partitions / ways (each minus one);
 * ECX: sets - 1; EDX: behavior flags.
 */
static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
                                uint32_t *eax, uint32_t *ebx,
                                uint32_t *ecx, uint32_t *edx)
{
    uint32_t l3_cores;
    /* Geometry must multiply out to the declared total size */
    assert(cache->size == cache->line_size * cache->associativity *
                          cache->partitions * cache->sets);

    *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
               (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);

    /* L3 is shared among multiple cores */
    if (cache->level == 3) {
        /* All threads of all cores in one core complex share the L3 */
        l3_cores = cores_in_core_complex(cs->nr_cores);
        *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
    } else {
        /* L1/L2 are per-core: only the SMT threads of one core share them */
        *eax |= ((cs->nr_threads - 1) << 14);
    }

    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}
433 | |
/*
 * Data structure to hold the configuration info for a given core index.
 * Filled in by build_core_topology() below.
 */
struct core_topology {
    /* core complex id of the current core index */
    int ccx_id;
    /*
     * Adjusted core index for this core in the topology
     * This can be 0,1,2,3 with max 4 cores in a core complex
     */
    int core_id;
    /* Node id for this core index */
    int node_id;
    /* Number of nodes in this config */
    int num_nodes;
};
448 | |
449 | /* |
450 | * Build the configuration closely match the EPYC hardware. Using the EPYC |
451 | * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE) |
452 | * right now. This could change in future. |
453 | * nr_cores : Total number of cores in the config |
454 | * core_id : Core index of the current CPU |
455 | * topo : Data structure to hold all the config info for this core index |
456 | */ |
457 | static void build_core_topology(int nr_cores, int core_id, |
458 | struct core_topology *topo) |
459 | { |
460 | int nodes, cores_in_ccx; |
461 | |
462 | /* First get the number of nodes required */ |
463 | nodes = nodes_in_socket(nr_cores); |
464 | |
465 | cores_in_ccx = cores_in_core_complex(nr_cores); |
466 | |
467 | topo->node_id = core_id / (cores_in_ccx * MAX_CCX); |
468 | topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx; |
469 | topo->core_id = core_id % cores_in_ccx; |
470 | topo->num_nodes = nodes; |
471 | } |
472 | |
/* Encode topology info (APIC id, core id, node id) for CPUID[8000001E] */
static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
                                       uint32_t *eax, uint32_t *ebx,
                                       uint32_t *ecx, uint32_t *edx)
{
    struct core_topology topo = {0};
    unsigned long nodes;
    int shift;

    build_core_topology(cs->nr_cores, cpu->core_id, &topo);
    *eax = cpu->apic_id;
    /*
     * CPUID_Fn8000001E_EBX
     * 31:16 Reserved
     * 15:8  Threads per core (The number of threads per core is
     *       Threads per core + 1)
     *  7:0  Core id (see bit decoding below)
     *       SMT:
     *           4:3 node id
     *             2 Core complex id
     *           1:0 Core id
     *       Non SMT:
     *           5:4 node id
     *             3 Core complex id
     *           1:0 Core id
     */
    if (cs->nr_threads - 1) {
        *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
                (topo.ccx_id << 2) | topo.core_id;
    } else {
        *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
    }
    /*
     * CPUID_Fn8000001E_ECX
     * 31:11 Reserved
     * 10:8  Nodes per processor (Nodes per processor is number of nodes + 1)
     *  7:0  Node id (see bit decoding below)
     *         2  Socket id
     *       1:0  Node id
     */
    if (topo.num_nodes <= 4) {
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
                topo.node_id;
    } else {
        /*
         * Node id fix up. Actual hardware supports up to 4 nodes. But with
         * more than 32 cores, we may end up with more than 4 nodes.
         * Node id is a combination of socket id and node id. Only requirement
         * here is that this number should be unique across the system.
         * Shift the socket id to accommodate more nodes. We don't expect both
         * socket id and node id to be big numbers at the same time. This is
         * not an ideal config but we need to support it. Max nodes we can
         * have is 32 (255/8) with 8 cores per node and 255 max cores. We only
         * need 5 bits for nodes. Find the left most set bit to represent the
         * total number of nodes. find_last_bit returns last set bit(0 based).
         * Left shift(+1) the socket id to represent all the nodes.
         */
        nodes = topo.num_nodes - 1;
        shift = find_last_bit(&nodes, 8);
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
                topo.node_id;
    }
    *edx = 0;
}
537 | |
538 | /* |
539 | * Definitions of the hardcoded cache entries we expose: |
540 | * These are legacy cache values. If there is a need to change any |
541 | * of these values please use builtin_x86_defs |
542 | */ |
543 | |
/* L1 data cache: legacy default reported via CPUID leaves 2 and 4 */
static CPUCacheInfo legacy_l1d_cache = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,            /* 64 sets * 8 ways * 64 B lines = 32 KiB */
    .partitions = 1,
    .no_invd_sharing = true,
};
556 | |
/* L1 data cache, AMD variant (CPUID leaf 0x80000005) */
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1d_cache_amd = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,           /* 512 sets * 2 ways * 64 B lines = 64 KiB */
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};
570 | |
/* L1 instruction cache: legacy default reported via CPUID leaves 2 and 4 */
static CPUCacheInfo legacy_l1i_cache = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,            /* 64 sets * 8 ways * 64 B lines = 32 KiB */
    .partitions = 1,
    .no_invd_sharing = true,
};
583 | |
/* L1 instruction cache, AMD variant (CPUID leaf 0x80000005) */
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1i_cache_amd = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,           /* 512 sets * 2 ways * 64 B lines = 64 KiB */
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};
597 | |
/* Level 2 unified cache: legacy default reported via CPUID leaf 4 */
static CPUCacheInfo legacy_l2_cache = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 4 * MiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 16,
    .sets = 4096,          /* 4096 sets * 16 ways * 64 B lines = 4 MiB */
    .partitions = 1,
    .no_invd_sharing = true,
};
610 | |
/*
 * L2 cache shape used only for the CPUID leaf 2 descriptor lookup;
 * only the fields that cpuid2_cache_descriptor() matches on are set.
 */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
static CPUCacheInfo legacy_l2_cache_cpuid2 = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 2 * MiB,
    .line_size = 64,
    .associativity = 8,
};
619 | |
620 | |
/* L2 unified cache, AMD variant (CPUID leaf 0x80000006) */
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l2_cache_amd = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 512 * KiB,
    .line_size = 64,
    .lines_per_tag = 1,
    .associativity = 16,
    .sets = 512,           /* 512 sets * 16 ways * 64 B lines = 512 KiB */
    .partitions = 1,
};
632 | |
/* Level 3 unified cache: legacy default (used e.g. for CPUID[8000001D]) */
static CPUCacheInfo legacy_l3_cache = {
    .type = UNIFIED_CACHE,
    .level = 3,
    .size = 16 * MiB,
    .line_size = 64,
    .associativity = 16,
    .sets = 16384,         /* 16384 sets * 16 ways * 64 B lines = 16 MiB */
    .partitions = 1,
    .lines_per_tag = 1,
    .self_init = true,
    .inclusive = true,
    .complex_indexing = true,
};
647 | |
648 | /* TLB definitions: */ |
649 | |
650 | #define L1_DTLB_2M_ASSOC 1 |
651 | #define L1_DTLB_2M_ENTRIES 255 |
652 | #define L1_DTLB_4K_ASSOC 1 |
653 | #define L1_DTLB_4K_ENTRIES 255 |
654 | |
655 | #define L1_ITLB_2M_ASSOC 1 |
656 | #define L1_ITLB_2M_ENTRIES 255 |
657 | #define L1_ITLB_4K_ASSOC 1 |
658 | #define L1_ITLB_4K_ENTRIES 255 |
659 | |
660 | #define L2_DTLB_2M_ASSOC 0 /* disabled */ |
661 | #define L2_DTLB_2M_ENTRIES 0 /* disabled */ |
662 | #define L2_DTLB_4K_ASSOC 4 |
663 | #define L2_DTLB_4K_ENTRIES 512 |
664 | |
665 | #define L2_ITLB_2M_ASSOC 0 /* disabled */ |
666 | #define L2_ITLB_2M_ENTRIES 0 /* disabled */ |
667 | #define L2_ITLB_4K_ASSOC 4 |
668 | #define L2_ITLB_4K_ENTRIES 512 |
669 | |
/* CPUID Leaf 0x14 constants: */
#define INTEL_PT_MAX_SUBLEAF     0x1
/*
 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
 *          MSR can be accessed;
 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
 *          of Intel PT MSRs across warm reset;
 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
 */
#define INTEL_PT_MINIMAL_EBX     0xf
/*
 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
 *          IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
 *          accessed;
 * bit[01]: ToPA tables can hold any number of output entries, up to the
 *          maximum allowed by the MaskOrTableOffset field of
 *          IA32_RTIT_OUTPUT_MASK_PTRS;
 * bit[02]: Support Single-Range Output scheme;
 */
#define INTEL_PT_MINIMAL_ECX     0x7
/*
 * generated packets which contain IP payloads have LIP values.
 * Use an unsigned constant: left-shifting a signed 1 into bit 31 is
 * undefined behavior in C.
 */
#define INTEL_PT_IP_LIP          (1u << 31)
#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
#define INTEL_PT_MTC_BITMAP      (0x0249 << 16) /* Support ART(0,3,6,9) */
#define INTEL_PT_CYCLE_BITMAP    0x1fff         /* Support 0,2^(0~11) */
#define INTEL_PT_PSB_BITMAP      (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
698 | |
699 | static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, |
700 | uint32_t vendor2, uint32_t vendor3) |
701 | { |
702 | int i; |
703 | for (i = 0; i < 4; i++) { |
704 | dst[i] = vendor1 >> (8 * i); |
705 | dst[i + 4] = vendor2 >> (8 * i); |
706 | dst[i + 8] = vendor3 >> (8 * i); |
707 | } |
708 | dst[CPUID_VENDOR_SZ] = '\0'; |
709 | } |
710 | |
711 | #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE) |
712 | #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \ |
713 | CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC) |
714 | #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \ |
715 | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ |
716 | CPUID_PSE36 | CPUID_FXSR) |
717 | #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE) |
718 | #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \ |
719 | CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \ |
720 | CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \ |
721 | CPUID_PAE | CPUID_SEP | CPUID_APIC) |
722 | |
723 | #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \ |
724 | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \ |
725 | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ |
726 | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \ |
727 | CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE) |
728 | /* partly implemented: |
729 | CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */ |
730 | /* missing: |
731 | CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */ |
732 | #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \ |
733 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \ |
734 | CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \ |
735 | CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \ |
736 | CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \ |
737 | CPUID_EXT_RDRAND) |
738 | /* missing: |
739 | CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX, |
740 | CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA, |
741 | CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA, |
742 | CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX, |
743 | CPUID_EXT_F16C */ |
744 | |
745 | #ifdef TARGET_X86_64 |
746 | #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM) |
747 | #else |
748 | #define TCG_EXT2_X86_64_FEATURES 0 |
749 | #endif |
750 | |
751 | #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \ |
752 | CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \ |
753 | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \ |
754 | TCG_EXT2_X86_64_FEATURES) |
755 | #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \ |
756 | CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A) |
757 | #define TCG_EXT4_FEATURES 0 |
758 | #define TCG_SVM_FEATURES CPUID_SVM_NPT |
759 | #define TCG_KVM_FEATURES 0 |
760 | #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \ |
761 | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \ |
762 | CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \ |
763 | CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \ |
764 | CPUID_7_0_EBX_ERMS) |
765 | /* missing: |
766 | CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2, |
767 | CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM, |
768 | CPUID_7_0_EBX_RDSEED */ |
769 | #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \ |
770 | /* CPUID_7_0_ECX_OSPKE is dynamic */ \ |
771 | CPUID_7_0_ECX_LA57) |
772 | #define TCG_7_0_EDX_FEATURES 0 |
773 | #define TCG_7_1_EAX_FEATURES 0 |
774 | #define TCG_APM_FEATURES 0 |
775 | #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT |
776 | #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1) |
777 | /* missing: |
778 | CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */ |
779 | |
/* Whether a feature word's bits are sourced from a CPUID leaf or an MSR */
typedef enum FeatureWordType {
   CPUID_FEATURE_WORD,    /* bits come from a CPUID output register */
   MSR_FEATURE_WORD,      /* bits come from a feature-enumeration MSR */
} FeatureWordType;
784 | |
/*
 * Metadata for one 32-bit feature word: where its bits come from
 * (CPUID leaf or MSR), the name of each bit, and capability/migration
 * masks for the various accelerators.
 */
typedef struct FeatureWordInfo {
    FeatureWordType type;
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];    /* one name per bit; NULL = unnamed/reserved */
    union {
        /* If type==CPUID_FEATURE_WORD */
        struct {
            uint32_t eax;   /* Input EAX for CPUID */
            bool needs_ecx; /* CPUID instruction uses ECX as input */
            uint32_t ecx;   /* Input ECX value for CPUID */
            int reg;        /* output register (R_* constant) */
        } cpuid;
        /* If type==MSR_FEATURE_WORD */
        struct {
            uint32_t index;                  /* MSR index */
            struct { /*CPUID that enumerate this MSR*/
                FeatureWord cpuid_class;
                uint32_t cpuid_flag;
            } cpuid_dep;
        } msr;
    };
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
    /* Features that shouldn't be auto-enabled by "-cpu host" */
    uint32_t no_autoenable_flags;
} FeatureWordInfo;
816 | |
/*
 * Descriptions of every known feature word, indexed by FeatureWord.
 * Each entry lists the per-bit flag names and where the word comes from
 * (CPUID leaf + output register, or MSR index with its CPUID dependency).
 */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid = {.eax = 1, .reg = R_EDX, },
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
            "ds-cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1",
            "sse4.2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid = { .eax = 1, .reg = R_ECX, },
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
            NULL, "lm", "3dnowext", "3dnow",
        },
        .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "lahf-lm", "cmp-legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid-msr",
            NULL, "tbm", "topoext", "perfctr-core",
            "perfctr-nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
        .tcg_features = TCG_EXT3_FEATURES,
        /*
         * TOPOEXT is always allowed but can't be enabled blindly by
         * "-cpu host", as it requires consistent cache topology info
         * to be provided so it doesn't confuse guests.
         */
        .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
    },
    [FEAT_C000_0001_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            /*
             * NOTE(review): bits 0 and 3 both carry the name "kvmclock" —
             * presumably the two kvmclock MSR interfaces; confirm against
             * the KVM CPUID documentation.
             */
            "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
            "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
            NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
            "kvm-poll-control", "kvm-pv-sched-yield", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_KVM_HINTS] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "kvm-hint-dedicated", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
        .tcg_features = TCG_KVM_FEATURES,
        /*
         * KVM hints aren't auto-enabled by -cpu host, they need to be
         * explicitly enabled in the command-line.
         */
        .no_autoenable_flags = ~0U,
    },
    /*
     * .feat_names are commented out for Hyper-V enlightenments because we
     * don't want to have two different ways for enabling them on QEMU command
     * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) require
     * enabling several feature bits simultaneously, exposing these bits
     * individually may just confuse guests.
     */
    [FEAT_HYPERV_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x40000003, .reg = R_EAX, },
    },
    [FEAT_HYPERV_EBX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x40000003, .reg = R_EBX, },
    },
    [FEAT_HYPERV_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x40000003, .reg = R_EDX, },
    },
    [FEAT_HV_RECOMM_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* hv_recommend_pv_as_switch */,
            NULL /* hv_recommend_pv_tlbflush_local */,
            NULL /* hv_recommend_pv_tlbflush_remote */,
            NULL /* hv_recommend_msr_apic_access */,
            NULL /* hv_recommend_msr_reset */,
            NULL /* hv_recommend_relaxed_timing */,
            NULL /* hv_recommend_dma_remapping */,
            NULL /* hv_recommend_int_remapping */,
            NULL /* hv_recommend_x2apic_msrs */,
            NULL /* hv_recommend_autoeoi_deprecation */,
            NULL /* hv_recommend_pv_ipi */,
            NULL /* hv_recommend_ex_hypercalls */,
            NULL /* hv_hypervisor_is_nested */,
            NULL /* hv_recommend_int_mbec */,
            NULL /* hv_recommend_evmcs */,
            NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x40000004, .reg = R_EAX, },
    },
    [FEAT_HV_NESTED_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .cpuid = { .eax = 0x4000000A, .reg = R_EAX, },
    },
    [FEAT_SVM] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "npt", "lbrv", "svm-lock", "nrip-save",
            "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
            NULL, NULL, "pause-filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "fsgsbase", "tsc-adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", "intel-pt", "avx512pf", "avx512er",
            "avx512cd", "sha-ni", "avx512bw", "avx512vl",
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EBX,
        },
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            NULL /* ospke */, NULL, "avx512vbmi2", NULL,
            "gfni", "vaes", "vpclmulqdq", "avx512vnni",
            "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
            "la57", NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, "cldemote", NULL, "movdiri",
            "movdir64b", NULL, NULL, NULL,
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 0,
            .reg = R_ECX,
        },
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_7_0_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
            NULL, NULL, NULL, NULL,
            NULL, NULL, "md-clear", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL /* pconfig */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, "spec-ctrl", "stibp",
            NULL, "arch-capabilities", "core-capability", "ssbd",
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EDX,
        },
        .tcg_features = TCG_7_0_EDX_FEATURES,
    },
    [FEAT_7_1_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, "avx512-bf16", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 1,
            .reg = R_EAX,
        },
        .tcg_features = TCG_7_1_EAX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_8000_0008_EBX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, "wbnoinvd", NULL, NULL,
            "ibpb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
        .tcg_features = 0,
        .unmigratable_flags = 0,
    },
    [FEAT_XSAVE] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = {
            .eax = 0xd,
            .needs_ecx = true, .ecx = 1,
            .reg = R_EAX,
        },
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 6, .reg = R_EAX, },
        .tcg_features = TCG_6_EAX_FEATURES,
    },
    [FEAT_XSAVE_COMP_LO] = {
        .type = CPUID_FEATURE_WORD,
        .cpuid = {
            .eax = 0xD,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EAX,
        },
        .tcg_features = ~0U,
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
            XSTATE_PKRU_MASK,
    },
    [FEAT_XSAVE_COMP_HI] = {
        .type = CPUID_FEATURE_WORD,
        .cpuid = {
            .eax = 0xD,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EDX,
        },
        .tcg_features = ~0U,
    },
    /*Below are MSR exposed features*/
    [FEAT_ARCH_CAPABILITIES] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
            "ssb-no", "mds-no", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_ARCH_CAPABILITIES,
            .cpuid_dep = {
                FEAT_7_0_EDX,
                CPUID_7_0_EDX_ARCH_CAPABILITIES
            }
        },
    },
    [FEAT_CORE_CAPABILITY] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, "split-lock-detect", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_CORE_CAPABILITY,
            .cpuid_dep = {
                FEAT_7_0_EDX,
                CPUID_7_0_EDX_CORE_CAPABILITY,
            },
        },
    },
};
1248 | |
/* Mapping from an R_* register index to its name and QAPI enum value. */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;
1255 | |
/* Expands to a designated initializer for the R_<reg> table slot. */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
/* Name/QAPI-enum info for each 32-bit GPR, indexed by R_* constants. */
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
1269 | |
/* One XSAVE state component: its enabling feature bit and its layout. */
typedef struct ExtSaveArea {
    uint32_t feature, bits;  /* feature word and flag(s) that enable it */
    uint32_t offset, size;   /* location within the XSAVE area, in bytes */
} ExtSaveArea;
1274 | |
/*
 * XSAVE state components indexed by XSTATE_*_BIT: the CPUID feature that
 * enables each component and its offset/size inside X86XSaveArea.
 */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
1319 | |
1320 | static uint32_t xsave_area_size(uint64_t mask) |
1321 | { |
1322 | int i; |
1323 | uint64_t ret = 0; |
1324 | |
1325 | for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { |
1326 | const ExtSaveArea *esa = &x86_ext_save_areas[i]; |
1327 | if ((mask >> i) & 1) { |
1328 | ret = MAX(ret, esa->offset + esa->size); |
1329 | } |
1330 | } |
1331 | return ret; |
1332 | } |
1333 | |
/* True when the accelerator (KVM or HVF) exposes the host's CPUID. */
static inline bool accel_uses_host_cpuid(void)
{
    if (kvm_enabled()) {
        return true;
    }
    return hvf_enabled();
}
1338 | |
1339 | static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu) |
1340 | { |
1341 | return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 | |
1342 | cpu->env.features[FEAT_XSAVE_COMP_LO]; |
1343 | } |
1344 | |
1345 | const char *get_register_name_32(unsigned int reg) |
1346 | { |
1347 | if (reg >= CPU_NB_REGS32) { |
1348 | return NULL; |
1349 | } |
1350 | return x86_reg_info_32[reg].name; |
1351 | } |
1352 | |
1353 | /* |
1354 | * Returns the set of feature flags that are supported and migratable by |
1355 | * QEMU, for a given FeatureWord. |
1356 | */ |
1357 | static uint32_t x86_cpu_get_migratable_flags(FeatureWord w) |
1358 | { |
1359 | FeatureWordInfo *wi = &feature_word_info[w]; |
1360 | uint32_t r = 0; |
1361 | int i; |
1362 | |
1363 | for (i = 0; i < 32; i++) { |
1364 | uint32_t f = 1U << i; |
1365 | |
1366 | /* If the feature name is known, it is implicitly considered migratable, |
1367 | * unless it is explicitly set in unmigratable_flags */ |
1368 | if ((wi->migratable_flags & f) || |
1369 | (wi->feat_names[i] && !(wi->unmigratable_flags & f))) { |
1370 | r |= f; |
1371 | } |
1372 | } |
1373 | return r; |
1374 | } |
1375 | |
/*
 * Execute the CPUID instruction on the host for leaf @function and
 * subleaf @count, storing the resulting EAX/EBX/ECX/EDX into the
 * corresponding non-NULL output pointers.  Aborts on non-x86 hosts.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a" (vec[0]), "=b" (vec[1]),
                   "=c" (vec[2]), "=d" (vec[3])
                 : "0" (function), "c" (count) : "cc");
#elif defined(__i386__)
    /*
     * On 32-bit hosts, pusha/popa save and restore all GPRs and results
     * are stored through the pointer held in ESI ("S" constraint), so no
     * register outputs need to be declared -- in particular EBX, which
     * the compiler may reserve (e.g. as the PIC base register).
     */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a" (function), "c" (count), "S" (vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
1409 | |
1410 | void host_vendor_fms(char *vendor, int *family, int *model, int *stepping) |
1411 | { |
1412 | uint32_t eax, ebx, ecx, edx; |
1413 | |
1414 | host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx); |
1415 | x86_cpu_vendor_words2str(vendor, ebx, edx, ecx); |
1416 | |
1417 | host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx); |
1418 | if (family) { |
1419 | *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF); |
1420 | } |
1421 | if (model) { |
1422 | *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12); |
1423 | } |
1424 | if (stepping) { |
1425 | *stepping = eax & 0x0F; |
1426 | } |
1427 | } |
1428 | |
1429 | /* CPU class name definitions: */ |
1430 | |
/*
 * Build the QOM type name for CPU model @model_name.
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
1438 | |
1439 | static ObjectClass *x86_cpu_class_by_name(const char *cpu_model) |
1440 | { |
1441 | ObjectClass *oc; |
1442 | char *typename = x86_cpu_type_name(cpu_model); |
1443 | oc = object_class_by_name(typename); |
1444 | g_free(typename); |
1445 | return oc; |
1446 | } |
1447 | |
1448 | static char *x86_cpu_class_get_model_name(X86CPUClass *cc) |
1449 | { |
1450 | const char *class_name = object_class_get_name(OBJECT_CLASS(cc)); |
1451 | assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX)); |
1452 | return g_strndup(class_name, |
1453 | strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX)); |
1454 | } |
1455 | |
/* A QOM property name/value pair (both as strings). */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
1459 | |
/* One registered version of a CPU model. */
typedef struct X86CPUVersionDefinition {
    X86CPUVersion version;
    /* optional extra model name registered as an alias of this version */
    const char *alias;
    /* property overrides applied for this version */
    PropValue *props;
} X86CPUVersionDefinition;
1465 | |
/* Base definition for a CPU model */
typedef struct X86CPUDefinition {
    const char *name;
    uint32_t level;   /* maximum basic CPUID leaf */
    uint32_t xlevel;  /* maximum extended CPUID leaf (0x8000_xxxx) */
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;  /* initial feature-word values */
    const char *model_id;       /* model-id string reported via CPUID */
    CPUCaches *cache_info;      /* optional cache topology override */
    /*
     * Definitions for alternative versions of CPU model.
     * List is terminated by item with version == 0.
     * If NULL, version 1 will be registered automatically.
     */
    const X86CPUVersionDefinition *versions;
} X86CPUDefinition;
1486 | |
/* Reference to a specific CPU model version */
struct X86CPUModel {
    /* Base CPU definition */
    X86CPUDefinition *cpudef;
    /* CPU model version */
    X86CPUVersion version;
    /*
     * If true, this is an alias CPU model.
     * This matters only for "-cpu help" and query-cpu-definitions
     */
    bool is_alias;
};
1499 | |
1500 | /* Get full model name for CPU version */ |
1501 | static char *x86_cpu_versioned_model_name(X86CPUDefinition *cpudef, |
1502 | X86CPUVersion version) |
1503 | { |
1504 | assert(version > 0); |
1505 | return g_strdup_printf("%s-v%d" , cpudef->name, (int)version); |
1506 | } |
1507 | |
1508 | static const X86CPUVersionDefinition *x86_cpu_def_get_versions(X86CPUDefinition *def) |
1509 | { |
1510 | /* When X86CPUDefinition::versions is NULL, we register only v1 */ |
1511 | static const X86CPUVersionDefinition default_version_list[] = { |
1512 | { 1 }, |
1513 | { /* end of list */ } |
1514 | }; |
1515 | |
1516 | return def->versions ?: default_version_list; |
1517 | } |
1518 | |
/* Cache topology advertised by the EPYC CPU model. */
static CPUCaches epyc_cache_info = {
    .l1d_cache = &(CPUCacheInfo) {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l1i_cache = &(CPUCacheInfo) {
        .type = INSTRUCTION_CACHE,
        .level = 1,
        .size = 64 * KiB,
        .line_size = 64,
        .associativity = 4,
        .partitions = 1,
        .sets = 256,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 8 * MiB,
        .line_size = 64,
        .associativity = 16,
        .partitions = 1,
        .sets = 8192,
        .lines_per_tag = 1,
        .self_init = true,
        .inclusive = true,
        .complex_indexing = true,
    },
};
1568 | |
1569 | static X86CPUDefinition builtin_x86_defs[] = { |
1570 | { |
1571 | .name = "qemu64" , |
1572 | .level = 0xd, |
1573 | .vendor = CPUID_VENDOR_AMD, |
1574 | .family = 6, |
1575 | .model = 6, |
1576 | .stepping = 3, |
1577 | .features[FEAT_1_EDX] = |
1578 | PPRO_FEATURES | |
1579 | CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | |
1580 | CPUID_PSE36, |
1581 | .features[FEAT_1_ECX] = |
1582 | CPUID_EXT_SSE3 | CPUID_EXT_CX16, |
1583 | .features[FEAT_8000_0001_EDX] = |
1584 | CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, |
1585 | .features[FEAT_8000_0001_ECX] = |
1586 | CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM, |
1587 | .xlevel = 0x8000000A, |
1588 | .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, |
1589 | }, |
1590 | { |
1591 | .name = "phenom" , |
1592 | .level = 5, |
1593 | .vendor = CPUID_VENDOR_AMD, |
1594 | .family = 16, |
1595 | .model = 2, |
1596 | .stepping = 3, |
1597 | /* Missing: CPUID_HT */ |
1598 | .features[FEAT_1_EDX] = |
1599 | PPRO_FEATURES | |
1600 | CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | |
1601 | CPUID_PSE36 | CPUID_VME, |
1602 | .features[FEAT_1_ECX] = |
1603 | CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 | |
1604 | CPUID_EXT_POPCNT, |
1605 | .features[FEAT_8000_0001_EDX] = |
1606 | CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | |
1607 | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | |
1608 | CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP, |
1609 | /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, |
1610 | CPUID_EXT3_CR8LEG, |
1611 | CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, |
1612 | CPUID_EXT3_OSVW, CPUID_EXT3_IBS */ |
1613 | .features[FEAT_8000_0001_ECX] = |
1614 | CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | |
1615 | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, |
1616 | /* Missing: CPUID_SVM_LBRV */ |
1617 | .features[FEAT_SVM] = |
1618 | CPUID_SVM_NPT, |
1619 | .xlevel = 0x8000001A, |
1620 | .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor" |
1621 | }, |
1622 | { |
1623 | .name = "core2duo" , |
1624 | .level = 10, |
1625 | .vendor = CPUID_VENDOR_INTEL, |
1626 | .family = 6, |
1627 | .model = 15, |
1628 | .stepping = 11, |
1629 | /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ |
1630 | .features[FEAT_1_EDX] = |
1631 | PPRO_FEATURES | |
1632 | CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | |
1633 | CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS, |
1634 | /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST, |
1635 | * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */ |
1636 | .features[FEAT_1_ECX] = |
1637 | CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | |
1638 | CPUID_EXT_CX16, |
1639 | .features[FEAT_8000_0001_EDX] = |
1640 | CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, |
1641 | .features[FEAT_8000_0001_ECX] = |
1642 | CPUID_EXT3_LAHF_LM, |
1643 | .xlevel = 0x80000008, |
1644 | .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz" , |
1645 | }, |
1646 | { |
1647 | .name = "kvm64" , |
1648 | .level = 0xd, |
1649 | .vendor = CPUID_VENDOR_INTEL, |
1650 | .family = 15, |
1651 | .model = 6, |
1652 | .stepping = 1, |
1653 | /* Missing: CPUID_HT */ |
1654 | .features[FEAT_1_EDX] = |
1655 | PPRO_FEATURES | CPUID_VME | |
1656 | CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | |
1657 | CPUID_PSE36, |
1658 | /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */ |
1659 | .features[FEAT_1_ECX] = |
1660 | CPUID_EXT_SSE3 | CPUID_EXT_CX16, |
1661 | /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ |
1662 | .features[FEAT_8000_0001_EDX] = |
1663 | CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, |
1664 | /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, |
1665 | CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, |
1666 | CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, |
1667 | CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */ |
1668 | .features[FEAT_8000_0001_ECX] = |
1669 | 0, |
1670 | .xlevel = 0x80000008, |
1671 | .model_id = "Common KVM processor" |
1672 | }, |
1673 | { |
1674 | .name = "qemu32" , |
1675 | .level = 4, |
1676 | .vendor = CPUID_VENDOR_INTEL, |
1677 | .family = 6, |
1678 | .model = 6, |
1679 | .stepping = 3, |
1680 | .features[FEAT_1_EDX] = |
1681 | PPRO_FEATURES, |
1682 | .features[FEAT_1_ECX] = |
1683 | CPUID_EXT_SSE3, |
1684 | .xlevel = 0x80000004, |
1685 | .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, |
1686 | }, |
1687 | { |
1688 | .name = "kvm32" , |
1689 | .level = 5, |
1690 | .vendor = CPUID_VENDOR_INTEL, |
1691 | .family = 15, |
1692 | .model = 6, |
1693 | .stepping = 1, |
1694 | .features[FEAT_1_EDX] = |
1695 | PPRO_FEATURES | CPUID_VME | |
1696 | CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, |
1697 | .features[FEAT_1_ECX] = |
1698 | CPUID_EXT_SSE3, |
1699 | .features[FEAT_8000_0001_ECX] = |
1700 | 0, |
1701 | .xlevel = 0x80000008, |
1702 | .model_id = "Common 32-bit KVM processor" |
1703 | }, |
1704 | { |
1705 | .name = "coreduo" , |
1706 | .level = 10, |
1707 | .vendor = CPUID_VENDOR_INTEL, |
1708 | .family = 6, |
1709 | .model = 14, |
1710 | .stepping = 8, |
1711 | /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ |
1712 | .features[FEAT_1_EDX] = |
1713 | PPRO_FEATURES | CPUID_VME | |
1714 | CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI | |
1715 | CPUID_SS, |
1716 | /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR, |
1717 | * CPUID_EXT_PDCM, CPUID_EXT_VMX */ |
1718 | .features[FEAT_1_ECX] = |
1719 | CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, |
1720 | .features[FEAT_8000_0001_EDX] = |
1721 | CPUID_EXT2_NX, |
1722 | .xlevel = 0x80000008, |
1723 | .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz" , |
1724 | }, |
1725 | { |
1726 | .name = "486" , |
1727 | .level = 1, |
1728 | .vendor = CPUID_VENDOR_INTEL, |
1729 | .family = 4, |
1730 | .model = 8, |
1731 | .stepping = 0, |
1732 | .features[FEAT_1_EDX] = |
1733 | I486_FEATURES, |
1734 | .xlevel = 0, |
1735 | .model_id = "" , |
1736 | }, |
1737 | { |
1738 | .name = "pentium" , |
1739 | .level = 1, |
1740 | .vendor = CPUID_VENDOR_INTEL, |
1741 | .family = 5, |
1742 | .model = 4, |
1743 | .stepping = 3, |
1744 | .features[FEAT_1_EDX] = |
1745 | PENTIUM_FEATURES, |
1746 | .xlevel = 0, |
1747 | .model_id = "" , |
1748 | }, |
1749 | { |
1750 | .name = "pentium2" , |
1751 | .level = 2, |
1752 | .vendor = CPUID_VENDOR_INTEL, |
1753 | .family = 6, |
1754 | .model = 5, |
1755 | .stepping = 2, |
1756 | .features[FEAT_1_EDX] = |
1757 | PENTIUM2_FEATURES, |
1758 | .xlevel = 0, |
1759 | .model_id = "" , |
1760 | }, |
1761 | { |
1762 | .name = "pentium3" , |
1763 | .level = 3, |
1764 | .vendor = CPUID_VENDOR_INTEL, |
1765 | .family = 6, |
1766 | .model = 7, |
1767 | .stepping = 3, |
1768 | .features[FEAT_1_EDX] = |
1769 | PENTIUM3_FEATURES, |
1770 | .xlevel = 0, |
1771 | .model_id = "" , |
1772 | }, |
1773 | { |
1774 | .name = "athlon" , |
1775 | .level = 2, |
1776 | .vendor = CPUID_VENDOR_AMD, |
1777 | .family = 6, |
1778 | .model = 2, |
1779 | .stepping = 3, |
1780 | .features[FEAT_1_EDX] = |
1781 | PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | |
1782 | CPUID_MCA, |
1783 | .features[FEAT_8000_0001_EDX] = |
1784 | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, |
1785 | .xlevel = 0x80000008, |
1786 | .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, |
1787 | }, |
1788 | { |
1789 | .name = "n270" , |
1790 | .level = 10, |
1791 | .vendor = CPUID_VENDOR_INTEL, |
1792 | .family = 6, |
1793 | .model = 28, |
1794 | .stepping = 2, |
1795 | /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ |
1796 | .features[FEAT_1_EDX] = |
1797 | PPRO_FEATURES | |
1798 | CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | |
1799 | CPUID_ACPI | CPUID_SS, |
1800 | /* Some CPUs got no CPUID_SEP */ |
1801 | /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2, |
1802 | * CPUID_EXT_XTPR */ |
1803 | .features[FEAT_1_ECX] = |
1804 | CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | |
1805 | CPUID_EXT_MOVBE, |
1806 | .features[FEAT_8000_0001_EDX] = |
1807 | CPUID_EXT2_NX, |
1808 | .features[FEAT_8000_0001_ECX] = |
1809 | CPUID_EXT3_LAHF_LM, |
1810 | .xlevel = 0x80000008, |
1811 | .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz" , |
1812 | }, |
1813 | { |
1814 | .name = "Conroe" , |
1815 | .level = 10, |
1816 | .vendor = CPUID_VENDOR_INTEL, |
1817 | .family = 6, |
1818 | .model = 15, |
1819 | .stepping = 3, |
1820 | .features[FEAT_1_EDX] = |
1821 | CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | |
1822 | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | |
1823 | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | |
1824 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | |
1825 | CPUID_DE | CPUID_FP87, |
1826 | .features[FEAT_1_ECX] = |
1827 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, |
1828 | .features[FEAT_8000_0001_EDX] = |
1829 | CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, |
1830 | .features[FEAT_8000_0001_ECX] = |
1831 | CPUID_EXT3_LAHF_LM, |
1832 | .xlevel = 0x80000008, |
1833 | .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)" , |
1834 | }, |
1835 | { |
1836 | .name = "Penryn" , |
1837 | .level = 10, |
1838 | .vendor = CPUID_VENDOR_INTEL, |
1839 | .family = 6, |
1840 | .model = 23, |
1841 | .stepping = 3, |
1842 | .features[FEAT_1_EDX] = |
1843 | CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | |
1844 | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | |
1845 | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | |
1846 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | |
1847 | CPUID_DE | CPUID_FP87, |
1848 | .features[FEAT_1_ECX] = |
1849 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | |
1850 | CPUID_EXT_SSE3, |
1851 | .features[FEAT_8000_0001_EDX] = |
1852 | CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, |
1853 | .features[FEAT_8000_0001_ECX] = |
1854 | CPUID_EXT3_LAHF_LM, |
1855 | .xlevel = 0x80000008, |
1856 | .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)" , |
1857 | }, |
1858 | { |
1859 | .name = "Nehalem" , |
1860 | .level = 11, |
1861 | .vendor = CPUID_VENDOR_INTEL, |
1862 | .family = 6, |
1863 | .model = 26, |
1864 | .stepping = 3, |
1865 | .features[FEAT_1_EDX] = |
1866 | CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | |
1867 | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | |
1868 | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | |
1869 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | |
1870 | CPUID_DE | CPUID_FP87, |
1871 | .features[FEAT_1_ECX] = |
1872 | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | |
1873 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, |
1874 | .features[FEAT_8000_0001_EDX] = |
1875 | CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, |
1876 | .features[FEAT_8000_0001_ECX] = |
1877 | CPUID_EXT3_LAHF_LM, |
1878 | .xlevel = 0x80000008, |
1879 | .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)" , |
1880 | .versions = (X86CPUVersionDefinition[]) { |
1881 | { .version = 1 }, |
1882 | { |
1883 | .version = 2, |
1884 | .alias = "Nehalem-IBRS" , |
1885 | .props = (PropValue[]) { |
1886 | { "spec-ctrl" , "on" }, |
1887 | { "model-id" , |
1888 | "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" }, |
1889 | { /* end of list */ } |
1890 | } |
1891 | }, |
1892 | { /* end of list */ } |
1893 | } |
1894 | }, |
1895 | { |
1896 | .name = "Westmere" , |
1897 | .level = 11, |
1898 | .vendor = CPUID_VENDOR_INTEL, |
1899 | .family = 6, |
1900 | .model = 44, |
1901 | .stepping = 1, |
1902 | .features[FEAT_1_EDX] = |
1903 | CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | |
1904 | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | |
1905 | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | |
1906 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | |
1907 | CPUID_DE | CPUID_FP87, |
1908 | .features[FEAT_1_ECX] = |
1909 | CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | |
1910 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | |
1911 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, |
1912 | .features[FEAT_8000_0001_EDX] = |
1913 | CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, |
1914 | .features[FEAT_8000_0001_ECX] = |
1915 | CPUID_EXT3_LAHF_LM, |
1916 | .features[FEAT_6_EAX] = |
1917 | CPUID_6_EAX_ARAT, |
1918 | .xlevel = 0x80000008, |
1919 | .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)" , |
1920 | .versions = (X86CPUVersionDefinition[]) { |
1921 | { .version = 1 }, |
1922 | { |
1923 | .version = 2, |
1924 | .alias = "Westmere-IBRS" , |
1925 | .props = (PropValue[]) { |
1926 | { "spec-ctrl" , "on" }, |
1927 | { "model-id" , |
1928 | "Westmere E56xx/L56xx/X56xx (IBRS update)" }, |
1929 | { /* end of list */ } |
1930 | } |
1931 | }, |
1932 | { /* end of list */ } |
1933 | } |
1934 | }, |
1935 | { |
1936 | .name = "SandyBridge" , |
1937 | .level = 0xd, |
1938 | .vendor = CPUID_VENDOR_INTEL, |
1939 | .family = 6, |
1940 | .model = 42, |
1941 | .stepping = 1, |
1942 | .features[FEAT_1_EDX] = |
1943 | CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | |
1944 | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | |
1945 | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | |
1946 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | |
1947 | CPUID_DE | CPUID_FP87, |
1948 | .features[FEAT_1_ECX] = |
1949 | CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | |
1950 | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | |
1951 | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | |
1952 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | |
1953 | CPUID_EXT_SSE3, |
1954 | .features[FEAT_8000_0001_EDX] = |
1955 | CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | |
1956 | CPUID_EXT2_SYSCALL, |
1957 | .features[FEAT_8000_0001_ECX] = |
1958 | CPUID_EXT3_LAHF_LM, |
1959 | .features[FEAT_XSAVE] = |
1960 | CPUID_XSAVE_XSAVEOPT, |
1961 | .features[FEAT_6_EAX] = |
1962 | CPUID_6_EAX_ARAT, |
1963 | .xlevel = 0x80000008, |
1964 | .model_id = "Intel Xeon E312xx (Sandy Bridge)" , |
1965 | .versions = (X86CPUVersionDefinition[]) { |
1966 | { .version = 1 }, |
1967 | { |
1968 | .version = 2, |
1969 | .alias = "SandyBridge-IBRS" , |
1970 | .props = (PropValue[]) { |
1971 | { "spec-ctrl" , "on" }, |
1972 | { "model-id" , |
1973 | "Intel Xeon E312xx (Sandy Bridge, IBRS update)" }, |
1974 | { /* end of list */ } |
1975 | } |
1976 | }, |
1977 | { /* end of list */ } |
1978 | } |
1979 | }, |
1980 | { |
1981 | .name = "IvyBridge" , |
1982 | .level = 0xd, |
1983 | .vendor = CPUID_VENDOR_INTEL, |
1984 | .family = 6, |
1985 | .model = 58, |
1986 | .stepping = 9, |
1987 | .features[FEAT_1_EDX] = |
1988 | CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | |
1989 | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | |
1990 | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | |
1991 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | |
1992 | CPUID_DE | CPUID_FP87, |
1993 | .features[FEAT_1_ECX] = |
1994 | CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | |
1995 | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | |
1996 | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | |
1997 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | |
1998 | CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, |
1999 | .features[FEAT_7_0_EBX] = |
2000 | CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | |
2001 | CPUID_7_0_EBX_ERMS, |
2002 | .features[FEAT_8000_0001_EDX] = |
2003 | CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | |
2004 | CPUID_EXT2_SYSCALL, |
2005 | .features[FEAT_8000_0001_ECX] = |
2006 | CPUID_EXT3_LAHF_LM, |
2007 | .features[FEAT_XSAVE] = |
2008 | CPUID_XSAVE_XSAVEOPT, |
2009 | .features[FEAT_6_EAX] = |
2010 | CPUID_6_EAX_ARAT, |
2011 | .xlevel = 0x80000008, |
2012 | .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)" , |
2013 | .versions = (X86CPUVersionDefinition[]) { |
2014 | { .version = 1 }, |
2015 | { |
2016 | .version = 2, |
2017 | .alias = "IvyBridge-IBRS" , |
2018 | .props = (PropValue[]) { |
2019 | { "spec-ctrl" , "on" }, |
2020 | { "model-id" , |
2021 | "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" }, |
2022 | { /* end of list */ } |
2023 | } |
2024 | }, |
2025 | { /* end of list */ } |
2026 | } |
2027 | }, |
2028 | { |
2029 | .name = "Haswell" , |
2030 | .level = 0xd, |
2031 | .vendor = CPUID_VENDOR_INTEL, |
2032 | .family = 6, |
2033 | .model = 60, |
2034 | .stepping = 4, |
2035 | .features[FEAT_1_EDX] = |
2036 | CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | |
2037 | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | |
2038 | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | |
2039 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | |
2040 | CPUID_DE | CPUID_FP87, |
2041 | .features[FEAT_1_ECX] = |
2042 | CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | |
2043 | CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | |
2044 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | |
2045 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | |
2046 | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | |
2047 | CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, |
2048 | .features[FEAT_8000_0001_EDX] = |
2049 | CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | |
2050 | CPUID_EXT2_SYSCALL, |
2051 | .features[FEAT_8000_0001_ECX] = |
2052 | CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, |
2053 | .features[FEAT_7_0_EBX] = |
2054 | CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | |
2055 | CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | |
2056 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | |
2057 | CPUID_7_0_EBX_RTM, |
2058 | .features[FEAT_XSAVE] = |
2059 | CPUID_XSAVE_XSAVEOPT, |
2060 | .features[FEAT_6_EAX] = |
2061 | CPUID_6_EAX_ARAT, |
2062 | .xlevel = 0x80000008, |
2063 | .model_id = "Intel Core Processor (Haswell)" , |
2064 | .versions = (X86CPUVersionDefinition[]) { |
2065 | { .version = 1 }, |
2066 | { |
2067 | .version = 2, |
2068 | .alias = "Haswell-noTSX" , |
2069 | .props = (PropValue[]) { |
2070 | { "hle" , "off" }, |
2071 | { "rtm" , "off" }, |
2072 | { "stepping" , "1" }, |
2073 | { "model-id" , "Intel Core Processor (Haswell, no TSX)" , }, |
2074 | { /* end of list */ } |
2075 | }, |
2076 | }, |
2077 | { |
2078 | .version = 3, |
2079 | .alias = "Haswell-IBRS" , |
2080 | .props = (PropValue[]) { |
2081 | /* Restore TSX features removed by -v2 above */ |
2082 | { "hle" , "on" }, |
2083 | { "rtm" , "on" }, |
2084 | /* |
2085 | * Haswell and Haswell-IBRS had stepping=4 in |
2086 | * QEMU 4.0 and older |
2087 | */ |
2088 | { "stepping" , "4" }, |
2089 | { "spec-ctrl" , "on" }, |
2090 | { "model-id" , |
2091 | "Intel Core Processor (Haswell, IBRS)" }, |
2092 | { /* end of list */ } |
2093 | } |
2094 | }, |
2095 | { |
2096 | .version = 4, |
2097 | .alias = "Haswell-noTSX-IBRS" , |
2098 | .props = (PropValue[]) { |
2099 | { "hle" , "off" }, |
2100 | { "rtm" , "off" }, |
2101 | /* spec-ctrl was already enabled by -v3 above */ |
2102 | { "stepping" , "1" }, |
2103 | { "model-id" , |
2104 | "Intel Core Processor (Haswell, no TSX, IBRS)" }, |
2105 | { /* end of list */ } |
2106 | } |
2107 | }, |
2108 | { /* end of list */ } |
2109 | } |
2110 | }, |
2111 | { |
2112 | .name = "Broadwell" , |
2113 | .level = 0xd, |
2114 | .vendor = CPUID_VENDOR_INTEL, |
2115 | .family = 6, |
2116 | .model = 61, |
2117 | .stepping = 2, |
2118 | .features[FEAT_1_EDX] = |
2119 | CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | |
2120 | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | |
2121 | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | |
2122 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | |
2123 | CPUID_DE | CPUID_FP87, |
2124 | .features[FEAT_1_ECX] = |
2125 | CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | |
2126 | CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | |
2127 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | |
2128 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | |
2129 | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | |
2130 | CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, |
2131 | .features[FEAT_8000_0001_EDX] = |
2132 | CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | |
2133 | CPUID_EXT2_SYSCALL, |
2134 | .features[FEAT_8000_0001_ECX] = |
2135 | CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, |
2136 | .features[FEAT_7_0_EBX] = |
2137 | CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | |
2138 | CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | |
2139 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | |
2140 | CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | |
2141 | CPUID_7_0_EBX_SMAP, |
2142 | .features[FEAT_XSAVE] = |
2143 | CPUID_XSAVE_XSAVEOPT, |
2144 | .features[FEAT_6_EAX] = |
2145 | CPUID_6_EAX_ARAT, |
2146 | .xlevel = 0x80000008, |
2147 | .model_id = "Intel Core Processor (Broadwell)" , |
2148 | .versions = (X86CPUVersionDefinition[]) { |
2149 | { .version = 1 }, |
2150 | { |
2151 | .version = 2, |
2152 | .alias = "Broadwell-noTSX" , |
2153 | .props = (PropValue[]) { |
2154 | { "hle" , "off" }, |
2155 | { "rtm" , "off" }, |
2156 | { "model-id" , "Intel Core Processor (Broadwell, no TSX)" , }, |
2157 | { /* end of list */ } |
2158 | }, |
2159 | }, |
2160 | { |
2161 | .version = 3, |
2162 | .alias = "Broadwell-IBRS" , |
2163 | .props = (PropValue[]) { |
2164 | /* Restore TSX features removed by -v2 above */ |
2165 | { "hle" , "on" }, |
2166 | { "rtm" , "on" }, |
2167 | { "spec-ctrl" , "on" }, |
2168 | { "model-id" , |
2169 | "Intel Core Processor (Broadwell, IBRS)" }, |
2170 | { /* end of list */ } |
2171 | } |
2172 | }, |
2173 | { |
2174 | .version = 4, |
2175 | .alias = "Broadwell-noTSX-IBRS" , |
2176 | .props = (PropValue[]) { |
2177 | { "hle" , "off" }, |
2178 | { "rtm" , "off" }, |
2179 | /* spec-ctrl was already enabled by -v3 above */ |
2180 | { "model-id" , |
2181 | "Intel Core Processor (Broadwell, no TSX, IBRS)" }, |
2182 | { /* end of list */ } |
2183 | } |
2184 | }, |
2185 | { /* end of list */ } |
2186 | } |
2187 | }, |
2188 | { |
2189 | .name = "Skylake-Client" , |
2190 | .level = 0xd, |
2191 | .vendor = CPUID_VENDOR_INTEL, |
2192 | .family = 6, |
2193 | .model = 94, |
2194 | .stepping = 3, |
2195 | .features[FEAT_1_EDX] = |
2196 | CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | |
2197 | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | |
2198 | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | |
2199 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | |
2200 | CPUID_DE | CPUID_FP87, |
2201 | .features[FEAT_1_ECX] = |
2202 | CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | |
2203 | CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | |
2204 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | |
2205 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | |
2206 | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | |
2207 | CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, |
2208 | .features[FEAT_8000_0001_EDX] = |
2209 | CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | |
2210 | CPUID_EXT2_SYSCALL, |
2211 | .features[FEAT_8000_0001_ECX] = |
2212 | CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, |
2213 | .features[FEAT_7_0_EBX] = |
2214 | CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | |
2215 | CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | |
2216 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | |
2217 | CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | |
2218 | CPUID_7_0_EBX_SMAP, |
2219 | /* Missing: XSAVES (not supported by some Linux versions, |
2220 | * including v4.1 to v4.12). |
2221 | * KVM doesn't yet expose any XSAVES state save component, |
2222 | * and the only one defined in Skylake (processor tracing) |
2223 | * probably will block migration anyway. |
2224 | */ |
2225 | .features[FEAT_XSAVE] = |
2226 | CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | |
2227 | CPUID_XSAVE_XGETBV1, |
2228 | .features[FEAT_6_EAX] = |
2229 | CPUID_6_EAX_ARAT, |
2230 | .xlevel = 0x80000008, |
2231 | .model_id = "Intel Core Processor (Skylake)" , |
2232 | .versions = (X86CPUVersionDefinition[]) { |
2233 | { .version = 1 }, |
2234 | { |
2235 | .version = 2, |
2236 | .alias = "Skylake-Client-IBRS" , |
2237 | .props = (PropValue[]) { |
2238 | { "spec-ctrl" , "on" }, |
2239 | { "model-id" , |
2240 | "Intel Core Processor (Skylake, IBRS)" }, |
2241 | { /* end of list */ } |
2242 | } |
2243 | }, |
2244 | { /* end of list */ } |
2245 | } |
2246 | }, |
2247 | { |
2248 | .name = "Skylake-Server" , |
2249 | .level = 0xd, |
2250 | .vendor = CPUID_VENDOR_INTEL, |
2251 | .family = 6, |
2252 | .model = 85, |
2253 | .stepping = 4, |
2254 | .features[FEAT_1_EDX] = |
2255 | CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | |
2256 | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | |
2257 | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | |
2258 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | |
2259 | CPUID_DE | CPUID_FP87, |
2260 | .features[FEAT_1_ECX] = |
2261 | CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | |
2262 | CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | |
2263 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | |
2264 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | |
2265 | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | |
2266 | CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, |
2267 | .features[FEAT_8000_0001_EDX] = |
2268 | CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | |
2269 | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, |
2270 | .features[FEAT_8000_0001_ECX] = |
2271 | CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, |
2272 | .features[FEAT_7_0_EBX] = |
2273 | CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | |
2274 | CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | |
2275 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | |
2276 | CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | |
2277 | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | |
2278 | CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | |
2279 | CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | |
2280 | CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, |
2281 | .features[FEAT_7_0_ECX] = |
2282 | CPUID_7_0_ECX_PKU, |
2283 | /* Missing: XSAVES (not supported by some Linux versions, |
2284 | * including v4.1 to v4.12). |
2285 | * KVM doesn't yet expose any XSAVES state save component, |
2286 | * and the only one defined in Skylake (processor tracing) |
2287 | * probably will block migration anyway. |
2288 | */ |
2289 | .features[FEAT_XSAVE] = |
2290 | CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | |
2291 | CPUID_XSAVE_XGETBV1, |
2292 | .features[FEAT_6_EAX] = |
2293 | CPUID_6_EAX_ARAT, |
2294 | .xlevel = 0x80000008, |
2295 | .model_id = "Intel Xeon Processor (Skylake)" , |
2296 | .versions = (X86CPUVersionDefinition[]) { |
2297 | { .version = 1 }, |
2298 | { |
2299 | .version = 2, |
2300 | .alias = "Skylake-Server-IBRS" , |
2301 | .props = (PropValue[]) { |
2302 | /* clflushopt was not added to Skylake-Server-IBRS */ |
2303 | /* TODO: add -v3 including clflushopt */ |
2304 | { "clflushopt" , "off" }, |
2305 | { "spec-ctrl" , "on" }, |
2306 | { "model-id" , |
2307 | "Intel Xeon Processor (Skylake, IBRS)" }, |
2308 | { /* end of list */ } |
2309 | } |
2310 | }, |
2311 | { /* end of list */ } |
2312 | } |
2313 | }, |
2314 | { |
2315 | .name = "Cascadelake-Server" , |
2316 | .level = 0xd, |
2317 | .vendor = CPUID_VENDOR_INTEL, |
2318 | .family = 6, |
2319 | .model = 85, |
2320 | .stepping = 6, |
2321 | .features[FEAT_1_EDX] = |
2322 | CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | |
2323 | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | |
2324 | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | |
2325 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | |
2326 | CPUID_DE | CPUID_FP87, |
2327 | .features[FEAT_1_ECX] = |
2328 | CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | |
2329 | CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | |
2330 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | |
2331 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | |
2332 | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | |
2333 | CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, |
2334 | .features[FEAT_8000_0001_EDX] = |
2335 | CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | |
2336 | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, |
2337 | .features[FEAT_8000_0001_ECX] = |
2338 | CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, |
2339 | .features[FEAT_7_0_EBX] = |
2340 | CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | |
2341 | CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | |
2342 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | |
2343 | CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | |
2344 | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | |
2345 | CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | |
2346 | CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | |
2347 | CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, |
2348 | .features[FEAT_7_0_ECX] = |
2349 | CPUID_7_0_ECX_PKU | |
2350 | CPUID_7_0_ECX_AVX512VNNI, |
2351 | .features[FEAT_7_0_EDX] = |
2352 | CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, |
2353 | /* Missing: XSAVES (not supported by some Linux versions, |
2354 | * including v4.1 to v4.12). |
2355 | * KVM doesn't yet expose any XSAVES state save component, |
2356 | * and the only one defined in Skylake (processor tracing) |
2357 | * probably will block migration anyway. |
2358 | */ |
2359 | .features[FEAT_XSAVE] = |
2360 | CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | |
2361 | CPUID_XSAVE_XGETBV1, |
2362 | .features[FEAT_6_EAX] = |
2363 | CPUID_6_EAX_ARAT, |
2364 | .xlevel = 0x80000008, |
2365 | .model_id = "Intel Xeon Processor (Cascadelake)" , |
2366 | .versions = (X86CPUVersionDefinition[]) { |
2367 | { .version = 1 }, |
2368 | { .version = 2, |
2369 | .props = (PropValue[]) { |
2370 | { "arch-capabilities" , "on" }, |
2371 | { "rdctl-no" , "on" }, |
2372 | { "ibrs-all" , "on" }, |
2373 | { "skip-l1dfl-vmentry" , "on" }, |
2374 | { "mds-no" , "on" }, |
2375 | { /* end of list */ } |
2376 | }, |
2377 | }, |
2378 | { /* end of list */ } |
2379 | } |
2380 | }, |
2381 | { |
2382 | .name = "Icelake-Client" , |
2383 | .level = 0xd, |
2384 | .vendor = CPUID_VENDOR_INTEL, |
2385 | .family = 6, |
2386 | .model = 126, |
2387 | .stepping = 0, |
2388 | .features[FEAT_1_EDX] = |
2389 | CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | |
2390 | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | |
2391 | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | |
2392 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | |
2393 | CPUID_DE | CPUID_FP87, |
2394 | .features[FEAT_1_ECX] = |
2395 | CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | |
2396 | CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | |
2397 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | |
2398 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | |
2399 | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | |
2400 | CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, |
2401 | .features[FEAT_8000_0001_EDX] = |
2402 | CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | |
2403 | CPUID_EXT2_SYSCALL, |
2404 | .features[FEAT_8000_0001_ECX] = |
2405 | CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, |
2406 | .features[FEAT_8000_0008_EBX] = |
2407 | CPUID_8000_0008_EBX_WBNOINVD, |
2408 | .features[FEAT_7_0_EBX] = |
2409 | CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | |
2410 | CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | |
2411 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | |
2412 | CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | |
2413 | CPUID_7_0_EBX_SMAP, |
2414 | .features[FEAT_7_0_ECX] = |
2415 | CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | |
2416 | CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI | |
2417 | CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | |
2418 | CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | |
2419 | CPUID_7_0_ECX_AVX512_VPOPCNTDQ, |
2420 | .features[FEAT_7_0_EDX] = |
2421 | CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, |
2422 | /* Missing: XSAVES (not supported by some Linux versions, |
2423 | * including v4.1 to v4.12). |
2424 | * KVM doesn't yet expose any XSAVES state save component, |
2425 | * and the only one defined in Skylake (processor tracing) |
2426 | * probably will block migration anyway. |
2427 | */ |
2428 | .features[FEAT_XSAVE] = |
2429 | CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | |
2430 | CPUID_XSAVE_XGETBV1, |
2431 | .features[FEAT_6_EAX] = |
2432 | CPUID_6_EAX_ARAT, |
2433 | .xlevel = 0x80000008, |
2434 | .model_id = "Intel Core Processor (Icelake)" , |
2435 | }, |
2436 | { |
2437 | .name = "Icelake-Server" , |
2438 | .level = 0xd, |
2439 | .vendor = CPUID_VENDOR_INTEL, |
2440 | .family = 6, |
2441 | .model = 134, |
2442 | .stepping = 0, |
2443 | .features[FEAT_1_EDX] = |
2444 | CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | |
2445 | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | |
2446 | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | |
2447 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | |
2448 | CPUID_DE | CPUID_FP87, |
2449 | .features[FEAT_1_ECX] = |
2450 | CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | |
2451 | CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | |
2452 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | |
2453 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | |
2454 | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | |
2455 | CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, |
2456 | .features[FEAT_8000_0001_EDX] = |
2457 | CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | |
2458 | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, |
2459 | .features[FEAT_8000_0001_ECX] = |
2460 | CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, |
2461 | .features[FEAT_8000_0008_EBX] = |
2462 | CPUID_8000_0008_EBX_WBNOINVD, |
2463 | .features[FEAT_7_0_EBX] = |
2464 | CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | |
2465 | CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | |
2466 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | |
2467 | CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | |
2468 | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | |
2469 | CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | |
2470 | CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | |
2471 | CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, |
2472 | .features[FEAT_7_0_ECX] = |
2473 | CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | |
2474 | CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI | |
2475 | CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | |
2476 | CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | |
2477 | CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57, |
2478 | .features[FEAT_7_0_EDX] = |
2479 | CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, |
2480 | /* Missing: XSAVES (not supported by some Linux versions, |
2481 | * including v4.1 to v4.12). |
2482 | * KVM doesn't yet expose any XSAVES state save component, |
2483 | * and the only one defined in Skylake (processor tracing) |
2484 | * probably will block migration anyway. |
2485 | */ |
2486 | .features[FEAT_XSAVE] = |
2487 | CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | |
2488 | CPUID_XSAVE_XGETBV1, |
2489 | .features[FEAT_6_EAX] = |
2490 | CPUID_6_EAX_ARAT, |
2491 | .xlevel = 0x80000008, |
2492 | .model_id = "Intel Xeon Processor (Icelake)" , |
2493 | }, |
2494 | { |
2495 | .name = "Snowridge" , |
2496 | .level = 27, |
2497 | .vendor = CPUID_VENDOR_INTEL, |
2498 | .family = 6, |
2499 | .model = 134, |
2500 | .stepping = 1, |
2501 | .features[FEAT_1_EDX] = |
2502 | /* missing: CPUID_PN CPUID_IA64 */ |
2503 | /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ |
2504 | CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | |
2505 | CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE | |
2506 | CPUID_CX8 | CPUID_APIC | CPUID_SEP | |
2507 | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | |
2508 | CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | |
2509 | CPUID_MMX | |
2510 | CPUID_FXSR | CPUID_SSE | CPUID_SSE2, |
2511 | .features[FEAT_1_ECX] = |
2512 | CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | |
2513 | CPUID_EXT_SSSE3 | |
2514 | CPUID_EXT_CX16 | |
2515 | CPUID_EXT_SSE41 | |
2516 | CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | |
2517 | CPUID_EXT_POPCNT | |
2518 | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE | |
2519 | CPUID_EXT_RDRAND, |
2520 | .features[FEAT_8000_0001_EDX] = |
2521 | CPUID_EXT2_SYSCALL | |
2522 | CPUID_EXT2_NX | |
2523 | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | |
2524 | CPUID_EXT2_LM, |
2525 | .features[FEAT_8000_0001_ECX] = |
2526 | CPUID_EXT3_LAHF_LM | |
2527 | CPUID_EXT3_3DNOWPREFETCH, |
2528 | .features[FEAT_7_0_EBX] = |
2529 | CPUID_7_0_EBX_FSGSBASE | |
2530 | CPUID_7_0_EBX_SMEP | |
2531 | CPUID_7_0_EBX_ERMS | |
2532 | CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */ |
2533 | CPUID_7_0_EBX_RDSEED | |
2534 | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | |
2535 | CPUID_7_0_EBX_CLWB | |
2536 | CPUID_7_0_EBX_SHA_NI, |
2537 | .features[FEAT_7_0_ECX] = |
2538 | CPUID_7_0_ECX_UMIP | |
2539 | /* missing bit 5 */ |
2540 | CPUID_7_0_ECX_GFNI | |
2541 | CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE | |
2542 | CPUID_7_0_ECX_MOVDIR64B, |
2543 | .features[FEAT_7_0_EDX] = |
2544 | CPUID_7_0_EDX_SPEC_CTRL | |
2545 | CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD | |
2546 | CPUID_7_0_EDX_CORE_CAPABILITY, |
2547 | .features[FEAT_CORE_CAPABILITY] = |
2548 | MSR_CORE_CAP_SPLIT_LOCK_DETECT, |
2549 | /* |
2550 | * Missing: XSAVES (not supported by some Linux versions, |
2551 | * including v4.1 to v4.12). |
2552 | * KVM doesn't yet expose any XSAVES state save component, |
2553 | * and the only one defined in Skylake (processor tracing) |
2554 | * probably will block migration anyway. |
2555 | */ |
2556 | .features[FEAT_XSAVE] = |
2557 | CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | |
2558 | CPUID_XSAVE_XGETBV1, |
2559 | .features[FEAT_6_EAX] = |
2560 | CPUID_6_EAX_ARAT, |
2561 | .xlevel = 0x80000008, |
2562 | .model_id = "Intel Atom Processor (SnowRidge)" , |
2563 | }, |
2564 | { |
2565 | .name = "KnightsMill" , |
2566 | .level = 0xd, |
2567 | .vendor = CPUID_VENDOR_INTEL, |
2568 | .family = 6, |
2569 | .model = 133, |
2570 | .stepping = 0, |
2571 | .features[FEAT_1_EDX] = |
2572 | CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | |
2573 | CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | |
2574 | CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | |
2575 | CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | |
2576 | CPUID_PSE | CPUID_DE | CPUID_FP87, |
2577 | .features[FEAT_1_ECX] = |
2578 | CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | |
2579 | CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | |
2580 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | |
2581 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | |
2582 | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | |
2583 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, |
2584 | .features[FEAT_8000_0001_EDX] = |
2585 | CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | |
2586 | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, |
2587 | .features[FEAT_8000_0001_ECX] = |
2588 | CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, |
2589 | .features[FEAT_7_0_EBX] = |
2590 | CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | |
2591 | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | |
2592 | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F | |
2593 | CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF | |
2594 | CPUID_7_0_EBX_AVX512ER, |
2595 | .features[FEAT_7_0_ECX] = |
2596 | CPUID_7_0_ECX_AVX512_VPOPCNTDQ, |
2597 | .features[FEAT_7_0_EDX] = |
2598 | CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS, |
2599 | .features[FEAT_XSAVE] = |
2600 | CPUID_XSAVE_XSAVEOPT, |
2601 | .features[FEAT_6_EAX] = |
2602 | CPUID_6_EAX_ARAT, |
2603 | .xlevel = 0x80000008, |
2604 | .model_id = "Intel Xeon Phi Processor (Knights Mill)" , |
2605 | }, |
2606 | { |
2607 | .name = "Opteron_G1" , |
2608 | .level = 5, |
2609 | .vendor = CPUID_VENDOR_AMD, |
2610 | .family = 15, |
2611 | .model = 6, |
2612 | .stepping = 1, |
2613 | .features[FEAT_1_EDX] = |
2614 | CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | |
2615 | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | |
2616 | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | |
2617 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | |
2618 | CPUID_DE | CPUID_FP87, |
2619 | .features[FEAT_1_ECX] = |
2620 | CPUID_EXT_SSE3, |
2621 | .features[FEAT_8000_0001_EDX] = |
2622 | CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, |
2623 | .xlevel = 0x80000008, |
2624 | .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)" , |
2625 | }, |
2626 | { |
2627 | .name = "Opteron_G2" , |
2628 | .level = 5, |
2629 | .vendor = CPUID_VENDOR_AMD, |
2630 | .family = 15, |
2631 | .model = 6, |
2632 | .stepping = 1, |
2633 | .features[FEAT_1_EDX] = |
2634 | CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | |
2635 | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | |
2636 | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | |
2637 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | |
2638 | CPUID_DE | CPUID_FP87, |
2639 | .features[FEAT_1_ECX] = |
2640 | CPUID_EXT_CX16 | CPUID_EXT_SSE3, |
2641 | .features[FEAT_8000_0001_EDX] = |
2642 | CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, |
2643 | .features[FEAT_8000_0001_ECX] = |
2644 | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, |
2645 | .xlevel = 0x80000008, |
2646 | .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)" , |
2647 | }, |
2648 | { |
2649 | .name = "Opteron_G3" , |
2650 | .level = 5, |
2651 | .vendor = CPUID_VENDOR_AMD, |
2652 | .family = 16, |
2653 | .model = 2, |
2654 | .stepping = 3, |
2655 | .features[FEAT_1_EDX] = |
2656 | CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | |
2657 | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | |
2658 | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | |
2659 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | |
2660 | CPUID_DE | CPUID_FP87, |
2661 | .features[FEAT_1_ECX] = |
2662 | CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR | |
2663 | CPUID_EXT_SSE3, |
2664 | .features[FEAT_8000_0001_EDX] = |
2665 | CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL | |
2666 | CPUID_EXT2_RDTSCP, |
2667 | .features[FEAT_8000_0001_ECX] = |
2668 | CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | |
2669 | CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, |
2670 | .xlevel = 0x80000008, |
2671 | .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)" , |
2672 | }, |
2673 | { |
2674 | .name = "Opteron_G4" , |
2675 | .level = 0xd, |
2676 | .vendor = CPUID_VENDOR_AMD, |
2677 | .family = 21, |
2678 | .model = 1, |
2679 | .stepping = 2, |
2680 | .features[FEAT_1_EDX] = |
2681 | CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | |
2682 | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | |
2683 | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | |
2684 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | |
2685 | CPUID_DE | CPUID_FP87, |
2686 | .features[FEAT_1_ECX] = |
2687 | CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | |
2688 | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | |
2689 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | |
2690 | CPUID_EXT_SSE3, |
2691 | .features[FEAT_8000_0001_EDX] = |
2692 | CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | |
2693 | CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, |
2694 | .features[FEAT_8000_0001_ECX] = |
2695 | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | |
2696 | CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | |
2697 | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | |
2698 | CPUID_EXT3_LAHF_LM, |
2699 | .features[FEAT_SVM] = |
2700 | CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, |
2701 | /* no xsaveopt! */ |
2702 | .xlevel = 0x8000001A, |
2703 | .model_id = "AMD Opteron 62xx class CPU" , |
2704 | }, |
2705 | { |
2706 | .name = "Opteron_G5" , |
2707 | .level = 0xd, |
2708 | .vendor = CPUID_VENDOR_AMD, |
2709 | .family = 21, |
2710 | .model = 2, |
2711 | .stepping = 0, |
2712 | .features[FEAT_1_EDX] = |
2713 | CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | |
2714 | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | |
2715 | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | |
2716 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | |
2717 | CPUID_DE | CPUID_FP87, |
2718 | .features[FEAT_1_ECX] = |
2719 | CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | |
2720 | CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | |
2721 | CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | |
2722 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, |
2723 | .features[FEAT_8000_0001_EDX] = |
2724 | CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | |
2725 | CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, |
2726 | .features[FEAT_8000_0001_ECX] = |
2727 | CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | |
2728 | CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | |
2729 | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | |
2730 | CPUID_EXT3_LAHF_LM, |
2731 | .features[FEAT_SVM] = |
2732 | CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, |
2733 | /* no xsaveopt! */ |
2734 | .xlevel = 0x8000001A, |
2735 | .model_id = "AMD Opteron 63xx class CPU" , |
2736 | }, |
2737 | { |
2738 | .name = "EPYC" , |
2739 | .level = 0xd, |
2740 | .vendor = CPUID_VENDOR_AMD, |
2741 | .family = 23, |
2742 | .model = 1, |
2743 | .stepping = 2, |
2744 | .features[FEAT_1_EDX] = |
2745 | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | |
2746 | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | |
2747 | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | |
2748 | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | |
2749 | CPUID_VME | CPUID_FP87, |
2750 | .features[FEAT_1_ECX] = |
2751 | CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | |
2752 | CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | |
2753 | CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | |
2754 | CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | |
2755 | CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, |
2756 | .features[FEAT_8000_0001_EDX] = |
2757 | CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | |
2758 | CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | |
2759 | CPUID_EXT2_SYSCALL, |
2760 | .features[FEAT_8000_0001_ECX] = |
2761 | CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | |
2762 | CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | |
2763 | CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | |
2764 | CPUID_EXT3_TOPOEXT, |
2765 | .features[FEAT_7_0_EBX] = |
2766 | CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | |
2767 | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | |
2768 | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | |
2769 | CPUID_7_0_EBX_SHA_NI, |
2770 | /* Missing: XSAVES (not supported by some Linux versions, |
2771 | * including v4.1 to v4.12). |
2772 | * KVM doesn't yet expose any XSAVES state save component. |
2773 | */ |
2774 | .features[FEAT_XSAVE] = |
2775 | CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | |
2776 | CPUID_XSAVE_XGETBV1, |
2777 | .features[FEAT_6_EAX] = |
2778 | CPUID_6_EAX_ARAT, |
2779 | .features[FEAT_SVM] = |
2780 | CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, |
2781 | .xlevel = 0x8000001E, |
2782 | .model_id = "AMD EPYC Processor" , |
2783 | .cache_info = &epyc_cache_info, |
2784 | .versions = (X86CPUVersionDefinition[]) { |
2785 | { .version = 1 }, |
2786 | { |
2787 | .version = 2, |
2788 | .alias = "EPYC-IBPB" , |
2789 | .props = (PropValue[]) { |
2790 | { "ibpb" , "on" }, |
2791 | { "model-id" , |
2792 | "AMD EPYC Processor (with IBPB)" }, |
2793 | { /* end of list */ } |
2794 | } |
2795 | }, |
2796 | { /* end of list */ } |
2797 | } |
2798 | }, |
2799 | { |
2800 | .name = "Dhyana" , |
2801 | .level = 0xd, |
2802 | .vendor = CPUID_VENDOR_HYGON, |
2803 | .family = 24, |
2804 | .model = 0, |
2805 | .stepping = 1, |
2806 | .features[FEAT_1_EDX] = |
2807 | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | |
2808 | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | |
2809 | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | |
2810 | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | |
2811 | CPUID_VME | CPUID_FP87, |
2812 | .features[FEAT_1_ECX] = |
2813 | CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | |
2814 | CPUID_EXT_XSAVE | CPUID_EXT_POPCNT | |
2815 | CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | |
2816 | CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | |
2817 | CPUID_EXT_MONITOR | CPUID_EXT_SSE3, |
2818 | .features[FEAT_8000_0001_EDX] = |
2819 | CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | |
2820 | CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | |
2821 | CPUID_EXT2_SYSCALL, |
2822 | .features[FEAT_8000_0001_ECX] = |
2823 | CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | |
2824 | CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | |
2825 | CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | |
2826 | CPUID_EXT3_TOPOEXT, |
2827 | .features[FEAT_8000_0008_EBX] = |
2828 | CPUID_8000_0008_EBX_IBPB, |
2829 | .features[FEAT_7_0_EBX] = |
2830 | CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | |
2831 | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | |
2832 | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT, |
2833 | /* |
2834 | * Missing: XSAVES (not supported by some Linux versions, |
2835 | * including v4.1 to v4.12). |
2836 | * KVM doesn't yet expose any XSAVES state save component. |
2837 | */ |
2838 | .features[FEAT_XSAVE] = |
2839 | CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | |
2840 | CPUID_XSAVE_XGETBV1, |
2841 | .features[FEAT_6_EAX] = |
2842 | CPUID_6_EAX_ARAT, |
2843 | .features[FEAT_SVM] = |
2844 | CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, |
2845 | .xlevel = 0x8000001E, |
2846 | .model_id = "Hygon Dhyana Processor" , |
2847 | .cache_info = &epyc_cache_info, |
2848 | }, |
2849 | }; |
2850 | |
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 *
 * NOTE: the value fields are mutated at runtime by
 * x86_cpu_change_kvm_default(), so this table must stay writable.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock" , "on" },
    { "kvm-nopiodelay" , "on" },
    { "kvm-asyncpf" , "on" },
    { "kvm-steal-time" , "on" },
    { "kvm-pv-eoi" , "on" },
    { "kvmclock-stable-bit" , "on" },
    { "x2apic" , "on" },
    { "acpi" , "off" },
    { "monitor" , "off" },
    { "svm" , "off" },
    { NULL, NULL },    /* terminator: pv->prop == NULL ends iteration */
};
2867 | |
/* TCG-specific defaults that override all CPU models when using TCG.
 */
static PropValue tcg_default_props[] = {
    { "vme" , "off" },
    { NULL, NULL },    /* terminator */
};
2874 | |
2875 | |
2876 | X86CPUVersion default_cpu_version = CPU_VERSION_LATEST; |
2877 | |
/*
 * Set the CPU model version that will be used when a CPU is created
 * with version CPU_VERSION_AUTO (typically chosen by the machine type).
 */
void x86_cpu_set_default_version(X86CPUVersion version)
{
    /* Translating CPU_VERSION_AUTO to CPU_VERSION_AUTO doesn't make sense */
    assert(version != CPU_VERSION_AUTO);
    default_cpu_version = version;
}
2884 | |
2885 | static X86CPUVersion x86_cpu_model_last_version(const X86CPUModel *model) |
2886 | { |
2887 | int v = 0; |
2888 | const X86CPUVersionDefinition *vdef = |
2889 | x86_cpu_def_get_versions(model->cpudef); |
2890 | while (vdef->version) { |
2891 | v = vdef->version; |
2892 | vdef++; |
2893 | } |
2894 | return v; |
2895 | } |
2896 | |
2897 | /* Return the actual version being used for a specific CPU model */ |
2898 | static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model) |
2899 | { |
2900 | X86CPUVersion v = model->version; |
2901 | if (v == CPU_VERSION_AUTO) { |
2902 | v = default_cpu_version; |
2903 | } |
2904 | if (v == CPU_VERSION_LATEST) { |
2905 | return x86_cpu_model_last_version(model); |
2906 | } |
2907 | return v; |
2908 | } |
2909 | |
2910 | void x86_cpu_change_kvm_default(const char *prop, const char *value) |
2911 | { |
2912 | PropValue *pv; |
2913 | for (pv = kvm_default_props; pv->prop; pv++) { |
2914 | if (!strcmp(pv->prop, prop)) { |
2915 | pv->value = value; |
2916 | break; |
2917 | } |
2918 | } |
2919 | |
2920 | /* It is valid to call this function only for properties that |
2921 | * are already present in the kvm_default_props table. |
2922 | */ |
2923 | assert(pv->prop); |
2924 | } |
2925 | |
2926 | static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w, |
2927 | bool migratable_only); |
2928 | |
/*
 * Return true if KVM reports support for Local Machine Check
 * Exceptions (MCG_LMCE_P bit of the supported MCE capabilities).
 * Always false when QEMU is built without KVM support.
 */
static bool lmce_supported(void)
{
    uint64_t mce_cap = 0;

#ifdef CONFIG_KVM
    if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
        return false;
    }
#endif

    return !!(mce_cap & MCG_LMCE_P);
}
2941 | |
2942 | #define CPUID_MODEL_ID_SZ 48 |
2943 | |
2944 | /** |
2945 | * cpu_x86_fill_model_id: |
2946 | * Get CPUID model ID string from host CPU. |
2947 | * |
2948 | * @str should have at least CPUID_MODEL_ID_SZ bytes |
2949 | * |
2950 | * The function does NOT add a null terminator to the string |
2951 | * automatically. |
2952 | */ |
2953 | static int cpu_x86_fill_model_id(char *str) |
2954 | { |
2955 | uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; |
2956 | int i; |
2957 | |
2958 | for (i = 0; i < 3; i++) { |
2959 | host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx); |
2960 | memcpy(str + i * 16 + 0, &eax, 4); |
2961 | memcpy(str + i * 16 + 4, &ebx, 4); |
2962 | memcpy(str + i * 16 + 8, &ecx, 4); |
2963 | memcpy(str + i * 16 + 12, &edx, 4); |
2964 | } |
2965 | return 0; |
2966 | } |
2967 | |
/* Extra qdev properties specific to the "max" CPU model */
static Property max_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable" , X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info" , X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
2973 | |
/* Class initializer for the "max" CPU model */
static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    /* NOTE(review): ordering appears to control the sort position in
     * CPU model listings — confirm against the listing code. */
    xcc->ordering = 9;

    xcc->model_description =
        "Enables all features supported by the accelerator in the current host" ;

    dc->props = max_x86_cpu_properties;
}
2986 | |
/*
 * Instance initializer for the "max" CPU model.
 *
 * When the accelerator exposes host CPUID (KVM/HVF), copy the host's
 * vendor/family/model/stepping/model-id and minimum CPUID levels into
 * the new CPU; otherwise (TCG) use a fixed synthetic identity.
 */
static void max_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->max_features = true;

    if (accel_uses_host_cpuid()) {
        char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
        char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
        int family, model, stepping;

        host_vendor_fms(vendor, &family, &model, &stepping);
        /* model_id[] is zero-initialized above, so the copied 48 bytes
         * are always NUL-terminated by the extra byte. */
        cpu_x86_fill_model_id(model_id);

        object_property_set_str(OBJECT(cpu), vendor, "vendor" , &error_abort);
        object_property_set_int(OBJECT(cpu), family, "family" , &error_abort);
        object_property_set_int(OBJECT(cpu), model, "model" , &error_abort);
        object_property_set_int(OBJECT(cpu), stepping, "stepping" ,
                                &error_abort);
        object_property_set_str(OBJECT(cpu), model_id, "model-id" ,
                                &error_abort);

        if (kvm_enabled()) {
            env->cpuid_min_level =
                kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
        } else {
            /* accel_uses_host_cpuid() && !kvm_enabled() — presumably the
             * HVF accelerator; confirm if more host-cpuid accels appear. */
            env->cpuid_min_level =
                hvf_get_supported_cpuid(0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
        }

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce" , &error_abort);
        }
    } else {
        object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
                                "vendor" , &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "family" , &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "model" , &error_abort);
        object_property_set_int(OBJECT(cpu), 3, "stepping" , &error_abort);
        object_property_set_str(OBJECT(cpu),
                                "QEMU TCG CPU version " QEMU_HW_VERSION,
                                "model-id" , &error_abort);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu" , &error_abort);
}
3046 | |
/* QOM type registration for the "max" CPU model */
static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max" ),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};
3053 | |
3054 | #if defined(CONFIG_KVM) || defined(CONFIG_HVF) |
3055 | static void host_x86_cpu_class_init(ObjectClass *oc, void *data) |
3056 | { |
3057 | X86CPUClass *xcc = X86_CPU_CLASS(oc); |
3058 | |
3059 | xcc->host_cpuid_required = true; |
3060 | xcc->ordering = 8; |
3061 | |
3062 | #if defined(CONFIG_KVM) |
3063 | xcc->model_description = |
3064 | "KVM processor with all supported host features " ; |
3065 | #elif defined(CONFIG_HVF) |
3066 | xcc->model_description = |
3067 | "HVF processor with all supported host features " ; |
3068 | #endif |
3069 | } |
3070 | |
/* QOM type registration for the "host" CPU model (subclass of "max") */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host" ),
    .parent = X86_CPU_TYPE_NAME("max" ),
    .class_init = host_x86_cpu_class_init,
};
3076 | |
3077 | #endif |
3078 | |
3079 | static char *feature_word_description(FeatureWordInfo *f, uint32_t bit) |
3080 | { |
3081 | assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD); |
3082 | |
3083 | switch (f->type) { |
3084 | case CPUID_FEATURE_WORD: |
3085 | { |
3086 | const char *reg = get_register_name_32(f->cpuid.reg); |
3087 | assert(reg); |
3088 | return g_strdup_printf("CPUID.%02XH:%s" , |
3089 | f->cpuid.eax, reg); |
3090 | } |
3091 | case MSR_FEATURE_WORD: |
3092 | return g_strdup_printf("MSR(%02XH)" , |
3093 | f->msr.index); |
3094 | } |
3095 | |
3096 | return NULL; |
3097 | } |
3098 | |
3099 | static void report_unavailable_features(FeatureWord w, uint32_t mask) |
3100 | { |
3101 | FeatureWordInfo *f = &feature_word_info[w]; |
3102 | int i; |
3103 | char *feat_word_str; |
3104 | |
3105 | for (i = 0; i < 32; ++i) { |
3106 | if ((1UL << i) & mask) { |
3107 | feat_word_str = feature_word_description(f, i); |
3108 | warn_report("%s doesn't support requested feature: %s%s%s [bit %d]" , |
3109 | accel_uses_host_cpuid() ? "host" : "TCG" , |
3110 | feat_word_str, |
3111 | f->feat_names[i] ? "." : "" , |
3112 | f->feat_names[i] ? f->feat_names[i] : "" , i); |
3113 | g_free(feat_word_str); |
3114 | } |
3115 | } |
3116 | } |
3117 | |
3118 | static void x86_cpuid_version_get_family(Object *obj, Visitor *v, |
3119 | const char *name, void *opaque, |
3120 | Error **errp) |
3121 | { |
3122 | X86CPU *cpu = X86_CPU(obj); |
3123 | CPUX86State *env = &cpu->env; |
3124 | int64_t value; |
3125 | |
3126 | value = (env->cpuid_version >> 8) & 0xf; |
3127 | if (value == 0xf) { |
3128 | value += (env->cpuid_version >> 20) & 0xff; |
3129 | } |
3130 | visit_type_int(v, name, &value, errp); |
3131 | } |
3132 | |
/*
 * QOM setter for "family": encode the value into env->cpuid_version,
 * using the extended-family field when the value exceeds 0xf.
 * Accepts 0 .. 0xff + 0xf (base + extended field range).
 */
static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "" ,
                   name ? name : "null" , value, min, max);
        return;
    }

    /* Clear both the base family (bits 11..8) and extended family
     * (bits 27..20) fields before re-encoding. */
    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        /* Base family saturates at 0xf; remainder goes to ext family */
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}
3162 | |
3163 | static void x86_cpuid_version_get_model(Object *obj, Visitor *v, |
3164 | const char *name, void *opaque, |
3165 | Error **errp) |
3166 | { |
3167 | X86CPU *cpu = X86_CPU(obj); |
3168 | CPUX86State *env = &cpu->env; |
3169 | int64_t value; |
3170 | |
3171 | value = (env->cpuid_version >> 4) & 0xf; |
3172 | value |= ((env->cpuid_version >> 16) & 0xf) << 4; |
3173 | visit_type_int(v, name, &value, errp); |
3174 | } |
3175 | |
/*
 * QOM setter for "model": split the value across the base model
 * (bits 7..4) and extended model (bits 19..16) fields of
 * env->cpuid_version.  Accepts 0 .. 0xff.
 */
static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "" ,
                   name ? name : "null" , value, min, max);
        return;
    }

    /* Clear both model fields, then low nibble -> bits 7..4,
     * high nibble -> bits 19..16 */
    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}
3201 | |
3202 | static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v, |
3203 | const char *name, void *opaque, |
3204 | Error **errp) |
3205 | { |
3206 | X86CPU *cpu = X86_CPU(obj); |
3207 | CPUX86State *env = &cpu->env; |
3208 | int64_t value; |
3209 | |
3210 | value = env->cpuid_version & 0xf; |
3211 | visit_type_int(v, name, &value, errp); |
3212 | } |
3213 | |
/*
 * QOM setter for "stepping": store the value in the low nibble of
 * env->cpuid_version.  Accepts 0 .. 0xf.
 */
static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "" ,
                   name ? name : "null" , value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf;
    env->cpuid_version |= value & 0xf;
}
3239 | |
/*
 * QOM getter for "vendor": render the three vendor words as a
 * NUL-terminated string.  Caller must g_free() the result.
 */
static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;

    value = g_malloc(CPUID_VENDOR_SZ + 1);
    x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
                             env->cpuid_vendor3);
    return value;
}
3251 | |
3252 | static void x86_cpuid_set_vendor(Object *obj, const char *value, |
3253 | Error **errp) |
3254 | { |
3255 | X86CPU *cpu = X86_CPU(obj); |
3256 | CPUX86State *env = &cpu->env; |
3257 | int i; |
3258 | |
3259 | if (strlen(value) != CPUID_VENDOR_SZ) { |
3260 | error_setg(errp, QERR_PROPERTY_VALUE_BAD, "" , "vendor" , value); |
3261 | return; |
3262 | } |
3263 | |
3264 | env->cpuid_vendor1 = 0; |
3265 | env->cpuid_vendor2 = 0; |
3266 | env->cpuid_vendor3 = 0; |
3267 | for (i = 0; i < 4; i++) { |
3268 | env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i); |
3269 | env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i); |
3270 | env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i); |
3271 | } |
3272 | } |
3273 | |
3274 | static char *x86_cpuid_get_model_id(Object *obj, Error **errp) |
3275 | { |
3276 | X86CPU *cpu = X86_CPU(obj); |
3277 | CPUX86State *env = &cpu->env; |
3278 | char *value; |
3279 | int i; |
3280 | |
3281 | value = g_malloc(48 + 1); |
3282 | for (i = 0; i < 48; i++) { |
3283 | value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3)); |
3284 | } |
3285 | value[48] = '\0'; |
3286 | return value; |
3287 | } |
3288 | |
3289 | static void x86_cpuid_set_model_id(Object *obj, const char *model_id, |
3290 | Error **errp) |
3291 | { |
3292 | X86CPU *cpu = X86_CPU(obj); |
3293 | CPUX86State *env = &cpu->env; |
3294 | int c, len, i; |
3295 | |
3296 | if (model_id == NULL) { |
3297 | model_id = "" ; |
3298 | } |
3299 | len = strlen(model_id); |
3300 | memset(env->cpuid_model, 0, 48); |
3301 | for (i = 0; i < 48; i++) { |
3302 | if (i >= len) { |
3303 | c = '\0'; |
3304 | } else { |
3305 | c = (uint8_t)model_id[i]; |
3306 | } |
3307 | env->cpuid_model[i >> 2] |= c << (8 * (i & 3)); |
3308 | } |
3309 | } |
3310 | |
/*
 * QOM getter for "tsc-frequency": report the stored kHz value scaled
 * to Hz.
 */
static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value;

    value = cpu->env.tsc_khz * 1000;
    visit_type_int(v, name, &value, errp);
}
3320 | |
/*
 * QOM setter for "tsc-frequency": accepts a value in Hz and stores it
 * in kHz (integer division truncates sub-kHz remainders).
 */
static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "" ,
                   name ? name : "null" , value, min, max);
        return;
    }

    /* user_tsc_khz remembers that the value was explicitly set */
    cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
}
3343 | |
/* Generic getter for "feature-words" and "filtered-features" properties.
 *
 * @opaque points at the uint32_t feature-word array to report.  The
 * QAPI list is built entirely on the stack (list_entries/word_infos),
 * which is safe because the visitor consumes it before this function
 * returns.
 */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        /*
         * We didn't have MSR features when "feature-words" was
         * introduced. Therefore skipped other type entries.
         */
        if (wi->type != CPUID_FEATURE_WORD) {
            continue;
        }
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid.eax;
        qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid.ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words" , &list, errp);
}
3379 | |
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 * Modifies @s in place.
 */
static inline void feat2prop(char *s)
{
    char *p;

    for (p = strchr(s, '_'); p != NULL; p = strchr(p + 1, '_')) {
        *p = '-';
    }
}
3389 | |
/* Return the feature property name for a feature flag bit, or NULL if
 * the bit has no named property.
 */
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
    /* XSAVE components are automatically enabled by other features,
     * so return the original feature name instead
     */
    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
        /* Map the HI word's bits to component indexes 32..63 */
        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;

        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            /* Redirect to the feature word/bit that enables this
             * save area (lowest set bit of its mask) */
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(bitnr < 32);
    assert(w < FEATURE_WORDS);
    return feature_word_info[w].feat_names[bitnr];
}
3410 | |
/* Compatibility hack to maintain the legacy +-feat semantics,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
3416 | static GList *plus_features, *minus_features; |
3417 | |
3418 | static gint compare_string(gconstpointer a, gconstpointer b) |
3419 | { |
3420 | return g_strcmp0(a, b); |
3421 | } |
3422 | |
/* Parse "+feature,-feature,feature=foo" CPU feature string
 *
 * Runs at most once per process (guarded by cpu_globals_initialized):
 * each key=value pair is registered as a global property on @typename,
 * so it is applied when the CPU object is created later.
 * @features is modified in place by strtok() and must be writable.
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    /* Only the first feature string parsed takes effect */
    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    for (featurestr = strtok(features, "," );
         featurestr;
         featurestr = strtok(NULL, "," )) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            /* legacy "+feat": remembered and applied later as feat=on */
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            /* legacy "-feat": remembered and applied later as feat=off */
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        /* Split "key=value" in place; bare "feat" means "feat=on" */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on" ;
        }

        feat2prop(featurestr);
        name = featurestr;

        /* Warn when the same feature appears in both +/- and = forms */
        if (g_list_find_custom(plus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"+%s\" and \"%s=%s\"" ,
                        name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"-%s\" and \"%s=%s\"" ,
                        name, name, val);
            ambiguous = true;
        }

        /* Special case: */
        if (!strcmp(name, "tsc-freq" )) {
            int ret;
            uint64_t tsc_freq;

            /* accept metric size suffixes and convert to a plain number */
            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s" , val);
                return;
            }
            /* num is copied below by g_strdup(val), so the stack
             * buffer does not escape this iteration */
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency" ;
        }

        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        warn_report("Compatibility of ambiguous CPU model "
                    "strings won't be kept on future QEMU versions" );
    }
}
3512 | |
3513 | static void x86_cpu_expand_features(X86CPU *cpu, Error **errp); |
3514 | static int x86_cpu_filter_features(X86CPU *cpu); |
3515 | |
3516 | /* Build a list with the name of all features on a feature word array */ |
3517 | static void x86_cpu_list_feature_names(FeatureWordArray features, |
3518 | strList **feat_names) |
3519 | { |
3520 | FeatureWord w; |
3521 | strList **next = feat_names; |
3522 | |
3523 | for (w = 0; w < FEATURE_WORDS; w++) { |
3524 | uint32_t filtered = features[w]; |
3525 | int i; |
3526 | for (i = 0; i < 32; i++) { |
3527 | if (filtered & (1UL << i)) { |
3528 | strList *new = g_new0(strList, 1); |
3529 | new->value = g_strdup(x86_cpu_feature_name(w, i)); |
3530 | *next = new; |
3531 | next = &new->next; |
3532 | } |
3533 | } |
3534 | } |
3535 | } |
3536 | |
3537 | static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v, |
3538 | const char *name, void *opaque, |
3539 | Error **errp) |
3540 | { |
3541 | X86CPU *xc = X86_CPU(obj); |
3542 | strList *result = NULL; |
3543 | |
3544 | x86_cpu_list_feature_names(xc->filtered_features, &result); |
3545 | visit_type_strList(v, "unavailable-features" , &result, errp); |
3546 | } |
3547 | |
3548 | /* Check for missing features that may prevent the CPU class from |
3549 | * running using the current machine and accelerator. |
3550 | */ |
3551 | static void x86_cpu_class_check_missing_features(X86CPUClass *xcc, |
3552 | strList **missing_feats) |
3553 | { |
3554 | X86CPU *xc; |
3555 | Error *err = NULL; |
3556 | strList **next = missing_feats; |
3557 | |
3558 | if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) { |
3559 | strList *new = g_new0(strList, 1); |
3560 | new->value = g_strdup("kvm" ); |
3561 | *missing_feats = new; |
3562 | return; |
3563 | } |
3564 | |
3565 | xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc)))); |
3566 | |
3567 | x86_cpu_expand_features(xc, &err); |
3568 | if (err) { |
3569 | /* Errors at x86_cpu_expand_features should never happen, |
3570 | * but in case it does, just report the model as not |
3571 | * runnable at all using the "type" property. |
3572 | */ |
3573 | strList *new = g_new0(strList, 1); |
3574 | new->value = g_strdup("type" ); |
3575 | *next = new; |
3576 | next = &new->next; |
3577 | } |
3578 | |
3579 | x86_cpu_filter_features(xc); |
3580 | |
3581 | x86_cpu_list_feature_names(xc->filtered_features, next); |
3582 | |
3583 | object_unref(OBJECT(xc)); |
3584 | } |
3585 | |
/* Print all cpuid feature names in featureset
 *
 * Names are printed space-separated, wrapping to a new line before the
 * running line length would reach 75 columns.
 */
static void listflags(GList *features)
{
    size_t len = 0;   /* length printed on the current line so far */
    GList *tmp;

    for (tmp = features; tmp; tmp = tmp->next) {
        const char *name = tmp->data;
        /* wrap before this name would overflow the line */
        if ((len + strlen(name) + 1) >= 75) {
            qemu_printf("\n" );
            len = 0;
        }
        /* NOTE(review): both branches of this ternary appear identical
         * (a single space); upstream uses a wider indent for the first
         * item on a line — confirm the intended separator strings.
         */
        qemu_printf("%s%s" , len == 0 ? " " : " " , name);
        len += strlen(name) + 1;
    }
    qemu_printf("\n" );
}
3604 | |
3605 | /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */ |
3606 | static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b) |
3607 | { |
3608 | ObjectClass *class_a = (ObjectClass *)a; |
3609 | ObjectClass *class_b = (ObjectClass *)b; |
3610 | X86CPUClass *cc_a = X86_CPU_CLASS(class_a); |
3611 | X86CPUClass *cc_b = X86_CPU_CLASS(class_b); |
3612 | char *name_a, *name_b; |
3613 | int ret; |
3614 | |
3615 | if (cc_a->ordering != cc_b->ordering) { |
3616 | ret = cc_a->ordering - cc_b->ordering; |
3617 | } else { |
3618 | name_a = x86_cpu_class_get_model_name(cc_a); |
3619 | name_b = x86_cpu_class_get_model_name(cc_b); |
3620 | ret = strcmp(name_a, name_b); |
3621 | g_free(name_a); |
3622 | g_free(name_b); |
3623 | } |
3624 | return ret; |
3625 | } |
3626 | |
3627 | static GSList *get_sorted_cpu_model_list(void) |
3628 | { |
3629 | GSList *list = object_class_get_list(TYPE_X86_CPU, false); |
3630 | list = g_slist_sort(list, x86_cpu_list_compare); |
3631 | return list; |
3632 | } |
3633 | |
3634 | static char *x86_cpu_class_get_model_id(X86CPUClass *xc) |
3635 | { |
3636 | Object *obj = object_new(object_class_get_name(OBJECT_CLASS(xc))); |
3637 | char *r = object_property_get_str(obj, "model-id" , &error_abort); |
3638 | object_unref(obj); |
3639 | return r; |
3640 | } |
3641 | |
3642 | static char *x86_cpu_class_get_alias_of(X86CPUClass *cc) |
3643 | { |
3644 | X86CPUVersion version; |
3645 | |
3646 | if (!cc->model || !cc->model->is_alias) { |
3647 | return NULL; |
3648 | } |
3649 | version = x86_cpu_model_resolve_version(cc->model); |
3650 | if (version <= 0) { |
3651 | return NULL; |
3652 | } |
3653 | return x86_cpu_versioned_model_name(cc->model->cpudef, version); |
3654 | } |
3655 | |
3656 | static void x86_cpu_list_entry(gpointer data, gpointer user_data) |
3657 | { |
3658 | ObjectClass *oc = data; |
3659 | X86CPUClass *cc = X86_CPU_CLASS(oc); |
3660 | char *name = x86_cpu_class_get_model_name(cc); |
3661 | char *desc = g_strdup(cc->model_description); |
3662 | char *alias_of = x86_cpu_class_get_alias_of(cc); |
3663 | |
3664 | if (!desc && alias_of) { |
3665 | if (cc->model && cc->model->version == CPU_VERSION_AUTO) { |
3666 | desc = g_strdup("(alias configured by machine type)" ); |
3667 | } else { |
3668 | desc = g_strdup_printf("(alias of %s)" , alias_of); |
3669 | } |
3670 | } |
3671 | if (!desc) { |
3672 | desc = x86_cpu_class_get_model_id(cc); |
3673 | } |
3674 | |
3675 | qemu_printf("x86 %-20s %-48s\n" , name, desc); |
3676 | g_free(name); |
3677 | g_free(desc); |
3678 | g_free(alias_of); |
3679 | } |
3680 | |
3681 | /* list available CPU models and flags */ |
3682 | void x86_cpu_list(void) |
3683 | { |
3684 | int i, j; |
3685 | GSList *list; |
3686 | GList *names = NULL; |
3687 | |
3688 | qemu_printf("Available CPUs:\n" ); |
3689 | list = get_sorted_cpu_model_list(); |
3690 | g_slist_foreach(list, x86_cpu_list_entry, NULL); |
3691 | g_slist_free(list); |
3692 | |
3693 | names = NULL; |
3694 | for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) { |
3695 | FeatureWordInfo *fw = &feature_word_info[i]; |
3696 | for (j = 0; j < 32; j++) { |
3697 | if (fw->feat_names[j]) { |
3698 | names = g_list_append(names, (gpointer)fw->feat_names[j]); |
3699 | } |
3700 | } |
3701 | } |
3702 | |
3703 | names = g_list_sort(names, (GCompareFunc)strcmp); |
3704 | |
3705 | qemu_printf("\nRecognized CPUID flags:\n" ); |
3706 | listflags(names); |
3707 | qemu_printf("\n" ); |
3708 | g_list_free(names); |
3709 | } |
3710 | |
3711 | static void x86_cpu_definition_entry(gpointer data, gpointer user_data) |
3712 | { |
3713 | ObjectClass *oc = data; |
3714 | X86CPUClass *cc = X86_CPU_CLASS(oc); |
3715 | CpuDefinitionInfoList **cpu_list = user_data; |
3716 | CpuDefinitionInfoList *entry; |
3717 | CpuDefinitionInfo *info; |
3718 | |
3719 | info = g_malloc0(sizeof(*info)); |
3720 | info->name = x86_cpu_class_get_model_name(cc); |
3721 | x86_cpu_class_check_missing_features(cc, &info->unavailable_features); |
3722 | info->has_unavailable_features = true; |
3723 | info->q_typename = g_strdup(object_class_get_name(oc)); |
3724 | info->migration_safe = cc->migration_safe; |
3725 | info->has_migration_safe = true; |
3726 | info->q_static = cc->static_model; |
3727 | /* |
3728 | * Old machine types won't report aliases, so that alias translation |
3729 | * doesn't break compatibility with previous QEMU versions. |
3730 | */ |
3731 | if (default_cpu_version != CPU_VERSION_LEGACY) { |
3732 | info->alias_of = x86_cpu_class_get_alias_of(cc); |
3733 | info->has_alias_of = !!info->alias_of; |
3734 | } |
3735 | |
3736 | entry = g_malloc0(sizeof(*entry)); |
3737 | entry->value = info; |
3738 | entry->next = *cpu_list; |
3739 | *cpu_list = entry; |
3740 | } |
3741 | |
3742 | CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) |
3743 | { |
3744 | CpuDefinitionInfoList *cpu_list = NULL; |
3745 | GSList *list = get_sorted_cpu_model_list(); |
3746 | g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list); |
3747 | g_slist_free(list); |
3748 | return cpu_list; |
3749 | } |
3750 | |
/* Return the feature bits the current accelerator supports for word @w.
 *
 * KVM and HVF are queried at runtime; TCG uses the static tcg_features
 * mask; with no recognized accelerator all bits are reported supported.
 * When @migratable_only is true, bits not flagged migratable are masked.
 */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r = 0;

    if (kvm_enabled()) {
        switch (wi->type) {
        case CPUID_FEATURE_WORD:
            r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
                                                        wi->cpuid.ecx,
                                                        wi->cpuid.reg);
            break;
        case MSR_FEATURE_WORD:
            r = kvm_arch_get_supported_msr_feature(kvm_state,
                        wi->msr.index);
            break;
        }
    } else if (hvf_enabled()) {
        /* HVF only supports CPUID-based feature words, not MSR ones */
        if (wi->type != CPUID_FEATURE_WORD) {
            return 0;
        }
        r = hvf_get_supported_cpuid(wi->cpuid.eax,
                                    wi->cpuid.ecx,
                                    wi->cpuid.reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        /* no accelerator-specific filtering available: allow everything */
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}
3786 | |
3787 | static void x86_cpu_report_filtered_features(X86CPU *cpu) |
3788 | { |
3789 | FeatureWord w; |
3790 | |
3791 | for (w = 0; w < FEATURE_WORDS; w++) { |
3792 | report_unavailable_features(w, cpu->filtered_features[w]); |
3793 | } |
3794 | } |
3795 | |
3796 | static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props) |
3797 | { |
3798 | PropValue *pv; |
3799 | for (pv = props; pv->prop; pv++) { |
3800 | if (!pv->value) { |
3801 | continue; |
3802 | } |
3803 | object_property_parse(OBJECT(cpu), pv->value, pv->prop, |
3804 | &error_abort); |
3805 | } |
3806 | } |
3807 | |
/* Apply properties for the CPU model version specified in model */
static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model)
{
    const X86CPUVersionDefinition *vdef;
    X86CPUVersion version = x86_cpu_model_resolve_version(model);

    /* Legacy machine types get the bare model, no version properties */
    if (version == CPU_VERSION_LEGACY) {
        return;
    }

    /* Version properties are cumulative: apply the props of every
     * version up to and including the requested one.
     */
    for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) {
        PropValue *p;

        for (p = vdef->props; p && p->prop; p++) {
            object_property_parse(OBJECT(cpu), p->value, p->prop,
                                  &error_abort);
        }

        if (vdef->version == version) {
            break;
        }
    }

    /*
     * If we reached the end of the list, version number was invalid
     */
    assert(vdef->version == version);
}
3836 | |
/* Load data from X86CPUDefinition into a X86CPU object
 *
 * Copies the model table entry (@model->cpudef) into @cpu via QOM
 * properties and the feature-word array, then applies accelerator
 * default props and the versioned model props. Errors from the
 * property setters land in @errp.
 */
static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model, Error **errp)
{
    X86CPUDefinition *def = model->cpudef;
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /*NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_uint(OBJECT(cpu), def->level, "min-level" , errp);
    object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel" , errp);

    object_property_set_int(OBJECT(cpu), def->family, "family" , errp);
    object_property_set_int(OBJECT(cpu), def->model, "model" , errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping" , errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id" , errp);
    /* feature words are copied directly, not via properties */
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* legacy-cache defaults to 'off' if CPU model provides cache info */
    cpu->legacy_cache = !def->cache_info;

    /* Special cases not set in the X86CPUDefinition structs: */
    /* TODO: in-kernel irqchip for hvf */
    if (kvm_enabled()) {
        /* without an in-kernel irqchip, x2apic is defaulted to off */
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic" , "off" );
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    /* guests always see the hypervisor bit in CPUID[1].ECX */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (accel_uses_host_cpuid()) {
        /* KVM/HVF: default to the host's vendor string */
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor" , errp);

    x86_cpu_apply_version_props(cpu, model);
}
3900 | |
3901 | #ifndef CONFIG_USER_ONLY |
3902 | /* Return a QDict containing keys for all properties that can be included |
3903 | * in static expansion of CPU models. All properties set by x86_cpu_load_model() |
3904 | * must be included in the dictionary. |
3905 | */ |
3906 | static QDict *x86_cpu_static_props(void) |
3907 | { |
3908 | FeatureWord w; |
3909 | int i; |
3910 | static const char *props[] = { |
3911 | "min-level" , |
3912 | "min-xlevel" , |
3913 | "family" , |
3914 | "model" , |
3915 | "stepping" , |
3916 | "model-id" , |
3917 | "vendor" , |
3918 | "lmce" , |
3919 | NULL, |
3920 | }; |
3921 | static QDict *d; |
3922 | |
3923 | if (d) { |
3924 | return d; |
3925 | } |
3926 | |
3927 | d = qdict_new(); |
3928 | for (i = 0; props[i]; i++) { |
3929 | qdict_put_null(d, props[i]); |
3930 | } |
3931 | |
3932 | for (w = 0; w < FEATURE_WORDS; w++) { |
3933 | FeatureWordInfo *fi = &feature_word_info[w]; |
3934 | int bit; |
3935 | for (bit = 0; bit < 32; bit++) { |
3936 | if (!fi->feat_names[bit]) { |
3937 | continue; |
3938 | } |
3939 | qdict_put_null(d, fi->feat_names[bit]); |
3940 | } |
3941 | } |
3942 | |
3943 | return d; |
3944 | } |
3945 | |
3946 | /* Add an entry to @props dict, with the value for property. */ |
3947 | static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop) |
3948 | { |
3949 | QObject *value = object_property_get_qobject(OBJECT(cpu), prop, |
3950 | &error_abort); |
3951 | |
3952 | qdict_put_obj(props, prop, value); |
3953 | } |
3954 | |
3955 | /* Convert CPU model data from X86CPU object to a property dictionary |
3956 | * that can recreate exactly the same CPU model. |
3957 | */ |
3958 | static void x86_cpu_to_dict(X86CPU *cpu, QDict *props) |
3959 | { |
3960 | QDict *sprops = x86_cpu_static_props(); |
3961 | const QDictEntry *e; |
3962 | |
3963 | for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) { |
3964 | const char *prop = qdict_entry_key(e); |
3965 | x86_cpu_expand_prop(cpu, props, prop); |
3966 | } |
3967 | } |
3968 | |
3969 | /* Convert CPU model data from X86CPU object to a property dictionary |
3970 | * that can recreate exactly the same CPU model, including every |
3971 | * writeable QOM property. |
3972 | */ |
3973 | static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props) |
3974 | { |
3975 | ObjectPropertyIterator iter; |
3976 | ObjectProperty *prop; |
3977 | |
3978 | object_property_iter_init(&iter, OBJECT(cpu)); |
3979 | while ((prop = object_property_iter_next(&iter))) { |
3980 | /* skip read-only or write-only properties */ |
3981 | if (!prop->get || !prop->set) { |
3982 | continue; |
3983 | } |
3984 | |
3985 | /* "hotplugged" is the only property that is configurable |
3986 | * on the command-line but will be set differently on CPUs |
3987 | * created using "-cpu ... -smp ..." and by CPUs created |
3988 | * on the fly by x86_cpu_from_model() for querying. Skip it. |
3989 | */ |
3990 | if (!strcmp(prop->name, "hotplugged" )) { |
3991 | continue; |
3992 | } |
3993 | x86_cpu_expand_prop(cpu, props, prop->name); |
3994 | } |
3995 | } |
3996 | |
3997 | static void object_apply_props(Object *obj, QDict *props, Error **errp) |
3998 | { |
3999 | const QDictEntry *prop; |
4000 | Error *err = NULL; |
4001 | |
4002 | for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) { |
4003 | object_property_set_qobject(obj, qdict_entry_value(prop), |
4004 | qdict_entry_key(prop), &err); |
4005 | if (err) { |
4006 | break; |
4007 | } |
4008 | } |
4009 | |
4010 | error_propagate(errp, err); |
4011 | } |
4012 | |
/* Create X86CPU object according to model+props specification
 *
 * Returns a new reference the caller must unref, or NULL on error
 * (with @errp set).
 */
static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
{
    X86CPU *xc = NULL;
    X86CPUClass *xcc;
    Error *err = NULL;

    xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
    if (xcc == NULL) {
        error_setg(&err, "CPU model '%s' not found" , model);
        goto out;
    }

    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
    if (props) {
        /* apply the caller-supplied property overrides */
        object_apply_props(OBJECT(xc), props, &err);
        if (err) {
            goto out;
        }
    }

    x86_cpu_expand_features(xc, &err);
    if (err) {
        goto out;
    }

out:
    if (err) {
        error_propagate(errp, err);
        /* NOTE(review): xc is NULL on the model-not-found path; this
         * relies on object_unref() tolerating NULL — confirm.
         */
        object_unref(OBJECT(xc));
        xc = NULL;
    }
    return xc;
}
4047 | |
/* QMP handler: expand a CPU model (+props) into an explicit property set.
 * STATIC expansion is relative to the "base" model; FULL expansion keeps
 * the original model name and adds every writable property.
 */
CpuModelExpansionInfo *
qmp_query_cpu_model_expansion(CpuModelExpansionType type,
                                                      CpuModelInfo *model,
                                                      Error **errp)
{
    X86CPU *xc = NULL;
    Error *err = NULL;
    CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
    QDict *props = NULL;
    const char *base_name;

    /* build a temporary CPU with the requested model+props applied */
    xc = x86_cpu_from_model(model->name,
                            model->has_props ?
                                qobject_to(QDict, model->props) :
                                NULL, &err);
    if (err) {
        goto out;
    }

    props = qdict_new();
    ret->model = g_new0(CpuModelInfo, 1);
    ret->model->props = QOBJECT(props);
    ret->model->has_props = true;

    switch (type) {
    case CPU_MODEL_EXPANSION_TYPE_STATIC:
        /* Static expansion will be based on "base" only */
        base_name = "base" ;
        x86_cpu_to_dict(xc, props);
        break;
    case CPU_MODEL_EXPANSION_TYPE_FULL:
        /* As we don't return every single property, full expansion needs
         * to keep the original model name+props, and add extra
         * properties on top of that.
         */
        base_name = model->name;
        x86_cpu_to_dict_full(xc, props);
        break;
    default:
        error_setg(&err, "Unsupported expansion type" );
        goto out;
    }

    /* NOTE(review): this second x86_cpu_to_dict() call looks redundant —
     * the STATIC case already called it, and x86_cpu_to_dict_full()
     * covers the same keys. Confirm whether it can be dropped.
     */
    x86_cpu_to_dict(xc, props);

    ret->model->name = g_strdup(base_name);

out:
    /* NOTE(review): xc may be NULL on the error path; relies on
     * object_unref() accepting NULL — confirm.
     */
    object_unref(OBJECT(xc));
    if (err) {
        error_propagate(errp, err);
        qapi_free_CpuModelExpansionInfo(ret);
        ret = NULL;
    }
    return ret;
}
4104 | #endif /* !CONFIG_USER_ONLY */ |
4105 | |
4106 | static gchar *x86_gdb_arch_name(CPUState *cs) |
4107 | { |
4108 | #ifdef TARGET_X86_64 |
4109 | return g_strdup("i386:x86-64" ); |
4110 | #else |
4111 | return g_strdup("i386" ); |
4112 | #endif |
4113 | } |
4114 | |
4115 | static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data) |
4116 | { |
4117 | X86CPUModel *model = data; |
4118 | X86CPUClass *xcc = X86_CPU_CLASS(oc); |
4119 | |
4120 | xcc->model = model; |
4121 | xcc->migration_safe = true; |
4122 | } |
4123 | |
4124 | static void x86_register_cpu_model_type(const char *name, X86CPUModel *model) |
4125 | { |
4126 | char *typename = x86_cpu_type_name(name); |
4127 | TypeInfo ti = { |
4128 | .name = typename, |
4129 | .parent = TYPE_X86_CPU, |
4130 | .class_init = x86_cpu_cpudef_class_init, |
4131 | .class_data = model, |
4132 | }; |
4133 | |
4134 | type_register(&ti); |
4135 | g_free(typename); |
4136 | } |
4137 | |
4138 | static void x86_register_cpudef_types(X86CPUDefinition *def) |
4139 | { |
4140 | X86CPUModel *m; |
4141 | const X86CPUVersionDefinition *vdef; |
4142 | char *name; |
4143 | |
4144 | /* AMD aliases are handled at runtime based on CPUID vendor, so |
4145 | * they shouldn't be set on the CPU model table. |
4146 | */ |
4147 | assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES)); |
4148 | /* catch mistakes instead of silently truncating model_id when too long */ |
4149 | assert(def->model_id && strlen(def->model_id) <= 48); |
4150 | |
4151 | /* Unversioned model: */ |
4152 | m = g_new0(X86CPUModel, 1); |
4153 | m->cpudef = def; |
4154 | m->version = CPU_VERSION_AUTO; |
4155 | m->is_alias = true; |
4156 | x86_register_cpu_model_type(def->name, m); |
4157 | |
4158 | /* Versioned models: */ |
4159 | |
4160 | for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) { |
4161 | X86CPUModel *m = g_new0(X86CPUModel, 1); |
4162 | m->cpudef = def; |
4163 | m->version = vdef->version; |
4164 | name = x86_cpu_versioned_model_name(def, vdef->version); |
4165 | x86_register_cpu_model_type(name, m); |
4166 | g_free(name); |
4167 | |
4168 | if (vdef->alias) { |
4169 | X86CPUModel *am = g_new0(X86CPUModel, 1); |
4170 | am->cpudef = def; |
4171 | am->version = vdef->version; |
4172 | am->is_alias = true; |
4173 | x86_register_cpu_model_type(vdef->alias, am); |
4174 | } |
4175 | } |
4176 | |
4177 | } |
4178 | |
4179 | #if !defined(CONFIG_USER_ONLY) |
4180 | |
void cpu_clear_apic_feature(CPUX86State *env)
{
    /* Mask the APIC flag out of CPUID[1].EDX */
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
4185 | |
4186 | #endif /* !CONFIG_USER_ONLY */ |
4187 | |
4188 | void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, |
4189 | uint32_t *eax, uint32_t *ebx, |
4190 | uint32_t *ecx, uint32_t *edx) |
4191 | { |
4192 | X86CPU *cpu = env_archcpu(env); |
4193 | CPUState *cs = env_cpu(env); |
4194 | uint32_t die_offset; |
4195 | uint32_t limit; |
4196 | uint32_t signature[3]; |
4197 | |
4198 | /* Calculate & apply limits for different index ranges */ |
4199 | if (index >= 0xC0000000) { |
4200 | limit = env->cpuid_xlevel2; |
4201 | } else if (index >= 0x80000000) { |
4202 | limit = env->cpuid_xlevel; |
4203 | } else if (index >= 0x40000000) { |
4204 | limit = 0x40000001; |
4205 | } else { |
4206 | limit = env->cpuid_level; |
4207 | } |
4208 | |
4209 | if (index > limit) { |
4210 | /* Intel documentation states that invalid EAX input will |
4211 | * return the same information as EAX=cpuid_level |
4212 | * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID) |
4213 | */ |
4214 | index = env->cpuid_level; |
4215 | } |
4216 | |
4217 | switch(index) { |
4218 | case 0: |
4219 | *eax = env->cpuid_level; |
4220 | *ebx = env->cpuid_vendor1; |
4221 | *edx = env->cpuid_vendor2; |
4222 | *ecx = env->cpuid_vendor3; |
4223 | break; |
4224 | case 1: |
4225 | *eax = env->cpuid_version; |
4226 | *ebx = (cpu->apic_id << 24) | |
4227 | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */ |
4228 | *ecx = env->features[FEAT_1_ECX]; |
4229 | if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) { |
4230 | *ecx |= CPUID_EXT_OSXSAVE; |
4231 | } |
4232 | *edx = env->features[FEAT_1_EDX]; |
4233 | if (cs->nr_cores * cs->nr_threads > 1) { |
4234 | *ebx |= (cs->nr_cores * cs->nr_threads) << 16; |
4235 | *edx |= CPUID_HT; |
4236 | } |
4237 | break; |
4238 | case 2: |
4239 | /* cache info: needed for Pentium Pro compatibility */ |
4240 | if (cpu->cache_info_passthrough) { |
4241 | host_cpuid(index, 0, eax, ebx, ecx, edx); |
4242 | break; |
4243 | } |
4244 | *eax = 1; /* Number of CPUID[EAX=2] calls required */ |
4245 | *ebx = 0; |
4246 | if (!cpu->enable_l3_cache) { |
4247 | *ecx = 0; |
4248 | } else { |
4249 | *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache); |
4250 | } |
4251 | *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) | |
4252 | (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) | |
4253 | (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache)); |
4254 | break; |
4255 | case 4: |
4256 | /* cache info: needed for Core compatibility */ |
4257 | if (cpu->cache_info_passthrough) { |
4258 | host_cpuid(index, count, eax, ebx, ecx, edx); |
4259 | /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */ |
4260 | *eax &= ~0xFC000000; |
4261 | if ((*eax & 31) && cs->nr_cores > 1) { |
4262 | *eax |= (cs->nr_cores - 1) << 26; |
4263 | } |
4264 | } else { |
4265 | *eax = 0; |
4266 | switch (count) { |
4267 | case 0: /* L1 dcache info */ |
4268 | encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache, |
4269 | 1, cs->nr_cores, |
4270 | eax, ebx, ecx, edx); |
4271 | break; |
4272 | case 1: /* L1 icache info */ |
4273 | encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache, |
4274 | 1, cs->nr_cores, |
4275 | eax, ebx, ecx, edx); |
4276 | break; |
4277 | case 2: /* L2 cache info */ |
4278 | encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache, |
4279 | cs->nr_threads, cs->nr_cores, |
4280 | eax, ebx, ecx, edx); |
4281 | break; |
4282 | case 3: /* L3 cache info */ |
4283 | die_offset = apicid_die_offset(env->nr_dies, |
4284 | cs->nr_cores, cs->nr_threads); |
4285 | if (cpu->enable_l3_cache) { |
4286 | encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache, |
4287 | (1 << die_offset), cs->nr_cores, |
4288 | eax, ebx, ecx, edx); |
4289 | break; |
4290 | } |
4291 | /* fall through */ |
4292 | default: /* end of info */ |
4293 | *eax = *ebx = *ecx = *edx = 0; |
4294 | break; |
4295 | } |
4296 | } |
4297 | break; |
4298 | case 5: |
4299 | /* MONITOR/MWAIT Leaf */ |
4300 | *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */ |
4301 | *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */ |
4302 | *ecx = cpu->mwait.ecx; /* flags */ |
4303 | *edx = cpu->mwait.edx; /* mwait substates */ |
4304 | break; |
4305 | case 6: |
4306 | /* Thermal and Power Leaf */ |
4307 | *eax = env->features[FEAT_6_EAX]; |
4308 | *ebx = 0; |
4309 | *ecx = 0; |
4310 | *edx = 0; |
4311 | break; |
4312 | case 7: |
4313 | /* Structured Extended Feature Flags Enumeration Leaf */ |
4314 | if (count == 0) { |
4315 | /* Maximum ECX value for sub-leaves */ |
4316 | *eax = env->cpuid_level_func7; |
4317 | *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */ |
4318 | *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */ |
4319 | if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) { |
4320 | *ecx |= CPUID_7_0_ECX_OSPKE; |
4321 | } |
4322 | *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */ |
4323 | } else if (count == 1) { |
4324 | *eax = env->features[FEAT_7_1_EAX]; |
4325 | *ebx = 0; |
4326 | *ecx = 0; |
4327 | *edx = 0; |
4328 | } else { |
4329 | *eax = 0; |
4330 | *ebx = 0; |
4331 | *ecx = 0; |
4332 | *edx = 0; |
4333 | } |
4334 | break; |
4335 | case 9: |
4336 | /* Direct Cache Access Information Leaf */ |
4337 | *eax = 0; /* Bits 0-31 in DCA_CAP MSR */ |
4338 | *ebx = 0; |
4339 | *ecx = 0; |
4340 | *edx = 0; |
4341 | break; |
4342 | case 0xA: |
4343 | /* Architectural Performance Monitoring Leaf */ |
4344 | if (kvm_enabled() && cpu->enable_pmu) { |
4345 | KVMState *s = cs->kvm_state; |
4346 | |
4347 | *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX); |
4348 | *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX); |
4349 | *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX); |
4350 | *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX); |
4351 | } else if (hvf_enabled() && cpu->enable_pmu) { |
4352 | *eax = hvf_get_supported_cpuid(0xA, count, R_EAX); |
4353 | *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX); |
4354 | *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX); |
4355 | *edx = hvf_get_supported_cpuid(0xA, count, R_EDX); |
4356 | } else { |
4357 | *eax = 0; |
4358 | *ebx = 0; |
4359 | *ecx = 0; |
4360 | *edx = 0; |
4361 | } |
4362 | break; |
4363 | case 0xB: |
4364 | /* Extended Topology Enumeration Leaf */ |
4365 | if (!cpu->enable_cpuid_0xb) { |
4366 | *eax = *ebx = *ecx = *edx = 0; |
4367 | break; |
4368 | } |
4369 | |
4370 | *ecx = count & 0xff; |
4371 | *edx = cpu->apic_id; |
4372 | |
4373 | switch (count) { |
4374 | case 0: |
4375 | *eax = apicid_core_offset(env->nr_dies, |
4376 | cs->nr_cores, cs->nr_threads); |
4377 | *ebx = cs->nr_threads; |
4378 | *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; |
4379 | break; |
4380 | case 1: |
4381 | *eax = apicid_pkg_offset(env->nr_dies, |
4382 | cs->nr_cores, cs->nr_threads); |
4383 | *ebx = cs->nr_cores * cs->nr_threads; |
4384 | *ecx |= CPUID_TOPOLOGY_LEVEL_CORE; |
4385 | break; |
4386 | default: |
4387 | *eax = 0; |
4388 | *ebx = 0; |
4389 | *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID; |
4390 | } |
4391 | |
4392 | assert(!(*eax & ~0x1f)); |
4393 | *ebx &= 0xffff; /* The count doesn't need to be reliable. */ |
4394 | break; |
4395 | case 0x1F: |
4396 | /* V2 Extended Topology Enumeration Leaf */ |
4397 | if (env->nr_dies < 2) { |
4398 | *eax = *ebx = *ecx = *edx = 0; |
4399 | break; |
4400 | } |
4401 | |
4402 | *ecx = count & 0xff; |
4403 | *edx = cpu->apic_id; |
4404 | switch (count) { |
4405 | case 0: |
4406 | *eax = apicid_core_offset(env->nr_dies, cs->nr_cores, |
4407 | cs->nr_threads); |
4408 | *ebx = cs->nr_threads; |
4409 | *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; |
4410 | break; |
4411 | case 1: |
4412 | *eax = apicid_die_offset(env->nr_dies, cs->nr_cores, |
4413 | cs->nr_threads); |
4414 | *ebx = cs->nr_cores * cs->nr_threads; |
4415 | *ecx |= CPUID_TOPOLOGY_LEVEL_CORE; |
4416 | break; |
4417 | case 2: |
4418 | *eax = apicid_pkg_offset(env->nr_dies, cs->nr_cores, |
4419 | cs->nr_threads); |
4420 | *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads; |
4421 | *ecx |= CPUID_TOPOLOGY_LEVEL_DIE; |
4422 | break; |
4423 | default: |
4424 | *eax = 0; |
4425 | *ebx = 0; |
4426 | *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID; |
4427 | } |
4428 | assert(!(*eax & ~0x1f)); |
4429 | *ebx &= 0xffff; /* The count doesn't need to be reliable. */ |
4430 | break; |
4431 | case 0xD: { |
4432 | /* Processor Extended State */ |
4433 | *eax = 0; |
4434 | *ebx = 0; |
4435 | *ecx = 0; |
4436 | *edx = 0; |
4437 | if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { |
4438 | break; |
4439 | } |
4440 | |
4441 | if (count == 0) { |
4442 | *ecx = xsave_area_size(x86_cpu_xsave_components(cpu)); |
4443 | *eax = env->features[FEAT_XSAVE_COMP_LO]; |
4444 | *edx = env->features[FEAT_XSAVE_COMP_HI]; |
4445 | *ebx = xsave_area_size(env->xcr0); |
4446 | } else if (count == 1) { |
4447 | *eax = env->features[FEAT_XSAVE]; |
4448 | } else if (count < ARRAY_SIZE(x86_ext_save_areas)) { |
4449 | if ((x86_cpu_xsave_components(cpu) >> count) & 1) { |
4450 | const ExtSaveArea *esa = &x86_ext_save_areas[count]; |
4451 | *eax = esa->size; |
4452 | *ebx = esa->offset; |
4453 | } |
4454 | } |
4455 | break; |
4456 | } |
4457 | case 0x14: { |
4458 | /* Intel Processor Trace Enumeration */ |
4459 | *eax = 0; |
4460 | *ebx = 0; |
4461 | *ecx = 0; |
4462 | *edx = 0; |
4463 | if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) || |
4464 | !kvm_enabled()) { |
4465 | break; |
4466 | } |
4467 | |
4468 | if (count == 0) { |
4469 | *eax = INTEL_PT_MAX_SUBLEAF; |
4470 | *ebx = INTEL_PT_MINIMAL_EBX; |
4471 | *ecx = INTEL_PT_MINIMAL_ECX; |
4472 | } else if (count == 1) { |
4473 | *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM; |
4474 | *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP; |
4475 | } |
4476 | break; |
4477 | } |
4478 | case 0x40000000: |
4479 | /* |
4480 | * CPUID code in kvm_arch_init_vcpu() ignores stuff |
4481 | * set here, but we restrict to TCG none the less. |
4482 | */ |
4483 | if (tcg_enabled() && cpu->expose_tcg) { |
4484 | memcpy(signature, "TCGTCGTCGTCG" , 12); |
4485 | *eax = 0x40000001; |
4486 | *ebx = signature[0]; |
4487 | *ecx = signature[1]; |
4488 | *edx = signature[2]; |
4489 | } else { |
4490 | *eax = 0; |
4491 | *ebx = 0; |
4492 | *ecx = 0; |
4493 | *edx = 0; |
4494 | } |
4495 | break; |
4496 | case 0x40000001: |
4497 | *eax = 0; |
4498 | *ebx = 0; |
4499 | *ecx = 0; |
4500 | *edx = 0; |
4501 | break; |
4502 | case 0x80000000: |
4503 | *eax = env->cpuid_xlevel; |
4504 | *ebx = env->cpuid_vendor1; |
4505 | *edx = env->cpuid_vendor2; |
4506 | *ecx = env->cpuid_vendor3; |
4507 | break; |
4508 | case 0x80000001: |
4509 | *eax = env->cpuid_version; |
4510 | *ebx = 0; |
4511 | *ecx = env->features[FEAT_8000_0001_ECX]; |
4512 | *edx = env->features[FEAT_8000_0001_EDX]; |
4513 | |
4514 | /* The Linux kernel checks for the CMPLegacy bit and |
4515 | * discards multiple thread information if it is set. |
4516 | * So don't set it here for Intel to make Linux guests happy. |
4517 | */ |
4518 | if (cs->nr_cores * cs->nr_threads > 1) { |
4519 | if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 || |
4520 | env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 || |
4521 | env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) { |
4522 | *ecx |= 1 << 1; /* CmpLegacy bit */ |
4523 | } |
4524 | } |
4525 | break; |
4526 | case 0x80000002: |
4527 | case 0x80000003: |
4528 | case 0x80000004: |
4529 | *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0]; |
4530 | *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1]; |
4531 | *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2]; |
4532 | *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3]; |
4533 | break; |
4534 | case 0x80000005: |
4535 | /* cache info (L1 cache) */ |
4536 | if (cpu->cache_info_passthrough) { |
4537 | host_cpuid(index, 0, eax, ebx, ecx, edx); |
4538 | break; |
4539 | } |
4540 | *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \ |
4541 | (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES); |
4542 | *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \ |
4543 | (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES); |
4544 | *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache); |
4545 | *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache); |
4546 | break; |
4547 | case 0x80000006: |
4548 | /* cache info (L2 cache) */ |
4549 | if (cpu->cache_info_passthrough) { |
4550 | host_cpuid(index, 0, eax, ebx, ecx, edx); |
4551 | break; |
4552 | } |
4553 | *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \ |
4554 | (L2_DTLB_2M_ENTRIES << 16) | \ |
4555 | (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \ |
4556 | (L2_ITLB_2M_ENTRIES); |
4557 | *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \ |
4558 | (L2_DTLB_4K_ENTRIES << 16) | \ |
4559 | (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \ |
4560 | (L2_ITLB_4K_ENTRIES); |
4561 | encode_cache_cpuid80000006(env->cache_info_amd.l2_cache, |
4562 | cpu->enable_l3_cache ? |
4563 | env->cache_info_amd.l3_cache : NULL, |
4564 | ecx, edx); |
4565 | break; |
4566 | case 0x80000007: |
4567 | *eax = 0; |
4568 | *ebx = 0; |
4569 | *ecx = 0; |
4570 | *edx = env->features[FEAT_8000_0007_EDX]; |
4571 | break; |
4572 | case 0x80000008: |
4573 | /* virtual & phys address size in low 2 bytes. */ |
4574 | if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { |
4575 | /* 64 bit processor */ |
4576 | *eax = cpu->phys_bits; /* configurable physical bits */ |
4577 | if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) { |
4578 | *eax |= 0x00003900; /* 57 bits virtual */ |
4579 | } else { |
4580 | *eax |= 0x00003000; /* 48 bits virtual */ |
4581 | } |
4582 | } else { |
4583 | *eax = cpu->phys_bits; |
4584 | } |
4585 | *ebx = env->features[FEAT_8000_0008_EBX]; |
4586 | *ecx = 0; |
4587 | *edx = 0; |
4588 | if (cs->nr_cores * cs->nr_threads > 1) { |
4589 | *ecx |= (cs->nr_cores * cs->nr_threads) - 1; |
4590 | } |
4591 | break; |
4592 | case 0x8000000A: |
4593 | if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { |
4594 | *eax = 0x00000001; /* SVM Revision */ |
4595 | *ebx = 0x00000010; /* nr of ASIDs */ |
4596 | *ecx = 0; |
4597 | *edx = env->features[FEAT_SVM]; /* optional features */ |
4598 | } else { |
4599 | *eax = 0; |
4600 | *ebx = 0; |
4601 | *ecx = 0; |
4602 | *edx = 0; |
4603 | } |
4604 | break; |
4605 | case 0x8000001D: |
4606 | *eax = 0; |
4607 | if (cpu->cache_info_passthrough) { |
4608 | host_cpuid(index, count, eax, ebx, ecx, edx); |
4609 | break; |
4610 | } |
4611 | switch (count) { |
4612 | case 0: /* L1 dcache info */ |
4613 | encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs, |
4614 | eax, ebx, ecx, edx); |
4615 | break; |
4616 | case 1: /* L1 icache info */ |
4617 | encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs, |
4618 | eax, ebx, ecx, edx); |
4619 | break; |
4620 | case 2: /* L2 cache info */ |
4621 | encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs, |
4622 | eax, ebx, ecx, edx); |
4623 | break; |
4624 | case 3: /* L3 cache info */ |
4625 | encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs, |
4626 | eax, ebx, ecx, edx); |
4627 | break; |
4628 | default: /* end of info */ |
4629 | *eax = *ebx = *ecx = *edx = 0; |
4630 | break; |
4631 | } |
4632 | break; |
4633 | case 0x8000001E: |
4634 | assert(cpu->core_id <= 255); |
4635 | encode_topo_cpuid8000001e(cs, cpu, |
4636 | eax, ebx, ecx, edx); |
4637 | break; |
4638 | case 0xC0000000: |
4639 | *eax = env->cpuid_xlevel2; |
4640 | *ebx = 0; |
4641 | *ecx = 0; |
4642 | *edx = 0; |
4643 | break; |
4644 | case 0xC0000001: |
4645 | /* Support for VIA CPU's CPUID instruction */ |
4646 | *eax = env->cpuid_version; |
4647 | *ebx = 0; |
4648 | *ecx = 0; |
4649 | *edx = env->features[FEAT_C000_0001_EDX]; |
4650 | break; |
4651 | case 0xC0000002: |
4652 | case 0xC0000003: |
4653 | case 0xC0000004: |
4654 | /* Reserved for the future, and now filled with zero */ |
4655 | *eax = 0; |
4656 | *ebx = 0; |
4657 | *ecx = 0; |
4658 | *edx = 0; |
4659 | break; |
4660 | case 0x8000001F: |
4661 | *eax = sev_enabled() ? 0x2 : 0; |
4662 | *ebx = sev_get_cbit_position(); |
4663 | *ebx |= sev_get_reduced_phys_bits() << 6; |
4664 | *ecx = 0; |
4665 | *edx = 0; |
4666 | break; |
4667 | default: |
4668 | /* reserved values: zero */ |
4669 | *eax = 0; |
4670 | *ebx = 0; |
4671 | *ecx = 0; |
4672 | *edx = 0; |
4673 | break; |
4674 | } |
4675 | } |
4676 | |
4677 | /* CPUClass::reset() */ |
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Zero everything up to end_reset_fields; fields placed after that
     * marker in CPUX86State deliberately survive reset. */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    /* CR0 power-on value: ET set, CD/NW set, paging/protection off */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;
    env->msr_smi_count = 0;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Real-mode segment setup; CS base 0xffff0000 together with
     * EIP 0xfff0 below forms the architectural reset vector. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    /* EDX holds the CPU signature (family/model/stepping) after reset */
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init: all tags set to "empty" */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state. */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
    if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
        env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
    }

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    /* Enable each configured XSAVE component (beyond FP/SSE) in XCR0 */
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

    /* Clear pending interrupt/exception state */
    env->interrupt_injected = -1;
    env->exception_nr = -1;
    env->exception_pending = 0;
    env->exception_injected = 0;
    env->exception_has_payload = false;
    env->exception_payload = 0;
    env->nmi_injected = false;
#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* APs start halted, waiting for INIT/SIPI from the BSP */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
    else if (hvf_enabled()) {
        hvf_reset_vcpu(s);
    }
#endif
}
4812 | |
4813 | #ifndef CONFIG_USER_ONLY |
4814 | bool cpu_is_bsp(X86CPU *cpu) |
4815 | { |
4816 | return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP; |
4817 | } |
4818 | |
4819 | /* TODO: remove me, when reset over QOM tree is implemented */ |
4820 | static void x86_cpu_machine_reset_cb(void *opaque) |
4821 | { |
4822 | X86CPU *cpu = opaque; |
4823 | cpu_reset(CPU(cpu)); |
4824 | } |
4825 | #endif |
4826 | |
4827 | static void mce_init(X86CPU *cpu) |
4828 | { |
4829 | CPUX86State *cenv = &cpu->env; |
4830 | unsigned int bank; |
4831 | |
4832 | if (((cenv->cpuid_version >> 8) & 0xf) >= 6 |
4833 | && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) == |
4834 | (CPUID_MCE | CPUID_MCA)) { |
4835 | cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF | |
4836 | (cpu->enable_lmce ? MCG_LMCE_P : 0); |
4837 | cenv->mcg_ctl = ~(uint64_t)0; |
4838 | for (bank = 0; bank < MCE_BANKS_DEF; bank++) { |
4839 | cenv->mce_banks[bank * 4] = ~(uint64_t)0; |
4840 | } |
4841 | } |
4842 | } |
4843 | |
4844 | #ifndef CONFIG_USER_ONLY |
4845 | APICCommonClass *apic_get_class(void) |
4846 | { |
4847 | const char *apic_type = "apic" ; |
4848 | |
4849 | /* TODO: in-kernel irqchip for hvf */ |
4850 | if (kvm_apic_in_kernel()) { |
4851 | apic_type = "kvm-apic" ; |
4852 | } else if (xen_enabled()) { |
4853 | apic_type = "xen-apic" ; |
4854 | } |
4855 | |
4856 | return APIC_COMMON_CLASS(object_class_by_name(apic_type)); |
4857 | } |
4858 | |
/*
 * Create the local APIC device for @cpu (not yet realized).
 * The APIC type is chosen by apic_get_class() based on the accelerator.
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    /* Parent the APIC under the CPU; the child property takes a reference,
     * so drop ours to leave the CPU as the sole owner. */
    object_property_add_child(OBJECT(cpu), "lapic" ,
                              OBJECT(cpu->apic_state), &error_abort);
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id" , cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    /* Power-on APIC base: default MMIO address with the enable bit set */
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
4876 | |
/*
 * Realize the CPU's local APIC (if one was created) and map the APIC
 * MMIO region into system memory.  The mapping is global and shared by
 * all CPUs, so it is only performed once (guarded by a static flag).
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized" ,
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
4899 | |
/*
 * Machine-done notifier: if the machine exposes a /machine/smram region,
 * alias the low 4GiB of it into this CPU's address space (with priority 1,
 * so it overlays normal RAM while in SMM).
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram" , NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram" ,
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, true);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
4914 | #else |
/* User-mode emulation has no APIC device; nothing to realize. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
4918 | #endif |
4919 | |
/* Note: Only safe for use on x86(-64) hosts */
static uint32_t x86_host_phys_bits(void)
{
    uint32_t eax;

    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
    if (eax < 0x80000008) {
        /* It's an odd 64 bit machine that doesn't have the leaf for
         * physical address bits; fall back to 36 that's most older
         * Intel.
         */
        return 36;
    }

    host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
    /* Note: According to AMD doc 25481 rev 2.34 they have a field
     * at 23:16 that can specify a maximum physical address bits for
     * the guest that can override this value; but I've not seen
     * anything with that set.
     */
    return eax & 0xff;
}
4945 | |
4946 | static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value) |
4947 | { |
4948 | if (*min < value) { |
4949 | *min = value; |
4950 | } |
4951 | } |
4952 | |
4953 | /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */ |
4954 | static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w) |
4955 | { |
4956 | CPUX86State *env = &cpu->env; |
4957 | FeatureWordInfo *fi = &feature_word_info[w]; |
4958 | uint32_t eax = fi->cpuid.eax; |
4959 | uint32_t region = eax & 0xF0000000; |
4960 | |
4961 | assert(feature_word_info[w].type == CPUID_FEATURE_WORD); |
4962 | if (!env->features[w]) { |
4963 | return; |
4964 | } |
4965 | |
4966 | switch (region) { |
4967 | case 0x00000000: |
4968 | x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax); |
4969 | break; |
4970 | case 0x80000000: |
4971 | x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax); |
4972 | break; |
4973 | case 0xC0000000: |
4974 | x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax); |
4975 | break; |
4976 | } |
4977 | |
4978 | if (eax == 7) { |
4979 | x86_cpu_adjust_level(cpu, &env->cpuid_min_level_func7, |
4980 | fi->cpuid.ecx); |
4981 | } |
4982 | } |
4983 | |
4984 | /* Calculate XSAVE components based on the configured CPU feature flags */ |
4985 | static void x86_cpu_enable_xsave_components(X86CPU *cpu) |
4986 | { |
4987 | CPUX86State *env = &cpu->env; |
4988 | int i; |
4989 | uint64_t mask; |
4990 | |
4991 | if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { |
4992 | return; |
4993 | } |
4994 | |
4995 | mask = 0; |
4996 | for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { |
4997 | const ExtSaveArea *esa = &x86_ext_save_areas[i]; |
4998 | if (env->features[esa->feature] & esa->bits) { |
4999 | mask |= (1ULL << i); |
5000 | } |
5001 | } |
5002 | |
5003 | env->features[FEAT_XSAVE_COMP_LO] = mask; |
5004 | env->features[FEAT_XSAVE_COMP_HI] = mask >> 32; |
5005 | } |
5006 | |
5007 | /***** Steps involved on loading and filtering CPUID data |
5008 | * |
5009 | * When initializing and realizing a CPU object, the steps |
5010 | * involved in setting up CPUID data are: |
5011 | * |
5012 | * 1) Loading CPU model definition (X86CPUDefinition). This is |
5013 | * implemented by x86_cpu_load_model() and should be completely |
5014 | * transparent, as it is done automatically by instance_init. |
5015 | * No code should need to look at X86CPUDefinition structs |
5016 | * outside instance_init. |
5017 | * |
5018 | * 2) CPU expansion. This is done by realize before CPUID |
5019 | * filtering, and will make sure host/accelerator data is |
5020 | * loaded for CPU models that depend on host capabilities |
5021 | * (e.g. "host"). Done by x86_cpu_expand_features(). |
5022 | * |
5023 | * 3) CPUID filtering. This initializes extra data related to |
5024 | * CPUID, and checks if the host supports all capabilities |
5025 | * required by the CPU. Runnability of a CPU model is |
5026 | * determined at this step. Done by x86_cpu_filter_features(). |
5027 | * |
5028 | * Some operations don't require all steps to be performed. |
5029 | * More precisely: |
5030 | * |
5031 | * - CPU instance creation (instance_init) will run only CPU |
5032 | * model loading. CPU expansion can't run at instance_init-time |
5033 | * because host/accelerator data may be not available yet. |
5034 | * - CPU realization will perform both CPU model expansion and CPUID |
5035 | * filtering, and return an error in case one of them fails. |
5036 | * - query-cpu-definitions needs to run all 3 steps. It needs |
5037 | * to run CPUID filtering, as the 'unavailable-features' |
5038 | * field is set based on the filtering results. |
5039 | * - The query-cpu-model-expansion QMP command only needs to run |
5040 | * CPU model loading and CPU expansion. It should not filter |
5041 | * any CPUID data based on host capabilities. |
5042 | */ |
5043 | |
5044 | /* Expand CPU configuration data, based on configured features |
5045 | * and host/accelerator capabilities when appropriate. |
5046 | */ |
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: Now cpu->max_features doesn't overwrite features
     * set using QOM properties, and we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            /* Override only features that weren't set explicitly
             * by the user.
             */
            env->features[w] |=
                x86_cpu_get_supported_feature_word(w, cpu->migratable) &
                ~env->user_features[w] & \
                ~feature_word_info[w].no_autoenable_flags;
        }
    }

    /* Apply "+feature" options from the command line */
    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* Apply "-feature" options from the command line */
    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* KVM paravirt features make no sense without KVM (or when hidden) */
    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        /* Bump cpuid_min_{level,xlevel,xlevel2} for each enabled word */
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);

        /* Intel Processor Trace requires CPUID[0x14] */
        if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
             kvm_enabled() && cpu->intel_pt_auto_level) {
            x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
        }

        /* CPU topology with multi-dies support requires CPUID[0x1F] */
        if (env->nr_dies > 1) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F);
        }

        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }

        /* SEV requires CPUID[0x8000001F] */
        if (sev_enabled()) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set.
     * UINT32_MAX is the "unset" sentinel for these properties. */
    if (env->cpuid_level_func7 == UINT32_MAX) {
        env->cpuid_level_func7 = env->cpuid_min_level_func7;
    }
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}
5150 | |
5151 | /* |
5152 | * Finishes initialization of CPUID data, filters CPU feature |
5153 | * words based on host availability of each feature. |
5154 | * |
5155 | * Returns: 0 if all flags are supported by the host, non-zero otherwise. |
5156 | */ |
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    /* Mask each feature word against what the accelerator supports.
     * With force_features set, requested bits are kept anyway, but
     * filtered_features is still recorded for reporting. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, false);
        uint32_t requested_features = env->features[w];
        uint32_t available_features = requested_features & host_feat;
        if (!cpu->force_features) {
            env->features[w] = available_features;
        }
        cpu->filtered_features[w] = requested_features & ~available_features;
        if (cpu->filtered_features[w]) {
            rv = 1;
        }
    }

    /* Intel PT needs a deeper check: all of leaf 0x14's sub-leaf
     * capabilities we advertise must be present on the host. */
    if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
        kvm_enabled()) {
        KVMState *s = CPU(cpu)->kvm_state;
        uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
        uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
        uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
        uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
        uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);

        if (!eax_0 ||
           ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
           ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
           ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
           ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
                                           INTEL_PT_ADDR_RANGES_NUM) ||
           ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
                (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
           (ecx_0 & INTEL_PT_IP_LIP)) {
            /*
             * Processor Trace capabilities aren't configurable, so if the
             * host can't emulate the capabilities we report on
             * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
             */
            env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
            cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
            rv = 1;
        }
    }

    return rv;
}
5208 | |
5209 | static void x86_cpu_realizefn(DeviceState *dev, Error **errp) |
5210 | { |
5211 | CPUState *cs = CPU(dev); |
5212 | X86CPU *cpu = X86_CPU(dev); |
5213 | X86CPUClass *xcc = X86_CPU_GET_CLASS(dev); |
5214 | CPUX86State *env = &cpu->env; |
5215 | Error *local_err = NULL; |
5216 | static bool ht_warned; |
5217 | |
5218 | if (xcc->host_cpuid_required) { |
5219 | if (!accel_uses_host_cpuid()) { |
5220 | char *name = x86_cpu_class_get_model_name(xcc); |
5221 | error_setg(&local_err, "CPU model '%s' requires KVM" , name); |
5222 | g_free(name); |
5223 | goto out; |
5224 | } |
5225 | |
5226 | if (enable_cpu_pm) { |
5227 | host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx, |
5228 | &cpu->mwait.ecx, &cpu->mwait.edx); |
5229 | env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR; |
5230 | } |
5231 | } |
5232 | |
5233 | /* mwait extended info: needed for Core compatibility */ |
5234 | /* We always wake on interrupt even if host does not have the capability */ |
5235 | cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE; |
5236 | |
5237 | if (cpu->apic_id == UNASSIGNED_APIC_ID) { |
5238 | error_setg(errp, "apic-id property was not initialized properly" ); |
5239 | return; |
5240 | } |
5241 | |
5242 | x86_cpu_expand_features(cpu, &local_err); |
5243 | if (local_err) { |
5244 | goto out; |
5245 | } |
5246 | |
5247 | if (x86_cpu_filter_features(cpu) && |
5248 | (cpu->check_cpuid || cpu->enforce_cpuid)) { |
5249 | x86_cpu_report_filtered_features(cpu); |
5250 | if (cpu->enforce_cpuid) { |
5251 | error_setg(&local_err, |
5252 | accel_uses_host_cpuid() ? |
5253 | "Host doesn't support requested features" : |
5254 | "TCG doesn't support requested features" ); |
5255 | goto out; |
5256 | } |
5257 | } |
5258 | |
5259 | /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on |
5260 | * CPUID[1].EDX. |
5261 | */ |
5262 | if (IS_AMD_CPU(env)) { |
5263 | env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES; |
5264 | env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX] |
5265 | & CPUID_EXT2_AMD_ALIASES); |
5266 | } |
5267 | |
5268 | /* For 64bit systems think about the number of physical bits to present. |
5269 | * ideally this should be the same as the host; anything other than matching |
5270 | * the host can cause incorrect guest behaviour. |
5271 | * QEMU used to pick the magic value of 40 bits that corresponds to |
5272 | * consumer AMD devices but nothing else. |
5273 | */ |
5274 | if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { |
5275 | if (accel_uses_host_cpuid()) { |
5276 | uint32_t host_phys_bits = x86_host_phys_bits(); |
5277 | static bool warned; |
5278 | |
5279 | /* Print a warning if the user set it to a value that's not the |
5280 | * host value. |
5281 | */ |
5282 | if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 && |
5283 | !warned) { |
5284 | warn_report("Host physical bits (%u)" |
5285 | " does not match phys-bits property (%u)" , |
5286 | host_phys_bits, cpu->phys_bits); |
5287 | warned = true; |
5288 | } |
5289 | |
5290 | if (cpu->host_phys_bits) { |
5291 | /* The user asked for us to use the host physical bits */ |
5292 | cpu->phys_bits = host_phys_bits; |
5293 | if (cpu->host_phys_bits_limit && |
5294 | cpu->phys_bits > cpu->host_phys_bits_limit) { |
5295 | cpu->phys_bits = cpu->host_phys_bits_limit; |
5296 | } |
5297 | } |
5298 | |
5299 | if (cpu->phys_bits && |
5300 | (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS || |
5301 | cpu->phys_bits < 32)) { |
5302 | error_setg(errp, "phys-bits should be between 32 and %u " |
5303 | " (but is %u)" , |
5304 | TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits); |
5305 | return; |
5306 | } |
5307 | } else { |
5308 | if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) { |
5309 | error_setg(errp, "TCG only supports phys-bits=%u" , |
5310 | TCG_PHYS_ADDR_BITS); |
5311 | return; |
5312 | } |
5313 | } |
5314 | /* 0 means it was not explicitly set by the user (or by machine |
5315 | * compat_props or by the host code above). In this case, the default |
5316 | * is the value used by TCG (40). |
5317 | */ |
5318 | if (cpu->phys_bits == 0) { |
5319 | cpu->phys_bits = TCG_PHYS_ADDR_BITS; |
5320 | } |
5321 | } else { |
5322 | /* For 32 bit systems don't use the user set value, but keep |
5323 | * phys_bits consistent with what we tell the guest. |
5324 | */ |
5325 | if (cpu->phys_bits != 0) { |
5326 | error_setg(errp, "phys-bits is not user-configurable in 32 bit" ); |
5327 | return; |
5328 | } |
5329 | |
5330 | if (env->features[FEAT_1_EDX] & CPUID_PSE36) { |
5331 | cpu->phys_bits = 36; |
5332 | } else { |
5333 | cpu->phys_bits = 32; |
5334 | } |
5335 | } |
5336 | |
5337 | /* Cache information initialization */ |
5338 | if (!cpu->legacy_cache) { |
5339 | if (!xcc->model || !xcc->model->cpudef->cache_info) { |
5340 | char *name = x86_cpu_class_get_model_name(xcc); |
5341 | error_setg(errp, |
5342 | "CPU model '%s' doesn't support legacy-cache=off" , name); |
5343 | g_free(name); |
5344 | return; |
5345 | } |
5346 | env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd = |
5347 | *xcc->model->cpudef->cache_info; |
5348 | } else { |
5349 | /* Build legacy cache information */ |
5350 | env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache; |
5351 | env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache; |
5352 | env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2; |
5353 | env->cache_info_cpuid2.l3_cache = &legacy_l3_cache; |
5354 | |
5355 | env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache; |
5356 | env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache; |
5357 | env->cache_info_cpuid4.l2_cache = &legacy_l2_cache; |
5358 | env->cache_info_cpuid4.l3_cache = &legacy_l3_cache; |
5359 | |
5360 | env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd; |
5361 | env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd; |
5362 | env->cache_info_amd.l2_cache = &legacy_l2_cache_amd; |
5363 | env->cache_info_amd.l3_cache = &legacy_l3_cache; |
5364 | } |
5365 | |
5366 | |
5367 | cpu_exec_realizefn(cs, &local_err); |
5368 | if (local_err != NULL) { |
5369 | error_propagate(errp, local_err); |
5370 | return; |
5371 | } |
5372 | |
5373 | #ifndef CONFIG_USER_ONLY |
5374 | MachineState *ms = MACHINE(qdev_get_machine()); |
5375 | qemu_register_reset(x86_cpu_machine_reset_cb, cpu); |
5376 | |
5377 | if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) { |
5378 | x86_cpu_apic_create(cpu, &local_err); |
5379 | if (local_err != NULL) { |
5380 | goto out; |
5381 | } |
5382 | } |
5383 | #endif |
5384 | |
5385 | mce_init(cpu); |
5386 | |
5387 | #ifndef CONFIG_USER_ONLY |
5388 | if (tcg_enabled()) { |
5389 | cpu->cpu_as_mem = g_new(MemoryRegion, 1); |
5390 | cpu->cpu_as_root = g_new(MemoryRegion, 1); |
5391 | |
5392 | /* Outer container... */ |
5393 | memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory" , ~0ull); |
5394 | memory_region_set_enabled(cpu->cpu_as_root, true); |
5395 | |
5396 | /* ... with two regions inside: normal system memory with low |
5397 | * priority, and... |
5398 | */ |
5399 | memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory" , |
5400 | get_system_memory(), 0, ~0ull); |
5401 | memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0); |
5402 | memory_region_set_enabled(cpu->cpu_as_mem, true); |
5403 | |
5404 | cs->num_ases = 2; |
5405 | cpu_address_space_init(cs, 0, "cpu-memory" , cs->memory); |
5406 | cpu_address_space_init(cs, 1, "cpu-smm" , cpu->cpu_as_root); |
5407 | |
5408 | /* ... SMRAM with higher priority, linked from /machine/smram. */ |
5409 | cpu->machine_done.notify = x86_cpu_machine_done; |
5410 | qemu_add_machine_init_done_notifier(&cpu->machine_done); |
5411 | } |
5412 | #endif |
5413 | |
5414 | qemu_init_vcpu(cs); |
5415 | |
5416 | /* |
5417 | * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU |
5418 | * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX |
5419 | * based on inputs (sockets,cores,threads), it is still better to give |
5420 | * users a warning. |
5421 | * |
5422 | * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise |
5423 | * cs->nr_threads hasn't be populated yet and the checking is incorrect. |
5424 | */ |
5425 | if (IS_AMD_CPU(env) && |
5426 | !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) && |
5427 | cs->nr_threads > 1 && !ht_warned) { |
5428 | warn_report("This family of AMD CPU doesn't support " |
5429 | "hyperthreading(%d)" , |
5430 | cs->nr_threads); |
5431 | error_printf("Please configure -smp options properly" |
5432 | " or try enabling topoext feature.\n" ); |
5433 | ht_warned = true; |
5434 | } |
5435 | |
5436 | x86_cpu_apic_realize(cpu, &local_err); |
5437 | if (local_err != NULL) { |
5438 | goto out; |
5439 | } |
5440 | cpu_reset(cs); |
5441 | |
5442 | xcc->parent_realize(dev, &local_err); |
5443 | |
5444 | out: |
5445 | if (local_err != NULL) { |
5446 | error_propagate(errp, local_err); |
5447 | return; |
5448 | } |
5449 | } |
5450 | |
5451 | static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp) |
5452 | { |
5453 | X86CPU *cpu = X86_CPU(dev); |
5454 | X86CPUClass *xcc = X86_CPU_GET_CLASS(dev); |
5455 | Error *local_err = NULL; |
5456 | |
5457 | #ifndef CONFIG_USER_ONLY |
5458 | cpu_remove_sync(CPU(dev)); |
5459 | qemu_unregister_reset(x86_cpu_machine_reset_cb, dev); |
5460 | #endif |
5461 | |
5462 | if (cpu->apic_state) { |
5463 | object_unparent(OBJECT(cpu->apic_state)); |
5464 | cpu->apic_state = NULL; |
5465 | } |
5466 | |
5467 | xcc->parent_unrealize(dev, &local_err); |
5468 | if (local_err != NULL) { |
5469 | error_propagate(errp, local_err); |
5470 | return; |
5471 | } |
5472 | } |
5473 | |
/*
 * Per-property state for a feature-bit boolean QOM property: which
 * feature word it lives in and which bit(s) of that word it controls.
 * The mask may cover several bits when the same property name is
 * registered more than once (see x86_cpu_register_bit_prop()).
 */
typedef struct BitProperty {
    FeatureWord w;      /* index into env->features[] */
    uint32_t mask;      /* bit mask within that feature word */
} BitProperty;
5478 | |
5479 | static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name, |
5480 | void *opaque, Error **errp) |
5481 | { |
5482 | X86CPU *cpu = X86_CPU(obj); |
5483 | BitProperty *fp = opaque; |
5484 | uint32_t f = cpu->env.features[fp->w]; |
5485 | bool value = (f & fp->mask) == fp->mask; |
5486 | visit_type_bool(v, name, &value, errp); |
5487 | } |
5488 | |
5489 | static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name, |
5490 | void *opaque, Error **errp) |
5491 | { |
5492 | DeviceState *dev = DEVICE(obj); |
5493 | X86CPU *cpu = X86_CPU(obj); |
5494 | BitProperty *fp = opaque; |
5495 | Error *local_err = NULL; |
5496 | bool value; |
5497 | |
5498 | if (dev->realized) { |
5499 | qdev_prop_set_after_realize(dev, name, errp); |
5500 | return; |
5501 | } |
5502 | |
5503 | visit_type_bool(v, name, &value, &local_err); |
5504 | if (local_err) { |
5505 | error_propagate(errp, local_err); |
5506 | return; |
5507 | } |
5508 | |
5509 | if (value) { |
5510 | cpu->env.features[fp->w] |= fp->mask; |
5511 | } else { |
5512 | cpu->env.features[fp->w] &= ~fp->mask; |
5513 | } |
5514 | cpu->env.user_features[fp->w] |= fp->mask; |
5515 | } |
5516 | |
5517 | static void x86_cpu_release_bit_prop(Object *obj, const char *name, |
5518 | void *opaque) |
5519 | { |
5520 | BitProperty *prop = opaque; |
5521 | g_free(prop); |
5522 | } |
5523 | |
5524 | /* Register a boolean property to get/set a single bit in a uint32_t field. |
5525 | * |
5526 | * The same property name can be registered multiple times to make it affect |
5527 | * multiple bits in the same FeatureWord. In that case, the getter will return |
5528 | * true only if all bits are set. |
5529 | */ |
5530 | static void x86_cpu_register_bit_prop(X86CPU *cpu, |
5531 | const char *prop_name, |
5532 | FeatureWord w, |
5533 | int bitnr) |
5534 | { |
5535 | BitProperty *fp; |
5536 | ObjectProperty *op; |
5537 | uint32_t mask = (1UL << bitnr); |
5538 | |
5539 | op = object_property_find(OBJECT(cpu), prop_name, NULL); |
5540 | if (op) { |
5541 | fp = op->opaque; |
5542 | assert(fp->w == w); |
5543 | fp->mask |= mask; |
5544 | } else { |
5545 | fp = g_new0(BitProperty, 1); |
5546 | fp->w = w; |
5547 | fp->mask = mask; |
5548 | object_property_add(OBJECT(cpu), prop_name, "bool" , |
5549 | x86_cpu_get_bit_prop, |
5550 | x86_cpu_set_bit_prop, |
5551 | x86_cpu_release_bit_prop, fp, &error_abort); |
5552 | } |
5553 | } |
5554 | |
5555 | static void x86_cpu_register_feature_bit_props(X86CPU *cpu, |
5556 | FeatureWord w, |
5557 | int bitnr) |
5558 | { |
5559 | FeatureWordInfo *fi = &feature_word_info[w]; |
5560 | const char *name = fi->feat_names[bitnr]; |
5561 | |
5562 | if (!name) { |
5563 | return; |
5564 | } |
5565 | |
5566 | /* Property names should use "-" instead of "_". |
5567 | * Old names containing underscores are registered as aliases |
5568 | * using object_property_add_alias() |
5569 | */ |
5570 | assert(!strchr(name, '_')); |
5571 | /* aliases don't use "|" delimiters anymore, they are registered |
5572 | * manually using object_property_add_alias() */ |
5573 | assert(!strchr(name, '|')); |
5574 | x86_cpu_register_bit_prop(cpu, name, w, bitnr); |
5575 | } |
5576 | |
5577 | static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs) |
5578 | { |
5579 | X86CPU *cpu = X86_CPU(cs); |
5580 | CPUX86State *env = &cpu->env; |
5581 | GuestPanicInformation *panic_info = NULL; |
5582 | |
5583 | if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) { |
5584 | panic_info = g_malloc0(sizeof(GuestPanicInformation)); |
5585 | |
5586 | panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V; |
5587 | |
5588 | assert(HV_CRASH_PARAMS >= 5); |
5589 | panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0]; |
5590 | panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1]; |
5591 | panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2]; |
5592 | panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3]; |
5593 | panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4]; |
5594 | } |
5595 | |
5596 | return panic_info; |
5597 | } |
5598 | static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v, |
5599 | const char *name, void *opaque, |
5600 | Error **errp) |
5601 | { |
5602 | CPUState *cs = CPU(obj); |
5603 | GuestPanicInformation *panic_info; |
5604 | |
5605 | if (!cs->crash_occurred) { |
5606 | error_setg(errp, "No crash occured" ); |
5607 | return; |
5608 | } |
5609 | |
5610 | panic_info = x86_cpu_get_crash_info(cs); |
5611 | if (panic_info == NULL) { |
5612 | error_setg(errp, "No crash information" ); |
5613 | return; |
5614 | } |
5615 | |
5616 | visit_type_GuestPanicInformation(v, "crash-information" , &panic_info, |
5617 | errp); |
5618 | qapi_free_GuestPanicInformation(panic_info); |
5619 | } |
5620 | |
/*
 * QOM instance_init for TYPE_X86_CPU: registers the per-CPU properties
 * (CPUID versioning, vendor/model-id, feature-word introspection, one
 * boolean property per named feature bit plus legacy-name aliases) and
 * loads the class's CPU model definition, if it has one.
 */
static void x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    env->nr_dies = 1;
    cpu_set_cpustate_pointers(cpu);

    object_property_add(obj, "family" , "int" ,
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model" , "int" ,
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping" , "int" ,
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor" ,
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id" ,
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency" , "int" ,
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    /* Read-only introspection of the enabled and filtered feature words */
    object_property_add(obj, "feature-words" , "X86CPUFeatureWordInfo" ,
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features" , "X86CPUFeatureWordInfo" ,
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);
    /*
     * The "unavailable-features" property has the same semantics as
     * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
     * QMP command: they list the features that would have prevented the
     * CPU from running if the "enforce" flag was set.
     */
    object_property_add(obj, "unavailable-features" , "strList" ,
                        x86_cpu_get_unavailable_features,
                        NULL, NULL, NULL, &error_abort);

    object_property_add(obj, "crash-information" , "GuestPanicInformation" ,
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    /* One boolean property per named bit of every feature word */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Alternative spellings kept for command-line compatibility */
    object_property_add_alias(obj, "sse3" , obj, "pni" , &error_abort);
    object_property_add_alias(obj, "pclmuldq" , obj, "pclmulqdq" , &error_abort);
    object_property_add_alias(obj, "sse4-1" , obj, "sse4.1" , &error_abort);
    object_property_add_alias(obj, "sse4-2" , obj, "sse4.2" , &error_abort);
    object_property_add_alias(obj, "xd" , obj, "nx" , &error_abort);
    object_property_add_alias(obj, "ffxsr" , obj, "fxsr-opt" , &error_abort);
    object_property_add_alias(obj, "i64" , obj, "lm" , &error_abort);

    /* Legacy underscore spellings aliased to their dash equivalents */
    object_property_add_alias(obj, "ds_cpl" , obj, "ds-cpl" , &error_abort);
    object_property_add_alias(obj, "tsc_adjust" , obj, "tsc-adjust" , &error_abort);
    object_property_add_alias(obj, "fxsr_opt" , obj, "fxsr-opt" , &error_abort);
    object_property_add_alias(obj, "lahf_lm" , obj, "lahf-lm" , &error_abort);
    object_property_add_alias(obj, "cmp_legacy" , obj, "cmp-legacy" , &error_abort);
    object_property_add_alias(obj, "nodeid_msr" , obj, "nodeid-msr" , &error_abort);
    object_property_add_alias(obj, "perfctr_core" , obj, "perfctr-core" , &error_abort);
    object_property_add_alias(obj, "perfctr_nb" , obj, "perfctr-nb" , &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay" , obj, "kvm-nopiodelay" , &error_abort);
    object_property_add_alias(obj, "kvm_mmu" , obj, "kvm-mmu" , &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf" , obj, "kvm-asyncpf" , &error_abort);
    object_property_add_alias(obj, "kvm_steal_time" , obj, "kvm-steal-time" , &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi" , obj, "kvm-pv-eoi" , &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt" , obj, "kvm-pv-unhalt" , &error_abort);
    object_property_add_alias(obj, "kvm_poll_control" , obj, "kvm-poll-control" ,
                              &error_abort);
    object_property_add_alias(obj, "svm_lock" , obj, "svm-lock" , &error_abort);
    object_property_add_alias(obj, "nrip_save" , obj, "nrip-save" , &error_abort);
    object_property_add_alias(obj, "tsc_scale" , obj, "tsc-scale" , &error_abort);
    object_property_add_alias(obj, "vmcb_clean" , obj, "vmcb-clean" , &error_abort);
    object_property_add_alias(obj, "pause_filter" , obj, "pause-filter" , &error_abort);
    object_property_add_alias(obj, "sse4_1" , obj, "sse4.1" , &error_abort);
    object_property_add_alias(obj, "sse4_2" , obj, "sse4.2" , &error_abort);

    /* Concrete model subtypes carry a definition; the abstract base and
     * "max"/"host" types handle model setup elsewhere. */
    if (xcc->model) {
        x86_cpu_load_model(cpu, xcc->model, &error_abort);
    }
}
5712 | |
5713 | static int64_t x86_cpu_get_arch_id(CPUState *cs) |
5714 | { |
5715 | X86CPU *cpu = X86_CPU(cs); |
5716 | |
5717 | return cpu->apic_id; |
5718 | } |
5719 | |
5720 | static bool x86_cpu_get_paging_enabled(const CPUState *cs) |
5721 | { |
5722 | X86CPU *cpu = X86_CPU(cs); |
5723 | |
5724 | return cpu->env.cr[0] & CR0_PG_MASK; |
5725 | } |
5726 | |
5727 | static void x86_cpu_set_pc(CPUState *cs, vaddr value) |
5728 | { |
5729 | X86CPU *cpu = X86_CPU(cs); |
5730 | |
5731 | cpu->env.eip = value; |
5732 | } |
5733 | |
5734 | static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb) |
5735 | { |
5736 | X86CPU *cpu = X86_CPU(cs); |
5737 | |
5738 | cpu->env.eip = tb->pc - tb->cs_base; |
5739 | } |
5740 | |
/*
 * Return the single highest-priority interrupt type from
 * @interrupt_request that is deliverable in the CPU's current state,
 * or 0 if none is.  Priority: POLL > SIPI > SMI > NMI > MCE > HARD
 * (external IRQ) > VIRQ (SVM virtual IRQ).
 */
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if !defined(CONFIG_USER_ONLY)
    /* APIC state polling is serviced unconditionally */
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        return CPU_INTERRUPT_POLL;
    }
#endif
    /* Startup IPI is not gated by GIF or interrupt masking */
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        return CPU_INTERRUPT_SIPI;
    }

    /* All remaining sources are gated on the (SVM) global interrupt flag */
    if (env->hflags2 & HF2_GIF_MASK) {
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            /* SMI is blocked while already inside SMM */
            return CPU_INTERRUPT_SMI;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            /* NMI is blocked while a previous NMI is still in progress */
            return CPU_INTERRUPT_NMI;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            return CPU_INTERRUPT_MCE;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            /* External IRQ: with SVM V_INTR_MASKING active delivery is
             * controlled by HIF; otherwise by guest EFLAGS.IF and the
             * one-instruction STI/MOV-SS interrupt shadow. */
            return CPU_INTERRUPT_HARD;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            /* SVM virtual interrupt, subject to IF and the shadow */
            return CPU_INTERRUPT_VIRQ;
#endif
        }
    }

    return 0;
}
5782 | |
5783 | static bool x86_cpu_has_work(CPUState *cs) |
5784 | { |
5785 | return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0; |
5786 | } |
5787 | |
5788 | static void x86_disas_set_info(CPUState *cs, disassemble_info *info) |
5789 | { |
5790 | X86CPU *cpu = X86_CPU(cs); |
5791 | CPUX86State *env = &cpu->env; |
5792 | |
5793 | info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64 |
5794 | : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386 |
5795 | : bfd_mach_i386_i8086); |
5796 | info->print_insn = print_insn_i386; |
5797 | |
5798 | info->cap_arch = CS_ARCH_X86; |
5799 | info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64 |
5800 | : env->hflags & HF_CS32_MASK ? CS_MODE_32 |
5801 | : CS_MODE_16); |
5802 | info->cap_insn_unit = 1; |
5803 | info->cap_insn_split = 8; |
5804 | } |
5805 | |
/*
 * Recompute the cached hflags from the architectural state (CR0, CR4,
 * EFER, EFLAGS and the segment descriptor caches).  Bits covered by
 * HFLAG_COPY_MASK are preserved; the rest are derived below.
 */
void x86_update_hflags(CPUX86State *env)
{
    uint32_t hflags;
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = env->hflags & HFLAG_COPY_MASK;
    /* CPL is the DPL of the current stack segment */
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    /* CR0.MP/EM/TS shift directly into their hflag positions */
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    /* TF/VM/IOPL occupy the same bit positions in EFLAGS and hflags */
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    /* 64-bit code segment: long mode active and CS.L set */
    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        /* Default operand/stack sizes come from CS.D and SS.B */
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            /* Real mode, vm86 mode and 16-bit code always add seg bases */
            hflags |= HF_ADDSEG_MASK;
        } else {
            /* 32-bit code may skip base addition when all bases are 0 */
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;
}
5847 | |
/* qdev properties shared by every X86CPU subtype */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id" , X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id" , X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id" , X86CPU, core_id, 0),
    DEFINE_PROP_INT32("die-id" , X86CPU, die_id, 0),
    DEFINE_PROP_INT32("socket-id" , X86CPU, socket_id, 0),
#else
    /* For system emulation, topology IDs default to "unset" and are
     * filled in by machine/topology code before realize. */
    DEFINE_PROP_UINT32("apic-id" , X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id" , X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id" , X86CPU, core_id, -1),
    DEFINE_PROP_INT32("die-id" , X86CPU, die_id, -1),
    DEFINE_PROP_INT32("socket-id" , X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id" , X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu" , X86CPU, enable_pmu, false),

    /* Hyper-V enlightenments (all disabled by default) */
    DEFINE_PROP_UINT32("hv-spinlocks" , X86CPU, hyperv_spinlock_attempts,
                       HYPERV_SPINLOCK_NEVER_RETRY),
    DEFINE_PROP_BIT64("hv-relaxed" , X86CPU, hyperv_features,
                      HYPERV_FEAT_RELAXED, 0),
    DEFINE_PROP_BIT64("hv-vapic" , X86CPU, hyperv_features,
                      HYPERV_FEAT_VAPIC, 0),
    DEFINE_PROP_BIT64("hv-time" , X86CPU, hyperv_features,
                      HYPERV_FEAT_TIME, 0),
    DEFINE_PROP_BIT64("hv-crash" , X86CPU, hyperv_features,
                      HYPERV_FEAT_CRASH, 0),
    DEFINE_PROP_BIT64("hv-reset" , X86CPU, hyperv_features,
                      HYPERV_FEAT_RESET, 0),
    DEFINE_PROP_BIT64("hv-vpindex" , X86CPU, hyperv_features,
                      HYPERV_FEAT_VPINDEX, 0),
    DEFINE_PROP_BIT64("hv-runtime" , X86CPU, hyperv_features,
                      HYPERV_FEAT_RUNTIME, 0),
    DEFINE_PROP_BIT64("hv-synic" , X86CPU, hyperv_features,
                      HYPERV_FEAT_SYNIC, 0),
    DEFINE_PROP_BIT64("hv-stimer" , X86CPU, hyperv_features,
                      HYPERV_FEAT_STIMER, 0),
    DEFINE_PROP_BIT64("hv-frequencies" , X86CPU, hyperv_features,
                      HYPERV_FEAT_FREQUENCIES, 0),
    DEFINE_PROP_BIT64("hv-reenlightenment" , X86CPU, hyperv_features,
                      HYPERV_FEAT_REENLIGHTENMENT, 0),
    DEFINE_PROP_BIT64("hv-tlbflush" , X86CPU, hyperv_features,
                      HYPERV_FEAT_TLBFLUSH, 0),
    DEFINE_PROP_BIT64("hv-evmcs" , X86CPU, hyperv_features,
                      HYPERV_FEAT_EVMCS, 0),
    DEFINE_PROP_BIT64("hv-ipi" , X86CPU, hyperv_features,
                      HYPERV_FEAT_IPI, 0),
    DEFINE_PROP_BIT64("hv-stimer-direct" , X86CPU, hyperv_features,
                      HYPERV_FEAT_STIMER_DIRECT, 0),
    DEFINE_PROP_BOOL("hv-passthrough" , X86CPU, hyperv_passthrough, false),

    DEFINE_PROP_BOOL("check" , X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce" , X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("x-force-features" , X86CPU, force_features, false),
    DEFINE_PROP_BOOL("kvm" , X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits" , X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits" , X86CPU, host_phys_bits, false),
    DEFINE_PROP_UINT8("host-phys-bits-limit" , X86CPU, host_phys_bits_limit, 0),
    DEFINE_PROP_BOOL("fill-mtrr-mask" , X86CPU, fill_mtrr_mask, true),
    DEFINE_PROP_UINT32("level-func7" , X86CPU, env.cpuid_level_func7,
                       UINT32_MAX),
    DEFINE_PROP_UINT32("level" , X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel" , X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2" , X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level" , X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel" , X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2" , X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level" , X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id" , X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb" , X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce" , X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache" , X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration" , X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq" , X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_BOOL("tcg-cpuid" , X86CPU, expose_tcg, true),
    DEFINE_PROP_BOOL("x-migrate-smi-count" , X86CPU, migrate_smi_count,
                     true),
    /*
     * legacy_cache defaults to true unless the CPU model provides its
     * own cache information (see x86_cpu_load_def()).
     */
    DEFINE_PROP_BOOL("legacy-cache" , X86CPU, legacy_cache, true),

    /*
     * From "Requirements for Implementing the Microsoft
     * Hypervisor Interface":
     * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
     *
     * "Starting with Windows Server 2012 and Windows 8, if
     * CPUID.40000005.EAX contains a value of -1, Windows assumes that
     * the hypervisor imposes no specific limit to the number of VPs.
     * In this case, Windows Server 2012 guest VMs may use more than
     * 64 VPs, up to the maximum supported number of processors applicable
     * to the specific Windows version being used."
     */
    DEFINE_PROP_INT32("x-hv-max-vps" , X86CPU, hv_max_vps, -1),
    DEFINE_PROP_BOOL("x-hv-synic-kvm-only" , X86CPU, hyperv_synic_kvm_only,
                     false),
    DEFINE_PROP_BOOL("x-intel-pt-auto-level" , X86CPU, intel_pt_auto_level,
                     true),
    DEFINE_PROP_END_OF_LIST()
};
5952 | |
/*
 * Class init for the abstract TYPE_X86_CPU: wires the DeviceClass
 * realize/unrealize chain, overrides reset, installs the CPUClass
 * hooks and attaches the common property list.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    device_class_set_parent_realize(dc, x86_cpu_realizefn,
                                    &xcc->parent_realize);
    device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
                                      &xcc->parent_unrealize);
    dc->props = x86_cpu_properties;

    /* Save the parent reset handler before overriding it */
    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
#endif
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifndef CONFIG_USER_ONLY
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_arch_name = x86_gdb_arch_name;
    /* gdb register description and count depend on the target bitness */
#ifdef TARGET_X86_64
    cc->gdb_core_xml_file = "i386-64bit.xml" ;
    cc->gdb_num_core_regs = 66;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml" ;
    cc->gdb_num_core_regs = 50;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
#ifdef CONFIG_TCG
    cc->tcg_initialize = tcg_x86_init;
    cc->tlb_fill = x86_cpu_tlb_fill;
#endif
    cc->disas_set_info = x86_disas_set_info;

    /* CPUs may be created/hot-plugged from the command line or monitor */
    dc->user_creatable = true;
}
6015 | |
/* Abstract base QOM type; concrete CPU-model subtypes are registered
 * from x86_cpu_register_types(). */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,   /* cannot be instantiated directly */
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
6025 | |
6026 | |
6027 | /* "base" CPU model, used by query-cpu-model-expansion */ |
static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    /* "base" is a static, migration-safe model with no features enabled */
    xcc->static_model = true;
    xcc->migration_safe = true;
    xcc->model_description = "base CPU model type with no features enabled" ;
    /* Sort after the regular CPU models in listing output */
    xcc->ordering = 8;
}
6037 | |
/* QOM type for the "base" model (see x86_cpu_base_class_init above) */
static const TypeInfo x86_base_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("base" ),
    .parent = TYPE_X86_CPU,
    .class_init = x86_cpu_base_class_init,
};
6043 | |
6044 | static void x86_cpu_register_types(void) |
6045 | { |
6046 | int i; |
6047 | |
6048 | type_register_static(&x86_cpu_type_info); |
6049 | for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) { |
6050 | x86_register_cpudef_types(&builtin_x86_defs[i]); |
6051 | } |
6052 | type_register_static(&max_x86_cpu_type_info); |
6053 | type_register_static(&x86_base_cpu_type_info); |
6054 | #if defined(CONFIG_KVM) || defined(CONFIG_HVF) |
6055 | type_register_static(&host_x86_cpu_type_info); |
6056 | #endif |
6057 | } |
6058 | |
6059 | type_init(x86_cpu_register_types) |
6060 | |