1 | /* |
2 | * QEMU VMWARE VMXNET3 paravirtual NIC |
3 | * |
4 | * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com) |
5 | * |
6 | * Developed by Daynix Computing LTD (http://www.daynix.com) |
7 | * |
8 | * Authors: |
9 | * Dmitry Fleytman <dmitry@daynix.com> |
10 | * Tamir Shomer <tamirs@daynix.com> |
11 | * Yan Vugenfirer <yan@daynix.com> |
12 | * |
13 | * This work is licensed under the terms of the GNU GPL, version 2. |
14 | * See the COPYING file in the top-level directory. |
15 | * |
16 | */ |
17 | |
18 | #include "qemu/osdep.h" |
19 | #include "hw/hw.h" |
20 | #include "hw/pci/pci.h" |
21 | #include "hw/qdev-properties.h" |
22 | #include "net/tap.h" |
23 | #include "net/checksum.h" |
24 | #include "sysemu/sysemu.h" |
25 | #include "qemu/bswap.h" |
26 | #include "qemu/module.h" |
27 | #include "hw/pci/msix.h" |
28 | #include "hw/pci/msi.h" |
29 | #include "migration/register.h" |
30 | #include "migration/vmstate.h" |
31 | |
32 | #include "vmxnet3.h" |
33 | #include "vmxnet3_defs.h" |
34 | #include "vmxnet_debug.h" |
35 | #include "vmware_utils.h" |
36 | #include "net_tx_pkt.h" |
37 | #include "net_rx_pkt.h" |
38 | |
39 | #define PCI_DEVICE_ID_VMWARE_VMXNET3_REVISION 0x1 |
40 | #define VMXNET3_MSIX_BAR_SIZE 0x2000 |
41 | #define MIN_BUF_SIZE 60 |
42 | |
43 | /* Compatibility flags for migration */ |
44 | #define VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS_BIT 0 |
45 | #define VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS \ |
46 | (1 << VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS_BIT) |
47 | #define VMXNET3_COMPAT_FLAG_DISABLE_PCIE_BIT 1 |
48 | #define VMXNET3_COMPAT_FLAG_DISABLE_PCIE \ |
49 | (1 << VMXNET3_COMPAT_FLAG_DISABLE_PCIE_BIT) |
50 | |
51 | #define VMXNET3_EXP_EP_OFFSET (0x48) |
52 | #define VMXNET3_MSI_OFFSET(s) \ |
53 | ((s)->compat_flags & VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS ? 0x50 : 0x84) |
54 | #define VMXNET3_MSIX_OFFSET(s) \ |
55 | ((s)->compat_flags & VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS ? 0 : 0x9c) |
56 | #define VMXNET3_DSN_OFFSET (0x100) |
57 | |
58 | #define VMXNET3_BAR0_IDX (0) |
59 | #define VMXNET3_BAR1_IDX (1) |
60 | #define VMXNET3_MSIX_BAR_IDX (2) |
61 | |
62 | #define VMXNET3_OFF_MSIX_TABLE (0x000) |
63 | #define VMXNET3_OFF_MSIX_PBA(s) \ |
64 | ((s)->compat_flags & VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS ? 0x800 : 0x1000) |
65 | |
66 | /* Link speed in Mbps should be shifted by 16 */ |
67 | #define VMXNET3_LINK_SPEED (1000 << 16) |
68 | |
69 | /* Link status: 1 - up, 0 - down. */ |
70 | #define VMXNET3_LINK_STATUS_UP 0x1 |
71 | |
72 | /* Least significant bit should be set for revision and version */ |
73 | #define VMXNET3_UPT_REVISION 0x1 |
74 | #define VMXNET3_DEVICE_REVISION 0x1 |
75 | |
76 | /* Number of interrupt vectors for non-MSIx modes */ |
77 | #define VMXNET3_MAX_NMSIX_INTRS (1) |
78 | |
79 | /* Macros for ring descriptor access */ |
80 | #define VMXNET3_READ_TX_QUEUE_DESCR8(_d, dpa, field) \ |
81 | (vmw_shmem_ld8(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field))) |
82 | |
83 | #define VMXNET3_WRITE_TX_QUEUE_DESCR8(_d, dpa, field, value) \ |
84 |     (vmw_shmem_st8(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value)) |
85 | |
86 | #define VMXNET3_READ_TX_QUEUE_DESCR32(_d, dpa, field) \ |
87 | (vmw_shmem_ld32(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field))) |
88 | |
89 | #define VMXNET3_WRITE_TX_QUEUE_DESCR32(_d, dpa, field, value) \ |
90 | (vmw_shmem_st32(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value)) |
91 | |
92 | #define VMXNET3_READ_TX_QUEUE_DESCR64(_d, dpa, field) \ |
93 | (vmw_shmem_ld64(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field))) |
94 | |
95 | #define VMXNET3_WRITE_TX_QUEUE_DESCR64(_d, dpa, field, value) \ |
96 | (vmw_shmem_st64(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value)) |
97 | |
98 | #define VMXNET3_READ_RX_QUEUE_DESCR64(_d, dpa, field) \ |
99 | (vmw_shmem_ld64(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field))) |
100 | |
101 | #define VMXNET3_READ_RX_QUEUE_DESCR32(_d, dpa, field) \ |
102 | (vmw_shmem_ld32(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field))) |
103 | |
104 | #define VMXNET3_WRITE_RX_QUEUE_DESCR64(_d, dpa, field, value) \ |
105 | (vmw_shmem_st64(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value)) |
106 | |
107 | #define VMXNET3_WRITE_RX_QUEUE_DESCR8(_d, dpa, field, value) \ |
108 | (vmw_shmem_st8(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value)) |
109 | |
110 | /* Macros for guest driver shared area access */ |
111 | #define VMXNET3_READ_DRV_SHARED64(_d, shpa, field) \ |
112 | (vmw_shmem_ld64(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field))) |
113 | |
114 | #define VMXNET3_READ_DRV_SHARED32(_d, shpa, field) \ |
115 | (vmw_shmem_ld32(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field))) |
116 | |
117 | #define VMXNET3_WRITE_DRV_SHARED32(_d, shpa, field, val) \ |
118 | (vmw_shmem_st32(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field), val)) |
119 | |
120 | #define VMXNET3_READ_DRV_SHARED16(_d, shpa, field) \ |
121 | (vmw_shmem_ld16(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field))) |
122 | |
123 | #define VMXNET3_READ_DRV_SHARED8(_d, shpa, field) \ |
124 | (vmw_shmem_ld8(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field))) |
125 | |
126 | #define VMXNET3_READ_DRV_SHARED(_d, shpa, field, b, l) \ |
127 | (vmw_shmem_read(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field), b, l)) |
128 | |
129 | #define VMXNET_FLAG_IS_SET(field, flag) (((field) & (flag)) == (flag)) |
130 | |
131 | typedef struct VMXNET3Class { |
132 | PCIDeviceClass parent_class; |
133 | DeviceRealize parent_dc_realize; |
134 | } VMXNET3Class; |
135 | |
136 | #define VMXNET3_DEVICE_CLASS(klass) \ |
137 | OBJECT_CLASS_CHECK(VMXNET3Class, (klass), TYPE_VMXNET3) |
138 | #define VMXNET3_DEVICE_GET_CLASS(obj) \ |
139 | OBJECT_GET_CLASS(VMXNET3Class, (obj), TYPE_VMXNET3) |
140 | |
141 | static inline void vmxnet3_ring_init(PCIDevice *d, |
142 | Vmxnet3Ring *ring, |
143 | hwaddr pa, |
144 | uint32_t size, |
145 | uint32_t cell_size, |
146 | bool zero_region) |
147 | { |
148 | ring->pa = pa; |
149 | ring->size = size; |
150 | ring->cell_size = cell_size; |
151 | ring->gen = VMXNET3_INIT_GEN; |
152 | ring->next = 0; |
153 | |
154 | if (zero_region) { |
155 | vmw_shmem_set(d, pa, 0, size * cell_size); |
156 | } |
157 | } |
158 | |
159 | #define VMXNET3_RING_DUMP(macro, ring_name, ridx, r) \ |
160 | macro("%s#%d: base %" PRIx64 " size %u cell_size %u gen %d next %u", \ |
161 | (ring_name), (ridx), \ |
162 | (r)->pa, (r)->size, (r)->cell_size, (r)->gen, (r)->next) |
163 | |
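    | /* |
    |  * Ring cursor helpers: 'next' wraps back to zero at 'size' and the |
    |  * generation bit is flipped on every wrap, mirroring the guest driver's |
    |  * view of descriptor ownership. |
    |  */ |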
164 | static inline void vmxnet3_ring_inc(Vmxnet3Ring *ring) |
165 | { |
166 | if (++ring->next >= ring->size) { |
167 | ring->next = 0; |
168 | ring->gen ^= 1; |
169 | } |
170 | } |
171 | |
172 | static inline void vmxnet3_ring_dec(Vmxnet3Ring *ring) |
173 | { |
174 | if (ring->next-- == 0) { |
175 | ring->next = ring->size - 1; |
176 | ring->gen ^= 1; |
177 | } |
178 | } |
179 | |
180 | static inline hwaddr vmxnet3_ring_curr_cell_pa(Vmxnet3Ring *ring) |
181 | { |
182 | return ring->pa + ring->next * ring->cell_size; |
183 | } |
184 | |
185 | static inline void vmxnet3_ring_read_curr_cell(PCIDevice *d, Vmxnet3Ring *ring, |
186 | void *buff) |
187 | { |
188 | vmw_shmem_read(d, vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size); |
189 | } |
190 | |
191 | static inline void vmxnet3_ring_write_curr_cell(PCIDevice *d, Vmxnet3Ring *ring, |
192 | void *buff) |
193 | { |
194 | vmw_shmem_write(d, vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size); |
195 | } |
196 | |
197 | static inline size_t vmxnet3_ring_curr_cell_idx(Vmxnet3Ring *ring) |
198 | { |
199 | return ring->next; |
200 | } |
201 | |
202 | static inline uint8_t vmxnet3_ring_curr_gen(Vmxnet3Ring *ring) |
203 | { |
204 | return ring->gen; |
205 | } |
206 | |
207 | /* Debug trace-related functions */ |
208 | static inline void |
209 | vmxnet3_dump_tx_descr(struct Vmxnet3_TxDesc *descr) |
210 | { |
211 | VMW_PKPRN("TX DESCR: " |
212 | "addr %" PRIx64 ", len: %d, gen: %d, rsvd: %d, " |
213 | "dtype: %d, ext1: %d, msscof: %d, hlen: %d, om: %d, " |
214 |               "eop: %d, cq: %d, ext2: %d, ti: %d, tci: %d", |
215 | descr->addr, descr->len, descr->gen, descr->rsvd, |
216 | descr->dtype, descr->ext1, descr->msscof, descr->hlen, descr->om, |
217 | descr->eop, descr->cq, descr->ext2, descr->ti, descr->tci); |
218 | } |
219 | |
220 | static inline void |
221 | vmxnet3_dump_virt_hdr(struct virtio_net_hdr *vhdr) |
222 | { |
223 | VMW_PKPRN("VHDR: flags 0x%x, gso_type: 0x%x, hdr_len: %d, gso_size: %d, " |
224 |               "csum_start: %d, csum_offset: %d", |
225 | vhdr->flags, vhdr->gso_type, vhdr->hdr_len, vhdr->gso_size, |
226 | vhdr->csum_start, vhdr->csum_offset); |
227 | } |
228 | |
229 | static inline void |
230 | vmxnet3_dump_rx_descr(struct Vmxnet3_RxDesc *descr) |
231 | { |
232 | VMW_PKPRN("RX DESCR: addr %" PRIx64 ", len: %d, gen: %d, rsvd: %d, " |
233 |               "dtype: %d, ext1: %d, btype: %d", |
234 | descr->addr, descr->len, descr->gen, |
235 | descr->rsvd, descr->dtype, descr->ext1, descr->btype); |
236 | } |
237 | |
238 | /* Interrupt management */ |
239 | |
240 | /* |
241 |  * This function returns whether the interrupt line is left in the asserted |
242 |  * state. This depends on the type of interrupt used: for INTx the line |
243 |  * stays asserted until it is explicitly deasserted, for MSI(X) the line is |
244 |  * deasserted automatically due to the notification semantics of MSI(X) |
245 |  * interrupts. |
246 |  */ |
247 | static bool _vmxnet3_assert_interrupt_line(VMXNET3State *s, uint32_t int_idx) |
248 | { |
249 | PCIDevice *d = PCI_DEVICE(s); |
250 | |
251 | if (s->msix_used && msix_enabled(d)) { |
252 |         VMW_IRPRN("Sending MSI-X notification for vector %u", int_idx); |
253 | msix_notify(d, int_idx); |
254 | return false; |
255 | } |
256 | if (msi_enabled(d)) { |
257 |         VMW_IRPRN("Sending MSI notification for vector %u", int_idx); |
258 | msi_notify(d, int_idx); |
259 | return false; |
260 | } |
261 | |
262 |     VMW_IRPRN("Asserting line for interrupt %u", int_idx); |
263 | pci_irq_assert(d); |
264 | return true; |
265 | } |
266 | |
267 | static void _vmxnet3_deassert_interrupt_line(VMXNET3State *s, int lidx) |
268 | { |
269 | PCIDevice *d = PCI_DEVICE(s); |
270 | |
271 |     /* |
272 |      * This function should never be called for MSI(X) interrupts |
273 |      * because deassertion is never required for message-based interrupts. |
274 |      */ |
275 |     assert(!s->msix_used || !msix_enabled(d)); |
276 |     assert(!msi_enabled(d)); |
281 | |
282 |     VMW_IRPRN("Deasserting line for interrupt %u", lidx); |
283 | pci_irq_deassert(d); |
284 | } |
285 | |
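    | /* |
    |  * Bring the physical line state in sync with the pending/masked flags: |
    |  * deassert when nothing is pending any more, assert when a pending and |
    |  * unmasked interrupt has not been asserted yet. |
    |  */ |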
286 | static void vmxnet3_update_interrupt_line_state(VMXNET3State *s, int lidx) |
287 | { |
288 | if (!s->interrupt_states[lidx].is_pending && |
289 | s->interrupt_states[lidx].is_asserted) { |
290 |         VMW_IRPRN("New interrupt line state for index %d is DOWN", lidx); |
291 | _vmxnet3_deassert_interrupt_line(s, lidx); |
292 | s->interrupt_states[lidx].is_asserted = false; |
293 | return; |
294 | } |
295 | |
296 | if (s->interrupt_states[lidx].is_pending && |
297 | !s->interrupt_states[lidx].is_masked && |
298 | !s->interrupt_states[lidx].is_asserted) { |
299 |         VMW_IRPRN("New interrupt line state for index %d is UP", lidx); |
300 | s->interrupt_states[lidx].is_asserted = |
301 | _vmxnet3_assert_interrupt_line(s, lidx); |
302 | s->interrupt_states[lidx].is_pending = false; |
303 | return; |
304 | } |
305 | } |
306 | |
307 | static void vmxnet3_trigger_interrupt(VMXNET3State *s, int lidx) |
308 | { |
309 | PCIDevice *d = PCI_DEVICE(s); |
310 | s->interrupt_states[lidx].is_pending = true; |
311 | vmxnet3_update_interrupt_line_state(s, lidx); |
312 | |
313 | if (s->msix_used && msix_enabled(d) && s->auto_int_masking) { |
314 | goto do_automask; |
315 | } |
316 | |
317 | if (msi_enabled(d) && s->auto_int_masking) { |
318 | goto do_automask; |
319 | } |
320 | |
321 | return; |
322 | |
323 | do_automask: |
324 | s->interrupt_states[lidx].is_masked = true; |
325 | vmxnet3_update_interrupt_line_state(s, lidx); |
326 | } |
327 | |
328 | static bool vmxnet3_interrupt_asserted(VMXNET3State *s, int lidx) |
329 | { |
330 | return s->interrupt_states[lidx].is_asserted; |
331 | } |
332 | |
333 | static void vmxnet3_clear_interrupt(VMXNET3State *s, int int_idx) |
334 | { |
335 | s->interrupt_states[int_idx].is_pending = false; |
336 | if (s->auto_int_masking) { |
337 | s->interrupt_states[int_idx].is_masked = true; |
338 | } |
339 | vmxnet3_update_interrupt_line_state(s, int_idx); |
340 | } |
341 | |
342 | static void |
343 | vmxnet3_on_interrupt_mask_changed(VMXNET3State *s, int lidx, bool is_masked) |
344 | { |
345 | s->interrupt_states[lidx].is_masked = is_masked; |
346 | vmxnet3_update_interrupt_line_state(s, lidx); |
347 | } |
348 | |
349 | static bool vmxnet3_verify_driver_magic(PCIDevice *d, hwaddr dshmem) |
350 | { |
351 | return (VMXNET3_READ_DRV_SHARED32(d, dshmem, magic) == VMXNET3_REV1_MAGIC); |
352 | } |
353 | |
354 | #define VMXNET3_GET_BYTE(x, byte_num) (((x) >> (byte_num)*8) & 0xFF) |
355 | #define VMXNET3_MAKE_BYTE(byte_num, val) \ |
356 | (((uint32_t)((val) & 0xFF)) << (byte_num)*8) |
357 | |
358 | static void vmxnet3_set_variable_mac(VMXNET3State *s, uint32_t h, uint32_t l) |
359 | { |
360 | s->conf.macaddr.a[0] = VMXNET3_GET_BYTE(l, 0); |
361 | s->conf.macaddr.a[1] = VMXNET3_GET_BYTE(l, 1); |
362 | s->conf.macaddr.a[2] = VMXNET3_GET_BYTE(l, 2); |
363 | s->conf.macaddr.a[3] = VMXNET3_GET_BYTE(l, 3); |
364 | s->conf.macaddr.a[4] = VMXNET3_GET_BYTE(h, 0); |
365 | s->conf.macaddr.a[5] = VMXNET3_GET_BYTE(h, 1); |
366 | |
367 | VMW_CFPRN("Variable MAC: " MAC_FMT, MAC_ARG(s->conf.macaddr.a)); |
368 | |
369 | qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); |
370 | } |
371 | |
372 | static uint64_t vmxnet3_get_mac_low(MACAddr *addr) |
373 | { |
374 | return VMXNET3_MAKE_BYTE(0, addr->a[0]) | |
375 | VMXNET3_MAKE_BYTE(1, addr->a[1]) | |
376 | VMXNET3_MAKE_BYTE(2, addr->a[2]) | |
377 | VMXNET3_MAKE_BYTE(3, addr->a[3]); |
378 | } |
379 | |
380 | static uint64_t vmxnet3_get_mac_high(MACAddr *addr) |
381 | { |
382 | return VMXNET3_MAKE_BYTE(0, addr->a[4]) | |
383 | VMXNET3_MAKE_BYTE(1, addr->a[5]); |
384 | } |
385 | |
386 | static void |
387 | vmxnet3_inc_tx_consumption_counter(VMXNET3State *s, int qidx) |
388 | { |
389 | vmxnet3_ring_inc(&s->txq_descr[qidx].tx_ring); |
390 | } |
391 | |
392 | static inline void |
393 | vmxnet3_inc_rx_consumption_counter(VMXNET3State *s, int qidx, int ridx) |
394 | { |
395 | vmxnet3_ring_inc(&s->rxq_descr[qidx].rx_ring[ridx]); |
396 | } |
397 | |
398 | static inline void |
399 | vmxnet3_inc_tx_completion_counter(VMXNET3State *s, int qidx) |
400 | { |
401 | vmxnet3_ring_inc(&s->txq_descr[qidx].comp_ring); |
402 | } |
403 | |
404 | static void |
405 | vmxnet3_inc_rx_completion_counter(VMXNET3State *s, int qidx) |
406 | { |
407 | vmxnet3_ring_inc(&s->rxq_descr[qidx].comp_ring); |
408 | } |
409 | |
410 | static void |
411 | vmxnet3_dec_rx_completion_counter(VMXNET3State *s, int qidx) |
412 | { |
413 | vmxnet3_ring_dec(&s->rxq_descr[qidx].comp_ring); |
414 | } |
415 | |
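    | /* |
    |  * Post a TX completion descriptor for 'tx_ridx' on the queue's |
    |  * completion ring and raise the queue's interrupt. |
    |  */ |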
416 | static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx) |
417 | { |
418 | struct Vmxnet3_TxCompDesc txcq_descr; |
419 | PCIDevice *d = PCI_DEVICE(s); |
420 | |
421 |     VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring); |
422 | |
423 | memset(&txcq_descr, 0, sizeof(txcq_descr)); |
424 | txcq_descr.txdIdx = tx_ridx; |
425 | txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring); |
426 | txcq_descr.val1 = cpu_to_le32(txcq_descr.val1); |
427 | txcq_descr.val2 = cpu_to_le32(txcq_descr.val2); |
428 | vmxnet3_ring_write_curr_cell(d, &s->txq_descr[qidx].comp_ring, &txcq_descr); |
429 | |
430 | /* Flush changes in TX descriptor before changing the counter value */ |
431 | smp_wmb(); |
432 | |
433 | vmxnet3_inc_tx_completion_counter(s, qidx); |
434 | vmxnet3_trigger_interrupt(s, s->txq_descr[qidx].intr_idx); |
435 | } |
436 | |
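    | /* |
    |  * Build the packet's virtio-net header according to the offload mode |
    |  * requested by the guest's TX descriptor. |
    |  */ |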
437 | static bool |
438 | vmxnet3_setup_tx_offloads(VMXNET3State *s) |
439 | { |
440 | switch (s->offload_mode) { |
441 | case VMXNET3_OM_NONE: |
442 | net_tx_pkt_build_vheader(s->tx_pkt, false, false, 0); |
443 | break; |
444 | |
445 | case VMXNET3_OM_CSUM: |
446 | net_tx_pkt_build_vheader(s->tx_pkt, false, true, 0); |
447 |         VMW_PKPRN("L4 CSO requested\n"); |
448 | break; |
449 | |
450 | case VMXNET3_OM_TSO: |
451 | net_tx_pkt_build_vheader(s->tx_pkt, true, true, |
452 | s->cso_or_gso_size); |
453 | net_tx_pkt_update_ip_checksums(s->tx_pkt); |
454 |         VMW_PKPRN("GSO offload requested."); |
455 | break; |
456 | |
457 | default: |
458 | g_assert_not_reached(); |
459 | return false; |
460 | } |
461 | |
462 | return true; |
463 | } |
464 | |
465 | static void |
466 | vmxnet3_tx_retrieve_metadata(VMXNET3State *s, |
467 | const struct Vmxnet3_TxDesc *txd) |
468 | { |
469 | s->offload_mode = txd->om; |
470 | s->cso_or_gso_size = txd->msscof; |
471 | s->tci = txd->tci; |
472 | s->needs_vlan = txd->ti; |
473 | } |
474 | |
475 | typedef enum { |
476 | VMXNET3_PKT_STATUS_OK, |
477 | VMXNET3_PKT_STATUS_ERROR, |
478 |     VMXNET3_PKT_STATUS_DISCARD, /* only for tx */ |
479 | VMXNET3_PKT_STATUS_OUT_OF_BUF /* only for rx */ |
480 | } Vmxnet3PktStatus; |
481 | |
482 | static void |
483 | vmxnet3_on_tx_done_update_stats(VMXNET3State *s, int qidx, |
484 | Vmxnet3PktStatus status) |
485 | { |
486 | size_t tot_len = net_tx_pkt_get_total_len(s->tx_pkt); |
487 | struct UPT1_TxStats *stats = &s->txq_descr[qidx].txq_stats; |
488 | |
489 | switch (status) { |
490 | case VMXNET3_PKT_STATUS_OK: |
491 | switch (net_tx_pkt_get_packet_type(s->tx_pkt)) { |
492 | case ETH_PKT_BCAST: |
493 | stats->bcastPktsTxOK++; |
494 | stats->bcastBytesTxOK += tot_len; |
495 | break; |
496 | case ETH_PKT_MCAST: |
497 | stats->mcastPktsTxOK++; |
498 | stats->mcastBytesTxOK += tot_len; |
499 | break; |
500 | case ETH_PKT_UCAST: |
501 | stats->ucastPktsTxOK++; |
502 | stats->ucastBytesTxOK += tot_len; |
503 | break; |
504 | default: |
505 | g_assert_not_reached(); |
506 | } |
507 | |
508 | if (s->offload_mode == VMXNET3_OM_TSO) { |
509 |             /* |
510 |              * According to the VMware headers this statistic counts |
511 |              * packets after segmentation, but since the QEMU model has |
512 |              * no such information, the best we can do is to report the |
513 |              * number of non-segmented packets. |
514 |              */ |
515 | stats->TSOPktsTxOK++; |
516 | stats->TSOBytesTxOK += tot_len; |
517 | } |
518 | break; |
519 | |
520 | case VMXNET3_PKT_STATUS_DISCARD: |
521 | stats->pktsTxDiscard++; |
522 | break; |
523 | |
524 | case VMXNET3_PKT_STATUS_ERROR: |
525 | stats->pktsTxError++; |
526 | break; |
527 | |
528 | default: |
529 | g_assert_not_reached(); |
530 | } |
531 | } |
532 | |
533 | static void |
534 | vmxnet3_on_rx_done_update_stats(VMXNET3State *s, |
535 | int qidx, |
536 | Vmxnet3PktStatus status) |
537 | { |
538 | struct UPT1_RxStats *stats = &s->rxq_descr[qidx].rxq_stats; |
539 | size_t tot_len = net_rx_pkt_get_total_len(s->rx_pkt); |
540 | |
541 | switch (status) { |
542 | case VMXNET3_PKT_STATUS_OUT_OF_BUF: |
543 | stats->pktsRxOutOfBuf++; |
544 | break; |
545 | |
546 | case VMXNET3_PKT_STATUS_ERROR: |
547 | stats->pktsRxError++; |
548 | break; |
549 | case VMXNET3_PKT_STATUS_OK: |
550 | switch (net_rx_pkt_get_packet_type(s->rx_pkt)) { |
551 | case ETH_PKT_BCAST: |
552 | stats->bcastPktsRxOK++; |
553 | stats->bcastBytesRxOK += tot_len; |
554 | break; |
555 | case ETH_PKT_MCAST: |
556 | stats->mcastPktsRxOK++; |
557 | stats->mcastBytesRxOK += tot_len; |
558 | break; |
559 | case ETH_PKT_UCAST: |
560 | stats->ucastPktsRxOK++; |
561 | stats->ucastBytesRxOK += tot_len; |
562 | break; |
563 | default: |
564 | g_assert_not_reached(); |
565 | } |
566 | |
567 | if (tot_len > s->mtu) { |
568 | stats->LROPktsRxOK++; |
569 | stats->LROBytesRxOK += tot_len; |
570 | } |
571 | break; |
572 | default: |
573 | g_assert_not_reached(); |
574 | } |
575 | } |
576 | |
577 | static inline void |
578 | vmxnet3_ring_read_curr_txdesc(PCIDevice *pcidev, Vmxnet3Ring *ring, |
579 | struct Vmxnet3_TxDesc *txd) |
580 | { |
581 | vmxnet3_ring_read_curr_cell(pcidev, ring, txd); |
582 | txd->addr = le64_to_cpu(txd->addr); |
583 | txd->val1 = le32_to_cpu(txd->val1); |
584 | txd->val2 = le32_to_cpu(txd->val2); |
585 | } |
586 | |
587 | static inline bool |
588 | vmxnet3_pop_next_tx_descr(VMXNET3State *s, |
589 | int qidx, |
590 | struct Vmxnet3_TxDesc *txd, |
591 | uint32_t *descr_idx) |
592 | { |
593 | Vmxnet3Ring *ring = &s->txq_descr[qidx].tx_ring; |
594 | PCIDevice *d = PCI_DEVICE(s); |
595 | |
596 | vmxnet3_ring_read_curr_txdesc(d, ring, txd); |
597 | if (txd->gen == vmxnet3_ring_curr_gen(ring)) { |
598 | /* Only read after generation field verification */ |
599 | smp_rmb(); |
600 | /* Re-read to be sure we got the latest version */ |
601 | vmxnet3_ring_read_curr_txdesc(d, ring, txd); |
602 |         VMXNET3_RING_DUMP(VMW_RIPRN, "TX", qidx, ring); |
603 | *descr_idx = vmxnet3_ring_curr_cell_idx(ring); |
604 | vmxnet3_inc_tx_consumption_counter(s, qidx); |
605 | return true; |
606 | } |
607 | |
608 | return false; |
609 | } |
610 | |
611 | static bool |
612 | vmxnet3_send_packet(VMXNET3State *s, uint32_t qidx) |
613 | { |
614 | Vmxnet3PktStatus status = VMXNET3_PKT_STATUS_OK; |
615 | |
616 | if (!vmxnet3_setup_tx_offloads(s)) { |
617 | status = VMXNET3_PKT_STATUS_ERROR; |
618 | goto func_exit; |
619 | } |
620 | |
621 | /* debug prints */ |
622 | vmxnet3_dump_virt_hdr(net_tx_pkt_get_vhdr(s->tx_pkt)); |
623 | net_tx_pkt_dump(s->tx_pkt); |
624 | |
625 | if (!net_tx_pkt_send(s->tx_pkt, qemu_get_queue(s->nic))) { |
626 | status = VMXNET3_PKT_STATUS_DISCARD; |
627 | goto func_exit; |
628 | } |
629 | |
630 | func_exit: |
631 | vmxnet3_on_tx_done_update_stats(s, qidx, status); |
632 | return (status == VMXNET3_PKT_STATUS_OK); |
633 | } |
634 | |
635 | static void vmxnet3_process_tx_queue(VMXNET3State *s, int qidx) |
636 | { |
637 | struct Vmxnet3_TxDesc txd; |
638 | uint32_t txd_idx; |
639 | uint32_t data_len; |
640 | hwaddr data_pa; |
641 | |
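    |     /* |
    |      * Walk the TX ring: accumulate descriptor fragments into tx_pkt and |
    |      * send the packet once a descriptor with the EOP bit is reached. |
    |      */ |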
642 | for (;;) { |
643 | if (!vmxnet3_pop_next_tx_descr(s, qidx, &txd, &txd_idx)) { |
644 | break; |
645 | } |
646 | |
647 | vmxnet3_dump_tx_descr(&txd); |
648 | |
649 | if (!s->skip_current_tx_pkt) { |
650 | data_len = (txd.len > 0) ? txd.len : VMXNET3_MAX_TX_BUF_SIZE; |
651 | data_pa = txd.addr; |
652 | |
653 | if (!net_tx_pkt_add_raw_fragment(s->tx_pkt, |
654 | data_pa, |
655 | data_len)) { |
656 | s->skip_current_tx_pkt = true; |
657 | } |
658 | } |
659 | |
660 | if (s->tx_sop) { |
661 | vmxnet3_tx_retrieve_metadata(s, &txd); |
662 | s->tx_sop = false; |
663 | } |
664 | |
665 | if (txd.eop) { |
666 | if (!s->skip_current_tx_pkt && net_tx_pkt_parse(s->tx_pkt)) { |
667 | if (s->needs_vlan) { |
668 | net_tx_pkt_setup_vlan_header(s->tx_pkt, s->tci); |
669 | } |
670 | |
671 | vmxnet3_send_packet(s, qidx); |
672 | } else { |
673 | vmxnet3_on_tx_done_update_stats(s, qidx, |
674 | VMXNET3_PKT_STATUS_ERROR); |
675 | } |
676 | |
677 | vmxnet3_complete_packet(s, qidx, txd_idx); |
678 | s->tx_sop = true; |
679 | s->skip_current_tx_pkt = false; |
680 | net_tx_pkt_reset(s->tx_pkt); |
681 | } |
682 | } |
683 | } |
684 | |
685 | static inline void |
686 | vmxnet3_read_next_rx_descr(VMXNET3State *s, int qidx, int ridx, |
687 | struct Vmxnet3_RxDesc *dbuf, uint32_t *didx) |
688 | { |
689 | PCIDevice *d = PCI_DEVICE(s); |
690 | |
691 | Vmxnet3Ring *ring = &s->rxq_descr[qidx].rx_ring[ridx]; |
692 | *didx = vmxnet3_ring_curr_cell_idx(ring); |
693 | vmxnet3_ring_read_curr_cell(d, ring, dbuf); |
694 | dbuf->addr = le64_to_cpu(dbuf->addr); |
695 | dbuf->val1 = le32_to_cpu(dbuf->val1); |
696 | dbuf->ext1 = le32_to_cpu(dbuf->ext1); |
697 | } |
698 | |
699 | static inline uint8_t |
700 | vmxnet3_get_rx_ring_gen(VMXNET3State *s, int qidx, int ridx) |
701 | { |
702 | return s->rxq_descr[qidx].rx_ring[ridx].gen; |
703 | } |
704 | |
705 | static inline hwaddr |
706 | vmxnet3_pop_rxc_descr(VMXNET3State *s, int qidx, uint32_t *descr_gen) |
707 | { |
708 | uint8_t ring_gen; |
709 | struct Vmxnet3_RxCompDesc rxcd; |
710 | |
711 | hwaddr daddr = |
712 | vmxnet3_ring_curr_cell_pa(&s->rxq_descr[qidx].comp_ring); |
713 | |
714 | pci_dma_read(PCI_DEVICE(s), |
715 | daddr, &rxcd, sizeof(struct Vmxnet3_RxCompDesc)); |
716 | rxcd.val1 = le32_to_cpu(rxcd.val1); |
717 | rxcd.val2 = le32_to_cpu(rxcd.val2); |
718 | rxcd.val3 = le32_to_cpu(rxcd.val3); |
719 | ring_gen = vmxnet3_ring_curr_gen(&s->rxq_descr[qidx].comp_ring); |
720 | |
721 | if (rxcd.gen != ring_gen) { |
722 | *descr_gen = ring_gen; |
723 | vmxnet3_inc_rx_completion_counter(s, qidx); |
724 | return daddr; |
725 | } |
726 | |
727 | return 0; |
728 | } |
729 | |
730 | static inline void |
731 | vmxnet3_revert_rxc_descr(VMXNET3State *s, int qidx) |
732 | { |
733 | vmxnet3_dec_rx_completion_counter(s, qidx); |
734 | } |
735 | |
736 | #define RXQ_IDX (0) |
737 | #define RX_HEAD_BODY_RING (0) |
738 | #define RX_BODY_ONLY_RING (1) |
739 | |
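    | /* |
    |  * RX buffers are taken from ring 0 (head/body descriptors) first; body |
    |  * fragments fall back to ring 1 (body-only descriptors) when ring 0 is |
    |  * exhausted or its next descriptor is another head. |
    |  */ |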
740 | static bool |
741 | vmxnet3_get_next_head_rx_descr(VMXNET3State *s, |
742 | struct Vmxnet3_RxDesc *descr_buf, |
743 | uint32_t *descr_idx, |
744 | uint32_t *ridx) |
745 | { |
746 | for (;;) { |
747 | uint32_t ring_gen; |
748 | vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING, |
749 | descr_buf, descr_idx); |
750 | |
751 | /* If no more free descriptors - return */ |
752 | ring_gen = vmxnet3_get_rx_ring_gen(s, RXQ_IDX, RX_HEAD_BODY_RING); |
753 | if (descr_buf->gen != ring_gen) { |
754 | return false; |
755 | } |
756 | |
757 | /* Only read after generation field verification */ |
758 | smp_rmb(); |
759 | /* Re-read to be sure we got the latest version */ |
760 | vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING, |
761 | descr_buf, descr_idx); |
762 | |
763 | /* Mark current descriptor as used/skipped */ |
764 | vmxnet3_inc_rx_consumption_counter(s, RXQ_IDX, RX_HEAD_BODY_RING); |
765 | |
766 | /* If this is what we are looking for - return */ |
767 | if (descr_buf->btype == VMXNET3_RXD_BTYPE_HEAD) { |
768 | *ridx = RX_HEAD_BODY_RING; |
769 | return true; |
770 | } |
771 | } |
772 | } |
773 | |
774 | static bool |
775 | vmxnet3_get_next_body_rx_descr(VMXNET3State *s, |
776 | struct Vmxnet3_RxDesc *d, |
777 | uint32_t *didx, |
778 | uint32_t *ridx) |
779 | { |
780 | vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING, d, didx); |
781 | |
782 | /* Try to find corresponding descriptor in head/body ring */ |
783 | if (d->gen == vmxnet3_get_rx_ring_gen(s, RXQ_IDX, RX_HEAD_BODY_RING)) { |
784 | /* Only read after generation field verification */ |
785 | smp_rmb(); |
786 | /* Re-read to be sure we got the latest version */ |
787 | vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING, d, didx); |
788 | if (d->btype == VMXNET3_RXD_BTYPE_BODY) { |
789 | vmxnet3_inc_rx_consumption_counter(s, RXQ_IDX, RX_HEAD_BODY_RING); |
790 | *ridx = RX_HEAD_BODY_RING; |
791 | return true; |
792 | } |
793 | } |
794 | |
795 |     /* |
796 |      * If there are no free descriptors on the head/body ring, or the next |
797 |      * free descriptor is a head descriptor, switch to the body-only ring. |
798 |      */ |
799 | vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_BODY_ONLY_RING, d, didx); |
800 | |
801 | /* If no more free descriptors - return */ |
802 | if (d->gen == vmxnet3_get_rx_ring_gen(s, RXQ_IDX, RX_BODY_ONLY_RING)) { |
803 | /* Only read after generation field verification */ |
804 | smp_rmb(); |
805 | /* Re-read to be sure we got the latest version */ |
806 | vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_BODY_ONLY_RING, d, didx); |
807 | assert(d->btype == VMXNET3_RXD_BTYPE_BODY); |
808 | *ridx = RX_BODY_ONLY_RING; |
809 | vmxnet3_inc_rx_consumption_counter(s, RXQ_IDX, RX_BODY_ONLY_RING); |
810 | return true; |
811 | } |
812 | |
813 | return false; |
814 | } |
815 | |
816 | static inline bool |
817 | vmxnet3_get_next_rx_descr(VMXNET3State *s, bool is_head, |
818 | struct Vmxnet3_RxDesc *descr_buf, |
819 | uint32_t *descr_idx, |
820 | uint32_t *ridx) |
821 | { |
822 | if (is_head || !s->rx_packets_compound) { |
823 | return vmxnet3_get_next_head_rx_descr(s, descr_buf, descr_idx, ridx); |
824 | } else { |
825 | return vmxnet3_get_next_body_rx_descr(s, descr_buf, descr_idx, ridx); |
826 | } |
827 | } |
828 | |
829 | /* In case the packet was csum offloaded (either NEEDS_CSUM or DATA_VALID), |
830 |  * the implementation always passes an RxCompDesc with a "Checksum |
831 |  * calculated and found correct" notification to the OS (cnc=0 and tuc=1, |
832 |  * see vmxnet3_rx_update_descr). This emulates the observed ESXi behavior. |
833 |  * |
834 |  * Therefore, if the packet has NEEDS_CSUM set, we must calculate |
835 |  * and place a fully computed checksum into the TCP/UDP header. |
836 |  * Otherwise, the OS driver will receive a checksum-correct indication |
837 |  * (CHECKSUM_UNNECESSARY), but the actual TCP/UDP checksum field |
838 |  * will hold just the pseudo-header checksum value. |
839 |  * |
840 |  * While this is not a problem if the packet is destined for local |
841 |  * delivery, if the receiving OS performs forwarding it will forward an |
842 |  * incorrectly checksummed packet. |
843 |  */ |
844 | static void vmxnet3_rx_need_csum_calculate(struct NetRxPkt *pkt, |
845 | const void *pkt_data, |
846 | size_t pkt_len) |
847 | { |
848 | struct virtio_net_hdr *vhdr; |
849 | bool isip4, isip6, istcp, isudp; |
850 | uint8_t *data; |
851 | int len; |
852 | |
853 | if (!net_rx_pkt_has_virt_hdr(pkt)) { |
854 | return; |
855 | } |
856 | |
857 | vhdr = net_rx_pkt_get_vhdr(pkt); |
858 | if (!VMXNET_FLAG_IS_SET(vhdr->flags, VIRTIO_NET_HDR_F_NEEDS_CSUM)) { |
859 | return; |
860 | } |
861 | |
862 | net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp); |
863 | if (!(isip4 || isip6) || !(istcp || isudp)) { |
864 | return; |
865 | } |
866 | |
867 | vmxnet3_dump_virt_hdr(vhdr); |
868 | |
869 |     /* Validate packet len: csum_start + csum_offset + length of csum field */ |
870 | if (pkt_len < (vhdr->csum_start + vhdr->csum_offset + 2)) { |
871 | VMW_PKPRN("packet len:%zu < csum_start(%d) + csum_offset(%d) + 2, " |
872 |                   "cannot calculate checksum", |
873 | pkt_len, vhdr->csum_start, vhdr->csum_offset); |
874 | return; |
875 | } |
876 | |
877 | data = (uint8_t *)pkt_data + vhdr->csum_start; |
878 | len = pkt_len - vhdr->csum_start; |
879 | /* Put the checksum obtained into the packet */ |
880 | stw_be_p(data + vhdr->csum_offset, |
881 | net_checksum_finish_nozero(net_checksum_add(len, data))); |
882 | |
883 | vhdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM; |
884 | vhdr->flags |= VIRTIO_NET_HDR_F_DATA_VALID; |
885 | } |
886 | |
887 | static void vmxnet3_rx_update_descr(struct NetRxPkt *pkt, |
888 | struct Vmxnet3_RxCompDesc *rxcd) |
889 | { |
890 | int csum_ok, is_gso; |
891 | bool isip4, isip6, istcp, isudp; |
892 | struct virtio_net_hdr *vhdr; |
893 | uint8_t offload_type; |
894 | |
895 | if (net_rx_pkt_is_vlan_stripped(pkt)) { |
896 | rxcd->ts = 1; |
897 | rxcd->tci = net_rx_pkt_get_vlan_tag(pkt); |
898 | } |
899 | |
900 | if (!net_rx_pkt_has_virt_hdr(pkt)) { |
901 | goto nocsum; |
902 | } |
903 | |
904 | vhdr = net_rx_pkt_get_vhdr(pkt); |
905 |     /* |
906 |      * The checksum is considered valid either when the lower level says |
907 |      * so (DATA_VALID) or when the lower level requests checksum offload |
908 |      * (NEEDS_CSUM), meaning the packet was produced/bridged locally and |
909 |      * has not travelled over a network since its checksum was produced. |
910 |      */ |
911 | csum_ok = VMXNET_FLAG_IS_SET(vhdr->flags, VIRTIO_NET_HDR_F_DATA_VALID) || |
912 | VMXNET_FLAG_IS_SET(vhdr->flags, VIRTIO_NET_HDR_F_NEEDS_CSUM); |
913 | |
914 | offload_type = vhdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN; |
915 | is_gso = (offload_type != VIRTIO_NET_HDR_GSO_NONE) ? 1 : 0; |
916 | |
917 | if (!csum_ok && !is_gso) { |
918 | goto nocsum; |
919 | } |
920 | |
921 | net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp); |
922 | if ((!istcp && !isudp) || (!isip4 && !isip6)) { |
923 | goto nocsum; |
924 | } |
925 | |
926 | rxcd->cnc = 0; |
927 | rxcd->v4 = isip4 ? 1 : 0; |
928 | rxcd->v6 = isip6 ? 1 : 0; |
929 | rxcd->tcp = istcp ? 1 : 0; |
930 | rxcd->udp = isudp ? 1 : 0; |
931 | rxcd->fcs = rxcd->tuc = rxcd->ipc = 1; |
932 | return; |
933 | |
934 | nocsum: |
935 | rxcd->cnc = 1; |
936 | return; |
937 | } |
938 | |
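    | /* |
    |  * Copy 'bytes_to_copy' bytes out of the iovec, starting at byte offset |
    |  * 'start_iov_off', into guest memory at 'target_addr'. |
    |  */ |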
939 | static void |
940 | vmxnet3_pci_dma_writev(PCIDevice *pci_dev, |
941 | const struct iovec *iov, |
942 | size_t start_iov_off, |
943 | hwaddr target_addr, |
944 | size_t bytes_to_copy) |
945 | { |
946 | size_t curr_off = 0; |
947 | size_t copied = 0; |
948 | |
949 | while (bytes_to_copy) { |
950 | if (start_iov_off < (curr_off + iov->iov_len)) { |
951 | size_t chunk_len = |
952 | MIN((curr_off + iov->iov_len) - start_iov_off, bytes_to_copy); |
953 | |
954 | pci_dma_write(pci_dev, target_addr + copied, |
955 | iov->iov_base + start_iov_off - curr_off, |
956 | chunk_len); |
957 | |
958 | copied += chunk_len; |
959 | start_iov_off += chunk_len; |
960 | curr_off = start_iov_off; |
961 | bytes_to_copy -= chunk_len; |
962 | } else { |
963 | curr_off += iov->iov_len; |
964 | } |
965 | iov++; |
966 | } |
967 | } |
968 | |
969 | static void |
970 | vmxnet3_pci_dma_write_rxcd(PCIDevice *pcidev, dma_addr_t pa, |
971 | struct Vmxnet3_RxCompDesc *rxcd) |
972 | { |
973 | rxcd->val1 = cpu_to_le32(rxcd->val1); |
974 | rxcd->val2 = cpu_to_le32(rxcd->val2); |
975 | rxcd->val3 = cpu_to_le32(rxcd->val3); |
976 | pci_dma_write(pcidev, pa, rxcd, sizeof(*rxcd)); |
977 | } |
978 | |
979 | static bool |
980 | vmxnet3_indicate_packet(VMXNET3State *s) |
981 | { |
982 | struct Vmxnet3_RxDesc rxd; |
983 | PCIDevice *d = PCI_DEVICE(s); |
984 | bool is_head = true; |
985 | uint32_t rxd_idx; |
986 | uint32_t rx_ridx = 0; |
987 | |
988 | struct Vmxnet3_RxCompDesc rxcd; |
989 | uint32_t new_rxcd_gen = VMXNET3_INIT_GEN; |
990 | hwaddr new_rxcd_pa = 0; |
991 | hwaddr ready_rxcd_pa = 0; |
992 | struct iovec *data = net_rx_pkt_get_iovec(s->rx_pkt); |
993 | size_t bytes_copied = 0; |
994 | size_t bytes_left = net_rx_pkt_get_total_len(s->rx_pkt); |
995 | uint16_t num_frags = 0; |
996 | size_t chunk_size; |
997 | |
998 | net_rx_pkt_dump(s->rx_pkt); |
999 | |
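    |     /* |
    |      * Scatter the packet across guest RX buffers, writing one completion |
    |      * descriptor per consumed RX descriptor; the final completion (EOP) |
    |      * is written after the loop. |
    |      */ |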
1000 | while (bytes_left > 0) { |
1001 | |
1002 | /* cannot add more frags to packet */ |
1003 | if (num_frags == s->max_rx_frags) { |
1004 | break; |
1005 | } |
1006 | |
1007 | new_rxcd_pa = vmxnet3_pop_rxc_descr(s, RXQ_IDX, &new_rxcd_gen); |
1008 | if (!new_rxcd_pa) { |
1009 | break; |
1010 | } |
1011 | |
1012 | if (!vmxnet3_get_next_rx_descr(s, is_head, &rxd, &rxd_idx, &rx_ridx)) { |
1013 | break; |
1014 | } |
1015 | |
1016 | chunk_size = MIN(bytes_left, rxd.len); |
1017 | vmxnet3_pci_dma_writev(d, data, bytes_copied, rxd.addr, chunk_size); |
1018 | bytes_copied += chunk_size; |
1019 | bytes_left -= chunk_size; |
1020 | |
1021 | vmxnet3_dump_rx_descr(&rxd); |
1022 | |
1023 | if (ready_rxcd_pa != 0) { |
1024 | vmxnet3_pci_dma_write_rxcd(d, ready_rxcd_pa, &rxcd); |
1025 | } |
1026 | |
1027 | memset(&rxcd, 0, sizeof(struct Vmxnet3_RxCompDesc)); |
1028 | rxcd.rxdIdx = rxd_idx; |
1029 | rxcd.len = chunk_size; |
1030 | rxcd.sop = is_head; |
1031 | rxcd.gen = new_rxcd_gen; |
1032 | rxcd.rqID = RXQ_IDX + rx_ridx * s->rxq_num; |
1033 | |
1034 | if (bytes_left == 0) { |
1035 | vmxnet3_rx_update_descr(s->rx_pkt, &rxcd); |
1036 | } |
1037 | |
1038 | VMW_RIPRN("RX Completion descriptor: rxRing: %lu rxIdx %lu len %lu " |
1039 |                   "sop %d csum_correct %lu", |
1040 | (unsigned long) rx_ridx, |
1041 | (unsigned long) rxcd.rxdIdx, |
1042 | (unsigned long) rxcd.len, |
1043 | (int) rxcd.sop, |
1044 | (unsigned long) rxcd.tuc); |
1045 | |
1046 | is_head = false; |
1047 | ready_rxcd_pa = new_rxcd_pa; |
1048 | new_rxcd_pa = 0; |
1049 | num_frags++; |
1050 | } |
1051 | |
1052 | if (ready_rxcd_pa != 0) { |
1053 | rxcd.eop = 1; |
1054 | rxcd.err = (bytes_left != 0); |
1055 | |
1056 | vmxnet3_pci_dma_write_rxcd(d, ready_rxcd_pa, &rxcd); |
1057 | |
1058 | /* Flush RX descriptor changes */ |
1059 | smp_wmb(); |
1060 | } |
1061 | |
1062 | if (new_rxcd_pa != 0) { |
1063 | vmxnet3_revert_rxc_descr(s, RXQ_IDX); |
1064 | } |
1065 | |
1066 | vmxnet3_trigger_interrupt(s, s->rxq_descr[RXQ_IDX].intr_idx); |
1067 | |
1068 | if (bytes_left == 0) { |
1069 | vmxnet3_on_rx_done_update_stats(s, RXQ_IDX, VMXNET3_PKT_STATUS_OK); |
1070 | return true; |
1071 | } else if (num_frags == s->max_rx_frags) { |
1072 | vmxnet3_on_rx_done_update_stats(s, RXQ_IDX, VMXNET3_PKT_STATUS_ERROR); |
1073 | return false; |
1074 | } else { |
1075 | vmxnet3_on_rx_done_update_stats(s, RXQ_IDX, |
1076 | VMXNET3_PKT_STATUS_OUT_OF_BUF); |
1077 | return false; |
1078 | } |
1079 | } |
1080 | |
1081 | static void |
1082 | vmxnet3_io_bar0_write(void *opaque, hwaddr addr, |
1083 | uint64_t val, unsigned size) |
1084 | { |
1085 | VMXNET3State *s = opaque; |
1086 | |
1087 | if (!s->device_active) { |
1088 | return; |
1089 | } |
1090 | |
1091 | if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_TXPROD, |
1092 | VMXNET3_DEVICE_MAX_TX_QUEUES, VMXNET3_REG_ALIGN)) { |
1093 | int tx_queue_idx = |
1094 | VMW_MULTIREG_IDX_BY_ADDR(addr, VMXNET3_REG_TXPROD, |
1095 | VMXNET3_REG_ALIGN); |
1096 | assert(tx_queue_idx <= s->txq_num); |
1097 | vmxnet3_process_tx_queue(s, tx_queue_idx); |
1098 | return; |
1099 | } |
1100 | |
1101 | if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_IMR, |
1102 | VMXNET3_MAX_INTRS, VMXNET3_REG_ALIGN)) { |
1103 | int l = VMW_MULTIREG_IDX_BY_ADDR(addr, VMXNET3_REG_IMR, |
1104 | VMXNET3_REG_ALIGN); |
1105 | |
1106 | VMW_CBPRN("Interrupt mask for line %d written: 0x%" PRIx64, l, val); |
1107 | |
1108 | vmxnet3_on_interrupt_mask_changed(s, l, val); |
1109 | return; |
1110 | } |
1111 | |
1112 | if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_RXPROD, |
1113 | VMXNET3_DEVICE_MAX_RX_QUEUES, VMXNET3_REG_ALIGN) || |
1114 | VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_RXPROD2, |
1115 | VMXNET3_DEVICE_MAX_RX_QUEUES, VMXNET3_REG_ALIGN)) { |
1116 | return; |
1117 | } |
1118 | |
1119 |     VMW_WRPRN("BAR0 unknown write [%" PRIx64 "] = %" PRIx64 ", size %d", |
1120 | (uint64_t) addr, val, size); |
1121 | } |
1122 | |
1123 | static uint64_t |
1124 | vmxnet3_io_bar0_read(void *opaque, hwaddr addr, unsigned size) |
1125 | { |
1126 | VMXNET3State *s = opaque; |
1127 | |
1128 | if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_IMR, |
1129 | VMXNET3_MAX_INTRS, VMXNET3_REG_ALIGN)) { |
1130 | int l = VMW_MULTIREG_IDX_BY_ADDR(addr, VMXNET3_REG_IMR, |
1131 | VMXNET3_REG_ALIGN); |
1132 | return s->interrupt_states[l].is_masked; |
1133 | } |
1134 | |
1135 |     VMW_CBPRN("BAR0 unknown read [%" PRIx64 "], size %d", addr, size); |
1136 | return 0; |
1137 | } |
1138 | |
1139 | static void vmxnet3_reset_interrupt_states(VMXNET3State *s) |
1140 | { |
1141 | int i; |
1142 | for (i = 0; i < ARRAY_SIZE(s->interrupt_states); i++) { |
1143 | s->interrupt_states[i].is_asserted = false; |
1144 | s->interrupt_states[i].is_pending = false; |
1145 | s->interrupt_states[i].is_masked = true; |
1146 | } |
1147 | } |
1148 | |
1149 | static void vmxnet3_reset_mac(VMXNET3State *s) |
1150 | { |
1151 | memcpy(&s->conf.macaddr.a, &s->perm_mac.a, sizeof(s->perm_mac.a)); |
1152 | VMW_CFPRN("MAC address set to: " MAC_FMT, MAC_ARG(s->conf.macaddr.a)); |
1153 | } |
1154 | |
1155 | static void vmxnet3_deactivate_device(VMXNET3State *s) |
1156 | { |
1157 | if (s->device_active) { |
1158 |         VMW_CBPRN("Deactivating vmxnet3..."); |
1159 | net_tx_pkt_reset(s->tx_pkt); |
1160 | net_tx_pkt_uninit(s->tx_pkt); |
1161 | net_rx_pkt_uninit(s->rx_pkt); |
1162 | s->device_active = false; |
1163 | } |
1164 | } |
1165 | |
1166 | static void vmxnet3_reset(VMXNET3State *s) |
1167 | { |
1168 |     VMW_CBPRN("Resetting vmxnet3..."); |
1169 | |
1170 | vmxnet3_deactivate_device(s); |
1171 | vmxnet3_reset_interrupt_states(s); |
1172 | s->drv_shmem = 0; |
1173 | s->tx_sop = true; |
1174 | s->skip_current_tx_pkt = false; |
1175 | } |
1176 | |
1177 | static void vmxnet3_update_rx_mode(VMXNET3State *s) |
1178 | { |
1179 | PCIDevice *d = PCI_DEVICE(s); |
1180 | |
1181 | s->rx_mode = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, |
1182 | devRead.rxFilterConf.rxMode); |
1183 |     VMW_CFPRN("RX mode: 0x%08X", s->rx_mode); |
1184 | } |
1185 | |
1186 | static void vmxnet3_update_vlan_filters(VMXNET3State *s) |
1187 | { |
1188 | int i; |
1189 | PCIDevice *d = PCI_DEVICE(s); |
1190 | |
1191 | /* Copy configuration from shared memory */ |
1192 | VMXNET3_READ_DRV_SHARED(d, s->drv_shmem, |
1193 | devRead.rxFilterConf.vfTable, |
1194 | s->vlan_table, |
1195 | sizeof(s->vlan_table)); |
1196 | |
1197 | /* Invert byte order when needed */ |
1198 | for (i = 0; i < ARRAY_SIZE(s->vlan_table); i++) { |
1199 | s->vlan_table[i] = le32_to_cpu(s->vlan_table[i]); |
1200 | } |
1201 | |
1202 | /* Dump configuration for debugging purposes */ |
1203 |     VMW_CFPRN("Configured VLANs:"); |
1204 | for (i = 0; i < sizeof(s->vlan_table) * 8; i++) { |
1205 | if (VMXNET3_VFTABLE_ENTRY_IS_SET(s->vlan_table, i)) { |
1206 |             VMW_CFPRN("\tVLAN %d is present", i); |
1207 | } |
1208 | } |
1209 | } |
1210 | |
1211 | static void vmxnet3_update_mcast_filters(VMXNET3State *s) |
1212 | { |
1213 | PCIDevice *d = PCI_DEVICE(s); |
1214 | |
1215 | uint16_t list_bytes = |
1216 | VMXNET3_READ_DRV_SHARED16(d, s->drv_shmem, |
1217 | devRead.rxFilterConf.mfTableLen); |
1218 | |
1219 | s->mcast_list_len = list_bytes / sizeof(s->mcast_list[0]); |
1220 | |
1221 | s->mcast_list = g_realloc(s->mcast_list, list_bytes); |
1222 | if (!s->mcast_list) { |
1223 | if (s->mcast_list_len == 0) { |
1224 |             VMW_CFPRN("Current multicast list is empty"); |
1225 |         } else { |
1226 |             VMW_ERPRN("Failed to allocate multicast list of %d elements", |
1227 |                       s->mcast_list_len); |
1228 | } |
1229 | s->mcast_list_len = 0; |
1230 | } else { |
1231 | int i; |
1232 | hwaddr mcast_list_pa = |
1233 | VMXNET3_READ_DRV_SHARED64(d, s->drv_shmem, |
1234 | devRead.rxFilterConf.mfTablePA); |
1235 | |
1236 | pci_dma_read(d, mcast_list_pa, s->mcast_list, list_bytes); |
1237 | |
1238 |         VMW_CFPRN("Current multicast list len is %d:", s->mcast_list_len); |
1239 | for (i = 0; i < s->mcast_list_len; i++) { |
1240 | VMW_CFPRN("\t" MAC_FMT, MAC_ARG(s->mcast_list[i].a)); |
1241 | } |
1242 | } |
1243 | } |
1244 | |
1245 | static void vmxnet3_setup_rx_filtering(VMXNET3State *s) |
1246 | { |
1247 | vmxnet3_update_rx_mode(s); |
1248 | vmxnet3_update_vlan_filters(s); |
1249 | vmxnet3_update_mcast_filters(s); |
1250 | } |
1251 | |
1252 | static uint32_t vmxnet3_get_interrupt_config(VMXNET3State *s) |
1253 | { |
1254 | uint32_t interrupt_mode = VMXNET3_IT_AUTO | (VMXNET3_IMM_AUTO << 2); |
1255 |     VMW_CFPRN("Interrupt config is 0x%X", interrupt_mode); |
1256 | return interrupt_mode; |
1257 | } |
1258 | |
1259 | static void vmxnet3_fill_stats(VMXNET3State *s) |
1260 | { |
1261 | int i; |
1262 | PCIDevice *d = PCI_DEVICE(s); |
1263 | |
1264 |     if (!s->device_active) { |
1265 |         return; |
     |     } |
1266 | |
1267 | for (i = 0; i < s->txq_num; i++) { |
1268 | pci_dma_write(d, |
1269 | s->txq_descr[i].tx_stats_pa, |
1270 | &s->txq_descr[i].txq_stats, |
1271 | sizeof(s->txq_descr[i].txq_stats)); |
1272 | } |
1273 | |
1274 | for (i = 0; i < s->rxq_num; i++) { |
1275 | pci_dma_write(d, |
1276 | s->rxq_descr[i].rx_stats_pa, |
1277 | &s->rxq_descr[i].rxq_stats, |
1278 | sizeof(s->rxq_descr[i].rxq_stats)); |
1279 | } |
1280 | } |
1281 | |
1282 | static void vmxnet3_adjust_by_guest_type(VMXNET3State *s) |
1283 | { |
1284 | struct Vmxnet3_GOSInfo gos; |
1285 | PCIDevice *d = PCI_DEVICE(s); |
1286 | |
1287 | VMXNET3_READ_DRV_SHARED(d, s->drv_shmem, devRead.misc.driverInfo.gos, |
1288 | &gos, sizeof(gos)); |
1289 | s->rx_packets_compound = |
1290 | (gos.gosType == VMXNET3_GOS_TYPE_WIN) ? false : true; |
1291 | |
1292 |     VMW_CFPRN("Guest type specifics: RXCOMPOUND: %d", s->rx_packets_compound); |
1293 | } |
1294 | |
1295 | static void |
1296 | vmxnet3_dump_conf_descr(const char *name, |
1297 | struct Vmxnet3_VariableLenConfDesc *pm_descr) |
1298 | { |
1299 |     VMW_CFPRN("%s descriptor dump: Version %u, Length %u", |
1300 |               name, pm_descr->confVer, pm_descr->confLen); |
1301 | } |
1303 | |
1304 | static void vmxnet3_update_pm_state(VMXNET3State *s) |
1305 | { |
1306 | struct Vmxnet3_VariableLenConfDesc pm_descr; |
1307 | PCIDevice *d = PCI_DEVICE(s); |
1308 | |
1309 | pm_descr.confLen = |
1310 | VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.pmConfDesc.confLen); |
1311 | pm_descr.confVer = |
1312 | VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.pmConfDesc.confVer); |
1313 | pm_descr.confPA = |
1314 | VMXNET3_READ_DRV_SHARED64(d, s->drv_shmem, devRead.pmConfDesc.confPA); |
1315 | |
1316 |     vmxnet3_dump_conf_descr("PM State", &pm_descr); |
1317 | } |
1318 | |
1319 | static void vmxnet3_update_features(VMXNET3State *s) |
1320 | { |
1321 | uint32_t guest_features; |
1322 | int rxcso_supported; |
1323 | PCIDevice *d = PCI_DEVICE(s); |
1324 | |
1325 | guest_features = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, |
1326 | devRead.misc.uptFeatures); |
1327 | |
1328 | rxcso_supported = VMXNET_FLAG_IS_SET(guest_features, UPT1_F_RXCSUM); |
1329 | s->rx_vlan_stripping = VMXNET_FLAG_IS_SET(guest_features, UPT1_F_RXVLAN); |
1330 | s->lro_supported = VMXNET_FLAG_IS_SET(guest_features, UPT1_F_LRO); |
1331 | |
1332 |     VMW_CFPRN("Features configuration: LRO: %d, RXCSUM: %d, VLANSTRIP: %d", |
1333 | s->lro_supported, rxcso_supported, |
1334 | s->rx_vlan_stripping); |
1335 | if (s->peer_has_vhdr) { |
1336 | qemu_set_offload(qemu_get_queue(s->nic)->peer, |
1337 | rxcso_supported, |
1338 | s->lro_supported, |
1339 | s->lro_supported, |
1340 | 0, |
1341 | 0); |
1342 | } |
1343 | } |
1344 | |
1345 | static bool vmxnet3_verify_intx(VMXNET3State *s, int intx) |
1346 | { |
1347 | return s->msix_used || msi_enabled(PCI_DEVICE(s)) |
1348 | || intx == pci_get_byte(s->parent_obj.config + PCI_INTERRUPT_PIN) - 1; |
1349 | } |
1350 | |
1351 | static void vmxnet3_validate_interrupt_idx(bool is_msix, int idx) |
1352 | { |
1353 | int max_ints = is_msix ? VMXNET3_MAX_INTRS : VMXNET3_MAX_NMSIX_INTRS; |
1354 | if (idx >= max_ints) { |
1355 |         hw_error("Bad interrupt index: %d\n", idx); |
1356 | } |
1357 | } |
1358 | |
1359 | static void vmxnet3_validate_interrupts(VMXNET3State *s) |
1360 | { |
1361 | int i; |
1362 | |
1363 |     VMW_CFPRN("Verifying event interrupt index (%d)", s->event_int_idx); |
1364 | vmxnet3_validate_interrupt_idx(s->msix_used, s->event_int_idx); |
1365 | |
1366 | for (i = 0; i < s->txq_num; i++) { |
1367 | int idx = s->txq_descr[i].intr_idx; |
1368 |         VMW_CFPRN("Verifying TX queue %d interrupt index (%d)", i, idx); |
1369 | vmxnet3_validate_interrupt_idx(s->msix_used, idx); |
1370 | } |
1371 | |
1372 | for (i = 0; i < s->rxq_num; i++) { |
1373 | int idx = s->rxq_descr[i].intr_idx; |
1374 |         VMW_CFPRN("Verifying RX queue %d interrupt index (%d)", i, idx); |
1375 | vmxnet3_validate_interrupt_idx(s->msix_used, idx); |
1376 | } |
1377 | } |
1378 | |
1379 | static void vmxnet3_validate_queues(VMXNET3State *s) |
1380 | { |
1381 |     /* |
1382 |      * txq_num and rxq_num are the total numbers of queues |
1383 |      * configured by the guest. These numbers must not |
1384 |      * exceed the corresponding maximum values. |
1385 |      */ |
1386 | |
1387 | if (s->txq_num > VMXNET3_DEVICE_MAX_TX_QUEUES) { |
1388 |         hw_error("Bad TX queues number: %d\n", s->txq_num); |
1389 | } |
1390 | |
1391 | if (s->rxq_num > VMXNET3_DEVICE_MAX_RX_QUEUES) { |
1392 |         hw_error("Bad RX queues number: %d\n", s->rxq_num); |
1393 | } |
1394 | } |
1395 | |
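    | /* |
    |  * Parse the driver-shared configuration and bring the device up: read |
    |  * the MTU, queue and interrupt layout, initialize the TX/RX/completion |
    |  * rings and preallocate the packet wrappers. |
    |  */ |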
1396 | static void vmxnet3_activate_device(VMXNET3State *s) |
1397 | { |
1398 | int i; |
1399 | static const uint32_t VMXNET3_DEF_TX_THRESHOLD = 1; |
1400 | PCIDevice *d = PCI_DEVICE(s); |
1401 | hwaddr qdescr_table_pa; |
1402 | uint64_t pa; |
1403 | uint32_t size; |
1404 | |
1405 | /* Verify configuration consistency */ |
1406 | if (!vmxnet3_verify_driver_magic(d, s->drv_shmem)) { |
1407 |         VMW_ERPRN("Device configuration received from driver is invalid"); |
1408 | return; |
1409 | } |
1410 | |
1411 | /* Verify if device is active */ |
1412 | if (s->device_active) { |
1413 |         VMW_CFPRN("Vmxnet3 device is active"); |
1414 | return; |
1415 | } |
1416 | |
1417 | vmxnet3_adjust_by_guest_type(s); |
1418 | vmxnet3_update_features(s); |
1419 | vmxnet3_update_pm_state(s); |
1420 | vmxnet3_setup_rx_filtering(s); |
1421 | /* Cache fields from shared memory */ |
1422 | s->mtu = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.misc.mtu); |
1423 |     VMW_CFPRN("MTU is %u", s->mtu); |
1424 | |
1425 | s->max_rx_frags = |
1426 | VMXNET3_READ_DRV_SHARED16(d, s->drv_shmem, devRead.misc.maxNumRxSG); |
1427 | |
1428 | if (s->max_rx_frags == 0) { |
1429 | s->max_rx_frags = 1; |
1430 | } |
1431 | |
1432 |     VMW_CFPRN("Max RX fragments is %u", s->max_rx_frags); |
1433 | |
1434 | s->event_int_idx = |
1435 | VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.intrConf.eventIntrIdx); |
1436 | assert(vmxnet3_verify_intx(s, s->event_int_idx)); |
1437 |     VMW_CFPRN("Events interrupt line is %u", s->event_int_idx); |
1438 | |
1439 | s->auto_int_masking = |
1440 | VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.intrConf.autoMask); |
1441 |     VMW_CFPRN("Automatic interrupt masking is %d", (int)s->auto_int_masking); |
1442 | |
1443 | s->txq_num = |
1444 | VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.misc.numTxQueues); |
1445 | s->rxq_num = |
1446 | VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.misc.numRxQueues); |
1447 | |
1448 |     VMW_CFPRN("Number of TX/RX queues %u/%u", s->txq_num, s->rxq_num); |
1449 | vmxnet3_validate_queues(s); |
1450 | |
1451 | qdescr_table_pa = |
1452 | VMXNET3_READ_DRV_SHARED64(d, s->drv_shmem, devRead.misc.queueDescPA); |
1453 | VMW_CFPRN("TX queues descriptors table is at 0x%" PRIx64, qdescr_table_pa); |
1454 | |
1455 |     /* |
1456 |      * The worst case is a packet that occupies all of the TX ring space, |
1457 |      * so use the total size of all TX rings as the maximum fragment count. |
1458 |      */ |
1459 | s->max_tx_frags = 0; |
1460 | |
1461 | /* TX queues */ |
1462 | for (i = 0; i < s->txq_num; i++) { |
1463 | hwaddr qdescr_pa = |
1464 | qdescr_table_pa + i * sizeof(struct Vmxnet3_TxQueueDesc); |
1465 | |
1466 | /* Read interrupt number for this TX queue */ |
1467 | s->txq_descr[i].intr_idx = |
1468 | VMXNET3_READ_TX_QUEUE_DESCR8(d, qdescr_pa, conf.intrIdx); |
1469 | assert(vmxnet3_verify_intx(s, s->txq_descr[i].intr_idx)); |
1470 | |
1471 |         VMW_CFPRN("TX Queue %d interrupt: %d", i, s->txq_descr[i].intr_idx); |
1472 | |
1473 | /* Read rings memory locations for TX queues */ |
1474 | pa = VMXNET3_READ_TX_QUEUE_DESCR64(d, qdescr_pa, conf.txRingBasePA); |
1475 | size = VMXNET3_READ_TX_QUEUE_DESCR32(d, qdescr_pa, conf.txRingSize); |
1476 | |
1477 | vmxnet3_ring_init(d, &s->txq_descr[i].tx_ring, pa, size, |
1478 | sizeof(struct Vmxnet3_TxDesc), false); |
1479 |         VMXNET3_RING_DUMP(VMW_CFPRN, "TX", i, &s->txq_descr[i].tx_ring); |
1480 | |
1481 | s->max_tx_frags += size; |
1482 | |
1483 | /* TXC ring */ |
1484 | pa = VMXNET3_READ_TX_QUEUE_DESCR64(d, qdescr_pa, conf.compRingBasePA); |
1485 | size = VMXNET3_READ_TX_QUEUE_DESCR32(d, qdescr_pa, conf.compRingSize); |
1486 | vmxnet3_ring_init(d, &s->txq_descr[i].comp_ring, pa, size, |
1487 | sizeof(struct Vmxnet3_TxCompDesc), true); |
1488 |         VMXNET3_RING_DUMP(VMW_CFPRN, "TXC", i, &s->txq_descr[i].comp_ring); |
1489 | |
1490 | s->txq_descr[i].tx_stats_pa = |
1491 | qdescr_pa + offsetof(struct Vmxnet3_TxQueueDesc, stats); |
1492 | |
1493 | memset(&s->txq_descr[i].txq_stats, 0, |
1494 | sizeof(s->txq_descr[i].txq_stats)); |
1495 | |
1496 | /* Fill device-managed parameters for queues */ |
1497 | VMXNET3_WRITE_TX_QUEUE_DESCR32(d, qdescr_pa, |
1498 | ctrl.txThreshold, |
1499 | VMXNET3_DEF_TX_THRESHOLD); |
1500 | } |
1501 | |
1502 | /* Preallocate TX packet wrapper */ |
1503 |     VMW_CFPRN("Max TX fragments is %u", s->max_tx_frags); |
1504 | net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s), |
1505 | s->max_tx_frags, s->peer_has_vhdr); |
1506 | net_rx_pkt_init(&s->rx_pkt, s->peer_has_vhdr); |
1507 | |
1508 | /* Read rings memory locations for RX queues */ |
1509 | for (i = 0; i < s->rxq_num; i++) { |
1510 | int j; |
1511 | hwaddr qd_pa = |
1512 | qdescr_table_pa + s->txq_num * sizeof(struct Vmxnet3_TxQueueDesc) + |
1513 | i * sizeof(struct Vmxnet3_RxQueueDesc); |
1514 | |
1515 | /* Read interrupt number for this RX queue */ |
1516 | s->rxq_descr[i].intr_idx = |
1517 | VMXNET3_READ_TX_QUEUE_DESCR8(d, qd_pa, conf.intrIdx); |
1518 | assert(vmxnet3_verify_intx(s, s->rxq_descr[i].intr_idx)); |
1519 | |
1520 |         VMW_CFPRN("RX Queue %d interrupt: %d", i, s->rxq_descr[i].intr_idx); |
1521 | |
1522 | /* Read rings memory locations */ |
1523 | for (j = 0; j < VMXNET3_RX_RINGS_PER_QUEUE; j++) { |
1524 | /* RX rings */ |
1525 | pa = VMXNET3_READ_RX_QUEUE_DESCR64(d, qd_pa, conf.rxRingBasePA[j]); |
1526 | size = VMXNET3_READ_RX_QUEUE_DESCR32(d, qd_pa, conf.rxRingSize[j]); |
1527 | vmxnet3_ring_init(d, &s->rxq_descr[i].rx_ring[j], pa, size, |
1528 | sizeof(struct Vmxnet3_RxDesc), false); |
1529 |             VMW_CFPRN("RX queue %d:%d: Base: %" PRIx64 ", Size: %d", |
1530 | i, j, pa, size); |
1531 | } |
1532 | |
1533 | /* RXC ring */ |
1534 | pa = VMXNET3_READ_RX_QUEUE_DESCR64(d, qd_pa, conf.compRingBasePA); |
1535 | size = VMXNET3_READ_RX_QUEUE_DESCR32(d, qd_pa, conf.compRingSize); |
1536 | vmxnet3_ring_init(d, &s->rxq_descr[i].comp_ring, pa, size, |
1537 | sizeof(struct Vmxnet3_RxCompDesc), true); |
1538 |         VMW_CFPRN("RXC queue %d: Base: %" PRIx64 ", Size: %d", i, pa, size); |
1539 | |
1540 | s->rxq_descr[i].rx_stats_pa = |
1541 | qd_pa + offsetof(struct Vmxnet3_RxQueueDesc, stats); |
1542 | memset(&s->rxq_descr[i].rxq_stats, 0, |
1543 | sizeof(s->rxq_descr[i].rxq_stats)); |
1544 | } |
1545 | |
1546 | vmxnet3_validate_interrupts(s); |
1547 | |
1548 | /* Make sure everything is in place before device activation */ |
1549 | smp_wmb(); |
1550 | |
1551 | vmxnet3_reset_mac(s); |
1552 | |
1553 | s->device_active = true; |
1554 | } |
1555 | |
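    | /* |
    |  * Dispatch a command written to VMXNET3_REG_CMD; the command is saved in |
    |  * last_command so that vmxnet3_get_command_status() can report its result. |
    |  */ |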
1556 | static void vmxnet3_handle_command(VMXNET3State *s, uint64_t cmd) |
1557 | { |
1558 | s->last_command = cmd; |
1559 | |
1560 | switch (cmd) { |
1561 | case VMXNET3_CMD_GET_PERM_MAC_HI: |
1562 |         VMW_CBPRN("Set: Get upper part of permanent MAC"); |
1563 | break; |
1564 | |
1565 | case VMXNET3_CMD_GET_PERM_MAC_LO: |
1566 |         VMW_CBPRN("Set: Get lower part of permanent MAC"); |
1567 | break; |
1568 | |
1569 | case VMXNET3_CMD_GET_STATS: |
1570 |         VMW_CBPRN("Set: Get device statistics"); |
1571 | vmxnet3_fill_stats(s); |
1572 | break; |
1573 | |
1574 | case VMXNET3_CMD_ACTIVATE_DEV: |
1575 |         VMW_CBPRN("Set: Activating vmxnet3 device"); |
1576 | vmxnet3_activate_device(s); |
1577 | break; |
1578 | |
1579 | case VMXNET3_CMD_UPDATE_RX_MODE: |
1580 |         VMW_CBPRN("Set: Update rx mode"); |
1581 | vmxnet3_update_rx_mode(s); |
1582 | break; |
1583 | |
1584 | case VMXNET3_CMD_UPDATE_VLAN_FILTERS: |
1585 |         VMW_CBPRN("Set: Update VLAN filters"); |
1586 | vmxnet3_update_vlan_filters(s); |
1587 | break; |
1588 | |
1589 | case VMXNET3_CMD_UPDATE_MAC_FILTERS: |
1590 |         VMW_CBPRN("Set: Update MAC filters"); |
1591 | vmxnet3_update_mcast_filters(s); |
1592 | break; |
1593 | |
1594 | case VMXNET3_CMD_UPDATE_FEATURE: |
1595 |         VMW_CBPRN("Set: Update features"); |
1596 | vmxnet3_update_features(s); |
1597 | break; |
1598 | |
1599 | case VMXNET3_CMD_UPDATE_PMCFG: |
1600 |         VMW_CBPRN("Set: Update power management config"); |
1601 | vmxnet3_update_pm_state(s); |
1602 | break; |
1603 | |
1604 | case VMXNET3_CMD_GET_LINK: |
1605 |         VMW_CBPRN("Set: Get link"); |
1606 | break; |
1607 | |
1608 | case VMXNET3_CMD_RESET_DEV: |
1609 |         VMW_CBPRN("Set: Reset device"); |
1610 | vmxnet3_reset(s); |
1611 | break; |
1612 | |
1613 | case VMXNET3_CMD_QUIESCE_DEV: |
1614 |         VMW_CBPRN("Set: VMXNET3_CMD_QUIESCE_DEV - deactivate the device"); |
1615 | vmxnet3_deactivate_device(s); |
1616 | break; |
1617 | |
1618 | case VMXNET3_CMD_GET_CONF_INTR: |
1619 |         VMW_CBPRN("Set: VMXNET3_CMD_GET_CONF_INTR - interrupt configuration"); |
1620 | break; |
1621 | |
1622 | case VMXNET3_CMD_GET_ADAPTIVE_RING_INFO: |
1623 | VMW_CBPRN("Set: VMXNET3_CMD_GET_ADAPTIVE_RING_INFO - " |
1624 |                   "adaptive ring info flags"); |
1625 | break; |
1626 | |
1627 | case VMXNET3_CMD_GET_DID_LO: |
1628 |         VMW_CBPRN("Set: Get lower part of device ID"); |
1629 | break; |
1630 | |
1631 | case VMXNET3_CMD_GET_DID_HI: |
1632 |         VMW_CBPRN("Set: Get upper part of device ID"); |
1633 | break; |
1634 | |
1635 | case VMXNET3_CMD_GET_DEV_EXTRA_INFO: |
1636 |         VMW_CBPRN("Set: Get device extra info"); |
1637 | break; |
1638 | |
1639 | default: |
1640 | VMW_CBPRN("Received unknown command: %" PRIx64, cmd); |
1641 | break; |
1642 | } |
1643 | } |
1644 | |
1645 | static uint64_t vmxnet3_get_command_status(VMXNET3State *s) |
1646 | { |
1647 | uint64_t ret; |
1648 | |
1649 | switch (s->last_command) { |
1650 | case VMXNET3_CMD_ACTIVATE_DEV: |
1651 | ret = (s->device_active) ? 0 : 1; |
1652 | VMW_CFPRN("Device active: %" PRIx64, ret); |
1653 | break; |
1654 | |
1655 | case VMXNET3_CMD_RESET_DEV: |
1656 | case VMXNET3_CMD_QUIESCE_DEV: |
1657 | case VMXNET3_CMD_GET_QUEUE_STATUS: |
1658 | case VMXNET3_CMD_GET_DEV_EXTRA_INFO: |
1659 | ret = 0; |
1660 | break; |
1661 | |
1662 | case VMXNET3_CMD_GET_LINK: |
1663 | ret = s->link_status_and_speed; |
1664 | VMW_CFPRN("Link and speed: %" PRIx64, ret); |
1665 | break; |
1666 | |
1667 | case VMXNET3_CMD_GET_PERM_MAC_LO: |
1668 | ret = vmxnet3_get_mac_low(&s->perm_mac); |
1669 | break; |
1670 | |
1671 | case VMXNET3_CMD_GET_PERM_MAC_HI: |
1672 | ret = vmxnet3_get_mac_high(&s->perm_mac); |
1673 | break; |
1674 | |
1675 | case VMXNET3_CMD_GET_CONF_INTR: |
1676 | ret = vmxnet3_get_interrupt_config(s); |
1677 | break; |
1678 | |
1679 | case VMXNET3_CMD_GET_ADAPTIVE_RING_INFO: |
1680 | ret = VMXNET3_DISABLE_ADAPTIVE_RING; |
1681 | break; |
1682 | |
1683 | case VMXNET3_CMD_GET_DID_LO: |
1684 | ret = PCI_DEVICE_ID_VMWARE_VMXNET3; |
1685 | break; |
1686 | |
1687 | case VMXNET3_CMD_GET_DID_HI: |
1688 | ret = VMXNET3_DEVICE_REVISION; |
1689 | break; |
1690 | |
1691 | default: |
1692 | VMW_WRPRN("Received request for unknown command: %x" , s->last_command); |
1693 | ret = 0; |
1694 | break; |
1695 | } |
1696 | |
1697 | return ret; |
1698 | } |
1699 | |
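/* Set event bits in the ECR field of driver shared memory */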
1700 | static void vmxnet3_set_events(VMXNET3State *s, uint32_t val) |
1701 | { |
1702 | uint32_t events; |
1703 | PCIDevice *d = PCI_DEVICE(s); |
1704 | |
1705 | VMW_CBPRN("Setting events: 0x%x" , val); |
1706 | events = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, ecr) | val; |
1707 | VMXNET3_WRITE_DRV_SHARED32(d, s->drv_shmem, ecr, events); |
1708 | } |
1709 | |
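/* Clear (acknowledge) event bits in the ECR field of driver shared memory */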
1710 | static void vmxnet3_ack_events(VMXNET3State *s, uint32_t val) |
1711 | { |
1712 | PCIDevice *d = PCI_DEVICE(s); |
1713 | uint32_t events; |
1714 | |
1715 | VMW_CBPRN("Clearing events: 0x%x" , val); |
1716 | events = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, ecr) & ~val; |
1717 | VMXNET3_WRITE_DRV_SHARED32(d, s->drv_shmem, ecr, events); |
1718 | } |
1719 | |
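/*
 * BAR1 hosts the vmxnet3 device register file: revision reports, the driver
 * shared memory address, the command register, the MAC address and the
 * interrupt/event cause registers.
 */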
1720 | static void |
1721 | vmxnet3_io_bar1_write(void *opaque, |
1722 | hwaddr addr, |
1723 | uint64_t val, |
1724 | unsigned size) |
1725 | { |
1726 | VMXNET3State *s = opaque; |
1727 | |
1728 | switch (addr) { |
1729 | /* Vmxnet3 Revision Report Selection */ |
1730 | case VMXNET3_REG_VRRS: |
1731 | VMW_CBPRN("Write BAR1 [VMXNET3_REG_VRRS] = %" PRIx64 ", size %d" , |
1732 | val, size); |
1733 | break; |
1734 | |
1735 | /* UPT Version Report Selection */ |
1736 | case VMXNET3_REG_UVRS: |
1737 | VMW_CBPRN("Write BAR1 [VMXNET3_REG_UVRS] = %" PRIx64 ", size %d" , |
1738 | val, size); |
1739 | break; |
1740 | |
1741 | /* Driver Shared Address Low */ |
1742 | case VMXNET3_REG_DSAL: |
1743 | VMW_CBPRN("Write BAR1 [VMXNET3_REG_DSAL] = %" PRIx64 ", size %d" , |
1744 | val, size); |
1745 | /* |
1746 | * Guest driver will first write the low part of the shared |
1747 | * memory address. We save it to temp variable and set the |
1748 | * shared address only after we get the high part |
1749 | */ |
1750 | if (val == 0) { |
1751 | vmxnet3_deactivate_device(s); |
1752 | } |
1753 | s->temp_shared_guest_driver_memory = val; |
1754 | s->drv_shmem = 0; |
1755 | break; |
1756 | |
1757 | /* Driver Shared Address High */ |
1758 | case VMXNET3_REG_DSAH: |
1759 | VMW_CBPRN("Write BAR1 [VMXNET3_REG_DSAH] = %" PRIx64 ", size %d" , |
1760 | val, size); |
1761 | /* |
1762 | * Set the shared memory between guest driver and device. |
1763 | * We already should have low address part. |
1764 | */ |
1765 | s->drv_shmem = s->temp_shared_guest_driver_memory | (val << 32); |
1766 | break; |
1767 | |
1768 | /* Command */ |
1769 | case VMXNET3_REG_CMD: |
1770 | VMW_CBPRN("Write BAR1 [VMXNET3_REG_CMD] = %" PRIx64 ", size %d" , |
1771 | val, size); |
1772 | vmxnet3_handle_command(s, val); |
1773 | break; |
1774 | |
1775 | /* MAC Address Low */ |
1776 | case VMXNET3_REG_MACL: |
1777 | VMW_CBPRN("Write BAR1 [VMXNET3_REG_MACL] = %" PRIx64 ", size %d" , |
1778 | val, size); |
1779 | s->temp_mac = val; |
1780 | break; |
1781 | |
1782 | /* MAC Address High */ |
1783 | case VMXNET3_REG_MACH: |
1784 | VMW_CBPRN("Write BAR1 [VMXNET3_REG_MACH] = %" PRIx64 ", size %d" , |
1785 | val, size); |
1786 | vmxnet3_set_variable_mac(s, val, s->temp_mac); |
1787 | break; |
1788 | |
1789 | /* Interrupt Cause Register */ |
1790 | case VMXNET3_REG_ICR: |
1791 | VMW_CBPRN("Write BAR1 [VMXNET3_REG_ICR] = %" PRIx64 ", size %d" , |
1792 | val, size); |
1793 | g_assert_not_reached(); |
1794 | break; |
1795 | |
1796 | /* Event Cause Register */ |
1797 | case VMXNET3_REG_ECR: |
1798 | VMW_CBPRN("Write BAR1 [VMXNET3_REG_ECR] = %" PRIx64 ", size %d" , |
1799 | val, size); |
1800 | vmxnet3_ack_events(s, val); |
1801 | break; |
1802 | |
1803 | default: |
1804 | VMW_CBPRN("Unknown Write to BAR1 [%" PRIx64 "] = %" PRIx64 ", size %d" , |
1805 | addr, val, size); |
1806 | break; |
1807 | } |
1808 | } |
1809 | |
1810 | static uint64_t |
1811 | vmxnet3_io_bar1_read(void *opaque, hwaddr addr, unsigned size) |
1812 | { |
1813 | VMXNET3State *s = opaque; |
1814 | uint64_t ret = 0; |
1815 | |
1816 | switch (addr) { |
1817 | /* Vmxnet3 Revision Report Selection */ |
1818 | case VMXNET3_REG_VRRS: |
1819 | VMW_CBPRN("Read BAR1 [VMXNET3_REG_VRRS], size %d" , size); |
1820 | ret = VMXNET3_DEVICE_REVISION; |
1821 | break; |
1822 | |
1823 | /* UPT Version Report Selection */ |
1824 | case VMXNET3_REG_UVRS: |
1825 | VMW_CBPRN("Read BAR1 [VMXNET3_REG_UVRS], size %d" , size); |
1826 | ret = VMXNET3_UPT_REVISION; |
1827 | break; |
1828 | |
1829 | /* Command */ |
1830 | case VMXNET3_REG_CMD: |
1831 | VMW_CBPRN("Read BAR1 [VMXNET3_REG_CMD], size %d" , size); |
1832 | ret = vmxnet3_get_command_status(s); |
1833 | break; |
1834 | |
1835 | /* MAC Address Low */ |
1836 | case VMXNET3_REG_MACL: |
1837 | VMW_CBPRN("Read BAR1 [VMXNET3_REG_MACL], size %d" , size); |
1838 | ret = vmxnet3_get_mac_low(&s->conf.macaddr); |
1839 | break; |
1840 | |
1841 | /* MAC Address High */ |
1842 | case VMXNET3_REG_MACH: |
1843 | VMW_CBPRN("Read BAR1 [VMXNET3_REG_MACH], size %d" , size); |
1844 | ret = vmxnet3_get_mac_high(&s->conf.macaddr); |
1845 | break; |
1846 | |
1847 | /* |
1848 | * Interrupt Cause Register |
     * Used for legacy interrupts only, so the interrupt index is always 0
1850 | */ |
1851 | case VMXNET3_REG_ICR: |
1852 | VMW_CBPRN("Read BAR1 [VMXNET3_REG_ICR], size %d" , size); |
1853 | if (vmxnet3_interrupt_asserted(s, 0)) { |
1854 | vmxnet3_clear_interrupt(s, 0); |
1855 | ret = true; |
1856 | } else { |
1857 | ret = false; |
1858 | } |
1859 | break; |
1860 | |
1861 | default: |
1862 | VMW_CBPRN("Unknow read BAR1[%" PRIx64 "], %d bytes" , addr, size); |
1863 | break; |
1864 | } |
1865 | |
1866 | return ret; |
1867 | } |
1868 | |
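/* Receive is possible only while the device is active and the link is up */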
1869 | static int |
1870 | vmxnet3_can_receive(NetClientState *nc) |
1871 | { |
1872 | VMXNET3State *s = qemu_get_nic_opaque(nc); |
1873 | return s->device_active && |
1874 | VMXNET_FLAG_IS_SET(s->link_status_and_speed, VMXNET3_LINK_STATUS_UP); |
1875 | } |
1876 | |
1877 | static inline bool |
1878 | vmxnet3_is_registered_vlan(VMXNET3State *s, const void *data) |
1879 | { |
1880 | uint16_t vlan_tag = eth_get_pkt_tci(data) & VLAN_VID_MASK; |
1881 | if (IS_SPECIAL_VLAN_ID(vlan_tag)) { |
1882 | return true; |
1883 | } |
1884 | |
1885 | return VMXNET3_VFTABLE_ENTRY_IS_SET(s->vlan_table, vlan_tag); |
1886 | } |
1887 | |
1888 | static bool |
1889 | vmxnet3_is_allowed_mcast_group(VMXNET3State *s, const uint8_t *group_mac) |
1890 | { |
1891 | int i; |
1892 | for (i = 0; i < s->mcast_list_len; i++) { |
1893 | if (!memcmp(group_mac, s->mcast_list[i].a, sizeof(s->mcast_list[i]))) { |
1894 | return true; |
1895 | } |
1896 | } |
1897 | return false; |
1898 | } |
1899 | |
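/*
 * Apply the receive filter: promiscuous mode accepts everything; otherwise
 * the packet must pass the VLAN filter and match the configured
 * unicast/broadcast/multicast receive modes.
 */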
1900 | static bool |
1901 | vmxnet3_rx_filter_may_indicate(VMXNET3State *s, const void *data, |
1902 | size_t size) |
1903 | { |
1904 | struct eth_header *ehdr = PKT_GET_ETH_HDR(data); |
1905 | |
1906 | if (VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_PROMISC)) { |
1907 | return true; |
1908 | } |
1909 | |
1910 | if (!vmxnet3_is_registered_vlan(s, data)) { |
1911 | return false; |
1912 | } |
1913 | |
1914 | switch (net_rx_pkt_get_packet_type(s->rx_pkt)) { |
1915 | case ETH_PKT_UCAST: |
1916 | if (!VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_UCAST)) { |
1917 | return false; |
1918 | } |
1919 | if (memcmp(s->conf.macaddr.a, ehdr->h_dest, ETH_ALEN)) { |
1920 | return false; |
1921 | } |
1922 | break; |
1923 | |
1924 | case ETH_PKT_BCAST: |
1925 | if (!VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_BCAST)) { |
1926 | return false; |
1927 | } |
1928 | break; |
1929 | |
1930 | case ETH_PKT_MCAST: |
1931 | if (VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_ALL_MULTI)) { |
1932 | return true; |
1933 | } |
1934 | if (!VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_MCAST)) { |
1935 | return false; |
1936 | } |
1937 | if (!vmxnet3_is_allowed_mcast_group(s, ehdr->h_dest)) { |
1938 | return false; |
1939 | } |
1940 | break; |
1941 | |
1942 | default: |
1943 | g_assert_not_reached(); |
1944 | } |
1945 | |
1946 | return true; |
1947 | } |
1948 | |
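/*
 * NetClientInfo receive callback: strip an optional virtio-net header,
 * pad short frames to the minimum Ethernet length, apply the RX filter
 * and indicate the packet to the guest RX rings.
 */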
1949 | static ssize_t |
1950 | vmxnet3_receive(NetClientState *nc, const uint8_t *buf, size_t size) |
1951 | { |
1952 | VMXNET3State *s = qemu_get_nic_opaque(nc); |
1953 | size_t bytes_indicated; |
1954 | uint8_t min_buf[MIN_BUF_SIZE]; |
1955 | |
1956 | if (!vmxnet3_can_receive(nc)) { |
1957 | VMW_PKPRN("Cannot receive now" ); |
1958 | return -1; |
1959 | } |
1960 | |
1961 | if (s->peer_has_vhdr) { |
1962 | net_rx_pkt_set_vhdr(s->rx_pkt, (struct virtio_net_hdr *)buf); |
1963 | buf += sizeof(struct virtio_net_hdr); |
1964 | size -= sizeof(struct virtio_net_hdr); |
1965 | } |
1966 | |
1967 | /* Pad to minimum Ethernet frame length */ |
1968 | if (size < sizeof(min_buf)) { |
1969 | memcpy(min_buf, buf, size); |
1970 | memset(&min_buf[size], 0, sizeof(min_buf) - size); |
1971 | buf = min_buf; |
1972 | size = sizeof(min_buf); |
1973 | } |
1974 | |
1975 | net_rx_pkt_set_packet_type(s->rx_pkt, |
1976 | get_eth_packet_type(PKT_GET_ETH_HDR(buf))); |
1977 | |
1978 | if (vmxnet3_rx_filter_may_indicate(s, buf, size)) { |
1979 | net_rx_pkt_set_protocols(s->rx_pkt, buf, size); |
1980 | vmxnet3_rx_need_csum_calculate(s->rx_pkt, buf, size); |
1981 | net_rx_pkt_attach_data(s->rx_pkt, buf, size, s->rx_vlan_stripping); |
1982 | bytes_indicated = vmxnet3_indicate_packet(s) ? size : -1; |
1983 | if (bytes_indicated < size) { |
1984 | VMW_PKPRN("RX: %zu of %zu bytes indicated" , bytes_indicated, size); |
1985 | } |
1986 | } else { |
1987 | VMW_PKPRN("Packet dropped by RX filter" ); |
1988 | bytes_indicated = size; |
1989 | } |
1990 | |
1991 | assert(size > 0); |
1992 | assert(bytes_indicated != 0); |
1993 | return bytes_indicated; |
1994 | } |
1995 | |
1996 | static void vmxnet3_set_link_status(NetClientState *nc) |
1997 | { |
1998 | VMXNET3State *s = qemu_get_nic_opaque(nc); |
1999 | |
2000 | if (nc->link_down) { |
2001 | s->link_status_and_speed &= ~VMXNET3_LINK_STATUS_UP; |
2002 | } else { |
2003 | s->link_status_and_speed |= VMXNET3_LINK_STATUS_UP; |
2004 | } |
2005 | |
2006 | vmxnet3_set_events(s, VMXNET3_ECR_LINK); |
2007 | vmxnet3_trigger_interrupt(s, s->event_int_idx); |
2008 | } |
2009 | |
2010 | static NetClientInfo net_vmxnet3_info = { |
2011 | .type = NET_CLIENT_DRIVER_NIC, |
2012 | .size = sizeof(NICState), |
2013 | .receive = vmxnet3_receive, |
2014 | .link_status_changed = vmxnet3_set_link_status, |
2015 | }; |
2016 | |
2017 | static bool vmxnet3_peer_has_vnet_hdr(VMXNET3State *s) |
2018 | { |
2019 | NetClientState *nc = qemu_get_queue(s->nic); |
2020 | |
2021 | if (qemu_has_vnet_hdr(nc->peer)) { |
2022 | return true; |
2023 | } |
2024 | |
2025 | return false; |
2026 | } |
2027 | |
2028 | static void vmxnet3_net_uninit(VMXNET3State *s) |
2029 | { |
2030 | g_free(s->mcast_list); |
2031 | vmxnet3_deactivate_device(s); |
2032 | qemu_del_nic(s->nic); |
2033 | } |
2034 | |
2035 | static void vmxnet3_net_init(VMXNET3State *s) |
2036 | { |
2037 | DeviceState *d = DEVICE(s); |
2038 | |
2039 | VMW_CBPRN("vmxnet3_net_init called..." ); |
2040 | |
2041 | qemu_macaddr_default_if_unset(&s->conf.macaddr); |
2042 | |
2043 | /* Windows guest will query the address that was set on init */ |
2044 | memcpy(&s->perm_mac.a, &s->conf.macaddr.a, sizeof(s->perm_mac.a)); |
2045 | |
2046 | s->mcast_list = NULL; |
2047 | s->mcast_list_len = 0; |
2048 | |
2049 | s->link_status_and_speed = VMXNET3_LINK_SPEED | VMXNET3_LINK_STATUS_UP; |
2050 | |
2051 | VMW_CFPRN("Permanent MAC: " MAC_FMT, MAC_ARG(s->perm_mac.a)); |
2052 | |
2053 | s->nic = qemu_new_nic(&net_vmxnet3_info, &s->conf, |
2054 | object_get_typename(OBJECT(s)), |
2055 | d->id, s); |
2056 | |
2057 | s->peer_has_vhdr = vmxnet3_peer_has_vnet_hdr(s); |
2058 | s->tx_sop = true; |
2059 | s->skip_current_tx_pkt = false; |
2060 | s->tx_pkt = NULL; |
2061 | s->rx_pkt = NULL; |
2062 | s->rx_vlan_stripping = false; |
2063 | s->lro_supported = false; |
2064 | |
2065 | if (s->peer_has_vhdr) { |
2066 | qemu_set_vnet_hdr_len(qemu_get_queue(s->nic)->peer, |
2067 | sizeof(struct virtio_net_hdr)); |
2068 | |
2069 | qemu_using_vnet_hdr(qemu_get_queue(s->nic)->peer, 1); |
2070 | } |
2071 | |
2072 | qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); |
2073 | } |
2074 | |
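/* Release MSI-X vectors [0, num_vectors) previously marked as in use */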
2075 | static void |
2076 | vmxnet3_unuse_msix_vectors(VMXNET3State *s, int num_vectors) |
2077 | { |
2078 | PCIDevice *d = PCI_DEVICE(s); |
2079 | int i; |
2080 | for (i = 0; i < num_vectors; i++) { |
2081 | msix_vector_unuse(d, i); |
2082 | } |
2083 | } |
2084 | |
2085 | static bool |
2086 | vmxnet3_use_msix_vectors(VMXNET3State *s, int num_vectors) |
2087 | { |
2088 | PCIDevice *d = PCI_DEVICE(s); |
2089 | int i; |
2090 | for (i = 0; i < num_vectors; i++) { |
2091 | int res = msix_vector_use(d, i); |
2092 | if (0 > res) { |
2093 | VMW_WRPRN("Failed to use MSI-X vector %d, error %d" , i, res); |
2094 | vmxnet3_unuse_msix_vectors(s, i); |
2095 | return false; |
2096 | } |
2097 | } |
2098 | return true; |
2099 | } |
2100 | |
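/*
 * Initialize MSI-X: the vector table and the PBA both live in the dedicated
 * MSI-X BAR. On any failure s->msix_used is left false and the device keeps
 * working without MSI-X.
 */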
2101 | static bool |
2102 | vmxnet3_init_msix(VMXNET3State *s) |
2103 | { |
2104 | PCIDevice *d = PCI_DEVICE(s); |
2105 | int res = msix_init(d, VMXNET3_MAX_INTRS, |
2106 | &s->msix_bar, |
2107 | VMXNET3_MSIX_BAR_IDX, VMXNET3_OFF_MSIX_TABLE, |
2108 | &s->msix_bar, |
2109 | VMXNET3_MSIX_BAR_IDX, VMXNET3_OFF_MSIX_PBA(s), |
2110 | VMXNET3_MSIX_OFFSET(s), NULL); |
2111 | |
2112 | if (0 > res) { |
2113 | VMW_WRPRN("Failed to initialize MSI-X, error %d" , res); |
2114 | s->msix_used = false; |
2115 | } else { |
2116 | if (!vmxnet3_use_msix_vectors(s, VMXNET3_MAX_INTRS)) { |
2117 | VMW_WRPRN("Failed to use MSI-X vectors, error %d" , res); |
2118 | msix_uninit(d, &s->msix_bar, &s->msix_bar); |
2119 | s->msix_used = false; |
2120 | } else { |
2121 | s->msix_used = true; |
2122 | } |
2123 | } |
2124 | return s->msix_used; |
2125 | } |
2126 | |
2127 | static void |
2128 | vmxnet3_cleanup_msix(VMXNET3State *s) |
2129 | { |
2130 | PCIDevice *d = PCI_DEVICE(s); |
2131 | |
2132 | if (s->msix_used) { |
2133 | vmxnet3_unuse_msix_vectors(s, VMXNET3_MAX_INTRS); |
2134 | msix_uninit(d, &s->msix_bar, &s->msix_bar); |
2135 | } |
2136 | } |
2137 | |
2138 | static void |
2139 | vmxnet3_cleanup_msi(VMXNET3State *s) |
2140 | { |
2141 | PCIDevice *d = PCI_DEVICE(s); |
2142 | |
2143 | msi_uninit(d); |
2144 | } |
2145 | |
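/* BAR0/BAR1 are modelled as little-endian MMIO with 32-bit wide accesses */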
2146 | static const MemoryRegionOps b0_ops = { |
2147 | .read = vmxnet3_io_bar0_read, |
2148 | .write = vmxnet3_io_bar0_write, |
2149 | .endianness = DEVICE_LITTLE_ENDIAN, |
2150 | .impl = { |
2151 | .min_access_size = 4, |
2152 | .max_access_size = 4, |
2153 | }, |
2154 | }; |
2155 | |
2156 | static const MemoryRegionOps b1_ops = { |
2157 | .read = vmxnet3_io_bar1_read, |
2158 | .write = vmxnet3_io_bar1_write, |
2159 | .endianness = DEVICE_LITTLE_ENDIAN, |
2160 | .impl = { |
2161 | .min_access_size = 4, |
2162 | .max_access_size = 4, |
2163 | }, |
2164 | }; |
2165 | |
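/*
 * Derive the PCIe Device Serial Number payload from the configured MAC
 * address, padded with 0xfe/0xff filler bytes (EUI-64 style layout).
 */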
2166 | static uint64_t vmxnet3_device_serial_num(VMXNET3State *s) |
2167 | { |
2168 | uint64_t dsn_payload; |
2169 | uint8_t *dsnp = (uint8_t *)&dsn_payload; |
2170 | |
2171 | dsnp[0] = 0xfe; |
2172 | dsnp[1] = s->conf.macaddr.a[3]; |
2173 | dsnp[2] = s->conf.macaddr.a[4]; |
2174 | dsnp[3] = s->conf.macaddr.a[5]; |
2175 | dsnp[4] = s->conf.macaddr.a[0]; |
2176 | dsnp[5] = s->conf.macaddr.a[1]; |
2177 | dsnp[6] = s->conf.macaddr.a[2]; |
2178 | dsnp[7] = 0xff; |
2179 | return dsn_payload; |
2180 | } |
2181 | |
2182 | |
2183 | #define VMXNET3_USE_64BIT (true) |
2184 | #define VMXNET3_PER_VECTOR_MASK (false) |
2185 | |
2186 | static void vmxnet3_pci_realize(PCIDevice *pci_dev, Error **errp) |
2187 | { |
2188 | VMXNET3State *s = VMXNET3(pci_dev); |
2189 | int ret; |
2190 | |
2191 | VMW_CBPRN("Starting init..." ); |
2192 | |
2193 | memory_region_init_io(&s->bar0, OBJECT(s), &b0_ops, s, |
2194 | "vmxnet3-b0" , VMXNET3_PT_REG_SIZE); |
2195 | pci_register_bar(pci_dev, VMXNET3_BAR0_IDX, |
2196 | PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar0); |
2197 | |
2198 | memory_region_init_io(&s->bar1, OBJECT(s), &b1_ops, s, |
2199 | "vmxnet3-b1" , VMXNET3_VD_REG_SIZE); |
2200 | pci_register_bar(pci_dev, VMXNET3_BAR1_IDX, |
2201 | PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar1); |
2202 | |
2203 | memory_region_init(&s->msix_bar, OBJECT(s), "vmxnet3-msix-bar" , |
2204 | VMXNET3_MSIX_BAR_SIZE); |
2205 | pci_register_bar(pci_dev, VMXNET3_MSIX_BAR_IDX, |
2206 | PCI_BASE_ADDRESS_SPACE_MEMORY, &s->msix_bar); |
2207 | |
2208 | vmxnet3_reset_interrupt_states(s); |
2209 | |
2210 | /* Interrupt pin A */ |
2211 | pci_dev->config[PCI_INTERRUPT_PIN] = 0x01; |
2212 | |
2213 | ret = msi_init(pci_dev, VMXNET3_MSI_OFFSET(s), VMXNET3_MAX_NMSIX_INTRS, |
2214 | VMXNET3_USE_64BIT, VMXNET3_PER_VECTOR_MASK, NULL); |
    /* Any error other than -ENOTSUP (board's MSI support is broken)
     * is a programming error. Fall back to INTx silently on -ENOTSUP */
2217 | assert(!ret || ret == -ENOTSUP); |
2218 | |
2219 | if (!vmxnet3_init_msix(s)) { |
2220 | VMW_WRPRN("Failed to initialize MSI-X, configuration is inconsistent." ); |
2221 | } |
2222 | |
2223 | vmxnet3_net_init(s); |
2224 | |
2225 | if (pci_is_express(pci_dev)) { |
2226 | if (pci_bus_is_express(pci_get_bus(pci_dev))) { |
2227 | pcie_endpoint_cap_init(pci_dev, VMXNET3_EXP_EP_OFFSET); |
2228 | } |
2229 | |
2230 | pcie_dev_ser_num_init(pci_dev, VMXNET3_DSN_OFFSET, |
2231 | vmxnet3_device_serial_num(s)); |
2232 | } |
2233 | } |
2234 | |
2235 | static void vmxnet3_instance_init(Object *obj) |
2236 | { |
2237 | VMXNET3State *s = VMXNET3(obj); |
2238 | device_add_bootindex_property(obj, &s->conf.bootindex, |
2239 | "bootindex" , "/ethernet-phy@0" , |
2240 | DEVICE(obj), NULL); |
2241 | } |
2242 | |
2243 | static void vmxnet3_pci_uninit(PCIDevice *pci_dev) |
2244 | { |
2245 | DeviceState *dev = DEVICE(pci_dev); |
2246 | VMXNET3State *s = VMXNET3(pci_dev); |
2247 | |
2248 | VMW_CBPRN("Starting uninit..." ); |
2249 | |
2250 | unregister_savevm(dev, "vmxnet3-msix" , s); |
2251 | |
2252 | vmxnet3_net_uninit(s); |
2253 | |
2254 | vmxnet3_cleanup_msix(s); |
2255 | |
2256 | vmxnet3_cleanup_msi(s); |
2257 | } |
2258 | |
2259 | static void vmxnet3_qdev_reset(DeviceState *dev) |
2260 | { |
2261 | PCIDevice *d = PCI_DEVICE(dev); |
2262 | VMXNET3State *s = VMXNET3(d); |
2263 | |
2264 | VMW_CBPRN("Starting QDEV reset..." ); |
2265 | vmxnet3_reset(s); |
2266 | } |
2267 | |
2268 | static bool vmxnet3_mc_list_needed(void *opaque) |
2269 | { |
2270 | return true; |
2271 | } |
2272 | |
2273 | static int vmxnet3_mcast_list_pre_load(void *opaque) |
2274 | { |
2275 | VMXNET3State *s = opaque; |
2276 | |
2277 | s->mcast_list = g_malloc(s->mcast_list_buff_size); |
2278 | |
2279 | return 0; |
2280 | } |
2281 | |
2282 | |
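/*
 * Recompute the multicast list buffer size so that the variable-length
 * buffer (VMSTATE_VBUFFER_UINT32) is saved with the correct length.
 */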
2283 | static int vmxnet3_pre_save(void *opaque) |
2284 | { |
2285 | VMXNET3State *s = opaque; |
2286 | |
2287 | s->mcast_list_buff_size = s->mcast_list_len * sizeof(MACAddr); |
2288 | |
2289 | return 0; |
2290 | } |
2291 | |
2292 | static const VMStateDescription vmxstate_vmxnet3_mcast_list = { |
2293 | .name = "vmxnet3/mcast_list" , |
2294 | .version_id = 1, |
2295 | .minimum_version_id = 1, |
2296 | .pre_load = vmxnet3_mcast_list_pre_load, |
2297 | .needed = vmxnet3_mc_list_needed, |
2298 | .fields = (VMStateField[]) { |
2299 | VMSTATE_VBUFFER_UINT32(mcast_list, VMXNET3State, 0, NULL, |
2300 | mcast_list_buff_size), |
2301 | VMSTATE_END_OF_LIST() |
2302 | } |
2303 | }; |
2304 | |
2305 | static const VMStateDescription vmstate_vmxnet3_ring = { |
2306 | .name = "vmxnet3-ring" , |
2307 | .version_id = 0, |
2308 | .fields = (VMStateField[]) { |
2309 | VMSTATE_UINT64(pa, Vmxnet3Ring), |
2310 | VMSTATE_UINT32(size, Vmxnet3Ring), |
2311 | VMSTATE_UINT32(cell_size, Vmxnet3Ring), |
2312 | VMSTATE_UINT32(next, Vmxnet3Ring), |
2313 | VMSTATE_UINT8(gen, Vmxnet3Ring), |
2314 | VMSTATE_END_OF_LIST() |
2315 | } |
2316 | }; |
2317 | |
2318 | static const VMStateDescription vmstate_vmxnet3_tx_stats = { |
2319 | .name = "vmxnet3-tx-stats" , |
2320 | .version_id = 0, |
2321 | .fields = (VMStateField[]) { |
2322 | VMSTATE_UINT64(TSOPktsTxOK, struct UPT1_TxStats), |
2323 | VMSTATE_UINT64(TSOBytesTxOK, struct UPT1_TxStats), |
2324 | VMSTATE_UINT64(ucastPktsTxOK, struct UPT1_TxStats), |
2325 | VMSTATE_UINT64(ucastBytesTxOK, struct UPT1_TxStats), |
2326 | VMSTATE_UINT64(mcastPktsTxOK, struct UPT1_TxStats), |
2327 | VMSTATE_UINT64(mcastBytesTxOK, struct UPT1_TxStats), |
2328 | VMSTATE_UINT64(bcastPktsTxOK, struct UPT1_TxStats), |
2329 | VMSTATE_UINT64(bcastBytesTxOK, struct UPT1_TxStats), |
2330 | VMSTATE_UINT64(pktsTxError, struct UPT1_TxStats), |
2331 | VMSTATE_UINT64(pktsTxDiscard, struct UPT1_TxStats), |
2332 | VMSTATE_END_OF_LIST() |
2333 | } |
2334 | }; |
2335 | |
2336 | static const VMStateDescription vmstate_vmxnet3_txq_descr = { |
2337 | .name = "vmxnet3-txq-descr" , |
2338 | .version_id = 0, |
2339 | .fields = (VMStateField[]) { |
2340 | VMSTATE_STRUCT(tx_ring, Vmxnet3TxqDescr, 0, vmstate_vmxnet3_ring, |
2341 | Vmxnet3Ring), |
2342 | VMSTATE_STRUCT(comp_ring, Vmxnet3TxqDescr, 0, vmstate_vmxnet3_ring, |
2343 | Vmxnet3Ring), |
2344 | VMSTATE_UINT8(intr_idx, Vmxnet3TxqDescr), |
2345 | VMSTATE_UINT64(tx_stats_pa, Vmxnet3TxqDescr), |
2346 | VMSTATE_STRUCT(txq_stats, Vmxnet3TxqDescr, 0, vmstate_vmxnet3_tx_stats, |
2347 | struct UPT1_TxStats), |
2348 | VMSTATE_END_OF_LIST() |
2349 | } |
2350 | }; |
2351 | |
2352 | static const VMStateDescription vmstate_vmxnet3_rx_stats = { |
2353 | .name = "vmxnet3-rx-stats" , |
2354 | .version_id = 0, |
2355 | .fields = (VMStateField[]) { |
2356 | VMSTATE_UINT64(LROPktsRxOK, struct UPT1_RxStats), |
2357 | VMSTATE_UINT64(LROBytesRxOK, struct UPT1_RxStats), |
2358 | VMSTATE_UINT64(ucastPktsRxOK, struct UPT1_RxStats), |
2359 | VMSTATE_UINT64(ucastBytesRxOK, struct UPT1_RxStats), |
2360 | VMSTATE_UINT64(mcastPktsRxOK, struct UPT1_RxStats), |
2361 | VMSTATE_UINT64(mcastBytesRxOK, struct UPT1_RxStats), |
2362 | VMSTATE_UINT64(bcastPktsRxOK, struct UPT1_RxStats), |
2363 | VMSTATE_UINT64(bcastBytesRxOK, struct UPT1_RxStats), |
2364 | VMSTATE_UINT64(pktsRxOutOfBuf, struct UPT1_RxStats), |
2365 | VMSTATE_UINT64(pktsRxError, struct UPT1_RxStats), |
2366 | VMSTATE_END_OF_LIST() |
2367 | } |
2368 | }; |
2369 | |
2370 | static const VMStateDescription vmstate_vmxnet3_rxq_descr = { |
2371 | .name = "vmxnet3-rxq-descr" , |
2372 | .version_id = 0, |
2373 | .fields = (VMStateField[]) { |
2374 | VMSTATE_STRUCT_ARRAY(rx_ring, Vmxnet3RxqDescr, |
2375 | VMXNET3_RX_RINGS_PER_QUEUE, 0, |
2376 | vmstate_vmxnet3_ring, Vmxnet3Ring), |
2377 | VMSTATE_STRUCT(comp_ring, Vmxnet3RxqDescr, 0, vmstate_vmxnet3_ring, |
2378 | Vmxnet3Ring), |
2379 | VMSTATE_UINT8(intr_idx, Vmxnet3RxqDescr), |
2380 | VMSTATE_UINT64(rx_stats_pa, Vmxnet3RxqDescr), |
2381 | VMSTATE_STRUCT(rxq_stats, Vmxnet3RxqDescr, 0, vmstate_vmxnet3_rx_stats, |
2382 | struct UPT1_RxStats), |
2383 | VMSTATE_END_OF_LIST() |
2384 | } |
2385 | }; |
2386 | |
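/*
 * After migration: re-create the TX/RX packet helpers and re-claim the
 * MSI-X vectors that were in use on the source.
 */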
2387 | static int vmxnet3_post_load(void *opaque, int version_id) |
2388 | { |
2389 | VMXNET3State *s = opaque; |
2390 | PCIDevice *d = PCI_DEVICE(s); |
2391 | |
2392 | net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s), |
2393 | s->max_tx_frags, s->peer_has_vhdr); |
2394 | net_rx_pkt_init(&s->rx_pkt, s->peer_has_vhdr); |
2395 | |
2396 | if (s->msix_used) { |
2397 | if (!vmxnet3_use_msix_vectors(s, VMXNET3_MAX_INTRS)) { |
2398 | VMW_WRPRN("Failed to re-use MSI-X vectors" ); |
2399 | msix_uninit(d, &s->msix_bar, &s->msix_bar); |
2400 | s->msix_used = false; |
2401 | return -1; |
2402 | } |
2403 | } |
2404 | |
2405 | vmxnet3_validate_queues(s); |
2406 | vmxnet3_validate_interrupts(s); |
2407 | |
2408 | return 0; |
2409 | } |
2410 | |
2411 | static const VMStateDescription vmstate_vmxnet3_int_state = { |
2412 | .name = "vmxnet3-int-state" , |
2413 | .version_id = 0, |
2414 | .fields = (VMStateField[]) { |
2415 | VMSTATE_BOOL(is_masked, Vmxnet3IntState), |
2416 | VMSTATE_BOOL(is_pending, Vmxnet3IntState), |
2417 | VMSTATE_BOOL(is_asserted, Vmxnet3IntState), |
2418 | VMSTATE_END_OF_LIST() |
2419 | } |
2420 | }; |
2421 | |
2422 | static const VMStateDescription vmstate_vmxnet3 = { |
2423 | .name = "vmxnet3" , |
2424 | .version_id = 1, |
2425 | .minimum_version_id = 1, |
2426 | .pre_save = vmxnet3_pre_save, |
2427 | .post_load = vmxnet3_post_load, |
2428 | .fields = (VMStateField[]) { |
2429 | VMSTATE_PCI_DEVICE(parent_obj, VMXNET3State), |
2430 | VMSTATE_MSIX(parent_obj, VMXNET3State), |
2431 | VMSTATE_BOOL(rx_packets_compound, VMXNET3State), |
2432 | VMSTATE_BOOL(rx_vlan_stripping, VMXNET3State), |
2433 | VMSTATE_BOOL(lro_supported, VMXNET3State), |
2434 | VMSTATE_UINT32(rx_mode, VMXNET3State), |
2435 | VMSTATE_UINT32(mcast_list_len, VMXNET3State), |
2436 | VMSTATE_UINT32(mcast_list_buff_size, VMXNET3State), |
2437 | VMSTATE_UINT32_ARRAY(vlan_table, VMXNET3State, VMXNET3_VFT_SIZE), |
2438 | VMSTATE_UINT32(mtu, VMXNET3State), |
2439 | VMSTATE_UINT16(max_rx_frags, VMXNET3State), |
2440 | VMSTATE_UINT32(max_tx_frags, VMXNET3State), |
2441 | VMSTATE_UINT8(event_int_idx, VMXNET3State), |
2442 | VMSTATE_BOOL(auto_int_masking, VMXNET3State), |
2443 | VMSTATE_UINT8(txq_num, VMXNET3State), |
2444 | VMSTATE_UINT8(rxq_num, VMXNET3State), |
2445 | VMSTATE_UINT32(device_active, VMXNET3State), |
2446 | VMSTATE_UINT32(last_command, VMXNET3State), |
2447 | VMSTATE_UINT32(link_status_and_speed, VMXNET3State), |
2448 | VMSTATE_UINT32(temp_mac, VMXNET3State), |
2449 | VMSTATE_UINT64(drv_shmem, VMXNET3State), |
2450 | VMSTATE_UINT64(temp_shared_guest_driver_memory, VMXNET3State), |
2451 | |
2452 | VMSTATE_STRUCT_ARRAY(txq_descr, VMXNET3State, |
2453 | VMXNET3_DEVICE_MAX_TX_QUEUES, 0, vmstate_vmxnet3_txq_descr, |
2454 | Vmxnet3TxqDescr), |
2455 | VMSTATE_STRUCT_ARRAY(rxq_descr, VMXNET3State, |
2456 | VMXNET3_DEVICE_MAX_RX_QUEUES, 0, vmstate_vmxnet3_rxq_descr, |
2457 | Vmxnet3RxqDescr), |
2458 | VMSTATE_STRUCT_ARRAY(interrupt_states, VMXNET3State, |
2459 | VMXNET3_MAX_INTRS, 0, vmstate_vmxnet3_int_state, |
2460 | Vmxnet3IntState), |
2461 | |
2462 | VMSTATE_END_OF_LIST() |
2463 | }, |
2464 | .subsections = (const VMStateDescription*[]) { |
2465 | &vmxstate_vmxnet3_mcast_list, |
2466 | NULL |
2467 | } |
2468 | }; |
2469 | |
2470 | static Property vmxnet3_properties[] = { |
2471 | DEFINE_NIC_PROPERTIES(VMXNET3State, conf), |
2472 | DEFINE_PROP_BIT("x-old-msi-offsets" , VMXNET3State, compat_flags, |
2473 | VMXNET3_COMPAT_FLAG_OLD_MSI_OFFSETS_BIT, false), |
2474 | DEFINE_PROP_BIT("x-disable-pcie" , VMXNET3State, compat_flags, |
2475 | VMXNET3_COMPAT_FLAG_DISABLE_PCIE_BIT, false), |
2476 | DEFINE_PROP_END_OF_LIST(), |
2477 | }; |
2478 | |
2479 | static void vmxnet3_realize(DeviceState *qdev, Error **errp) |
2480 | { |
2481 | VMXNET3Class *vc = VMXNET3_DEVICE_GET_CLASS(qdev); |
2482 | PCIDevice *pci_dev = PCI_DEVICE(qdev); |
2483 | VMXNET3State *s = VMXNET3(qdev); |
2484 | |
2485 | if (!(s->compat_flags & VMXNET3_COMPAT_FLAG_DISABLE_PCIE)) { |
2486 | pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS; |
2487 | } |
2488 | |
2489 | vc->parent_dc_realize(qdev, errp); |
2490 | } |
2491 | |
2492 | static void vmxnet3_class_init(ObjectClass *class, void *data) |
2493 | { |
2494 | DeviceClass *dc = DEVICE_CLASS(class); |
2495 | PCIDeviceClass *c = PCI_DEVICE_CLASS(class); |
2496 | VMXNET3Class *vc = VMXNET3_DEVICE_CLASS(class); |
2497 | |
2498 | c->realize = vmxnet3_pci_realize; |
2499 | c->exit = vmxnet3_pci_uninit; |
2500 | c->vendor_id = PCI_VENDOR_ID_VMWARE; |
2501 | c->device_id = PCI_DEVICE_ID_VMWARE_VMXNET3; |
2502 | c->revision = PCI_DEVICE_ID_VMWARE_VMXNET3_REVISION; |
    c->romfile = "efi-vmxnet3.rom";
2504 | c->class_id = PCI_CLASS_NETWORK_ETHERNET; |
2505 | c->subsystem_vendor_id = PCI_VENDOR_ID_VMWARE; |
2506 | c->subsystem_id = PCI_DEVICE_ID_VMWARE_VMXNET3; |
2507 | device_class_set_parent_realize(dc, vmxnet3_realize, |
2508 | &vc->parent_dc_realize); |
    dc->desc = "VMWare Paravirtualized Ethernet v3";
2510 | dc->reset = vmxnet3_qdev_reset; |
2511 | dc->vmsd = &vmstate_vmxnet3; |
2512 | dc->props = vmxnet3_properties; |
2513 | set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); |
2514 | } |
2515 | |
2516 | static const TypeInfo vmxnet3_info = { |
2517 | .name = TYPE_VMXNET3, |
2518 | .parent = TYPE_PCI_DEVICE, |
2519 | .class_size = sizeof(VMXNET3Class), |
2520 | .instance_size = sizeof(VMXNET3State), |
2521 | .class_init = vmxnet3_class_init, |
2522 | .instance_init = vmxnet3_instance_init, |
2523 | .interfaces = (InterfaceInfo[]) { |
2524 | { INTERFACE_PCIE_DEVICE }, |
2525 | { INTERFACE_CONVENTIONAL_PCI_DEVICE }, |
2526 | { } |
2527 | }, |
2528 | }; |
2529 | |
2530 | static void vmxnet3_register_types(void) |
2531 | { |
2532 | VMW_CBPRN("vmxnet3_register_types called..." ); |
2533 | type_register_static(&vmxnet3_info); |
2534 | } |
2535 | |
2536 | type_init(vmxnet3_register_types) |
2537 | |