/* SPDX-License-Identifier: BSD-3-Clause */
/*
 * Copyright (c) 1995 Danny Gasparovski.
 */

#include "slirp.h"

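/*
 * Insert ifm into a session's circular packet list, right after ifmhead.
 * ifs_next/ifs_prev link together all queued packets that belong to the
 * same socket.
 */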
static void ifs_insque(struct mbuf *ifm, struct mbuf *ifmhead)
{
    ifm->ifs_next = ifmhead->ifs_next;
    ifmhead->ifs_next = ifm;
    ifm->ifs_prev = ifmhead;
    ifm->ifs_next->ifs_prev = ifm;
}

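/* Unlink ifm from its session's circular packet list. */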
static void ifs_remque(struct mbuf *ifm)
{
    ifm->ifs_prev->ifs_next = ifm->ifs_next;
    ifm->ifs_next->ifs_prev = ifm->ifs_prev;
}

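/* Initialise both output queues to empty (each quehead points back at itself). */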
void if_init(Slirp *slirp)
{
    slirp->if_fastq.qh_link = slirp->if_fastq.qh_rlink = &slirp->if_fastq;
    slirp->if_batchq.qh_link = slirp->if_batchq.qh_rlink = &slirp->if_batchq;
}

/*
 * if_output: Queue a packet onto an output queue.
 * There are two output queues, if_fastq and if_batchq.
 * Each output queue is a doubly-linked list of doubly-linked lists
 * of mbufs, each list belonging to one "session" (socket). This
 * way, we can output packets fairly by sending one packet from each
 * session, instead of all the packets from one session, then all packets
 * from the next session, etc. Packets on the if_fastq get absolute
 * priority, but if one session hogs the link, it gets "downgraded"
 * to the batchq until it runs out of packets, then it'll return
 * to the fastq (e.g. if the user does an ls -alR in a telnet session,
 * it'll temporarily get downgraded to the batchq).
 */
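/*
 * Queue layout (as maintained by the code below):
 *
 *     if_batchq <-> S1 <-> S2 <-> S3 <-> if_batchq    (ifq_next/ifq_prev)
 *                   |      |      |
 *                  ring   ring   ring                 (ifs_next/ifs_prev)
 *
 * Only the head packet of each session sits on the ifq_next/ifq_prev
 * chain rooted at the quehead; any further packets for that session hang
 * off the head in a circular ifs_next/ifs_prev ring. if_fastq has the
 * same shape.
 */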
void if_output(struct socket *so, struct mbuf *ifm)
{
    Slirp *slirp = ifm->slirp;
    struct mbuf *ifq;
    int on_fastq = 1;

    DEBUG_CALL("if_output");
    DEBUG_ARG("so = %p", so);
    DEBUG_ARG("ifm = %p", ifm);

    /*
     * First remove the mbuf from m_usedlist,
     * since we're going to use m_next and m_prev ourselves.
     * XXX Shouldn't need this, gotta change dtom() etc.
     */
    if (ifm->m_flags & M_USEDLIST) {
        remque(ifm);
        ifm->m_flags &= ~M_USEDLIST;
    }

    /*
     * See if there's already a batchq list for this session.
     * This can include an interactive session, which should go on the fastq
     * but got too greedy and was downgraded from fastq to batchq.
     * We mustn't put this packet back on the fastq (or we'll send it out of
     * order).
     * XXX add cache here?
     */
    if (so) {
        for (ifq = (struct mbuf *)slirp->if_batchq.qh_rlink;
             (struct quehead *)ifq != &slirp->if_batchq; ifq = ifq->ifq_prev) {
            if (so == ifq->ifq_so) {
                /* A match! Append to this session's ring. */
                ifm->ifq_so = so;
                ifs_insque(ifm, ifq->ifs_prev);
                goto diddit;
            }
        }
    }

    /* No match, check which queue to put it on */
    if (so && (so->so_iptos & IPTOS_LOWDELAY)) {
        ifq = (struct mbuf *)slirp->if_fastq.qh_rlink;
        on_fastq = 1;
        /*
         * Check if this packet is part of the last
         * packet's session
         */
        if (ifq->ifq_so == so) {
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
    } else {
        ifq = (struct mbuf *)slirp->if_batchq.qh_rlink;
    }

    /* Create a new doubly-linked list for this session */
    ifm->ifq_so = so;
    ifs_init(ifm);
    insque(ifm, ifq);

diddit:
    if (so) {
        /* Update *_queued */
        so->so_queued++;
        so->so_nqueued++;
        /*
         * Check if the interactive session should be downgraded to
         * the batchq. A session is downgraded if it has queued 6
         * packets without pausing, and at least 3 of those packets
         * have been sent over the link
         * (XXX These are arbitrary numbers, probably not optimal..)
         */
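        /*
         * Worked example of the thresholds above: with so_nqueued == 6
         * and so_queued == 3, three of the six packets queued so far have
         * already been sent, both conditions hold, and the session is
         * moved from the fastq to the batchq.
         */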
        if (on_fastq &&
            ((so->so_nqueued >= 6) && (so->so_nqueued - so->so_queued) >= 3)) {
            /* Remove from current queue... */
            remque(ifm->ifs_next);

            /* ...And insert in the new. That'll teach ya! */
            insque(ifm->ifs_next, &slirp->if_batchq);
        }
    }

    /*
     * Try to send the queued packets now; this also prevents us
     * from malloc()ing too many mbufs
     */
    if_start(ifm->slirp);
}

/*
 * Send one packet from each session.
 * If there are packets on the fastq, they are sent FIFO, before
 * everything else. Then we choose the first packet from each
 * batchq session (socket) and send it.
 * For example, if there are 3 ftp sessions fighting for bandwidth,
 * one packet will be sent from the first session, then one packet
 * from the second session, then one packet from the third.
 */
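/*
 * Concretely, with the fastq empty and batchq sessions A = {A1, A2},
 * B = {B1} and C = {C1, C2}, a single call sends A1, B1 and C1; A2 and
 * C2 are re-queued as their sessions' new heads and go out on a later
 * call.
 */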
void if_start(Slirp *slirp)
{
    uint64_t now = slirp->cb->clock_get_ns(slirp->opaque);
    bool from_batchq = false;
    struct mbuf *ifm, *ifm_next, *ifqt;

    DEBUG_CALL("if_start");

    if (slirp->if_start_busy) {
        return;
    }
    slirp->if_start_busy = true;

    /* Snapshot the batchq head now; the fastq walk below may fall through to it */
    struct mbuf *batch_head = NULL;
    if (slirp->if_batchq.qh_link != &slirp->if_batchq) {
        batch_head = (struct mbuf *)slirp->if_batchq.qh_link;
    }

    if (slirp->if_fastq.qh_link != &slirp->if_fastq) {
        ifm_next = (struct mbuf *)slirp->if_fastq.qh_link;
    } else if (batch_head) {
        /* Nothing on fastq, pick up from batchq */
        ifm_next = batch_head;
        from_batchq = true;
    } else {
        ifm_next = NULL;
    }

    while (ifm_next) {
        ifm = ifm_next;

        ifm_next = ifm->ifq_next;
        if ((struct quehead *)ifm_next == &slirp->if_fastq) {
            /* No more packets in fastq, switch to batchq */
            ifm_next = batch_head;
            from_batchq = true;
        }
        if ((struct quehead *)ifm_next == &slirp->if_batchq) {
            /* end of batchq */
            ifm_next = NULL;
        }

        /* Try to send the packet unless it has already expired */
        if (ifm->expiration_date >= now && !if_encap(slirp, ifm)) {
            /* Packet is delayed due to pending ARP or NDP resolution */
            continue;
        }

        /* Remove it from the queue */
        ifqt = ifm->ifq_prev;
        remque(ifm);

        /* If there are more packets for this session, re-queue them */
        if (ifm->ifs_next != ifm) {
            struct mbuf *next = ifm->ifs_next;

            insque(next, ifqt);
            ifs_remque(ifm);
            if (!from_batchq) {
                ifm_next = next;
            }
        }

        /* Update so_queued */
        if (ifm->ifq_so && --ifm->ifq_so->so_queued == 0) {
            /* If there's no more queued, reset nqueued */
            ifm->ifq_so->so_nqueued = 0;
        }

        m_free(ifm);
    }

    slirp->if_start_busy = false;
}
