JustKernel

Ray Of Hope

QEMU + Simple Network rate limiter from Linux Host side.

Simple network rate limiter on the Linux host side: controlling the flow using a ring buffer.
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 613f144..df620f7 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -218,6 +218,7 @@ static void rxfilter_notify(NetClientState *nc)

/* Format a 6-byte MAC address as a newly-allocated "xx:xx:xx:xx:xx:xx"
 * string (caller frees with g_free()). */
static char *mac_strdup_printf(const uint8_t *mac)
{
+ /* debug trace added by this patch to confirm the build is picked up;
+  * note: string literals must use ASCII double quotes, not typographic
+  * quotes, or the file will not compile. Remove before merging. */
+ printf("Hello world \n");
return g_strdup_printf("%.2x:%.2x:%.2x:%.2x:%.2x:%.2x", mac[0],
mac[1], mac[2], mac[3], mac[4], mac[5]);
}
@@ -1050,13 +1051,19 @@ static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
virtio_net_flush_tx(q);
}

+#define BANDWIDTH_LIMIT 10 /* Megabits per second */
/* TX */
+/* anshul: for xmit of packet . VirtIONetQueue is the queue that has been passed from the the guest PV driver.*/
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
VirtIONet *n = q->n;
VirtIODevice *vdev = VIRTIO_DEVICE(n);
VirtQueueElement elem;
+ VirtIONetQueue delay_queue;
int32_t num_packets = 0;
+ uint32_t uBandwidthBytes;
+ uint32_t in_size =0, out_size = 0;
+ uBandwidthBytes = (BANDWIDTH_LIMIT * 1024 * 1024 ) / 8;
int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
return num_packets;
@@ -1068,7 +1075,39 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
virtio_queue_set_notification(q->tx_vq, 0);
return num_packets;
}

+ if(n->tx_bandwidth_use >= uBandwidthBytes)
+ {
+ while (virtqueue_pop(q->tx_vq, &elem))
+ {
+ out_size = iov_size(elem.out_sg, elem.out_num);
+ n->tx_bandwidth_use += out_size;
+ virtqueue_push(delay_queue.tx_vq, &elem, in_size+out_size);
+ }
+ cur_bandwidth_bytes = n->tx_bandwidth_use;
+ /* copy back till the point bandwidth is less than whats allowed */
+ while (cur_bandwidth_bytes <= uBandwidthBytes)
+ {
+ virtqueue_pop(delay_queue.tx_vq, &elem);
+ out_size = iov_size(elem.out_sg, elem.out_num);
+ cur_bandwidth_bytes += out_size;
+ virtqueue_push(q->tx_vq, &elem, out_size);
+ }
+ }
+ /* if bandwidth usage is less than the limit, but due to previous burst
+ * there are some packets left in delay queue , empty the delay queue and copy back the packet to the main queue.
+ */
+ else
+ {
+ while (virtqueue_pop(q->delay_queue, &elem))
+ {
+ out_size = iov_size(elem.out_sg, elem.out_num);
+ virtqueue_push(q->tx_vq, &elem, in_size+out_size);
+ }
+
+ }
+ /* now virtio is free to use q->tx_vq */
 while (virtqueue_pop(q->tx_vq, &elem)) {
ssize_t ret, len;
unsigned int out_num = elem.out_num;
@@ -1179,6 +1218,20 @@ static void virtio_net_tx_timer(void *opaque)
virtio_net_flush_tx(q);
}

+/* Periodic timer callback: resets the per-interval TX byte counter that
+ * virtio_net_flush_tx() accumulates against BANDWIDTH_LIMIT, then re-arms
+ * itself. @opaque is the VirtIONetQueue the timer was created for.
+ */
+static void virtio_net_tx_rate_calc_timer(void *opaque)
+{
+ VirtIONetQueue *q = opaque;
+ VirtIONet *n = q->n;
+ VirtIODevice *vdev = VIRTIO_DEVICE(n);
+ assert(vdev->vm_running);
+ n->tx_bandwidth_use = 0; /* reset byte counter for the new interval */
+ if (q->tx_rate_calc_timer) /* was q->tx_timer: wrong timer tested */
+ {
+ /* re-arm: tx_rate_calc_timeout is in milliseconds (set to 1000 for
+  * 1 sec) but the timer runs on a nanosecond clock, so scale by
+  * SCALE_MS — adding the raw value would fire every 1 us instead. */
+ timer_mod(q->tx_rate_calc_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (int64_t)n->tx_rate_calc_timeout * SCALE_MS);
+ }
+}
+
static void virtio_net_tx_bh(void *opaque)
{
VirtIONetQueue *q = opaque;
@@ -1503,6 +1556,7 @@ static int virtio_net_device_init(VirtIODevice *vdev)
n->curr_queues = 1;
n->vqs[0].n = n;
n->tx_timeout = n->net_conf.txtimer;
+ n->tx_rate_calc_timeout = 1000; /* 1 sec */

if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
&& strcmp(n->net_conf.tx, "bh")) {
@@ -1522,6 +1576,7 @@ static int virtio_net_device_init(VirtIODevice *vdev)
virtio_net_handle_tx_bh);
n->vqs[0].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[0]);
}
+ n->vqs[0].tx_rate_calc_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, virtio_net_tx_rate_calc_timer, &n->vqs[0]);
n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
diff --git a/hw/net/vmxnet_tx_pkt.c b/hw/net/vmxnet_tx_pkt.c
index f7344c4..7531d2c 100644
--- a/hw/net/vmxnet_tx_pkt.c
+++ b/hw/net/vmxnet_tx_pkt.c
@@ -525,7 +525,7 @@ static bool vmxnet_tx_pkt_do_sw_fragmentation(struct VmxnetTxPkt *pkt,
l3_iov_len, fragment_len, fragment_offset, more_frags);

eth_fix_ip4_checksum(l3_iov_base, l3_iov_len);

+ /* defined in virtio-net.c. Ultimately request for sending packet goes to qemu_sendv_packet in virtio-net.c */
qemu_sendv_packet(nc, fragment, dst_idx);

fragment_offset += fragment_len;
diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
index df60f16..3948f24 100644
--- a/include/hw/virtio/virtio-net.h
+++ b/include/hw/virtio/virtio-net.h
@@ -162,6 +162,8 @@ typedef struct VirtIONet {
VirtQueue *ctrl_vq;
NICState *nic;
uint32_t tx_timeout;
+ uint32_t tx_rate_calc_timeout;
+ uint32_t tx_bandwidth_use;
int32_t tx_burst;
uint32_t has_vnet_hdr;
size_t host_hdr_len;

Tags:


Leave a Reply

Your email address will not be published. Required fields are marked *

This site uses Akismet to reduce spam. Learn how your comment data is processed.