1359
/*
 * NOTE(review): mid-function fragment of xnb_from_peer() — the function
 * header and the body of the final "if" are outside this view; comments
 * below describe only what is visible here.
 *
 * Snapshot the span of outstanding TX requests: [req_cons, req_prod)
 * as published by the frontend in the shared ring.
 */
1360 start = xnbp->xnb_tx_ring.req_cons;
1361 end = xnbp->xnb_tx_ring.sring->req_prod;
1362
/*
 * Sanity-check the span before walking it: a well-behaved frontend can
 * never have more than NET_TX_RING_SIZE requests in flight, so a larger
 * span means the shared producer index is bogus.
 */
1363 if ((end - start) > NET_TX_RING_SIZE) {
1364 /*
1365 * This usually indicates that the frontend driver is
1366 * misbehaving, as it's not possible to have more than
1367 * NET_TX_RING_SIZE ring elements in play at any one
1368 * time.
1369 *
1370 * We reset the ring pointers to the state declared by
1371 * the frontend and try to carry on.
1372 */
1373 cmn_err(CE_WARN, "xnb_from_peer: domain %d tried to give us %u "
1374 "items in the ring, resetting and trying to recover.",
1375 xnbp->xnb_peer, (end - start));
1376
1377 /* LINTED: constant in conditional context */
/*
 * Re-attach the backend view of the ring to the frontend's declared
 * state (unlike *_INIT, *_ATTACH adopts the existing shared indexes),
 * then retry from the top via the "around" label (not visible here).
 */
1378 BACK_RING_ATTACH(&xnbp->xnb_tx_ring,
1379 (netif_tx_sring_t *)xnbp->xnb_tx_ring_addr, PAGESIZE);
1380
1381 goto around;
1382 }
1383
/*
 * Set up the per-pass walk state: cursor into the ring, plus the
 * preallocated copy-op and buffer arrays (declared outside this view).
 */
1384 loop = start;
1385 cop = xnbp->xnb_tx_cop;
1386 txpp = xnbp->xnb_tx_bufp;
1387 n_data_req = 0;
1388
/*
 * Examine each request in [start, end).  Only the three flag bits
 * below are accepted from the peer; any other bit is "unexpected"
 * (the handling of that case continues past this fragment).
 */
1389 while (loop < end) {
1390 static const uint16_t acceptable_flags =
1391 NETTXF_csum_blank |
1392 NETTXF_data_validated |
1393 NETTXF_extra_info;
1394 uint16_t unexpected_flags;
1395
1396 txreq = RING_GET_REQUEST(&xnbp->xnb_tx_ring, loop);
1397
1398 unexpected_flags = txreq->flags & ~acceptable_flags;
1399 if (unexpected_flags != 0) {
/*
 * NOTE(review): mid-function fragment of xnb_connect_rings() — the
 * function header, "fail:" label and matching mutex_exit()s are outside
 * this view; comments below describe only what is visible here.
 */
/* 1.tx: reserve one page of kernel VA to map the shared TX ring into. */
1768 /* 1.tx */
1769 xnbp->xnb_tx_ring_addr = vmem_xalloc(heap_arena, PAGESIZE, PAGESIZE,
1770 0, 0, 0, 0, VM_SLEEP);
/* VM_SLEEP cannot fail; the ASSERT documents that invariant. */
1771 ASSERT(xnbp->xnb_tx_ring_addr != NULL);
1772
/*
 * 2.tx: grant-map the frontend's TX ring page (grant ref
 * xnb_tx_ring_ref from domain xnb_peer) at the VA reserved above.
 * Failure of either the hypercall or the per-op status aborts.
 */
1773 /* 2.tx */
1774 map_op.host_addr = (uint64_t)((long)xnbp->xnb_tx_ring_addr);
1775 map_op.flags = GNTMAP_host_map;
1776 map_op.ref = xnbp->xnb_tx_ring_ref;
1777 map_op.dom = xnbp->xnb_peer;
1778 hat_prepare_mapping(kas.a_hat, xnbp->xnb_tx_ring_addr, NULL);
1779 if (xen_map_gref(GNTTABOP_map_grant_ref, &map_op, 1, B_FALSE) != 0 ||
1780 map_op.status != 0) {
1781 cmn_err(CE_WARN, "xnb_connect_rings: cannot map tx-ring page.");
1782 goto fail;
1783 }
/* Keep the handle so the grant can be unmapped on teardown. */
1784 xnbp->xnb_tx_ring_handle = map_op.handle;
1785
1786 /* LINTED: constant in conditional context */
/* Initialise the backend's private view of the TX ring. */
1787 BACK_RING_INIT(&xnbp->xnb_tx_ring,
1788 (netif_tx_sring_t *)xnbp->xnb_tx_ring_addr, PAGESIZE);
1789
/* 1.rx / 2.rx: same alloc-then-grant-map sequence for the RX ring. */
1790 /* 1.rx */
1791 xnbp->xnb_rx_ring_addr = vmem_xalloc(heap_arena, PAGESIZE, PAGESIZE,
1792 0, 0, 0, 0, VM_SLEEP);
1793 ASSERT(xnbp->xnb_rx_ring_addr != NULL);
1794
1795 /* 2.rx */
1796 map_op.host_addr = (uint64_t)((long)xnbp->xnb_rx_ring_addr);
1797 map_op.flags = GNTMAP_host_map;
1798 map_op.ref = xnbp->xnb_rx_ring_ref;
1799 map_op.dom = xnbp->xnb_peer;
1800 hat_prepare_mapping(kas.a_hat, xnbp->xnb_rx_ring_addr, NULL);
1801 if (xen_map_gref(GNTTABOP_map_grant_ref, &map_op, 1, B_FALSE) != 0 ||
1802 map_op.status != 0) {
1803 cmn_err(CE_WARN, "xnb_connect_rings: cannot map rx-ring page.");
1804 goto fail;
1805 }
1806 xnbp->xnb_rx_ring_handle = map_op.handle;
1807
1808 /* LINTED: constant in conditional context */
1809 BACK_RING_INIT(&xnbp->xnb_rx_ring,
1810 (netif_rx_sring_t *)xnbp->xnb_rx_ring_addr, PAGESIZE);
1811
/*
 * 3: bind the frontend's event channel.
 * NOTE(review): the warning prints xnbp->xnb_evtchn, but the channel
 * being bound is xnbp->xnb_fe_evtchn; xnb_evtchn is only assigned
 * after a successful bind, so the logged value looks stale — confirm
 * whether the message should use xnb_fe_evtchn instead.
 */
1812 /* 3 */
1813 if (xvdi_bind_evtchn(dip, xnbp->xnb_fe_evtchn) != DDI_SUCCESS) {
1814 cmn_err(CE_WARN, "xnb_connect_rings: "
1815 "cannot bind event channel %d", xnbp->xnb_evtchn);
1816 xnbp->xnb_evtchn = INVALID_EVTCHN;
1817 goto fail;
1818 }
1819 xnbp->xnb_evtchn = xvdi_get_evtchn(dip);
1820
1821 /*
1822 * It would be good to set the state to XenbusStateConnected
1823 * here as well, but then what if ddi_add_intr() failed?
1824 * Changing the state in the store will be noticed by the peer
1825 * and cannot be "taken back".
1826 */
/* Lock ordering as shown here: tx lock before rx lock. */
1827 mutex_enter(&xnbp->xnb_tx_lock);
1828 mutex_enter(&xnbp->xnb_rx_lock);
1829
/* Mark the device connected while holding both data-path locks. */
1830 xnbp->xnb_connected = B_TRUE;
|
1359
/*
 * NOTE(review): mid-function fragment of xnb_from_peer() — the function
 * header and the body of the final "if" are outside this view; comments
 * below describe only what is visible here.  This copy differs from its
 * sibling only in the cast spelling: struct netif_tx_sring vs the
 * netif_tx_sring_t typedef.
 *
 * Snapshot the span of outstanding TX requests: [req_cons, req_prod)
 * as published by the frontend in the shared ring.
 */
1360 start = xnbp->xnb_tx_ring.req_cons;
1361 end = xnbp->xnb_tx_ring.sring->req_prod;
1362
/*
 * Sanity-check the span before walking it: a well-behaved frontend can
 * never have more than NET_TX_RING_SIZE requests in flight, so a larger
 * span means the shared producer index is bogus.
 */
1363 if ((end - start) > NET_TX_RING_SIZE) {
1364 /*
1365 * This usually indicates that the frontend driver is
1366 * misbehaving, as it's not possible to have more than
1367 * NET_TX_RING_SIZE ring elements in play at any one
1368 * time.
1369 *
1370 * We reset the ring pointers to the state declared by
1371 * the frontend and try to carry on.
1372 */
1373 cmn_err(CE_WARN, "xnb_from_peer: domain %d tried to give us %u "
1374 "items in the ring, resetting and trying to recover.",
1375 xnbp->xnb_peer, (end - start));
1376
1377 /* LINTED: constant in conditional context */
/*
 * Re-attach the backend view of the ring to the frontend's declared
 * state (unlike *_INIT, *_ATTACH adopts the existing shared indexes),
 * then retry from the top via the "around" label (not visible here).
 */
1378 BACK_RING_ATTACH(&xnbp->xnb_tx_ring,
1379 (struct netif_tx_sring *)xnbp->xnb_tx_ring_addr, PAGESIZE);
1380
1381 goto around;
1382 }
1383
/*
 * Set up the per-pass walk state: cursor into the ring, plus the
 * preallocated copy-op and buffer arrays (declared outside this view).
 */
1384 loop = start;
1385 cop = xnbp->xnb_tx_cop;
1386 txpp = xnbp->xnb_tx_bufp;
1387 n_data_req = 0;
1388
/*
 * Examine each request in [start, end).  Only the three flag bits
 * below are accepted from the peer; any other bit is "unexpected"
 * (the handling of that case continues past this fragment).
 */
1389 while (loop < end) {
1390 static const uint16_t acceptable_flags =
1391 NETTXF_csum_blank |
1392 NETTXF_data_validated |
1393 NETTXF_extra_info;
1394 uint16_t unexpected_flags;
1395
1396 txreq = RING_GET_REQUEST(&xnbp->xnb_tx_ring, loop);
1397
1398 unexpected_flags = txreq->flags & ~acceptable_flags;
1399 if (unexpected_flags != 0) {
/*
 * NOTE(review): mid-function fragment of xnb_connect_rings() — the
 * function header, "fail:" label and matching mutex_exit()s are outside
 * this view; comments below describe only what is visible here.  This
 * copy differs from its sibling only in the cast spelling:
 * struct netif_{tx,rx}_sring vs the *_t typedefs.
 */
/* 1.tx: reserve one page of kernel VA to map the shared TX ring into. */
1768 /* 1.tx */
1769 xnbp->xnb_tx_ring_addr = vmem_xalloc(heap_arena, PAGESIZE, PAGESIZE,
1770 0, 0, 0, 0, VM_SLEEP);
/* VM_SLEEP cannot fail; the ASSERT documents that invariant. */
1771 ASSERT(xnbp->xnb_tx_ring_addr != NULL);
1772
/*
 * 2.tx: grant-map the frontend's TX ring page (grant ref
 * xnb_tx_ring_ref from domain xnb_peer) at the VA reserved above.
 * Failure of either the hypercall or the per-op status aborts.
 */
1773 /* 2.tx */
1774 map_op.host_addr = (uint64_t)((long)xnbp->xnb_tx_ring_addr);
1775 map_op.flags = GNTMAP_host_map;
1776 map_op.ref = xnbp->xnb_tx_ring_ref;
1777 map_op.dom = xnbp->xnb_peer;
1778 hat_prepare_mapping(kas.a_hat, xnbp->xnb_tx_ring_addr, NULL);
1779 if (xen_map_gref(GNTTABOP_map_grant_ref, &map_op, 1, B_FALSE) != 0 ||
1780 map_op.status != 0) {
1781 cmn_err(CE_WARN, "xnb_connect_rings: cannot map tx-ring page.");
1782 goto fail;
1783 }
/* Keep the handle so the grant can be unmapped on teardown. */
1784 xnbp->xnb_tx_ring_handle = map_op.handle;
1785
1786 /* LINTED: constant in conditional context */
/* Initialise the backend's private view of the TX ring. */
1787 BACK_RING_INIT(&xnbp->xnb_tx_ring,
1788 (struct netif_tx_sring *)xnbp->xnb_tx_ring_addr, PAGESIZE);
1789
/* 1.rx / 2.rx: same alloc-then-grant-map sequence for the RX ring. */
1790 /* 1.rx */
1791 xnbp->xnb_rx_ring_addr = vmem_xalloc(heap_arena, PAGESIZE, PAGESIZE,
1792 0, 0, 0, 0, VM_SLEEP);
1793 ASSERT(xnbp->xnb_rx_ring_addr != NULL);
1794
1795 /* 2.rx */
1796 map_op.host_addr = (uint64_t)((long)xnbp->xnb_rx_ring_addr);
1797 map_op.flags = GNTMAP_host_map;
1798 map_op.ref = xnbp->xnb_rx_ring_ref;
1799 map_op.dom = xnbp->xnb_peer;
1800 hat_prepare_mapping(kas.a_hat, xnbp->xnb_rx_ring_addr, NULL);
1801 if (xen_map_gref(GNTTABOP_map_grant_ref, &map_op, 1, B_FALSE) != 0 ||
1802 map_op.status != 0) {
1803 cmn_err(CE_WARN, "xnb_connect_rings: cannot map rx-ring page.");
1804 goto fail;
1805 }
1806 xnbp->xnb_rx_ring_handle = map_op.handle;
1807
1808 /* LINTED: constant in conditional context */
1809 BACK_RING_INIT(&xnbp->xnb_rx_ring,
1810 (struct netif_rx_sring *)xnbp->xnb_rx_ring_addr, PAGESIZE);
1811
/*
 * 3: bind the frontend's event channel.
 * NOTE(review): the warning prints xnbp->xnb_evtchn, but the channel
 * being bound is xnbp->xnb_fe_evtchn; xnb_evtchn is only assigned
 * after a successful bind, so the logged value looks stale — confirm
 * whether the message should use xnb_fe_evtchn instead.
 */
1812 /* 3 */
1813 if (xvdi_bind_evtchn(dip, xnbp->xnb_fe_evtchn) != DDI_SUCCESS) {
1814 cmn_err(CE_WARN, "xnb_connect_rings: "
1815 "cannot bind event channel %d", xnbp->xnb_evtchn);
1816 xnbp->xnb_evtchn = INVALID_EVTCHN;
1817 goto fail;
1818 }
1819 xnbp->xnb_evtchn = xvdi_get_evtchn(dip);
1820
1821 /*
1822 * It would be good to set the state to XenbusStateConnected
1823 * here as well, but then what if ddi_add_intr() failed?
1824 * Changing the state in the store will be noticed by the peer
1825 * and cannot be "taken back".
1826 */
/* Lock ordering as shown here: tx lock before rx lock. */
1827 mutex_enter(&xnbp->xnb_tx_lock);
1828 mutex_enter(&xnbp->xnb_rx_lock);
1829
/* Mark the device connected while holding both data-path locks. */
1830 xnbp->xnb_connected = B_TRUE;
|