3373 update files for xen

------ old ------

2142                 } else {
2143                         ASSERT(xnfp->xnf_rx_tail != NULL);
2144 
2145                         xnfp->xnf_rx_tail->b_next = head;
2146                 }
2147                 xnfp->xnf_rx_tail = tail;
2148         }
2149 }
2150 
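The fragment above appends a freshly assembled mblk sub-chain (head ..
tail) to the driver's receive list: an empty list takes head directly,
otherwise the old tail is linked to the new head through b_next. A
generic sketch of the pattern (list_head and list_tail are illustrative
names, not fields of the driver):

	if (list_head == NULL)
		list_head = head;		/* list was empty */
	else
		list_tail->b_next = head;	/* chain onto old tail */
	list_tail = tail;			/* new end of list */
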
2151 /*
2152  *  xnf_alloc_dma_resources() -- initialize the driver's structures
2153  */
2154 static int
2155 xnf_alloc_dma_resources(xnf_t *xnfp)
2156 {
2157         dev_info_t              *devinfo = xnfp->xnf_devinfo;
2158         size_t                  len;
2159         ddi_dma_cookie_t        dma_cookie;
2160         uint_t                  ncookies;
2161         int                     rc;
2162         caddr_t                 rptr;
2163 
2164         /*
2165          * The code below allocates all the DMA data structures that
2166          * need to be released when the driver is detached.
2167          *
2168          * Allocate page for the transmit descriptor ring.
2169          */
2170         if (ddi_dma_alloc_handle(devinfo, &ringbuf_dma_attr,
2171             DDI_DMA_SLEEP, 0, &xnfp->xnf_tx_ring_dma_handle) != DDI_SUCCESS)
2172                 goto alloc_error;
2173 
2174         if (ddi_dma_mem_alloc(xnfp->xnf_tx_ring_dma_handle,
2175             PAGESIZE, &accattr, DDI_DMA_CONSISTENT,
2176             DDI_DMA_SLEEP, 0, &rptr, &len,
2177             &xnfp->xnf_tx_ring_dma_acchandle) != DDI_SUCCESS) {
2178                 ddi_dma_free_handle(&xnfp->xnf_tx_ring_dma_handle);
2179                 xnfp->xnf_tx_ring_dma_handle = NULL;
2180                 goto alloc_error;
2181         }
2182 
2183         if ((rc = ddi_dma_addr_bind_handle(xnfp->xnf_tx_ring_dma_handle, NULL,
2184             rptr, PAGESIZE, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2185             DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies)) != DDI_DMA_MAPPED) {
2186                 ddi_dma_mem_free(&xnfp->xnf_tx_ring_dma_acchandle);
2187                 ddi_dma_free_handle(&xnfp->xnf_tx_ring_dma_handle);
2188                 xnfp->xnf_tx_ring_dma_handle = NULL;
2189                 xnfp->xnf_tx_ring_dma_acchandle = NULL;
2190                 if (rc == DDI_DMA_NORESOURCES)
2191                         goto alloc_error;
2192                 else
2193                         goto error;
2194         }
2195 
2196         ASSERT(ncookies == 1);
2197         bzero(rptr, PAGESIZE);
2198         /* LINTED: constant in conditional context */
2199         SHARED_RING_INIT((netif_tx_sring_t *)rptr);
2200         /* LINTED: constant in conditional context */
2201         FRONT_RING_INIT(&xnfp->xnf_tx_ring, (netif_tx_sring_t *)rptr, PAGESIZE);
2202         xnfp->xnf_tx_ring_phys_addr = dma_cookie.dmac_laddress;
2203 
2204         /*
2205          * Allocate page for the receive descriptor ring.
2206          */
2207         if (ddi_dma_alloc_handle(devinfo, &ringbuf_dma_attr,
2208             DDI_DMA_SLEEP, 0, &xnfp->xnf_rx_ring_dma_handle) != DDI_SUCCESS)
2209                 goto alloc_error;
2210 
2211         if (ddi_dma_mem_alloc(xnfp->xnf_rx_ring_dma_handle,
2212             PAGESIZE, &accattr, DDI_DMA_CONSISTENT,
2213             DDI_DMA_SLEEP, 0, &rptr, &len,
2214             &xnfp->xnf_rx_ring_dma_acchandle) != DDI_SUCCESS) {
2215                 ddi_dma_free_handle(&xnfp->xnf_rx_ring_dma_handle);
2216                 xnfp->xnf_rx_ring_dma_handle = NULL;
2217                 goto alloc_error;
2218         }
2219 
2220         if ((rc = ddi_dma_addr_bind_handle(xnfp->xnf_rx_ring_dma_handle, NULL,
2221             rptr, PAGESIZE, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2222             DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies)) != DDI_DMA_MAPPED) {
2223                 ddi_dma_mem_free(&xnfp->xnf_rx_ring_dma_acchandle);
2224                 ddi_dma_free_handle(&xnfp->xnf_rx_ring_dma_handle);
2225                 xnfp->xnf_rx_ring_dma_handle = NULL;
2226                 xnfp->xnf_rx_ring_dma_acchandle = NULL;
2227                 if (rc == DDI_DMA_NORESOURCES)
2228                         goto alloc_error;
2229                 else
2230                         goto error;
2231         }
2232 
2233         ASSERT(ncookies == 1);
2234         bzero(rptr, PAGESIZE);
2235         /* LINTED: constant in conditional context */
2236         SHARED_RING_INIT((netif_rx_sring_t *)rptr);
2237         /* LINTED: constant in conditional context */
2238         FRONT_RING_INIT(&xnfp->xnf_rx_ring, (netif_rx_sring_t *)rptr, PAGESIZE);
2239         xnfp->xnf_rx_ring_phys_addr = dma_cookie.dmac_laddress;
2240 
2241         return (DDI_SUCCESS);
2242 
2243 alloc_error:
2244         cmn_err(CE_WARN, "xnf%d: could not allocate enough DMA memory",
2245             ddi_get_instance(xnfp->xnf_devinfo));
2246 error:
2247         xnf_release_dma_resources(xnfp);
2248         return (DDI_FAILURE);
2249 }
2250 
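SHARED_RING_INIT() and FRONT_RING_INIT() above come from Xen's
public/io/ring.h. Paraphrased from that header (the header itself is
authoritative and details vary between Xen versions), they reset the
shared page's producer/consumer indices and point the frontend's
private ring structure at the shared page:

	#define SHARED_RING_INIT(_s) do {                           \
		(_s)->req_prod = (_s)->rsp_prod = 0;                \
		(_s)->req_event = (_s)->rsp_event = 1;              \
	} while (0)

	#define FRONT_RING_INIT(_r, _s, __size) do {                \
		(_r)->req_prod_pvt = 0;                             \
		(_r)->rsp_cons = 0;                                 \
		(_r)->nr_ents = __RING_SIZE(_s, __size);            \
		(_r)->sring = (_s);                                 \
	} while (0)
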
2251 /*
2252  * Release all DMA resources in the opposite order from acquisition
2253  */
2254 static void
2255 xnf_release_dma_resources(xnf_t *xnfp)
2256 {
2257         int i;
2258 

------ new ------

2142                 } else {
2143                         ASSERT(xnfp->xnf_rx_tail != NULL);
2144 
2145                         xnfp->xnf_rx_tail->b_next = head;
2146                 }
2147                 xnfp->xnf_rx_tail = tail;
2148         }
2149 }
2150 
2151 /*
2152  *  xnf_alloc_dma_resources() -- initialize the driver's structures
2153  */
2154 static int
2155 xnf_alloc_dma_resources(xnf_t *xnfp)
2156 {
2157         dev_info_t              *devinfo = xnfp->xnf_devinfo;
2158         size_t                  len;
2159         ddi_dma_cookie_t        dma_cookie;
2160         uint_t                  ncookies;
2161         int                     rc;
2162         struct netif_tx_sring *txs;
2163         struct netif_rx_sring *rxs;
2164 
2165         /*
2166          * The code below allocates all the DMA data structures that
2167          * need to be released when the driver is detached.
2168          *
2169          * Allocate page for the transmit descriptor ring.
2170          */
2171         if (ddi_dma_alloc_handle(devinfo, &ringbuf_dma_attr,
2172             DDI_DMA_SLEEP, 0, &xnfp->xnf_tx_ring_dma_handle) != DDI_SUCCESS)
2173                 goto alloc_error;
2174 
2175         if (ddi_dma_mem_alloc(xnfp->xnf_tx_ring_dma_handle,
2176             PAGESIZE, &accattr, DDI_DMA_CONSISTENT,
2177             DDI_DMA_SLEEP, 0, (caddr_t *)&txs, &len,
2178             &xnfp->xnf_tx_ring_dma_acchandle) != DDI_SUCCESS) {
2179                 ddi_dma_free_handle(&xnfp->xnf_tx_ring_dma_handle);
2180                 xnfp->xnf_tx_ring_dma_handle = NULL;
2181                 goto alloc_error;
2182         }
2183 
2184         if ((rc = ddi_dma_addr_bind_handle(xnfp->xnf_tx_ring_dma_handle, NULL,
2185             (caddr_t)txs, PAGESIZE, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2186             DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies)) != DDI_DMA_MAPPED) {
2187                 ddi_dma_mem_free(&xnfp->xnf_tx_ring_dma_acchandle);
2188                 ddi_dma_free_handle(&xnfp->xnf_tx_ring_dma_handle);
2189                 xnfp->xnf_tx_ring_dma_handle = NULL;
2190                 xnfp->xnf_tx_ring_dma_acchandle = NULL;
2191                 if (rc == DDI_DMA_NORESOURCES)
2192                         goto alloc_error;
2193                 else
2194                         goto error;
2195         }
2196 
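	/*
	 * The single-cookie ASSERT below presumably relies on
	 * ringbuf_dma_attr limiting the bind to one physically
	 * contiguous segment; that attribute's definition is not
	 * shown on this page.
	 */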
2197         ASSERT(ncookies == 1);
2198         bzero(txs, PAGESIZE);
2199         /* LINTED: constant in conditional context */
2200         SHARED_RING_INIT(txs);
2201         /* LINTED: constant in conditional context */
2202         FRONT_RING_INIT(&xnfp->xnf_tx_ring, txs, PAGESIZE);
2203         xnfp->xnf_tx_ring_phys_addr = dma_cookie.dmac_laddress;
2204 
2205         /*
2206          * Allocate page for the receive descriptor ring.
2207          */
2208         if (ddi_dma_alloc_handle(devinfo, &ringbuf_dma_attr,
2209             DDI_DMA_SLEEP, 0, &xnfp->xnf_rx_ring_dma_handle) != DDI_SUCCESS)
2210                 goto alloc_error;
2211 
2212         if (ddi_dma_mem_alloc(xnfp->xnf_rx_ring_dma_handle,
2213             PAGESIZE, &accattr, DDI_DMA_CONSISTENT,
2214             DDI_DMA_SLEEP, 0, (caddr_t *)&rxs, &len,
2215             &xnfp->xnf_rx_ring_dma_acchandle) != DDI_SUCCESS) {
2216                 ddi_dma_free_handle(&xnfp->xnf_rx_ring_dma_handle);
2217                 xnfp->xnf_rx_ring_dma_handle = NULL;
2218                 goto alloc_error;
2219         }
2220 
2221         if ((rc = ddi_dma_addr_bind_handle(xnfp->xnf_rx_ring_dma_handle, NULL,
2222             (caddr_t)rxs, PAGESIZE, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2223             DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies)) != DDI_DMA_MAPPED) {
2224                 ddi_dma_mem_free(&xnfp->xnf_rx_ring_dma_acchandle);
2225                 ddi_dma_free_handle(&xnfp->xnf_rx_ring_dma_handle);
2226                 xnfp->xnf_rx_ring_dma_handle = NULL;
2227                 xnfp->xnf_rx_ring_dma_acchandle = NULL;
2228                 if (rc == DDI_DMA_NORESOURCES)
2229                         goto alloc_error;
2230                 else
2231                         goto error;
2232         }
2233 
2234         ASSERT(ncookies == 1);
2235         bzero(rxs, PAGESIZE);
2236         /* LINTED: constant in conditional context */
2237         SHARED_RING_INIT(rxs);
2238         /* LINTED: constant in conditional context */
2239         FRONT_RING_INIT(&xnfp->xnf_rx_ring, rxs, PAGESIZE);
2240         xnfp->xnf_rx_ring_phys_addr = dma_cookie.dmac_laddress;
2241 
2242         return (DDI_SUCCESS);
2243 
2244 alloc_error:
2245         cmn_err(CE_WARN, "xnf%d: could not allocate enough DMA memory",
2246             ddi_get_instance(xnfp->xnf_devinfo));
2247 error:
2248         xnf_release_dma_resources(xnfp);
2249         return (DDI_FAILURE);
2250 }
2251 
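The transmit and receive paths in the updated function are identical in
shape: allocate a DMA handle, allocate one page of DMA-able memory,
bind it, and insist on a single cookie. A hypothetical helper (the name
xnf_alloc_ring_page and this factoring are illustrative only, not part
of the driver; it reuses the driver's ringbuf_dma_attr and accattr)
makes the shared pattern explicit:

	static int
	xnf_alloc_ring_page(dev_info_t *devinfo, ddi_dma_handle_t *handlep,
	    ddi_acc_handle_t *acchandlep, caddr_t *vaddrp, uint64_t *paddrp)
	{
		ddi_dma_cookie_t	dma_cookie;
		uint_t			ncookies;
		size_t			len;

		/* Step 1: DMA handle */
		if (ddi_dma_alloc_handle(devinfo, &ringbuf_dma_attr,
		    DDI_DMA_SLEEP, 0, handlep) != DDI_SUCCESS)
			return (DDI_FAILURE);

		/* Step 2: one page of DMA-able memory */
		if (ddi_dma_mem_alloc(*handlep, PAGESIZE, &accattr,
		    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0, vaddrp, &len,
		    acchandlep) != DDI_SUCCESS) {
			ddi_dma_free_handle(handlep);
			*handlep = NULL;
			return (DDI_FAILURE);
		}

		/* Step 3: bind, requiring a single cookie */
		if (ddi_dma_addr_bind_handle(*handlep, NULL, *vaddrp,
		    PAGESIZE, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
		    DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies) !=
		    DDI_DMA_MAPPED) {
			ddi_dma_mem_free(acchandlep);
			ddi_dma_free_handle(handlep);
			*acchandlep = NULL;
			*handlep = NULL;
			return (DDI_FAILURE);
		}

		ASSERT(ncookies == 1);
		bzero(*vaddrp, PAGESIZE);
		*paddrp = dma_cookie.dmac_laddress;
		return (DDI_SUCCESS);
	}
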
2252 /*
2253  * Release all DMA resources in the opposite order from acquisition
2254  */
2255 static void
2256 xnf_release_dma_resources(xnf_t *xnfp)
2257 {
2258         int i;
2259
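
The listing is cut off above, but the comment promises release in the
opposite order from acquisition. For one ring, that reversal would look
roughly like this (a sketch against the fields shown earlier, not the
elided function body):

	/* Unbind, free the memory, then free the handle. */
	if (xnfp->xnf_rx_ring_dma_acchandle != NULL) {
		(void) ddi_dma_unbind_handle(xnfp->xnf_rx_ring_dma_handle);
		ddi_dma_mem_free(&xnfp->xnf_rx_ring_dma_acchandle);
		ddi_dma_free_handle(&xnfp->xnf_rx_ring_dma_handle);
		xnfp->xnf_rx_ring_dma_acchandle = NULL;
		xnfp->xnf_rx_ring_dma_handle = NULL;
	}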