3373 update files for xen
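In xnf_alloc_dma_resources(), the untyped scratch pointer caddr_t rptr is replaced with properly typed ring pointers, struct netif_tx_sring *txs and struct netif_rx_sring *rxs, which removes the (netif_tx_sring_t *) and (netif_rx_sring_t *) casts at the SHARED_RING_INIT() and FRONT_RING_INIT() call sites.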

--- old/usr/src/uts/common/xen/io/xnf.c
+++ new/usr/src/uts/common/xen/io/xnf.c
[ 2151 lines elided ]
2152 2152   *  xnf_alloc_dma_resources() -- initialize the driver's structures
2153 2153   */
2154 2154  static int
2155 2155  xnf_alloc_dma_resources(xnf_t *xnfp)
2156 2156  {
2157 2157          dev_info_t              *devinfo = xnfp->xnf_devinfo;
2158 2158          size_t                  len;
2159 2159          ddi_dma_cookie_t        dma_cookie;
2160 2160          uint_t                  ncookies;
2161 2161          int                     rc;
2162      -        caddr_t                 rptr;
     2162 +        struct netif_tx_sring *txs;
     2163 +        struct netif_rx_sring *rxs;
2163 2164  
2164 2165          /*
2165 2166           * The code below allocates all the DMA data structures that
2166 2167           * need to be released when the driver is detached.
2167 2168           *
2168 2169           * Allocate page for the transmit descriptor ring.
2169 2170           */
2170 2171          if (ddi_dma_alloc_handle(devinfo, &ringbuf_dma_attr,
2171 2172              DDI_DMA_SLEEP, 0, &xnfp->xnf_tx_ring_dma_handle) != DDI_SUCCESS)
2172 2173                  goto alloc_error;
2173 2174  
2174 2175          if (ddi_dma_mem_alloc(xnfp->xnf_tx_ring_dma_handle,
2175 2176              PAGESIZE, &accattr, DDI_DMA_CONSISTENT,
2176      -            DDI_DMA_SLEEP, 0, &rptr, &len,
     2177 +            DDI_DMA_SLEEP, 0, (caddr_t *)&txs, &len,
2177 2178              &xnfp->xnf_tx_ring_dma_acchandle) != DDI_SUCCESS) {
2178 2179                  ddi_dma_free_handle(&xnfp->xnf_tx_ring_dma_handle);
2179 2180                  xnfp->xnf_tx_ring_dma_handle = NULL;
2180 2181                  goto alloc_error;
2181 2182          }
2182 2183  
2183 2184          if ((rc = ddi_dma_addr_bind_handle(xnfp->xnf_tx_ring_dma_handle, NULL,
2184      -            rptr, PAGESIZE, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
     2185 +            (caddr_t)txs, PAGESIZE, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2185 2186              DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies)) != DDI_DMA_MAPPED) {
2186 2187                  ddi_dma_mem_free(&xnfp->xnf_tx_ring_dma_acchandle);
2187 2188                  ddi_dma_free_handle(&xnfp->xnf_tx_ring_dma_handle);
2188 2189                  xnfp->xnf_tx_ring_dma_handle = NULL;
2189 2190                  xnfp->xnf_tx_ring_dma_acchandle = NULL;
2190 2191                  if (rc == DDI_DMA_NORESOURCES)
2191 2192                          goto alloc_error;
2192 2193                  else
2193 2194                          goto error;
2194 2195          }
2195 2196  
2196 2197          ASSERT(ncookies == 1);
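
The single-cookie assertion above holds because ringbuf_dma_attr (defined earlier in xnf.c, outside this excerpt) limits the bind to one DMA segment. Purely as a hypothetical illustration -- the field values below are assumptions, not the driver's actual definition -- an attribute block that guarantees one cookie for a page-sized, page-aligned ring looks like:

        /* Hypothetical example; the real ringbuf_dma_attr is elided above. */
        static ddi_dma_attr_t ringbuf_dma_attr = {
                DMA_ATTR_V0,            /* version of this structure */
                0,                      /* lowest usable address */
                0xffffffffffffffffULL,  /* highest usable address */
                0x7fffffff,             /* maximum DMAable byte count */
                MMU_PAGESIZE,           /* alignment in bytes */
                0x7ff,                  /* bitmap of burst sizes */
                1,                      /* minimum transfer */
                0xffffffffU,            /* maximum transfer */
                0xffffffffffffffffULL,  /* maximum segment length */
                1,                      /* maximum number of segments */
                1,                      /* granularity */
                0,                      /* flags (reserved) */
        };

With dma_attr_sgllen (maximum number of segments) set to 1, a successful ddi_dma_addr_bind_handle() must return exactly one cookie, which is what the ASSERT checks.
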
2197      -        bzero(rptr, PAGESIZE);
     2198 +        bzero(txs, PAGESIZE);
2198 2199          /* LINTED: constant in conditional context */
2199      -        SHARED_RING_INIT((netif_tx_sring_t *)rptr);
     2200 +        SHARED_RING_INIT(txs);
2200 2201          /* LINTED: constant in conditional context */
2201      -        FRONT_RING_INIT(&xnfp->xnf_tx_ring, (netif_tx_sring_t *)rptr, PAGESIZE);
     2202 +        FRONT_RING_INIT(&xnfp->xnf_tx_ring, txs, PAGESIZE);
2202 2203          xnfp->xnf_tx_ring_phys_addr = dma_cookie.dmac_laddress;
2203 2204  
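
For reference, the two init macros used above come from Xen's public <xen/io/ring.h>. Paraphrased from memory (the exact definitions vary slightly across Xen versions), they amount to:

        /* Reset the producer/consumer indices in the page shared with the backend. */
        #define SHARED_RING_INIT(s) do {                                \
                (s)->req_prod = (s)->rsp_prod = 0;                      \
                (s)->req_event = (s)->rsp_event = 1;                    \
                (void) memset((s)->pad, 0, sizeof ((s)->pad));          \
        } while (0)

        /* Point the frontend's private ring state at the shared page. */
        #define FRONT_RING_INIT(r, s, size) do {                        \
                (r)->req_prod_pvt = 0;                                  \
                (r)->rsp_cons = 0;                                      \
                (r)->nr_ents = __RING_SIZE(s, size);                    \
                (r)->sring = (s);                                       \
        } while (0)

The LINTED annotations in the driver suppress lint's complaint about the constant condition in the do { ... } while (0) expansion of these macros.
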
2204 2205          /*
2205 2206           * Allocate page for the receive descriptor ring.
2206 2207           */
2207 2208          if (ddi_dma_alloc_handle(devinfo, &ringbuf_dma_attr,
2208 2209              DDI_DMA_SLEEP, 0, &xnfp->xnf_rx_ring_dma_handle) != DDI_SUCCESS)
2209 2210                  goto alloc_error;
2210 2211  
2211 2212          if (ddi_dma_mem_alloc(xnfp->xnf_rx_ring_dma_handle,
2212 2213              PAGESIZE, &accattr, DDI_DMA_CONSISTENT,
2213      -            DDI_DMA_SLEEP, 0, &rptr, &len,
     2214 +            DDI_DMA_SLEEP, 0, (caddr_t *)&rxs, &len,
2214 2215              &xnfp->xnf_rx_ring_dma_acchandle) != DDI_SUCCESS) {
2215 2216                  ddi_dma_free_handle(&xnfp->xnf_rx_ring_dma_handle);
2216 2217                  xnfp->xnf_rx_ring_dma_handle = NULL;
2217 2218                  goto alloc_error;
2218 2219          }
2219 2220  
2220 2221          if ((rc = ddi_dma_addr_bind_handle(xnfp->xnf_rx_ring_dma_handle, NULL,
2221      -            rptr, PAGESIZE, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
     2222 +            (caddr_t)rxs, PAGESIZE, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2222 2223              DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies)) != DDI_DMA_MAPPED) {
2223 2224                  ddi_dma_mem_free(&xnfp->xnf_rx_ring_dma_acchandle);
2224 2225                  ddi_dma_free_handle(&xnfp->xnf_rx_ring_dma_handle);
2225 2226                  xnfp->xnf_rx_ring_dma_handle = NULL;
2226 2227                  xnfp->xnf_rx_ring_dma_acchandle = NULL;
2227 2228                  if (rc == DDI_DMA_NORESOURCES)
2228 2229                          goto alloc_error;
2229 2230                  else
2230 2231                          goto error;
2231 2232          }
2232 2233  
2233 2234          ASSERT(ncookies == 1);
2234      -        bzero(rptr, PAGESIZE);
     2235 +        bzero(rxs, PAGESIZE);
2235 2236          /* LINTED: constant in conditional context */
2236      -        SHARED_RING_INIT((netif_rx_sring_t *)rptr);
     2237 +        SHARED_RING_INIT(rxs);
2237 2238          /* LINTED: constant in conditional context */
2238      -        FRONT_RING_INIT(&xnfp->xnf_rx_ring, (netif_rx_sring_t *)rptr, PAGESIZE);
     2239 +        FRONT_RING_INIT(&xnfp->xnf_rx_ring, rxs, PAGESIZE);
2239 2240          xnfp->xnf_rx_ring_phys_addr = dma_cookie.dmac_laddress;
2240 2241  
2241 2242          return (DDI_SUCCESS);
2242 2243  
2243 2244  alloc_error:
2244 2245          cmn_err(CE_WARN, "xnf%d: could not allocate enough DMA memory",
2245 2246              ddi_get_instance(xnfp->xnf_devinfo));
2246 2247  error:
2247 2248          xnf_release_dma_resources(xnfp);
2248 2249          return (DDI_FAILURE);
[ 511 lines elided ]
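
The error paths above rely on xnf_release_dma_resources() to unwind whatever was set up, which is why each handle field is reset to NULL as soon as its resources are freed. The release function itself is elided from this excerpt; a minimal sketch of the per-ring teardown, assuming a hypothetical helper name and the field states left by the allocation code, could look like:

        /*
         * Hypothetical helper (not the actual xnf_release_dma_resources()):
         * unwind one ring in the reverse order of its setup.  A NULL DMA
         * handle means the ring was never set up, or was already cleaned
         * by the error path above, so there is nothing to do.
         */
        static void
        xnf_free_ring_sketch(ddi_dma_handle_t *dmahp, ddi_acc_handle_t *acchp)
        {
                if (*dmahp == NULL)
                        return;
                (void) ddi_dma_unbind_handle(*dmahp);   /* undo addr_bind_handle */
                ddi_dma_mem_free(acchp);                /* undo mem_alloc */
                ddi_dma_free_handle(dmahp);             /* undo alloc_handle */
                *dmahp = NULL;
                *acchp = NULL;
        }

Such a helper would be called once per ring, with &xnfp->xnf_tx_ring_dma_handle and &xnfp->xnf_tx_ring_dma_acchandle, and again with the rx equivalents.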