1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  23  * Use is subject to license terms.
  24  * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
  25  */
  26 
  27 /*
  28  * sf - Solaris Fibre Channel driver
  29  *
  30  * This module implements some of the Fibre Channel FC-4 layer, converting
  31  * from FC frames to SCSI and back.  (Note: no sequence management is done
  32  * here, though.)
  33  */
  34 
  35 #if defined(lint) && !defined(DEBUG)
  36 #define DEBUG   1
  37 #endif
  38 
  39 /*
  40  * XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
  41  * Need to use the ugly RAID LUN mappings in FCP Annex D
  42  * to prevent SCSA from barfing.  This *REALLY* needs to
  43  * be addressed by the standards committee.
  44  */
  45 #define RAID_LUNS       1
  46 
  47 #ifdef DEBUG
  48 static int sfdebug = 0;
  49 #include <sys/debug.h>
  50 
  51 #define SF_DEBUG(level, args) \
  52         if (sfdebug >= (level)) sf_log args
  53 #else
  54 #define SF_DEBUG(level, args)
  55 #endif
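/*
 * usage note: the "args" argument to SF_DEBUG must be a fully
 * parenthesized sf_log() argument list, e.g.
 *
 *      SF_DEBUG(2, (sf, CE_CONT, "sf_attach: DDI_RESUME for sf%d\n",
 *          instance));
 *
 * so that it expands to a complete sf_log() call on DEBUG kernels and
 * compiles away entirely otherwise.
 */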
  56 
  57 static int sf_bus_config_debug = 0;
  58 
/* define offsetof() ourselves since it isn't provided by this environment */
  60 #if defined(__GNUC__)
  61 #define offsetof(s, m)  __builtin_offsetof(s, m)
  62 #else
  63 #define offsetof(s, m)  ((size_t)(&(((s *)0)->m)))
  64 #endif
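/*
 * (the non-GNU fallback computes the member offset by taking the address
 * of the member within a struct notionally placed at address zero)
 */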
  65 
  66 #include <sys/scsi/scsi.h>
  67 #include <sys/fc4/fcal.h>
  68 #include <sys/fc4/fcp.h>
  69 #include <sys/fc4/fcal_linkapp.h>
  70 #include <sys/socal_cq_defs.h>
  71 #include <sys/fc4/fcal_transport.h>
  72 #include <sys/fc4/fcio.h>
  73 #include <sys/scsi/adapters/sfvar.h>
  74 #include <sys/scsi/impl/scsi_reset_notify.h>
  75 #include <sys/stat.h>
  76 #include <sys/varargs.h>
  77 #include <sys/var.h>
  78 #include <sys/thread.h>
  79 #include <sys/proc.h>
  80 #include <sys/kstat.h>
  81 #include <sys/devctl.h>
  82 #include <sys/scsi/targets/ses.h>
  83 #include <sys/callb.h>
  84 
  85 static int sf_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
  86 static int sf_attach(dev_info_t *, ddi_attach_cmd_t);
  87 static int sf_detach(dev_info_t *, ddi_detach_cmd_t);
  88 static void sf_softstate_unlink(struct sf *);
  89 static int sf_scsi_bus_config(dev_info_t *parent, uint_t flag,
  90     ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
  91 static int sf_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
  92     ddi_bus_config_op_t op, void *arg);
  93 static int sf_scsi_tgt_init(dev_info_t *, dev_info_t *,
  94     scsi_hba_tran_t *, struct scsi_device *);
  95 static void sf_scsi_tgt_free(dev_info_t *, dev_info_t *,
  96     scsi_hba_tran_t *, struct scsi_device *);
  97 static int sf_pkt_alloc_extern(struct sf *, struct sf_pkt *,
  98     int, int, int);
  99 static void sf_pkt_destroy_extern(struct sf *, struct sf_pkt *);
 100 static struct scsi_pkt *sf_scsi_init_pkt(struct scsi_address *,
 101     struct scsi_pkt *, struct buf *, int, int, int, int, int (*)(), caddr_t);
 102 static void sf_scsi_destroy_pkt(struct scsi_address *, struct scsi_pkt *);
 103 static void sf_scsi_dmafree(struct scsi_address *, struct scsi_pkt *);
 104 static void sf_scsi_sync_pkt(struct scsi_address *, struct scsi_pkt *);
 105 static int sf_scsi_reset_notify(struct scsi_address *, int,
 106     void (*)(caddr_t), caddr_t);
 107 static int sf_scsi_get_name(struct scsi_device *, char *, int);
 108 static int sf_scsi_get_bus_addr(struct scsi_device *, char *, int);
 109 static int sf_add_cr_pool(struct sf *);
 110 static int sf_cr_alloc(struct sf *, struct sf_pkt *, int (*)());
 111 static void sf_cr_free(struct sf_cr_pool *, struct sf_pkt *);
 112 static void sf_crpool_free(struct sf *);
 113 static int sf_kmem_cache_constructor(void *, void *, int);
 114 static void sf_kmem_cache_destructor(void *, void *);
 115 static void sf_statec_callback(void *, int);
 116 static int sf_login(struct sf *, uchar_t, uchar_t, uint_t, int);
 117 static int sf_els_transport(struct sf *, struct sf_els_hdr *);
 118 static void sf_els_callback(struct fcal_packet *);
 119 static int sf_do_prli(struct sf *, struct sf_els_hdr *, struct la_els_logi *);
 120 static int sf_do_adisc(struct sf *, struct sf_els_hdr *);
 121 static int sf_do_reportlun(struct sf *, struct sf_els_hdr *,
 122     struct sf_target *);
 123 static void sf_reportlun_callback(struct fcal_packet *);
 124 static int sf_do_inquiry(struct sf *, struct sf_els_hdr *,
 125     struct sf_target *);
 126 static void sf_inq_callback(struct fcal_packet *);
 127 static struct fcal_packet *sf_els_alloc(struct sf *, uchar_t, int, int,
 128     int, caddr_t *, caddr_t *);
 129 static void sf_els_free(struct fcal_packet *);
 130 static struct sf_target *sf_create_target(struct sf *,
 131     struct sf_els_hdr *, int, int64_t);
 132 #ifdef RAID_LUNS
 133 static struct sf_target *sf_lookup_target(struct sf *, uchar_t *, int);
 134 #else
 135 static struct sf_target *sf_lookup_target(struct sf *, uchar_t *, int64_t);
 136 #endif
 137 static void sf_finish_init(struct sf *, int);
 138 static void sf_offline_target(struct sf *, struct sf_target *);
 139 static void sf_create_devinfo(struct sf *, struct sf_target *, int);
 140 static int sf_create_props(dev_info_t *, struct sf_target *, int);
 141 static int sf_commoncap(struct scsi_address *, char *, int, int, int);
 142 static int sf_getcap(struct scsi_address *, char *, int);
 143 static int sf_setcap(struct scsi_address *, char *, int, int);
 144 static int sf_abort(struct scsi_address *, struct scsi_pkt *);
 145 static int sf_reset(struct scsi_address *, int);
 146 static void sf_abort_all(struct sf *, struct sf_target *, int, int, int);
 147 static int sf_start(struct scsi_address *, struct scsi_pkt *);
 148 static int sf_start_internal(struct sf *, struct sf_pkt *);
 149 static void sf_fill_ids(struct sf *, struct sf_pkt *, struct sf_target *);
 150 static int sf_prepare_pkt(struct sf *, struct sf_pkt *, struct sf_target *);
 151 static int sf_dopoll(struct sf *, struct sf_pkt *);
 152 static void sf_cmd_callback(struct fcal_packet *);
 153 static void sf_throttle(struct sf *);
 154 static void sf_watch(void *);
 155 static void sf_throttle_start(struct sf *);
 156 static void sf_check_targets(struct sf *);
 157 static void sf_check_reset_delay(void *);
 158 static int sf_target_timeout(struct sf *, struct sf_pkt *);
 159 static void sf_force_lip(struct sf *);
 160 static void sf_unsol_els_callback(void *, soc_response_t *, caddr_t);
 161 static struct sf_els_hdr *sf_els_timeout(struct sf *, struct sf_els_hdr *);
 162 /*PRINTFLIKE3*/
 163 static void sf_log(struct sf *, int, const char *, ...);
 164 static int sf_kstat_update(kstat_t *, int);
 165 static int sf_open(dev_t *, int, int, cred_t *);
 166 static int sf_close(dev_t, int, int, cred_t *);
 167 static int sf_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
 168 static struct sf_target *sf_get_target_from_dip(struct sf *, dev_info_t *);
 169 static int sf_bus_get_eventcookie(dev_info_t *, dev_info_t *, char *,
 170     ddi_eventcookie_t *);
 171 static int sf_bus_add_eventcall(dev_info_t *, dev_info_t *,
 172     ddi_eventcookie_t, void (*)(), void *, ddi_callback_id_t *cb_id);
 173 static int sf_bus_remove_eventcall(dev_info_t *devi, ddi_callback_id_t cb_id);
 174 static int sf_bus_post_event(dev_info_t *, dev_info_t *,
 175     ddi_eventcookie_t, void *);
 176 
 177 static void sf_hp_daemon(void *);
 178 
 179 /*
 * these character-device entry points are required so that we can supply
 * a control node on which ioctls can be executed
 182  */
 183 struct cb_ops sf_cb_ops = {
 184         sf_open,                        /* open */
 185         sf_close,                       /* close */
 186         nodev,                          /* strategy */
 187         nodev,                          /* print */
 188         nodev,                          /* dump */
 189         nodev,                          /* read */
 190         nodev,                          /* write */
 191         sf_ioctl,                       /* ioctl */
 192         nodev,                          /* devmap */
 193         nodev,                          /* mmap */
 194         nodev,                          /* segmap */
 195         nochpoll,                       /* poll */
 196         ddi_prop_op,                    /* cb_prop_op */
 197         0,                              /* streamtab  */
 198         D_MP | D_NEW | D_HOTPLUG        /* driver flags */
 199 
 200 };
 201 
 202 /*
 203  * autoconfiguration routines.
 204  */
 205 static struct dev_ops sf_ops = {
 206         DEVO_REV,               /* devo_rev, */
 207         0,                      /* refcnt  */
 208         sf_info,                /* info */
 209         nulldev,                /* identify */
 210         nulldev,                /* probe */
 211         sf_attach,              /* attach */
 212         sf_detach,              /* detach */
 213         nodev,                  /* reset */
 214         &sf_cb_ops,         /* driver operations */
 215         NULL,                   /* bus operations */
 216         NULL,                   /* power management */
 217         ddi_quiesce_not_supported,      /* devo_quiesce */
 218 };
 219 
 220 #define SF_NAME "FC-AL FCP Nexus Driver"        /* Name of the module. */
 221 static  char    sf_version[] = "1.72 08/19/2008"; /* version of the module */
 222 
 223 static struct modldrv modldrv = {
 224         &mod_driverops, /* Type of module. This one is a driver */
 225         SF_NAME,
 226         &sf_ops,    /* driver ops */
 227 };
 228 
 229 static struct modlinkage modlinkage = {
 230         MODREV_1, (void *)&modldrv, NULL
 231 };
 232 
 233 /* XXXXXX The following is here to handle broken targets -- remove it later */
 234 static int sf_reportlun_forever = 0;
 235 /* XXXXXX */
 236 static int sf_lip_on_plogo = 0;
 237 static int sf_els_retries = SF_ELS_RETRIES;
 238 static struct sf *sf_head = NULL;
 239 static int sf_target_scan_cnt = 4;
 240 static int sf_pkt_scan_cnt = 5;
 241 static int sf_pool_scan_cnt = 1800;
 242 static void *sf_state = NULL;
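/*
 * watchdog globals: sf_watchdog_init counts the active (attached and not
 * suspended) instances sharing the single sf_watch() timeout; the first
 * instance starts the watchdog and the last one to go away stops it
 */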
 243 static int sf_watchdog_init = 0;
 244 static int sf_watchdog_time = 0;
 245 static int sf_watchdog_timeout = 1;
 246 static int sf_watchdog_tick;
 247 static int sf_watch_running = 0;
 248 static timeout_id_t sf_watchdog_id;
 249 static timeout_id_t sf_reset_timeout_id;
 250 static int sf_max_targets = SF_MAX_TARGETS;
 251 static kmutex_t sf_global_mutex;
 252 static int sf_core = 0;
 253 int *sf_token = NULL; /* Must not be static or lint complains. */
 254 static kcondvar_t sf_watch_cv;
 255 extern pri_t minclsyspri;
 256 static ddi_eventcookie_t        sf_insert_eid;
 257 static ddi_eventcookie_t        sf_remove_eid;
 258 
 259 static ndi_event_definition_t   sf_event_defs[] = {
 260 { SF_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL, 0 },
 261 { SF_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT, 0 }
 262 };
 263 
 264 #define SF_N_NDI_EVENTS \
 265         (sizeof (sf_event_defs) / sizeof (ndi_event_definition_t))
 266 
 267 #ifdef DEBUG
 268 static int sf_lip_flag = 1;             /* bool: to allow LIPs */
 269 static int sf_reset_flag = 1;           /* bool: to allow reset after LIP */
 270 static int sf_abort_flag = 0;           /* bool: to do just one abort */
 271 #endif
 272 
 273 extern int64_t ddi_get_lbolt64(void);
 274 
 275 /*
 276  * for converting between target number (switch) and hard address/AL_PA
 277  */
 278 static uchar_t sf_switch_to_alpa[] = {
 279         0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
 280         0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
 281         0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
 282         0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
 283         0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
 284         0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
 285         0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
 286         0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
 287         0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
 288         0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
 289         0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
 290         0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
 291         0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
 292 };
 293 
 294 static uchar_t sf_alpa_to_switch[] = {
 295         0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
 296         0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
 297         0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
 298         0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
 299         0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
 300         0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
 301         0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
 302         0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
 303         0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
 304         0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
 305         0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
 306         0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
 307         0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
 308         0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
 309         0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
 310         0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
 311         0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
 312         0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
 313         0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
 314         0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
 315         0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
 316         0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
 317         0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
 318         0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 319 };
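/*
 * the two tables above are inverses of one another for the valid NL_Port
 * AL_PA values; for example, target (switch) 0 uses hard address/AL_PA
 * 0xef, and sf_alpa_to_switch[0xef] maps back to 0
 */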
 320 
 321 /*
 322  * these macros call the proper transport-layer function given
 323  * a particular transport
 324  */
 325 #define soc_transport(a, b, c, d) (*a->fcal_ops->fcal_transport)(b, c, d)
 326 #define soc_transport_poll(a, b, c, d)\
 327         (*a->fcal_ops->fcal_transport_poll)(b, c, d)
 328 #define soc_get_lilp_map(a, b, c, d, e)\
 329         (*a->fcal_ops->fcal_lilp_map)(b, c, d, e)
 330 #define soc_force_lip(a, b, c, d, e)\
 331         (*a->fcal_ops->fcal_force_lip)(b, c, d, e)
 332 #define soc_abort(a, b, c, d, e)\
 333         (*a->fcal_ops->fcal_abort_cmd)(b, c, d, e)
 334 #define soc_force_reset(a, b, c, d)\
 335         (*a->fcal_ops->fcal_force_reset)(b, c, d)
 336 #define soc_add_ulp(a, b, c, d, e, f, g, h)\
 337         (*a->fcal_ops->fcal_add_ulp)(b, c, d, e, f, g, h)
 338 #define soc_remove_ulp(a, b, c, d, e)\
 339         (*a->fcal_ops->fcal_remove_ulp)(b, c, d, e)
 340 #define soc_take_core(a, b) (*a->fcal_ops->fcal_take_core)(b)
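/*
 * in each macro the first argument is the struct fcal_transport handle
 * (e.g. sf->sf_sochandle), whose fcal_ops vector supplies the function to
 * call; the remaining arguments are passed through unchanged.  For example,
 *
 *      soc_force_lip(sf->sf_sochandle, sf->sf_socp, portno, 0, FCAL_NO_LIP)
 *
 * expands to
 *
 *      (*sf->sf_sochandle->fcal_ops->fcal_force_lip)(sf->sf_socp, portno,
 *          0, FCAL_NO_LIP)
 */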
 341 
 342 
 343 /* power management property defines (should be in a common include file?) */
 344 #define PM_HARDWARE_STATE_PROP          "pm-hardware-state"
 345 #define PM_NEEDS_SUSPEND_RESUME         "needs-suspend-resume"
 346 
 347 
 348 /* node properties */
 349 #define NODE_WWN_PROP                   "node-wwn"
 350 #define PORT_WWN_PROP                   "port-wwn"
 351 #define LIP_CNT_PROP                    "lip-count"
 352 #define TARGET_PROP                     "target"
 353 #define LUN_PROP                        "lun"
 354 
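/*
 * sf_scsi_tgt_init() below reads the port-wwn, lip-count and lun properties
 * back off each child node in order to locate the corresponding sf_target
 */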
 355 
 356 /*
 357  * initialize this driver and install this module
 358  */
 359 int
 360 _init(void)
 361 {
 362         int     i;
 363 
 364         i = ddi_soft_state_init(&sf_state, sizeof (struct sf),
 365             SF_INIT_ITEMS);
 366         if (i != 0)
 367                 return (i);
 368 
 369         if ((i = scsi_hba_init(&modlinkage)) != 0) {
 370                 ddi_soft_state_fini(&sf_state);
 371                 return (i);
 372         }
 373 
 374         mutex_init(&sf_global_mutex, NULL, MUTEX_DRIVER, NULL);
 375         sf_watch_running = 0;
 376         cv_init(&sf_watch_cv, NULL, CV_DRIVER, NULL);
 377 
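        /*
         * mod_install() must come last: once the module is installed,
         * attach can be called, so the soft-state list, global mutex,
         * and watchdog condition variable must already be set up
         */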
 378         if ((i = mod_install(&modlinkage)) != 0) {
 379                 mutex_destroy(&sf_global_mutex);
 380                 cv_destroy(&sf_watch_cv);
 381                 scsi_hba_fini(&modlinkage);
 382                 ddi_soft_state_fini(&sf_state);
 383                 return (i);
 384         }
 385 
 386         return (i);
 387 }
 388 
 389 
 390 /*
 391  * remove this driver module from the system
 392  */
 393 int
 394 _fini(void)
 395 {
 396         int     i;
 397 
 398         if ((i = mod_remove(&modlinkage)) == 0) {
 399                 scsi_hba_fini(&modlinkage);
 400                 mutex_destroy(&sf_global_mutex);
 401                 cv_destroy(&sf_watch_cv);
 402                 ddi_soft_state_fini(&sf_state);
 403         }
 404         return (i);
 405 }
 406 
 407 
 408 int
 409 _info(struct modinfo *modinfop)
 410 {
 411         return (mod_info(&modlinkage, modinfop));
 412 }
 413 
 414 /*
 415  * Given the device number return the devinfo pointer or instance
 416  */
 417 /*ARGSUSED*/
 418 static int
 419 sf_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
 420 {
 421         int             instance = SF_MINOR2INST(getminor((dev_t)arg));
 422         struct sf       *sf;
 423 
 424         switch (infocmd) {
 425         case DDI_INFO_DEVT2DEVINFO:
 426                 sf = ddi_get_soft_state(sf_state, instance);
 427                 if (sf != NULL)
 428                         *result = sf->sf_dip;
 429                 else {
 430                         *result = NULL;
 431                         return (DDI_FAILURE);
 432                 }
 433                 break;
 434 
 435         case DDI_INFO_DEVT2INSTANCE:
 436                 *result = (void *)(uintptr_t)instance;
 437                 break;
 438         default:
 439                 return (DDI_FAILURE);
 440         }
 441         return (DDI_SUCCESS);
 442 }
 443 
 444 /*
 445  * either attach or resume this driver
 446  */
 447 static int
 448 sf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
 449 {
 450         int instance;
 451         int mutex_initted = FALSE;
 452         uint_t ccount;
 453         size_t i, real_size;
 454         struct fcal_transport *handle;
 455         char buf[64];
 456         struct sf *sf, *tsf;
 457         scsi_hba_tran_t *tran = NULL;
 458         int     handle_bound = FALSE;
 459         kthread_t *tp;
 460 
 461 
 462         switch ((int)cmd) {
 463 
 464         case DDI_RESUME:
 465 
 466                 /*
 467                  * we've previously been SF_STATE_OFFLINEd by a DDI_SUSPEND,
 468                  * so time to undo that and get going again by forcing a
 469                  * lip
 470                  */
 471 
 472                 instance = ddi_get_instance(dip);
 473 
 474                 sf = ddi_get_soft_state(sf_state, instance);
 475                 SF_DEBUG(2, (sf, CE_CONT,
 476                     "sf_attach: DDI_RESUME for sf%d\n", instance));
 477                 if (sf == NULL) {
 478                         cmn_err(CE_WARN, "sf%d: bad soft state", instance);
 479                         return (DDI_FAILURE);
 480                 }
 481 
 482                 /*
 483                  * clear suspended flag so that normal operations can resume
 484                  */
 485                 mutex_enter(&sf->sf_mutex);
 486                 sf->sf_state &= ~SF_STATE_SUSPENDED;
 487                 mutex_exit(&sf->sf_mutex);
 488 
 489                 /*
 490                  * force a login by setting our state to offline
 491                  */
 492                 sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
 493                 sf->sf_state = SF_STATE_OFFLINE;
 494 
 495                 /*
 496                  * call transport routine to register state change and
 497                  * ELS callback routines (to register us as a ULP)
 498                  */
 499                 soc_add_ulp(sf->sf_sochandle, sf->sf_socp,
 500                     sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP,
 501                     sf_statec_callback, sf_unsol_els_callback, NULL, sf);
 502 
 503                 /*
 504                  * call transport routine to force loop initialization
 505                  */
 506                 (void) soc_force_lip(sf->sf_sochandle, sf->sf_socp,
 507                     sf->sf_sochandle->fcal_portno, 0, FCAL_NO_LIP);
 508 
 509                 /*
                 * increment the watchdog reference count, starting the
                 * watchdog timeout if we are the first (since somebody
                 * has to do it)
 512                  */
 513                 mutex_enter(&sf_global_mutex);
 514                 if (!sf_watchdog_init++) {
 515                         mutex_exit(&sf_global_mutex);
 516                         sf_watchdog_id = timeout(sf_watch,
 517                             (caddr_t)0, sf_watchdog_tick);
 518                 } else {
 519                         mutex_exit(&sf_global_mutex);
 520                 }
 521 
 522                 return (DDI_SUCCESS);
 523 
 524         case DDI_ATTACH:
 525 
 526                 /*
 527                  * this instance attaching for the first time
 528                  */
 529 
 530                 instance = ddi_get_instance(dip);
 531 
 532                 if (ddi_soft_state_zalloc(sf_state, instance) !=
 533                     DDI_SUCCESS) {
 534                         cmn_err(CE_WARN, "sf%d: failed to allocate soft state",
 535                             instance);
 536                         return (DDI_FAILURE);
 537                 }
 538 
 539                 sf = ddi_get_soft_state(sf_state, instance);
 540                 SF_DEBUG(4, (sf, CE_CONT,
 541                     "sf_attach: DDI_ATTACH for sf%d\n", instance));
 542                 if (sf == NULL) {
 543                         /* this shouldn't happen since we just allocated it */
 544                         cmn_err(CE_WARN, "sf%d: bad soft state", instance);
 545                         return (DDI_FAILURE);
 546                 }
 547 
 548                 /*
 549                  * from this point on, if there's an error, we must de-allocate
 550                  * soft state before returning DDI_FAILURE
 551                  */
 552 
 553                 if ((handle = ddi_get_parent_data(dip)) == NULL) {
 554                         cmn_err(CE_WARN,
 555                             "sf%d: failed to obtain transport handle",
 556                             instance);
 557                         goto fail;
 558                 }
 559 
 560                 /* fill in our soft state structure */
 561                 sf->sf_dip = dip;
 562                 sf->sf_state = SF_STATE_INIT;
 563                 sf->sf_throttle = handle->fcal_cmdmax;
 564                 sf->sf_sochandle = handle;
 565                 sf->sf_socp = handle->fcal_handle;
 566                 sf->sf_check_n_close = 0;
 567 
 568                 /* create a command/response buffer pool for this instance */
 569                 if (sf_add_cr_pool(sf) != DDI_SUCCESS) {
 570                         cmn_err(CE_WARN,
 571                             "sf%d: failed to allocate command/response pool",
 572                             instance);
 573                         goto fail;
 574                 }
 575 
                /* create a kmem cache of packets for this instance */
 577                 (void) sprintf(buf, "sf%d_cache", instance);
 578                 sf->sf_pkt_cache = kmem_cache_create(buf,
 579                     sizeof (fcal_packet_t) + sizeof (struct sf_pkt) +
 580                     scsi_pkt_size(), 8,
 581                     sf_kmem_cache_constructor, sf_kmem_cache_destructor,
 582                     NULL, NULL, NULL, 0);
 583                 if (sf->sf_pkt_cache == NULL) {
 584                         cmn_err(CE_WARN, "sf%d: failed to allocate kmem cache",
 585                             instance);
 586                         goto fail;
 587                 }
 588 
 589                 /* set up a handle and allocate memory for DMA */
 590                 if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->
 591                     fcal_dmaattr, DDI_DMA_DONTWAIT, NULL, &sf->
 592                     sf_lilp_dmahandle) != DDI_SUCCESS) {
 593                         cmn_err(CE_WARN,
 594                             "sf%d: failed to allocate dma handle for lilp map",
 595                             instance);
 596                         goto fail;
 597                 }
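                /*
                 * allocate one extra byte so that the map address and DMA
                 * cookie can be rounded up to a 16-bit boundary further below
                 */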
 598                 i = sizeof (struct fcal_lilp_map) + 1;
 599                 if (ddi_dma_mem_alloc(sf->sf_lilp_dmahandle,
 600                     i, sf->sf_sochandle->
 601                     fcal_accattr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
 602                     (caddr_t *)&sf->sf_lilp_map, &real_size,
 603                     &sf->sf_lilp_acchandle) != DDI_SUCCESS) {
 604                         cmn_err(CE_WARN, "sf%d: failed to allocate lilp map",
 605                             instance);
 606                         goto fail;
 607                 }
 608                 if (real_size < i) {
                        /* note: no error message is logged for this failure */
 610                         goto fail;              /* trouble allocating memory */
 611                 }
 612 
 613                 /*
 614                  * set up the address for the DMA transfers (getting a cookie)
 615                  */
 616                 if (ddi_dma_addr_bind_handle(sf->sf_lilp_dmahandle, NULL,
 617                     (caddr_t)sf->sf_lilp_map, real_size,
 618                     DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
 619                     &sf->sf_lilp_dmacookie, &ccount) != DDI_DMA_MAPPED) {
 620                         cmn_err(CE_WARN,
 621                             "sf%d: failed to bind dma handle for lilp map",
 622                             instance);
 623                         goto fail;
 624                 }
 625                 handle_bound = TRUE;
 626                 /* ensure only one cookie was allocated */
 627                 if (ccount != 1) {
 628                         goto fail;
 629                 }
 630 
                /*
                 * round the lilp map and DMA cookie addresses up to a
                 * 16-bit (even) boundary
                 */
 632                 sf->sf_lilp_map = (struct fcal_lilp_map *)(((uintptr_t)sf->
 633                     sf_lilp_map + 1) & ~1);
 634                 sf->sf_lilp_dmacookie.dmac_address = (sf->
 635                     sf_lilp_dmacookie.dmac_address + 1) & ~1;
 636 
 637                 /* set up all of our mutexes and condition variables */
 638                 mutex_init(&sf->sf_mutex, NULL, MUTEX_DRIVER, NULL);
 639                 mutex_init(&sf->sf_cmd_mutex, NULL, MUTEX_DRIVER, NULL);
 640                 mutex_init(&sf->sf_cr_mutex, NULL, MUTEX_DRIVER, NULL);
 641                 mutex_init(&sf->sf_hp_daemon_mutex, NULL, MUTEX_DRIVER, NULL);
 642                 cv_init(&sf->sf_cr_cv, NULL, CV_DRIVER, NULL);
 643                 cv_init(&sf->sf_hp_daemon_cv, NULL, CV_DRIVER, NULL);
 644 
 645                 mutex_initted = TRUE;
 646 
 647                 /* create our devctl minor node */
 648                 if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
 649                     SF_INST2DEVCTL_MINOR(instance),
 650                     DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
 651                         cmn_err(CE_WARN, "sf%d: ddi_create_minor_node failed"
 652                             " for devctl", instance);
 653                         goto fail;
 654                 }
 655 
 656                 /* create fc minor node */
 657                 if (ddi_create_minor_node(dip, "fc", S_IFCHR,
 658                     SF_INST2FC_MINOR(instance), DDI_NT_FC_ATTACHMENT_POINT,
 659                     0) != DDI_SUCCESS) {
 660                         cmn_err(CE_WARN, "sf%d: ddi_create_minor_node failed"
 661                             " for fc", instance);
 662                         goto fail;
 663                 }
 664                 /* allocate a SCSI transport structure */
 665                 tran = scsi_hba_tran_alloc(dip, 0);
 666                 if (tran == NULL) {
 667                         /* remove all minor nodes created */
 668                         ddi_remove_minor_node(dip, NULL);
 669                         cmn_err(CE_WARN, "sf%d: scsi_hba_tran_alloc failed",
 670                             instance);
 671                         goto fail;
 672                 }
 673 
 674                 /* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
 675                 scsi_size_clean(dip);           /* SCSI_SIZE_CLEAN_VERIFY ok */
 676 
 677                 /* save ptr to new transport structure and fill it in */
 678                 sf->sf_tran = tran;
 679 
 680                 tran->tran_hba_private               = sf;
 681                 tran->tran_tgt_private               = NULL;
 682                 tran->tran_tgt_init          = sf_scsi_tgt_init;
 683                 tran->tran_tgt_probe         = NULL;
 684                 tran->tran_tgt_free          = sf_scsi_tgt_free;
 685 
 686                 tran->tran_start             = sf_start;
 687                 tran->tran_abort             = sf_abort;
 688                 tran->tran_reset             = sf_reset;
 689                 tran->tran_getcap            = sf_getcap;
 690                 tran->tran_setcap            = sf_setcap;
 691                 tran->tran_init_pkt          = sf_scsi_init_pkt;
 692                 tran->tran_destroy_pkt               = sf_scsi_destroy_pkt;
 693                 tran->tran_dmafree           = sf_scsi_dmafree;
 694                 tran->tran_sync_pkt          = sf_scsi_sync_pkt;
 695                 tran->tran_reset_notify              = sf_scsi_reset_notify;
 696 
 697                 /*
 698                  * register event notification routines with scsa
 699                  */
 700                 tran->tran_get_eventcookie   = sf_bus_get_eventcookie;
 701                 tran->tran_add_eventcall     = sf_bus_add_eventcall;
 702                 tran->tran_remove_eventcall  = sf_bus_remove_eventcall;
 703                 tran->tran_post_event                = sf_bus_post_event;
 704 
 705                 /*
 706                  * register bus configure/unconfigure
 707                  */
 708                 tran->tran_bus_config                = sf_scsi_bus_config;
 709                 tran->tran_bus_unconfig              = sf_scsi_bus_unconfig;
 710 
 711                 /*
 712                  * allocate an ndi event handle
 713                  */
 714                 sf->sf_event_defs = (ndi_event_definition_t *)
 715                     kmem_zalloc(sizeof (sf_event_defs), KM_SLEEP);
 716 
 717                 bcopy(sf_event_defs, sf->sf_event_defs,
 718                     sizeof (sf_event_defs));
 719 
 720                 (void) ndi_event_alloc_hdl(dip, NULL,
 721                     &sf->sf_event_hdl, NDI_SLEEP);
 722 
 723                 sf->sf_events.ndi_events_version = NDI_EVENTS_REV1;
 724                 sf->sf_events.ndi_n_events = SF_N_NDI_EVENTS;
 725                 sf->sf_events.ndi_event_defs = sf->sf_event_defs;
 726 
 727                 if (ndi_event_bind_set(sf->sf_event_hdl,
 728                     &sf->sf_events, NDI_SLEEP) != NDI_SUCCESS) {
 729                         goto fail;
 730                 }
 731 
 732                 tran->tran_get_name          = sf_scsi_get_name;
 733                 tran->tran_get_bus_addr              = sf_scsi_get_bus_addr;
 734 
 735                 /* setup and attach SCSI hba transport */
 736                 if (scsi_hba_attach_setup(dip, sf->sf_sochandle->
 737                     fcal_dmaattr, tran, SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
 738                         cmn_err(CE_WARN, "sf%d: scsi_hba_attach_setup failed",
 739                             instance);
 740                         goto fail;
 741                 }
 742 
 743                 /* set up kstats */
 744                 if ((sf->sf_ksp = kstat_create("sf", instance, "statistics",
 745                     "controller", KSTAT_TYPE_RAW, sizeof (struct sf_stats),
 746                     KSTAT_FLAG_VIRTUAL)) == NULL) {
 747                         cmn_err(CE_WARN, "sf%d: failed to create kstat",
 748                             instance);
 749                 } else {
 750                         sf->sf_stats.version = 2;
 751                         (void) sprintf(sf->sf_stats.drvr_name,
 752                         "%s: %s", SF_NAME, sf_version);
 753                         sf->sf_ksp->ks_data = (void *)&sf->sf_stats;
 754                         sf->sf_ksp->ks_private = sf;
 755                         sf->sf_ksp->ks_update = sf_kstat_update;
 756                         kstat_install(sf->sf_ksp);
 757                 }
 758 
 759                 /* create the hotplug thread */
 760                 mutex_enter(&sf->sf_hp_daemon_mutex);
 761                 tp = thread_create(NULL, 0,
 762                     (void (*)())sf_hp_daemon, sf, 0, &p0, TS_RUN, minclsyspri);
 763                 sf->sf_hp_tid = tp->t_did;
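                /* save the kernel thread ID so sf_detach() can thread_join() it */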
 764                 mutex_exit(&sf->sf_hp_daemon_mutex);
 765 
 766                 /* add this soft state instance to the head of the list */
 767                 mutex_enter(&sf_global_mutex);
 768                 sf->sf_next = sf_head;
 769                 tsf = sf_head;
 770                 sf_head = sf;
 771 
 772                 /*
 773                  * find entry in list that has the same FC-AL handle (if any)
 774                  */
 775                 while (tsf != NULL) {
 776                         if (tsf->sf_socp == sf->sf_socp) {
 777                                 break;          /* found matching entry */
 778                         }
 779                         tsf = tsf->sf_next;
 780                 }
 781 
 782                 if (tsf != NULL) {
 783                         /* if we found a matching entry keep track of it */
 784                         sf->sf_sibling = tsf;
 785                 }
 786 
 787                 /*
                 * increment the watchdog reference count, starting the
                 * watchdog timeout if we are the first (since somebody
                 * has to do it)
 790                  */
 791                 if (!sf_watchdog_init++) {
 792                         mutex_exit(&sf_global_mutex);
 793                         sf_watchdog_tick = sf_watchdog_timeout *
 794                             drv_usectohz(1000000);
 795                         sf_watchdog_id = timeout(sf_watch,
 796                             NULL, sf_watchdog_tick);
 797                 } else {
 798                         mutex_exit(&sf_global_mutex);
 799                 }
 800 
 801                 if (tsf != NULL) {
 802                         /*
 803                          * set up matching entry to be our sibling
 804                          */
 805                         mutex_enter(&tsf->sf_mutex);
 806                         tsf->sf_sibling = sf;
 807                         mutex_exit(&tsf->sf_mutex);
 808                 }
 809 
 810                 /*
 811                  * create this property so that PM code knows we want
 812                  * to be suspended at PM time
 813                  */
 814                 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
 815                     PM_HARDWARE_STATE_PROP, PM_NEEDS_SUSPEND_RESUME);
 816 
 817                 /* log the fact that we have a new device */
 818                 ddi_report_dev(dip);
 819 
 820                 /*
 821                  * force a login by setting our state to offline
 822                  */
 823                 sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
 824                 sf->sf_state = SF_STATE_OFFLINE;
 825 
 826                 /*
 827                  * call transport routine to register state change and
 828                  * ELS callback routines (to register us as a ULP)
 829                  */
 830                 soc_add_ulp(sf->sf_sochandle, sf->sf_socp,
 831                     sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP,
 832                     sf_statec_callback, sf_unsol_els_callback, NULL, sf);
 833 
 834                 /*
 835                  * call transport routine to force loop initialization
 836                  */
 837                 (void) soc_force_lip(sf->sf_sochandle, sf->sf_socp,
 838                     sf->sf_sochandle->fcal_portno, 0, FCAL_NO_LIP);
 839                 sf->sf_reset_time = ddi_get_lbolt64();
 840                 return (DDI_SUCCESS);
 841 
 842         default:
 843                 return (DDI_FAILURE);
 844         }
 845 
 846 fail:
 847         cmn_err(CE_WARN, "sf%d: failed to attach", instance);
 848 
 849         /*
 850          * Unbind and free event set
 851          */
 852         if (sf->sf_event_hdl) {
 853                 (void) ndi_event_unbind_set(sf->sf_event_hdl,
 854                     &sf->sf_events, NDI_SLEEP);
 855                 (void) ndi_event_free_hdl(sf->sf_event_hdl);
 856         }
 857 
 858         if (sf->sf_event_defs) {
 859                 kmem_free(sf->sf_event_defs, sizeof (sf_event_defs));
 860         }
 861 
 862         if (sf->sf_tran != NULL) {
 863                 scsi_hba_tran_free(sf->sf_tran);
 864         }
 865         while (sf->sf_cr_pool != NULL) {
 866                 sf_crpool_free(sf);
 867         }
 868         if (sf->sf_lilp_dmahandle != NULL) {
 869                 if (handle_bound) {
 870                         (void) ddi_dma_unbind_handle(sf->sf_lilp_dmahandle);
 871                 }
 872                 ddi_dma_free_handle(&sf->sf_lilp_dmahandle);
 873         }
 874         if (sf->sf_pkt_cache != NULL) {
 875                 kmem_cache_destroy(sf->sf_pkt_cache);
 876         }
 877         if (sf->sf_lilp_map != NULL) {
 878                 ddi_dma_mem_free(&sf->sf_lilp_acchandle);
 879         }
 880         if (sf->sf_ksp != NULL) {
 881                 kstat_delete(sf->sf_ksp);
 882         }
 883         if (mutex_initted) {
 884                 mutex_destroy(&sf->sf_mutex);
 885                 mutex_destroy(&sf->sf_cmd_mutex);
 886                 mutex_destroy(&sf->sf_cr_mutex);
 887                 mutex_destroy(&sf->sf_hp_daemon_mutex);
 888                 cv_destroy(&sf->sf_cr_cv);
 889                 cv_destroy(&sf->sf_hp_daemon_cv);
 890         }
 891         mutex_enter(&sf_global_mutex);
 892 
 893         /*
 894          * kill off the watchdog if we are the last instance
 895          */
 896         if (!--sf_watchdog_init) {
 897                 timeout_id_t tid = sf_watchdog_id;
 898                 mutex_exit(&sf_global_mutex);
 899                 (void) untimeout(tid);
 900         } else {
 901                 mutex_exit(&sf_global_mutex);
 902         }
 903 
 904         ddi_soft_state_free(sf_state, instance);
 905 
 906         if (tran != NULL) {
 907                 /* remove all minor nodes */
 908                 ddi_remove_minor_node(dip, NULL);
 909         }
 910 
 911         return (DDI_FAILURE);
 912 }
 913 
 914 
 915 /* ARGSUSED */
 916 static int
 917 sf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
 918 {
 919         struct sf               *sf;
 920         int                     instance;
 921         int                     i;
 922         struct sf_target        *target;
 923         timeout_id_t            tid;
 924 
 925 
 926 
 927         /* NO OTHER THREADS ARE RUNNING */
 928 
 929         instance = ddi_get_instance(dip);
 930 
 931         if ((sf = ddi_get_soft_state(sf_state, instance)) == NULL) {
 932                 cmn_err(CE_WARN, "sf_detach, sf%d: bad soft state", instance);
 933                 return (DDI_FAILURE);
 934         }
 935 
 936         switch (cmd) {
 937 
 938         case DDI_SUSPEND:
 939                 /*
 940                  * suspend our instance
 941                  */
 942 
 943                 SF_DEBUG(2, (sf, CE_CONT,
 944                     "sf_detach: DDI_SUSPEND for sf%d\n", instance));
 945                 /*
                 * There is a race condition in socal: if a ULP removes
                 * itself from the callback list while socal is walking
                 * that list in its callback loop, the cblist entry becomes
                 * junk and the system will panic when socal dereferences
                 * cblist->next.
 950                  */
 951 
                /* call transport to remove and unregister our callbacks */
 953                 soc_remove_ulp(sf->sf_sochandle, sf->sf_socp,
 954                     sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP, sf);
 955 
 956                 /*
 957                  * begin process of clearing outstanding commands
 958                  * by issuing a lip
 959                  */
 960                 sf_force_lip(sf);
 961 
 962                 /*
 963                  * toggle the device OFFLINE in order to cause
 964                  * outstanding commands to drain
 965                  */
 966                 mutex_enter(&sf->sf_mutex);
 967                 sf->sf_lip_cnt++;
 968                 sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
 969                 sf->sf_state = (SF_STATE_OFFLINE | SF_STATE_SUSPENDED);
 970                 for (i = 0; i < sf_max_targets; i++) {
 971                         target = sf->sf_targets[i];
 972                         if (target != NULL) {
 973                                 struct sf_target *ntarget;
 974 
 975                                 mutex_enter(&target->sft_mutex);
 976                                 if (!(target->sft_state & SF_TARGET_OFFLINE)) {
 977                                         target->sft_state |=
 978                                             (SF_TARGET_BUSY | SF_TARGET_MARK);
 979                                 }
 980                                 /* do this for all LUNs as well */
 981                                 for (ntarget = target->sft_next_lun;
 982                                     ntarget;
 983                                     ntarget = ntarget->sft_next_lun) {
 984                                         mutex_enter(&ntarget->sft_mutex);
 985                                         if (!(ntarget->sft_state &
 986                                             SF_TARGET_OFFLINE)) {
 987                                                 ntarget->sft_state |=
 988                                                     (SF_TARGET_BUSY |
 989                                                     SF_TARGET_MARK);
 990                                         }
 991                                         mutex_exit(&ntarget->sft_mutex);
 992                                 }
 993                                 mutex_exit(&target->sft_mutex);
 994                         }
 995                 }
 996                 mutex_exit(&sf->sf_mutex);
 997                 mutex_enter(&sf_global_mutex);
 998 
 999                 /*
1000                  * kill off the watchdog if we are the last instance
1001                  */
1002                 if (!--sf_watchdog_init) {
1003                         tid = sf_watchdog_id;
1004                         mutex_exit(&sf_global_mutex);
1005                         (void) untimeout(tid);
1006                 } else {
1007                         mutex_exit(&sf_global_mutex);
1008                 }
1009 
1010                 return (DDI_SUCCESS);
1011 
1012         case DDI_DETACH:
1013                 /*
1014                  * detach this instance
1015                  */
1016 
1017                 SF_DEBUG(2, (sf, CE_CONT,
1018                     "sf_detach: DDI_DETACH for sf%d\n", instance));
1019 
1020                 /* remove this "sf" from the list of sf softstates */
1021                 sf_softstate_unlink(sf);
1022 
1023                 /*
1024                  * prior to taking any DDI_DETACH actions, toggle the
1025                  * device OFFLINE in order to cause outstanding
1026                  * commands to drain
1027                  */
1028                 mutex_enter(&sf->sf_mutex);
1029                 sf->sf_lip_cnt++;
1030                 sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
1031                 sf->sf_state = SF_STATE_OFFLINE;
1032                 for (i = 0; i < sf_max_targets; i++) {
1033                         target = sf->sf_targets[i];
1034                         if (target != NULL) {
1035                                 struct sf_target *ntarget;
1036 
1037                                 mutex_enter(&target->sft_mutex);
1038                                 if (!(target->sft_state & SF_TARGET_OFFLINE)) {
1039                                         target->sft_state |=
1040                                             (SF_TARGET_BUSY | SF_TARGET_MARK);
1041                                 }
1042                                 for (ntarget = target->sft_next_lun;
1043                                     ntarget;
1044                                     ntarget = ntarget->sft_next_lun) {
1045                                         mutex_enter(&ntarget->sft_mutex);
1046                                         if (!(ntarget->sft_state &
1047                                             SF_TARGET_OFFLINE)) {
1048                                                 ntarget->sft_state |=
1049                                                     (SF_TARGET_BUSY |
1050                                                     SF_TARGET_MARK);
1051                                         }
1052                                         mutex_exit(&ntarget->sft_mutex);
1053                                 }
1054                                 mutex_exit(&target->sft_mutex);
1055                         }
1056                 }
1057                 mutex_exit(&sf->sf_mutex);
1058 
1059                 /* call transport to remove and unregister our callbacks */
1060                 soc_remove_ulp(sf->sf_sochandle, sf->sf_socp,
1061                     sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP, sf);
1062 
1063                 /*
1064                  * kill off the watchdog if we are the last instance
1065                  */
1066                 mutex_enter(&sf_global_mutex);
1067                 if (!--sf_watchdog_init) {
1068                         tid = sf_watchdog_id;
1069                         mutex_exit(&sf_global_mutex);
1070                         (void) untimeout(tid);
1071                 } else {
1072                         mutex_exit(&sf_global_mutex);
1073                 }
1074 
1075                 /* signal sf_hp_daemon() to exit and wait for exit */
1076                 mutex_enter(&sf->sf_hp_daemon_mutex);
1077                 ASSERT(sf->sf_hp_tid);
1078                 sf->sf_hp_exit = 1;          /* flag exit */
1079                 cv_signal(&sf->sf_hp_daemon_cv);
1080                 mutex_exit(&sf->sf_hp_daemon_mutex);
1081                 thread_join(sf->sf_hp_tid);  /* wait for hotplug to exit */
1082 
1083                 /*
1084                  * Unbind and free event set
1085                  */
1086                 if (sf->sf_event_hdl) {
1087                         (void) ndi_event_unbind_set(sf->sf_event_hdl,
1088                             &sf->sf_events, NDI_SLEEP);
1089                         (void) ndi_event_free_hdl(sf->sf_event_hdl);
1090                 }
1091 
1092                 if (sf->sf_event_defs) {
1093                         kmem_free(sf->sf_event_defs, sizeof (sf_event_defs));
1094                 }
1095 
1096                 /* detach this instance of the HBA driver */
1097                 (void) scsi_hba_detach(dip);
1098                 scsi_hba_tran_free(sf->sf_tran);
1099 
1100                 /* deallocate/unbind DMA handle for lilp map */
1101                 if (sf->sf_lilp_map != NULL) {
1102                         (void) ddi_dma_unbind_handle(sf->sf_lilp_dmahandle);
1103                         if (sf->sf_lilp_dmahandle != NULL) {
1104                                 ddi_dma_free_handle(&sf->sf_lilp_dmahandle);
1105                         }
1106                         ddi_dma_mem_free(&sf->sf_lilp_acchandle);
1107                 }
1108 
1109                 /*
1110                  * the kmem cache must be destroyed before free'ing
1111                  * up the crpools
1112                  *
1113                  * our finagle of "ntot" and "nfree"
1114                  * causes an ASSERT failure in "sf_cr_free()"
1115                  * if the kmem cache is free'd after invoking
1116                  * "sf_crpool_free()".
1117                  */
1118                 kmem_cache_destroy(sf->sf_pkt_cache);
1119 
1120                 SF_DEBUG(2, (sf, CE_CONT,
1121                     "sf_detach: sf_crpool_free() for instance 0x%x\n",
1122                     instance));
1123                 while (sf->sf_cr_pool != NULL) {
1124                         /*
1125                          * set ntot to nfree for this particular entry
1126                          *
1127                          * this causes sf_crpool_free() to update
1128                          * the cr_pool list when deallocating this entry
1129                          */
1130                         sf->sf_cr_pool->ntot = sf->sf_cr_pool->nfree;
1131                         sf_crpool_free(sf);
1132                 }
1133 
1134                 /*
1135                  * now that the cr_pool's are gone it's safe
1136                  * to destroy all softstate mutex's and cv's
1137                  */
1138                 mutex_destroy(&sf->sf_mutex);
1139                 mutex_destroy(&sf->sf_cmd_mutex);
1140                 mutex_destroy(&sf->sf_cr_mutex);
1141                 mutex_destroy(&sf->sf_hp_daemon_mutex);
1142                 cv_destroy(&sf->sf_cr_cv);
1143                 cv_destroy(&sf->sf_hp_daemon_cv);
1144 
1145                 /* remove all minor nodes from the device tree */
1146                 ddi_remove_minor_node(dip, NULL);
1147 
1148                 /* remove properties created during attach() */
1149                 ddi_prop_remove_all(dip);
1150 
1151                 /* remove kstat's if present */
1152                 if (sf->sf_ksp != NULL) {
1153                         kstat_delete(sf->sf_ksp);
1154                 }
1155 
1156                 SF_DEBUG(2, (sf, CE_CONT,
1157                     "sf_detach: ddi_soft_state_free() for instance 0x%x\n",
1158                     instance));
1159                 ddi_soft_state_free(sf_state, instance);
1160                 return (DDI_SUCCESS);
1161 
1162         default:
1163                 SF_DEBUG(2, (sf, CE_CONT, "sf_detach: sf%d unknown cmd %x\n",
1164                     instance, (int)cmd));
1165                 return (DDI_FAILURE);
1166         }
1167 }
1168 
1169 
1170 /*
1171  * sf_softstate_unlink() - remove an sf instance from the list of softstates
1172  */
1173 static void
1174 sf_softstate_unlink(struct sf *sf)
1175 {
1176         struct sf       *sf_ptr;
1177         struct sf       *sf_found_sibling;
1178         struct sf       *sf_reposition = NULL;
1179 
1180 
1181         mutex_enter(&sf_global_mutex);
1182         while (sf_watch_running) {
1183                 /* Busy working the list -- wait */
1184                 cv_wait(&sf_watch_cv, &sf_global_mutex);
1185         }
1186         if ((sf_found_sibling = sf->sf_sibling) != NULL) {
1187                 /*
1188                  * we have a sibling so NULL out its reference to us
1189                  */
1190                 mutex_enter(&sf_found_sibling->sf_mutex);
1191                 sf_found_sibling->sf_sibling = NULL;
1192                 mutex_exit(&sf_found_sibling->sf_mutex);
1193         }
1194 
1195         /* remove our instance from the global list */
1196         if (sf == sf_head) {
                /* we were at the head of the list */
1198                 sf_head = sf->sf_next;
1199         } else {
1200                 /* find us in the list */
1201                 for (sf_ptr = sf_head;
1202                     sf_ptr != NULL;
1203                     sf_ptr = sf_ptr->sf_next) {
1204                         if (sf_ptr == sf) {
1205                                 break;
1206                         }
1207                         /* remember this place */
1208                         sf_reposition = sf_ptr;
1209                 }
1210                 ASSERT(sf_ptr == sf);
1211                 ASSERT(sf_reposition != NULL);
1212 
1213                 sf_reposition->sf_next = sf_ptr->sf_next;
1214         }
1215         mutex_exit(&sf_global_mutex);
1216 }
1217 
1218 
1219 static int
1220 sf_scsi_bus_config(dev_info_t *parent, uint_t flag,
1221     ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
1222 {
1223         int64_t         reset_delay;
1224         struct sf       *sf;
1225 
1226         sf = ddi_get_soft_state(sf_state, ddi_get_instance(parent));
1227         ASSERT(sf);
1228 
1229         reset_delay = (int64_t)(USEC_TO_TICK(SF_INIT_WAIT_TIMEOUT)) -
1230             (ddi_get_lbolt64() - sf->sf_reset_time);
1231         if (reset_delay < 0)
1232                 reset_delay = 0;
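        /*
         * reset_delay is the portion of SF_INIT_WAIT_TIMEOUT remaining since
         * the last forced LIP (sf_reset_time is set in sf_attach()); it is
         * passed to the NDI so that child configuration is held off while
         * the loop settles
         */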
1233 
1234         if (sf_bus_config_debug)
1235                 flag |= NDI_DEVI_DEBUG;
1236 
1237         return (ndi_busop_bus_config(parent, flag, op,
1238             arg, childp, (clock_t)reset_delay));
1239 }
1240 
1241 static int
1242 sf_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
1243     ddi_bus_config_op_t op, void *arg)
1244 {
1245         if (sf_bus_config_debug)
1246                 flag |= NDI_DEVI_DEBUG;
1247 
1248         return (ndi_busop_bus_unconfig(parent, flag, op, arg));
1249 }
1250 
1251 
1252 /*
1253  * called by transport to initialize a SCSI target
1254  */
1255 /* ARGSUSED */
1256 static int
1257 sf_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1258     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1259 {
1260 #ifdef RAID_LUNS
1261         int lun;
1262 #else
1263         int64_t lun;
1264 #endif
1265         struct sf_target *target;
1266         struct sf *sf = (struct sf *)hba_tran->tran_hba_private;
1267         int i, t_len;
1268         unsigned int lip_cnt;
1269         unsigned char wwn[FC_WWN_SIZE];
1270 
1271 
1272         /* get and validate our SCSI target ID */
1273         i = sd->sd_address.a_target;
1274         if (i >= sf_max_targets) {
1275                 return (DDI_NOT_WELL_FORMED);
1276         }
1277 
1278         /* get our port WWN property */
1279         t_len = sizeof (wwn);
1280         if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1281             DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, PORT_WWN_PROP,
1282             (caddr_t)&wwn, &t_len) != DDI_SUCCESS) {
1283                 /* no port WWN property - ignore the OBP stub node */
1284                 return (DDI_NOT_WELL_FORMED);
1285         }
1286 
1287         /* get our LIP count property */
1288         t_len = sizeof (lip_cnt);
1289         if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1290             DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, LIP_CNT_PROP,
1291             (caddr_t)&lip_cnt, &t_len) != DDI_SUCCESS) {
1292                 return (DDI_FAILURE);
1293         }
1294         /* and our LUN property */
1295         t_len = sizeof (lun);
1296         if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1297             DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "lun",
1298             (caddr_t)&lun, &t_len) != DDI_SUCCESS) {
1299                 return (DDI_FAILURE);
1300         }
1301 
1302         /* find the target structure for this instance */
1303         mutex_enter(&sf->sf_mutex);
1304         if ((target = sf_lookup_target(sf, wwn, lun)) == NULL) {
1305                 mutex_exit(&sf->sf_mutex);
1306                 return (DDI_FAILURE);
1307         }
1308 
1309         mutex_enter(&target->sft_mutex);
1310         if ((sf->sf_lip_cnt == lip_cnt) && !(target->sft_state
1311             & SF_TARGET_INIT_DONE)) {
1312                 /*
1313                  * set links between HBA transport and target structures
1314                  * and set done flag
1315                  */
1316                 hba_tran->tran_tgt_private = target;
1317                 target->sft_tran = hba_tran;
1318                 target->sft_state |= SF_TARGET_INIT_DONE;
1319         } else {
1320                 /* already initialized, or the LIP count has changed */
1321                 mutex_exit(&target->sft_mutex);
1322                 mutex_exit(&sf->sf_mutex);
1323                 return (DDI_FAILURE);
1324         }
1325         mutex_exit(&target->sft_mutex);
1326         mutex_exit(&sf->sf_mutex);
1327 
1328         return (DDI_SUCCESS);
1329 }
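
/*
 * Illustrative sketch (not compiled): the properties consumed by
 * sf_scsi_tgt_init() above -- the port WWN, the LIP count and the
 * LUN -- are presumably placed on the child node when this driver
 * creates it, elsewhere in this file.  Using the generic NDI property
 * interfaces, that would look roughly as follows; the variable names
 * are hypothetical.
 */
#if 0
        dev_info_t      *cdip;                  /* hypothetical child node */
        unsigned char   wwn[FC_WWN_SIZE];       /* target port WWN */
        int             lip_cnt, lun;

        (void) ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip,
            PORT_WWN_PROP, wwn, FC_WWN_SIZE);
        (void) ndi_prop_update_int(DDI_DEV_T_NONE, cdip, LIP_CNT_PROP,
            lip_cnt);
        (void) ndi_prop_update_int(DDI_DEV_T_NONE, cdip, "lun", lun);
#endif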
1330 
1331 
1332 /*
1333  * called by transport to free a target
1334  */
1335 /* ARGSUSED */
1336 static void
1337 sf_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1338     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1339 {
1340         struct sf_target *target = hba_tran->tran_tgt_private;
1341 
1342         if (target != NULL) {
1343                 mutex_enter(&target->sft_mutex);
1344                 target->sft_tran = NULL;
1345                 target->sft_state &= ~SF_TARGET_INIT_DONE;
1346                 mutex_exit(&target->sft_mutex);
1347         }
1348 }
1349 
1350 
1351 /*
1352  * allocator for non-std size pkt_private/status -- returns FALSE (0)
1353  * upon success, else returns TRUE upon failure
1354  */
1355 /*ARGSUSED*/
1356 static int
1357 sf_pkt_alloc_extern(struct sf *sf, struct sf_pkt *cmd,
1358     int tgtlen, int statuslen, int kf)
1359 {
1360         caddr_t scbp, tgt;
1361         int failure = FALSE;
1362         struct scsi_pkt *pkt = CMD2PKT(cmd);
1363 
1364 
1365         tgt = scbp = NULL;
1366 
1367         if (tgtlen > PKT_PRIV_LEN) {
1368                 if ((tgt = kmem_zalloc(tgtlen, kf)) == NULL) {
1369                         failure = TRUE;
1370                 } else {
1371                         cmd->cmd_flags |= CFLAG_PRIVEXTERN;
1372                         pkt->pkt_private = tgt;
1373                 }
1374         }
1375         if (statuslen > EXTCMDS_STATUS_SIZE) {
1376                 if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
1377                         failure = TRUE;
1378                 } else {
1379                         cmd->cmd_flags |= CFLAG_SCBEXTERN;
1380                         pkt->pkt_scbp = (opaque_t)scbp;
1381                 }
1382         }
1383         if (failure) {
1384                 sf_pkt_destroy_extern(sf, cmd);
1385         }
1386         return (failure);
1387 }
1388 
1389 
1390 /*
1391  * deallocator for non-std size pkt_private/status; also frees the sf_pkt
1392  */
1393 static void
1394 sf_pkt_destroy_extern(struct sf *sf, struct sf_pkt *cmd)
1395 {
1396         struct scsi_pkt *pkt = CMD2PKT(cmd);
1397 
1398         if (cmd->cmd_flags & CFLAG_FREE) {
1399                 cmn_err(CE_PANIC,
1400                     "sf_pkt_destroy_extern: freeing free packet");
1401                 _NOTE(NOT_REACHED)
1402                 /* NOTREACHED */
1403         }
1404         if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
1405                 kmem_free((caddr_t)pkt->pkt_scbp,
1406                     (size_t)cmd->cmd_scblen);
1407         }
1408         if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
1409                 kmem_free((caddr_t)pkt->pkt_private,
1410                     (size_t)cmd->cmd_privlen);
1411         }
1412 
1413         cmd->cmd_flags = CFLAG_FREE;
1414         kmem_cache_free(sf->sf_pkt_cache, (void *)cmd);
1415 }
1416 
1417 
1418 /*
1419  * create or initialize a SCSI packet -- called internally and
1420  * by the transport
1421  */
1422 static struct scsi_pkt *
1423 sf_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
1424     struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1425     int flags, int (*callback)(), caddr_t arg)
1426 {
1427         int kf;
1428         int failure = FALSE;
1429         struct sf_pkt *cmd;
1430         struct sf *sf = ADDR2SF(ap);
1431         struct sf_target *target = ADDR2TARGET(ap);
1432         struct sf_pkt   *new_cmd = NULL;
1433         struct fcal_packet      *fpkt;
1434         fc_frame_header_t       *hp;
1435         struct fcp_cmd *fcmd;
1436 
1437 
1438         /*
1439          * If pkt is NULL we must allocate a new packet; otherwise a
1440          * packet already exists and this is a DMA-allocation-only request.
1441          */
1442         if (pkt == NULL) {
1443 
1444                 /*
1445                  * First step of sf_scsi_init_pkt:  pkt allocation
1446                  */
1447                 if (cmdlen > FCP_CDB_SIZE) {
1448                         return (NULL);
1449                 }
1450 
1451                 kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;
1452 
1453                 if ((cmd = kmem_cache_alloc(sf->sf_pkt_cache, kf)) != NULL) {
1454                         /*
1455                          * Selective zeroing of the pkt.
1456                          */
1457 
1458                         cmd->cmd_flags = 0;
1459                         cmd->cmd_forw = 0;
1460                         cmd->cmd_back = 0;
1461                         cmd->cmd_next = 0;
1462                         cmd->cmd_pkt = (struct scsi_pkt *)((char *)cmd +
1463                             sizeof (struct sf_pkt) + sizeof (struct
1464                             fcal_packet));
1465                         cmd->cmd_fp_pkt = (struct fcal_packet *)((char *)cmd +
1466                             sizeof (struct sf_pkt));
1467                         cmd->cmd_fp_pkt->fcal_pkt_private = (opaque_t)cmd;
1468                         cmd->cmd_state = SF_STATE_IDLE;
1469                         cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
1470                         cmd->cmd_pkt->pkt_scbp = (opaque_t)cmd->cmd_scsi_scb;
1471                         cmd->cmd_pkt->pkt_comp    = NULL;
1472                         cmd->cmd_pkt->pkt_flags   = 0;
1473                         cmd->cmd_pkt->pkt_time    = 0;
1474                         cmd->cmd_pkt->pkt_resid   = 0;
1475                         cmd->cmd_pkt->pkt_reason = 0;
1476                         cmd->cmd_cdblen = (uchar_t)cmdlen;
1477                         cmd->cmd_scblen              = statuslen;
1478                         cmd->cmd_privlen     = tgtlen;
1479                         cmd->cmd_pkt->pkt_address = *ap;
1480 
1481                         /* zero pkt_private */
1482                         cmd->cmd_pkt->pkt_private =
1483                             cmd->cmd_pkt_private;
1484                         bzero((caddr_t)cmd->cmd_pkt->pkt_private,
1485                             PKT_PRIV_LEN);
1486                 } else {
1487                         failure = TRUE;
1488                 }
1489 
1490                 if (failure ||
1491                     (tgtlen > PKT_PRIV_LEN) ||
1492                     (statuslen > EXTCMDS_STATUS_SIZE)) {
1493                         if (!failure) {
1494                                 /* need to allocate more space */
1495                                 failure = sf_pkt_alloc_extern(sf, cmd,
1496                                     tgtlen, statuslen, kf);
1497                         }
1498                         if (failure) {
1499                                 return (NULL);
1500                         }
1501                 }
1502 
1503                 fpkt = cmd->cmd_fp_pkt;
1504                 if (cmd->cmd_block == NULL) {
1505 
1506                         /* allocate cmd/response pool buffers */
1507                         if (sf_cr_alloc(sf, cmd, callback) == DDI_FAILURE) {
1508                                 sf_pkt_destroy_extern(sf, cmd);
1509                                 return (NULL);
1510                         }
1511 
1512                         /* fill in the FC-AL packet */
1513                         fpkt->fcal_pkt_cookie = sf->sf_socp;
1514                         fpkt->fcal_pkt_comp = sf_cmd_callback;
1515                         fpkt->fcal_pkt_flags = 0;
1516                         fpkt->fcal_magic = FCALP_MAGIC;
1517                         fpkt->fcal_socal_request.sr_soc_hdr.sh_flags =
1518                             (ushort_t)(SOC_FC_HEADER |
1519                             sf->sf_sochandle->fcal_portno);
1520                         fpkt->fcal_socal_request.sr_soc_hdr.sh_class = 3;
1521                         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_count = 1;
1522                         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_flags = 0;
1523                         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_seqno = 0;
1524                         fpkt->fcal_socal_request.sr_dataseg[0].fc_base =
1525                             (uint32_t)cmd->cmd_dmac;
1526                         fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
1527                             sizeof (struct fcp_cmd);
1528                         fpkt->fcal_socal_request.sr_dataseg[1].fc_base =
1529                             (uint32_t)cmd->cmd_rsp_dmac;
1530                         fpkt->fcal_socal_request.sr_dataseg[1].fc_count =
1531                             FCP_MAX_RSP_IU_SIZE;
1532 
1533                         /* Fill in the Fibre Channel frame header */
1534                         hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
1535                         hp->r_ctl = R_CTL_COMMAND;
1536                         hp->type = TYPE_SCSI_FCP;
1537                         hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
1538                         hp->reserved1 = 0;
1539                         hp->seq_id = 0;
1540                         hp->df_ctl  = 0;
1541                         hp->seq_cnt = 0;
1542                         hp->ox_id = 0xffff;
1543                         hp->rx_id = 0xffff;
1544                         hp->ro = 0;
1545 
1546                         /* Establish the LUN */
1547                         bcopy((caddr_t)&target->sft_lun.b,
1548                             (caddr_t)&cmd->cmd_block->fcp_ent_addr,
1549                             FCP_LUN_SIZE);
1550                         *((int32_t *)&cmd->cmd_block->fcp_cntl) = 0;
1551                 }
1552                 cmd->cmd_pkt->pkt_cdbp = cmd->cmd_block->fcp_cdb;
1553 
1554                 mutex_enter(&target->sft_pkt_mutex);
1555 
1556                 target->sft_pkt_tail->cmd_forw = cmd;
1557                 cmd->cmd_back = target->sft_pkt_tail;
1558                 cmd->cmd_forw = (struct sf_pkt *)&target->sft_pkt_head;
1559                 target->sft_pkt_tail = cmd;
1560 
1561                 mutex_exit(&target->sft_pkt_mutex);
1562                 new_cmd = cmd;          /* for later cleanup if needed */
1563         } else {
1564                 /* pkt already exists -- just a request for DMA allocation */
1565                 cmd = PKT2CMD(pkt);
1566                 fpkt = cmd->cmd_fp_pkt;
1567         }
1568 
1569         /* zero only the cdb bytes we will actually use */
1570         bzero((caddr_t)cmd->cmd_pkt->pkt_cdbp, cmdlen);
1571 
1572         /*
1573          * Second step of sf_scsi_init_pkt:  dma allocation
1574          * Set up dma info
1575          */
1576         if ((bp != NULL) && (bp->b_bcount != 0)) {
1577                 int cmd_flags, dma_flags;
1578                 int rval = 0;
1579                 uint_t dmacookie_count;
1580 
1581                 /* there is a buffer and some data to transfer */
1582 
1583                 /* set up command and DMA flags */
1584                 cmd_flags = cmd->cmd_flags;
1585                 if (bp->b_flags & B_READ) {
1586                         /* a read */
1587                         cmd_flags &= ~CFLAG_DMASEND;
1588                         dma_flags = DDI_DMA_READ;
1589                 } else {
1590                         /* a write */
1591                         cmd_flags |= CFLAG_DMASEND;
1592                         dma_flags = DDI_DMA_WRITE;
1593                 }
1594                 if (flags & PKT_CONSISTENT) {
1595                         cmd_flags |= CFLAG_CMDIOPB;
1596                         dma_flags |= DDI_DMA_CONSISTENT;
1597                 }
1598 
1599                 /* ensure we have a DMA handle */
1600                 if (cmd->cmd_dmahandle == NULL) {
1601                         rval = ddi_dma_alloc_handle(sf->sf_dip,
1602                             sf->sf_sochandle->fcal_dmaattr, callback, arg,
1603                             &cmd->cmd_dmahandle);
1604                 }
1605 
1606                 if (rval == 0) {
1607                         /* bind our DMA handle to our buffer */
1608                         rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
1609                             dma_flags, callback, arg, &cmd->cmd_dmacookie,
1610                             &dmacookie_count);
1611                 }
1612 
1613                 if (rval != 0) {
1614                         /* DMA failure */
1615                         SF_DEBUG(2, (sf, CE_CONT, "ddi_dma_buf.. failed\n"));
1616                         switch (rval) {
1617                         case DDI_DMA_NORESOURCES:
1618                                 bioerror(bp, 0);
1619                                 break;
1620                         case DDI_DMA_BADATTR:
1621                         case DDI_DMA_NOMAPPING:
1622                                 bioerror(bp, EFAULT);
1623                                 break;
1624                         case DDI_DMA_TOOBIG:
1625                         default:
1626                                 bioerror(bp, EINVAL);
1627                                 break;
1628                         }
1629                         /* clear valid flag */
1630                         cmd->cmd_flags = cmd_flags & ~CFLAG_DMAVALID;
1631                         if (new_cmd != NULL) {
1632                                 /* destroy packet if we just created it */
1633                                 sf_scsi_destroy_pkt(ap, new_cmd->cmd_pkt);
1634                         }
1635                         return (NULL);
1636                 }
1637 
1638                 ASSERT(dmacookie_count == 1);
1639                 /* set up amt to transfer and set valid flag */
1640                 cmd->cmd_dmacount = bp->b_bcount;
1641                 cmd->cmd_flags = cmd_flags | CFLAG_DMAVALID;
1642 
1643                 ASSERT(cmd->cmd_dmahandle != NULL);
1644         }
1645 
1646         /* set up FC-AL packet */
1647         fcmd = cmd->cmd_block;
1648 
1649         if (cmd->cmd_flags & CFLAG_DMAVALID) {
1650                 if (cmd->cmd_flags & CFLAG_DMASEND) {
1651                         /* DMA write */
1652                         fcmd->fcp_cntl.cntl_read_data = 0;
1653                         fcmd->fcp_cntl.cntl_write_data = 1;
1654                         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
1655                             CQ_TYPE_IO_WRITE;
1656                 } else {
1657                         /* DMA read */
1658                         fcmd->fcp_cntl.cntl_read_data = 1;
1659                         fcmd->fcp_cntl.cntl_write_data = 0;
1660                         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
1661                             CQ_TYPE_IO_READ;
1662                 }
1663                 fpkt->fcal_socal_request.sr_dataseg[2].fc_base =
1664                     (uint32_t)cmd->cmd_dmacookie.dmac_address;
1665                 fpkt->fcal_socal_request.sr_dataseg[2].fc_count =
1666                     cmd->cmd_dmacookie.dmac_size;
1667                 fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3;
1668                 fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
1669                     cmd->cmd_dmacookie.dmac_size;
1670                 fcmd->fcp_data_len = cmd->cmd_dmacookie.dmac_size;
1671         } else {
1672                 /* not a read or write */
1673                 fcmd->fcp_cntl.cntl_read_data = 0;
1674                 fcmd->fcp_cntl.cntl_write_data = 0;
1675                 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_SIMPLE;
1676                 fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 2;
1677                 fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
1678                     sizeof (struct fcp_cmd);
1679                 fcmd->fcp_data_len = 0;
1680         }
1681         fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
1682 
1683         return (cmd->cmd_pkt);
1684 }
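
/*
 * Illustrative sketch (not compiled): sf_scsi_init_pkt() above is
 * normally reached through the SCSA entry points.  A target driver
 * sitting on top of this HBA would typically do something like the
 * following (devp, bp and my_completion_routine are hypothetical);
 * the call allocates the packet and binds DMA for bp, and the
 * matching scsi_destroy_pkt() ends up in sf_scsi_destroy_pkt() below.
 */
#if 0
        struct scsi_pkt *pkt;

        pkt = scsi_init_pkt(&devp->sd_address, NULL, bp, CDB_GROUP1,
            sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT,
            SLEEP_FUNC, NULL);
        if (pkt != NULL) {
                pkt->pkt_comp = my_completion_routine;  /* hypothetical */
                (void) scsi_transport(pkt);
                /* ... wait for completion, check pkt_reason ... */
                scsi_destroy_pkt(pkt);
        }
#endif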
1685 
1686 
1687 /*
1688  * destroy a SCSI packet -- called internally and by the transport
1689  */
1690 static void
1691 sf_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1692 {
1693         struct sf_pkt *cmd = PKT2CMD(pkt);
1694         struct sf *sf = ADDR2SF(ap);
1695         struct sf_target *target = ADDR2TARGET(ap);
1696         struct fcal_packet      *fpkt = cmd->cmd_fp_pkt;
1697 
1698 
1699         if (cmd->cmd_flags & CFLAG_DMAVALID) {
1700                 /* DMA was set up -- clean up */
1701                 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
1702                 cmd->cmd_flags ^= CFLAG_DMAVALID;
1703         }
1704 
1705         /* take this packet off the doubly-linked list */
1706         mutex_enter(&target->sft_pkt_mutex);
1707         cmd->cmd_back->cmd_forw = cmd->cmd_forw;
1708         cmd->cmd_forw->cmd_back = cmd->cmd_back;
1709         mutex_exit(&target->sft_pkt_mutex);
1710 
1711         fpkt->fcal_pkt_flags = 0;
1712         /* free the packet */
1713         if ((cmd->cmd_flags &
1714             (CFLAG_FREE | CFLAG_PRIVEXTERN | CFLAG_SCBEXTERN)) == 0) {
1715                 /* just a regular packet */
1716                 ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
1717                 cmd->cmd_flags = CFLAG_FREE;
1718                 kmem_cache_free(sf->sf_pkt_cache, (void *)cmd);
1719         } else {
1720                 /* a packet with extra memory */
1721                 sf_pkt_destroy_extern(sf, cmd);
1722         }
1723 }
1724 
1725 
1726 /*
1727  * called by transport to unbind DMA handle
1728  */
1729 /* ARGSUSED */
1730 static void
1731 sf_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
1732 {
1733         struct sf_pkt *cmd = PKT2CMD(pkt);
1734 
1735 
1736         if (cmd->cmd_flags & CFLAG_DMAVALID) {
1737                 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
1738                 cmd->cmd_flags ^= CFLAG_DMAVALID;
1739         }
1740 
1741 }
1742 
1743 
1744 /*
1745  * called by transport to synchronize CPU and I/O views of memory
1746  */
1747 /* ARGSUSED */
1748 static void
1749 sf_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1750 {
1751         struct sf_pkt *cmd = PKT2CMD(pkt);
1752 
1753 
1754         if (cmd->cmd_flags & CFLAG_DMAVALID) {
1755                 if (ddi_dma_sync(cmd->cmd_dmahandle, (off_t)0, (size_t)0,
1756                     (cmd->cmd_flags & CFLAG_DMASEND) ?
1757                     DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU) !=
1758                     DDI_SUCCESS) {
1759                         cmn_err(CE_WARN, "sf: sync pkt failed");
1760                 }
1761         }
1762 }
1763 
1764 
1765 /*
1766  * called by transport to register or cancel a reset notification
1767  * callback
1768  */
1769 static int
1770 sf_scsi_reset_notify(struct scsi_address *ap, int flag,
1771     void (*callback)(caddr_t), caddr_t arg)
1772 {
1773         struct sf       *sf = ADDR2SF(ap);
1774 
1775         return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
1776             &sf->sf_mutex, &sf->sf_reset_notify_listf));
1777 }
1778 
1779 
1780 /*
1781  * called by transport to build a name from the port WWN and LUN (except sun4u)
1782  */
1783 /* ARGSUSED */
1784 static int
1785 sf_scsi_get_name(struct scsi_device *sd, char *name, int len)
1786 {
1787         char tbuf[(FC_WWN_SIZE*2)+1];
1788         unsigned char wwn[FC_WWN_SIZE];
1789         int i, lun;
1790         dev_info_t *tgt_dip;
1791 
1792         tgt_dip = sd->sd_dev;
1793         i = sizeof (wwn);
1794         if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1795             DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, PORT_WWN_PROP,
1796             (caddr_t)&wwn, &i) != DDI_SUCCESS) {
1797                 name[0] = '\0';
1798                 return (0);
1799         }
1800         i = sizeof (lun);
1801         if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1802             DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "lun",
1803             (caddr_t)&lun, &i) != DDI_SUCCESS) {
1804                 name[0] = '\0';
1805                 return (0);
1806         }
1807         for (i = 0; i < FC_WWN_SIZE; i++)
1808                 (void) sprintf(&tbuf[i << 1], "%02x", wwn[i]);
1809         (void) sprintf(name, "w%s,%x", tbuf, lun);
1810         return (1);
1811 }
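
/*
 * For example (illustrative WWN), a target with port WWN
 * 21:00:00:20:37:01:02:03 at LUN 0 yields the unit-address string
 * "w2100002037010203,0".
 */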
1812 
1813 
1814 /*
1815  * called by transport to get target soft AL-PA (except sun4u)
1816  */
1817 /* ARGSUSED */
1818 static int
1819 sf_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
1820 {
1821         struct sf_target *target = ADDR2TARGET(&sd->sd_address);
1822 
1823         if (target == NULL)
1824                 return (0);
1825 
1826         (void) sprintf(name, "%x", target->sft_al_pa);
1827         return (1);
1828 }
1829 
1830 
1831 /*
1832  * add to the command/response buffer pool for this sf instance
1833  */
1834 static int
1835 sf_add_cr_pool(struct sf *sf)
1836 {
1837         int             cmd_buf_size;
1838         size_t          real_cmd_buf_size;
1839         int             rsp_buf_size;
1840         size_t          real_rsp_buf_size;
1841         uint_t          i, ccount;
1842         struct sf_cr_pool       *ptr;
1843         struct sf_cr_free_elem *cptr;
1844         caddr_t dptr, eptr;
1845         ddi_dma_cookie_t        cmd_cookie;
1846         ddi_dma_cookie_t        rsp_cookie;
1847         int             cmd_bound = FALSE, rsp_bound = FALSE;
1848 
1849 
1850         /* allocate room for the pool */
1851         if ((ptr = kmem_zalloc(sizeof (struct sf_cr_pool), KM_NOSLEEP)) ==
1852             NULL) {
1853                 return (DDI_FAILURE);
1854         }
1855 
1856         /* allocate a DMA handle for the command pool */
1857         if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
1858             DDI_DMA_DONTWAIT, NULL, &ptr->cmd_dma_handle) != DDI_SUCCESS) {
1859                 goto fail;
1860         }
1861 
1862         /*
1863          * Get a piece of memory in which to put commands
1864          */
1865         cmd_buf_size = (sizeof (struct fcp_cmd) * SF_ELEMS_IN_POOL + 7) & ~7;
1866         if (ddi_dma_mem_alloc(ptr->cmd_dma_handle, cmd_buf_size,
1867             sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
1868             DDI_DMA_DONTWAIT, NULL, (caddr_t *)&ptr->cmd_base,
1869             &real_cmd_buf_size, &ptr->cmd_acc_handle) != DDI_SUCCESS) {
1870                 goto fail;
1871         }
1872 
1873         /* bind the DMA handle to an address */
1874         if (ddi_dma_addr_bind_handle(ptr->cmd_dma_handle, NULL,
1875             ptr->cmd_base, real_cmd_buf_size,
1876             DDI_DMA_WRITE | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
1877             NULL, &cmd_cookie, &ccount) != DDI_DMA_MAPPED) {
1878                 goto fail;
1879         }
1880         cmd_bound = TRUE;
1881         /* ensure only one cookie was allocated */
1882         if (ccount != 1) {
1883                 goto fail;
1884         }
1885 
1886         /* allocate a DMA handle for the response pool */
1887         if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
1888             DDI_DMA_DONTWAIT, NULL, &ptr->rsp_dma_handle) != DDI_SUCCESS) {
1889                 goto fail;
1890         }
1891 
1892         /*
1893          * Get a piece of memory in which to put responses
1894          */
1895         rsp_buf_size = FCP_MAX_RSP_IU_SIZE * SF_ELEMS_IN_POOL;
1896         if (ddi_dma_mem_alloc(ptr->rsp_dma_handle, rsp_buf_size,
1897             sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
1898             DDI_DMA_DONTWAIT, NULL, (caddr_t *)&ptr->rsp_base,
1899             &real_rsp_buf_size, &ptr->rsp_acc_handle) != DDI_SUCCESS) {
1900                 goto fail;
1901         }
1902 
1903         /* bind the DMA handle to an address */
1904         if (ddi_dma_addr_bind_handle(ptr->rsp_dma_handle, NULL,
1905             ptr->rsp_base, real_rsp_buf_size,
1906             DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
1907             NULL, &rsp_cookie, &ccount) != DDI_DMA_MAPPED) {
1908                 goto fail;
1909         }
1910         rsp_bound = TRUE;
1911         /* ensure only one cookie was allocated */
1912         if (ccount != 1) {
1913                 goto fail;
1914         }
1915 
1916         /*
1917          * Generate a (cmd/rsp structure) free list
1918          */
1919         /* round the command-buffer pointer up to an 8-byte boundary */
1920         dptr = (caddr_t)(((uintptr_t)(ptr->cmd_base) + 7) & ~7);
1921         /* keep track of actual size after moving pointer */
1922         real_cmd_buf_size -= (dptr - ptr->cmd_base);
1923         eptr = ptr->rsp_base;
1924 
1925         /* set actual total number of entries */
1926         ptr->ntot = min((real_cmd_buf_size / sizeof (struct fcp_cmd)),
1927             (real_rsp_buf_size / FCP_MAX_RSP_IU_SIZE));
1928         ptr->nfree = ptr->ntot;
1929         ptr->free = (struct sf_cr_free_elem *)ptr->cmd_base;
1930         ptr->sf = sf;
1931 
1932         /* set up DMA for each pair of entries */
1933         i = 0;
1934         while (i < ptr->ntot) {
1935                 cptr = (struct sf_cr_free_elem *)dptr;
1936                 dptr += sizeof (struct fcp_cmd);
1937 
1938                 cptr->next = (struct sf_cr_free_elem *)dptr;
1939                 cptr->rsp = eptr;
1940 
1941                 cptr->cmd_dmac = cmd_cookie.dmac_address +
1942                     (uint32_t)((caddr_t)cptr - ptr->cmd_base);
1943 
1944                 cptr->rsp_dmac = rsp_cookie.dmac_address +
1945                     (uint32_t)((caddr_t)eptr - ptr->rsp_base);
1946 
1947                 eptr += FCP_MAX_RSP_IU_SIZE;
1948                 i++;
1949         }
1950 
1951         /* terminate the list */
1952         cptr->next = NULL;
1953 
1954         /* add this list at front of current one */
1955         mutex_enter(&sf->sf_cr_mutex);
1956         ptr->next = sf->sf_cr_pool;
1957         sf->sf_cr_pool = ptr;
1958         sf->sf_cr_pool_cnt++;
1959         mutex_exit(&sf->sf_cr_mutex);
1960 
1961         return (DDI_SUCCESS);
1962 
1963 fail:
1964         /* we failed so clean up */
1965         if (ptr->cmd_dma_handle != NULL) {
1966                 if (cmd_bound) {
1967                         (void) ddi_dma_unbind_handle(ptr->cmd_dma_handle);
1968                 }
1969                 ddi_dma_free_handle(&ptr->cmd_dma_handle);
1970         }
1971 
1972         if (ptr->rsp_dma_handle != NULL) {
1973                 if (rsp_bound) {
1974                         (void) ddi_dma_unbind_handle(ptr->rsp_dma_handle);
1975                 }
1976                 ddi_dma_free_handle(&ptr->rsp_dma_handle);
1977         }
1978 
1979         if (ptr->cmd_base != NULL) {
1980                 ddi_dma_mem_free(&ptr->cmd_acc_handle);
1981         }
1982 
1983         if (ptr->rsp_base != NULL) {
1984                 ddi_dma_mem_free(&ptr->rsp_acc_handle);
1985         }
1986 
1987         kmem_free((caddr_t)ptr, sizeof (struct sf_cr_pool));
1988         return (DDI_FAILURE);
1989 }
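
/*
 * Layout of one cmd/response pool as built above: the command buffer
 * holds roughly SF_ELEMS_IN_POOL fcp_cmd slots and the response buffer
 * holds the matching FCP_MAX_RSP_IU_SIZE response slots.  While a slot
 * pair is free, its fcp_cmd slot is overlaid with a small
 * sf_cr_free_elem that records the pair's DMA addresses and links it
 * onto the pool's free list:
 *
 *      cmd_base: [elem 0][elem 1] ... [elem ntot-1]    (fcp_cmd sized)
 *      rsp_base: [rsp 0 ][rsp 1 ] ... [rsp ntot-1 ]    (rsp IU sized)
 */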
1990 
1991 
1992 /*
1993  * allocate a command/response buffer from the pool, allocating more
1994  * in the pool as needed
1995  */
1996 static int
1997 sf_cr_alloc(struct sf *sf, struct sf_pkt *cmd, int (*func)())
1998 {
1999         struct sf_cr_pool *ptr;
2000         struct sf_cr_free_elem *cptr;
2001 
2002 
2003         mutex_enter(&sf->sf_cr_mutex);
2004 
2005 try_again:
2006 
2007         /* find a free buffer in the existing pool */
2008         ptr = sf->sf_cr_pool;
2009         while (ptr != NULL) {
2010                 if (ptr->nfree != 0) {
2011                         ptr->nfree--;
2012                         break;
2013                 } else {
2014                         ptr = ptr->next;
2015                 }
2016         }
2017 
2018         /* did we find a free buffer ? */
2019         if (ptr != NULL) {
2020                 /* we found a free buffer -- take it off the free list */
2021                 cptr = ptr->free;
2022                 ptr->free = cptr->next;
2023                 mutex_exit(&sf->sf_cr_mutex);
2024                 /* set up the command to use the buffer pair */
2025                 cmd->cmd_block = (struct fcp_cmd *)cptr;
2026                 cmd->cmd_dmac = cptr->cmd_dmac;
2027                 cmd->cmd_rsp_dmac = cptr->rsp_dmac;
2028                 cmd->cmd_rsp_block = (struct fcp_rsp *)cptr->rsp;
2029                 cmd->cmd_cr_pool = ptr;
2030                 return (DDI_SUCCESS);           /* success */
2031         }
2032 
2033         /* no free buffer available -- can we allocate more ? */
2034         if (sf->sf_cr_pool_cnt < SF_CR_POOL_MAX) {
2035                 /* we need to allocate more buffer pairs */
2036                 if (sf->sf_cr_flag) {
2037                         /* somebody already allocating for this instance */
2038                         if (func == SLEEP_FUNC) {
2039                                 /* user wants to wait */
2040                                 cv_wait(&sf->sf_cr_cv, &sf->sf_cr_mutex);
2041                                 /* we've been woken so go try again */
2042                                 goto try_again;
2043                         }
2044                         /* user does not want to wait */
2045                         mutex_exit(&sf->sf_cr_mutex);
2046                         sf->sf_stats.cralloc_failures++;
2047                         return (DDI_FAILURE);   /* give up */
2048                 }
2049                 /* set flag saying we're allocating */
2050                 sf->sf_cr_flag = 1;
2051                 mutex_exit(&sf->sf_cr_mutex);
2052                 /* add to our pool */
2053                 if (sf_add_cr_pool(sf) != DDI_SUCCESS) {
2054                         /* couldn't add to our pool for some reason */
2055                         mutex_enter(&sf->sf_cr_mutex);
2056                         sf->sf_cr_flag = 0;
2057                         cv_broadcast(&sf->sf_cr_cv);
2058                         mutex_exit(&sf->sf_cr_mutex);
2059                         sf->sf_stats.cralloc_failures++;
2060                         return (DDI_FAILURE);   /* give up */
2061                 }
2062                 /*
2063                  * clear the flag saying we're allocating and wake up all
2064                  * others that care
2065                  */
2066                 mutex_enter(&sf->sf_cr_mutex);
2067                 sf->sf_cr_flag = 0;
2068                 cv_broadcast(&sf->sf_cr_cv);
2069                 /* now that we have more buffers try again */
2070                 goto try_again;
2071         }
2072 
2073         /* we don't have room to allocate any more buffers */
2074         mutex_exit(&sf->sf_cr_mutex);
2075         sf->sf_stats.cralloc_failures++;
2076         return (DDI_FAILURE);                   /* give up */
2077 }
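
/*
 * Note that a buffer pair obtained here stays attached to its sf_pkt
 * for the life of the cached packet: sf_scsi_init_pkt() calls
 * sf_cr_alloc() only when cmd->cmd_block is still NULL, and the pair
 * is handed back to its pool by sf_cr_free(), e.g. from the kmem
 * cache destructor below when the packet is finally reclaimed.
 */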
2078 
2079 
2080 /*
2081  * free a cmd/response buffer pair in our pool
2082  */
2083 static void
2084 sf_cr_free(struct sf_cr_pool *cp, struct sf_pkt *cmd)
2085 {
2086         struct sf *sf = cp->sf;
2087         struct sf_cr_free_elem *elem;
2088 
2089         elem = (struct sf_cr_free_elem *)cmd->cmd_block;
2090         elem->rsp = (caddr_t)cmd->cmd_rsp_block;
2091         elem->cmd_dmac = cmd->cmd_dmac;
2092         elem->rsp_dmac = cmd->cmd_rsp_dmac;
2093 
2094         mutex_enter(&sf->sf_cr_mutex);
2095         cp->nfree++;
2096         ASSERT(cp->nfree <= cp->ntot);
2097 
2098         elem->next = cp->free;
2099         cp->free = elem;
2100         mutex_exit(&sf->sf_cr_mutex);
2101 }
2102 
2103 
2104 /*
2105  * free one completely unused cmd/response buffer pool, if any exists
2106  */
2107 static void
2108 sf_crpool_free(struct sf *sf)
2109 {
2110         struct sf_cr_pool *cp, *prev;
2111 
2112         prev = NULL;
2113         mutex_enter(&sf->sf_cr_mutex);
2114         cp = sf->sf_cr_pool;
2115         while (cp != NULL) {
2116                 if (cp->nfree == cp->ntot) {
2117                         if (prev != NULL) {
2118                                 prev->next = cp->next;
2119                         } else {
2120                                 sf->sf_cr_pool = cp->next;
2121                         }
2122                         sf->sf_cr_pool_cnt--;
2123                         mutex_exit(&sf->sf_cr_mutex);
2124 
2125                         (void) ddi_dma_unbind_handle(cp->cmd_dma_handle);
2126                         ddi_dma_free_handle(&cp->cmd_dma_handle);
2127                         (void) ddi_dma_unbind_handle(cp->rsp_dma_handle);
2128                         ddi_dma_free_handle(&cp->rsp_dma_handle);
2129                         ddi_dma_mem_free(&cp->cmd_acc_handle);
2130                         ddi_dma_mem_free(&cp->rsp_acc_handle);
2131                         kmem_free((caddr_t)cp, sizeof (struct sf_cr_pool));
2132                         return;
2133                 }
2134                 prev = cp;
2135                 cp = cp->next;
2136         }
2137         mutex_exit(&sf->sf_cr_mutex);
2138 }
2139 
2140 
2141 /* ARGSUSED */
2142 static int
2143 sf_kmem_cache_constructor(void *buf, void *arg, int size)
2144 {
2145         struct sf_pkt *cmd = buf;
2146 
2147         mutex_init(&cmd->cmd_abort_mutex, NULL, MUTEX_DRIVER, NULL);
2148         cmd->cmd_block = NULL;
2149         cmd->cmd_dmahandle = NULL;
2150         return (0);
2151 }
2152 
2153 
2154 /* ARGSUSED */
2155 static void
2156 sf_kmem_cache_destructor(void *buf, void *size)
2157 {
2158         struct sf_pkt *cmd = buf;
2159 
2160         if (cmd->cmd_dmahandle != NULL) {
2161                 ddi_dma_free_handle(&cmd->cmd_dmahandle);
2162         }
2163 
2164         if (cmd->cmd_block != NULL) {
2165                 sf_cr_free(cmd->cmd_cr_pool, cmd);
2166         }
2167         mutex_destroy(&cmd->cmd_abort_mutex);
2168 }
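
/*
 * Illustrative sketch (not compiled): the sf_pkt_cache used with the
 * constructor/destructor above is presumably created during attach
 * with a call along these lines.  The buffer size shown is only
 * approximate -- sf_scsi_init_pkt() lays out an sf_pkt, a fcal_packet
 * and a scsi_pkt back to back within each cache object.
 */
#if 0
        sf->sf_pkt_cache = kmem_cache_create("sf_pkt_cache",
            sizeof (struct sf_pkt) + sizeof (struct fcal_packet) +
            scsi_pkt_size(), 8,
            sf_kmem_cache_constructor, sf_kmem_cache_destructor,
            NULL, NULL, NULL, 0);
#endif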
2169 
2170 
2171 /*
2172  * called by transport when a state change occurs
2173  */
2174 static void
2175 sf_statec_callback(void *arg, int msg)
2176 {
2177         struct sf *sf = (struct sf *)arg;
2178         struct sf_target        *target;
2179         int i;
2180         struct sf_pkt *cmd;
2181         struct scsi_pkt *pkt;
2182 
2183 
2184 
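        /*
         * Three transport events are handled here: loop online
         * (FCAL_STATUS_LOOP_ONLINE), loop offline due to an error
         * (FCAL_STATUS_ERR_OFFLINE), and a transport reset
         * (FCAL_STATE_RESET).  Anything else is ignored.
         */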
2185         switch (msg) {
2186 
2187         case FCAL_STATUS_LOOP_ONLINE: {
2188                 uchar_t         al_pa;          /* to save AL-PA */
2189                 int             ret;            /* ret value from getmap */
2190                 int             lip_cnt;        /* to save current count */
2191                 int             cnt;            /* map length */
2192 
2193                 /*
2194                  * the loop has gone online
2195                  */
2196                 SF_DEBUG(1, (sf, CE_CONT, "sf%d: loop online\n",
2197                     ddi_get_instance(sf->sf_dip)));
2198                 mutex_enter(&sf->sf_mutex);
2199                 sf->sf_lip_cnt++;
2200                 sf->sf_state = SF_STATE_ONLINING;
2201                 mutex_exit(&sf->sf_mutex);
2202 
2203                 /* scan each target hash queue */
2204                 for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
2205                         target = sf->sf_wwn_lists[i];
2206                         while (target != NULL) {
2207                                 /*
2208                                  * for each target, if it's not offline then
2209                                  * mark it as busy
2210                                  */
2211                                 mutex_enter(&target->sft_mutex);
2212                                 if (!(target->sft_state & SF_TARGET_OFFLINE))
2213                                         target->sft_state |= (SF_TARGET_BUSY
2214                                             | SF_TARGET_MARK);
2215 #ifdef DEBUG
2216                                 /*
2217                                  * for debugging, print out info on any
2218                                  * pending commands (left hanging)
2219                                  */
2220                                 cmd = target->sft_pkt_head;
2221                                 while (cmd != (struct sf_pkt *)&target->
2222                                     sft_pkt_head) {
2223                                         if (cmd->cmd_state ==
2224                                             SF_STATE_ISSUED) {
2225                                                 SF_DEBUG(1, (sf, CE_CONT,
2226                                                     "cmd 0x%p pending "
2227                                                     "after lip\n",
2228                                                     (void *)cmd->cmd_fp_pkt));
2229                                         }
2230                                         cmd = cmd->cmd_forw;
2231                                 }
2232 #endif
2233                                 mutex_exit(&target->sft_mutex);
2234                                 target = target->sft_next;
2235                         }
2236                 }
2237 
2238                 /*
2239                  * since the loop has just gone online get a new map from
2240                  * the transport
2241                  */
2242                 if ((ret = soc_get_lilp_map(sf->sf_sochandle, sf->sf_socp,
2243                     sf->sf_sochandle->fcal_portno, (uint32_t)sf->
2244                     sf_lilp_dmacookie.dmac_address, 1)) != FCAL_SUCCESS) {
2245                         if (sf_core && (sf_core & SF_CORE_LILP_FAILED)) {
2246                                 (void) soc_take_core(sf->sf_sochandle,
2247                                     sf->sf_socp);
2248                                 sf_core = 0;
2249                         }
2250                         sf_log(sf, CE_WARN,
2251                             "!soc lilp map failed status=0x%x\n", ret);
2252                         mutex_enter(&sf->sf_mutex);
2253                         sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
2254                         sf->sf_lip_cnt++;
2255                         sf->sf_state = SF_STATE_OFFLINE;
2256                         mutex_exit(&sf->sf_mutex);
2257                         return;
2258                 }
2259 
2260                 /* ensure consistent view of DMA memory */
2261                 (void) ddi_dma_sync(sf->sf_lilp_dmahandle, (off_t)0, (size_t)0,
2262                     DDI_DMA_SYNC_FORKERNEL);
2263 
2264                 /* how many entries in map ? */
2265                 cnt = sf->sf_lilp_map->lilp_length;
2266                 if (cnt >= SF_MAX_LILP_ENTRIES) {
2267                         sf_log(sf, CE_WARN, "invalid lilp map\n");
2268                         return;
2269                 }
2270 
2271                 mutex_enter(&sf->sf_mutex);
2272                 sf->sf_device_count = cnt - 1;
2273                 sf->sf_al_pa = sf->sf_lilp_map->lilp_myalpa;
2274                 lip_cnt = sf->sf_lip_cnt;
2275                 al_pa = sf->sf_al_pa;
2276 
2277                 SF_DEBUG(1, (sf, CE_CONT,
2278                     "!lilp map has %d entries, al_pa is %x\n", cnt, al_pa));
2279 
2280                 /*
2281                  * the last entry of the map is commonly my own AL-PA; check
2282                  * for that, and if so we have one less entry to look at
2283                  */
2284                 if (sf->sf_lilp_map->lilp_alpalist[cnt-1] == al_pa) {
2285                         cnt--;
2286                 }
2287                 /* If we didn't get a valid loop map, enable all targets */
2288                 if (sf->sf_lilp_map->lilp_magic == FCAL_BADLILP_MAGIC) {
2289                         for (i = 0; i < sizeof (sf_switch_to_alpa); i++)
2290                                 sf->sf_lilp_map->lilp_alpalist[i] =
2291                                     sf_switch_to_alpa[i];
2292                         cnt = i;
2293                         sf->sf_device_count = cnt - 1;
2294                 }
2295                 if (sf->sf_device_count == 0) {
2296                         sf_finish_init(sf, lip_cnt);
2297                         mutex_exit(&sf->sf_mutex);
2298                         break;
2299                 }
2300                 mutex_exit(&sf->sf_mutex);
2301 
2302                 SF_DEBUG(2, (sf, CE_WARN,
2303                     "!statec_callback: starting with %d targets\n",
2304                     sf->sf_device_count));
2305 
2306                 /* scan loop map, logging into all ports (except mine) */
2307                 for (i = 0; i < cnt; i++) {
2308                         SF_DEBUG(1, (sf, CE_CONT,
2309                             "!lilp map entry %d = %x,%x\n", i,
2310                             sf->sf_lilp_map->lilp_alpalist[i],
2311                             sf_alpa_to_switch[
2312                             sf->sf_lilp_map->lilp_alpalist[i]]));
2313                         /* is this entry for somebody else ? */
2314                         if (sf->sf_lilp_map->lilp_alpalist[i] != al_pa) {
2315                                 /* do a PLOGI to this port */
2316                                 if (!sf_login(sf, LA_ELS_PLOGI,
2317                                     sf->sf_lilp_map->lilp_alpalist[i],
2318                                     sf->sf_lilp_map->lilp_alpalist[cnt-1],
2319                                     lip_cnt)) {
2320                                         /* a problem logging in */
2321                                         mutex_enter(&sf->sf_mutex);
2322                                         if (lip_cnt == sf->sf_lip_cnt) {
2323                                                 /*
2324                                                  * problem not from a new LIP
2325                                                  */
2326                                                 sf->sf_device_count--;
2327                                                 ASSERT(sf->sf_device_count
2328                                                     >= 0);
2329                                                 if (sf->sf_device_count == 0) {
2330                                                         sf_finish_init(sf,
2331                                                             lip_cnt);
2332                                                 }
2333                                         }
2334                                         mutex_exit(&sf->sf_mutex);
2335                                 }
2336                         }
2337                 }
2338                 break;
2339         }
2340 
2341         case FCAL_STATUS_ERR_OFFLINE:
2342                 /*
2343                  * loop has gone offline due to an error
2344                  */
2345                 SF_DEBUG(1, (sf, CE_CONT, "sf%d: loop offline\n",
2346                     ddi_get_instance(sf->sf_dip)));
2347                 mutex_enter(&sf->sf_mutex);
2348                 sf->sf_lip_cnt++;
2349                 sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
2350                 if (!sf->sf_online_timer) {
2351                         sf->sf_online_timer = sf_watchdog_time +
2352                             SF_ONLINE_TIMEOUT;
2353                 }
2354                 /*
2355                  * if we are suspended, preserve the SF_STATE_SUSPENDED flag,
2356                  * since throttling logic in sf_watch() depends on
2357                  * preservation of this flag while device is suspended
2358                  */
2359                 if (sf->sf_state & SF_STATE_SUSPENDED) {
2360                         sf->sf_state |= SF_STATE_OFFLINE;
2361                         SF_DEBUG(1, (sf, CE_CONT,
2362                             "sf_statec_callback, sf%d: "
2363                             "got FCAL_STATE_OFFLINE during DDI_SUSPEND\n",
2364                             ddi_get_instance(sf->sf_dip)));
2365                 } else {
2366                         sf->sf_state = SF_STATE_OFFLINE;
2367                 }
2368 
2369                 /* scan each possible target on the loop */
2370                 for (i = 0; i < sf_max_targets; i++) {
2371                         target = sf->sf_targets[i];
2372                         while (target != NULL) {
2373                                 mutex_enter(&target->sft_mutex);
2374                                 if (!(target->sft_state & SF_TARGET_OFFLINE))
2375                                         target->sft_state |= (SF_TARGET_BUSY
2376                                             | SF_TARGET_MARK);
2377                                 mutex_exit(&target->sft_mutex);
2378                                 target = target->sft_next_lun;
2379                         }
2380                 }
2381                 mutex_exit(&sf->sf_mutex);
2382                 break;
2383 
2384         case FCAL_STATE_RESET: {
2385                 struct sf_els_hdr       *privp; /* ptr to private list */
2386                 struct sf_els_hdr       *tmpp1; /* tmp prev hdr ptr */
2387                 struct sf_els_hdr       *tmpp2; /* tmp next hdr ptr */
2388                 struct sf_els_hdr       *head;  /* to save our private list */
2389                 struct fcal_packet      *fpkt;  /* ptr to pkt in hdr */
2390 
2391                 /*
2392                  * a transport reset
2393                  */
2394                 SF_DEBUG(1, (sf, CE_CONT, "!sf%d: soc reset\n",
2395                     ddi_get_instance(sf->sf_dip)));
2396                 tmpp1 = head = NULL;
2397                 mutex_enter(&sf->sf_mutex);
2398                 sf->sf_lip_cnt++;
2399                 sf->sf_timer = sf_watchdog_time + SF_RESET_TIMEOUT;
2400                 /*
2401                  * if we are suspended, preserve the SF_STATE_SUSPENDED flag,
2402                  * since throttling logic in sf_watch() depends on
2403                  * preservation of this flag while device is suspended
2404                  */
2405                 if (sf->sf_state & SF_STATE_SUSPENDED) {
2406                         sf->sf_state |= SF_STATE_OFFLINE;
2407                         SF_DEBUG(1, (sf, CE_CONT,
2408                             "sf_statec_callback, sf%d: "
2409                             "got FCAL_STATE_RESET during DDI_SUSPEND\n",
2410                             ddi_get_instance(sf->sf_dip)));
2411                 } else {
2412                         sf->sf_state = SF_STATE_OFFLINE;
2413                 }
2414 
2415                 /*
2416                  * scan each possible target on the loop, looking for targets
2417                  * that need their remove-event callbacks run
2418                  */
2419                 for (i = 0; i < sf_max_targets; i++) {
2420                         target = sf->sf_targets[i];
2421                         while (target != NULL) {
2422                                 if (!(target->sft_state & SF_TARGET_OFFLINE)) {
2423                                         target->sft_state |= (SF_TARGET_BUSY
2424                                             | SF_TARGET_MARK);
2425                                         mutex_exit(&sf->sf_mutex);
2426                                         /*
2427                                          * run remove event callbacks for lun
2428                                          *
2429                                          * There is a nasty race condition
2430                                          * here because we drop this mutex to
2431                                          * run the callback and then expect
2432                                          * the linked list to be unchanged.
2433                                          */
2434                                         (void) ndi_event_retrieve_cookie(
2435                                             sf->sf_event_hdl, target->sft_dip,
2436                                             FCAL_REMOVE_EVENT, &sf_remove_eid,
2437                                             NDI_EVENT_NOPASS);
2438                                         (void) ndi_event_run_callbacks(
2439                                             sf->sf_event_hdl,
2440                                             target->sft_dip,
2441                                             sf_remove_eid, NULL);
2442                                         mutex_enter(&sf->sf_mutex);
2443                                 }
2444                                 target = target->sft_next_lun;
2445                         }
2446                 }
2447 
2448                 /*
2449                  * scan for ELS commands that are in transport, not complete,
2450                  * and have a valid timeout, building a private list
2451                  */
2452                 privp = sf->sf_els_list;
2453                 while (privp != NULL) {
2454                         fpkt = privp->fpkt;
2455                         if ((fpkt->fcal_cmd_state & FCAL_CMD_IN_TRANSPORT) &&
2456                             (!(fpkt->fcal_cmd_state & FCAL_CMD_COMPLETE)) &&
2457                             (privp->timeout != SF_INVALID_TIMEOUT)) {
2458                                 /*
2459                                  * cmd in transport && not complete &&
2460                                  * timeout valid
2461                                  *
2462                                  * move this entry from ELS input list to our
2463                                  * private list
2464                                  */
2465 
2466                                 tmpp2 = privp->next; /* save ptr to next */
2467 
2468                                 /* push this on private list head */
2469                                 privp->next = head;
2470                                 head = privp;
2471 
2472                                 /* remove this entry from input list */
2473                                 if (tmpp1 != NULL) {
2474                                         /*
2475                                          * remove this entry from somewhere in
2476                                          * the middle of the list
2477                                          */
2478                                         tmpp1->next = tmpp2;
2479                                         if (tmpp2 != NULL) {
2480                                                 tmpp2->prev = tmpp1;
2481                                         }
2482                                 } else {
2483                                         /*
2484                                          * remove this entry from the head
2485                                          * of the list
2486                                          */
2487                                         sf->sf_els_list = tmpp2;
2488                                         if (tmpp2 != NULL) {
2489                                                 tmpp2->prev = NULL;
2490                                         }
2491                                 }
2492                                 privp = tmpp2;  /* skip to next entry */
2493                         } else {
2494                                 tmpp1 = privp;  /* save ptr to prev entry */
2495                                 privp = privp->next; /* skip to next entry */
2496                         }
2497                 }
2498 
2499                 mutex_exit(&sf->sf_mutex);
2500 
2501                 /*
2502                  * for each cmd on our private list, free the ELS packet
2503                  * associated with it
2504                  */
2505                 privp = head;
2506                 while (privp != NULL) {
2507                         fpkt = privp->fpkt;
2508                         privp = privp->next;
2509                         sf_els_free(fpkt);
2510                 }
2511 
2512                 /*
2513                  * scan for commands from each possible target
2514                  */
2515                 for (i = 0; i < sf_max_targets; i++) {
2516                         target = sf->sf_targets[i];
2517                         while (target != NULL) {
2518                                 /*
2519                                  * scan all active commands for this target,
2520                                  * looking for commands that have been issued,
2521                                  * are in transport, and are not yet complete
2522                                  * (so we can terminate them because of the
2523                                  * reset)
2524                                  */
2525                                 mutex_enter(&target->sft_pkt_mutex);
2526                                 cmd = target->sft_pkt_head;
2527                                 while (cmd != (struct sf_pkt *)&target->
2528                                     sft_pkt_head) {
2529                                         fpkt = cmd->cmd_fp_pkt;
2530                                         mutex_enter(&cmd->cmd_abort_mutex);
2531                                         if ((cmd->cmd_state ==
2532                                             SF_STATE_ISSUED) &&
2533                                             (fpkt->fcal_cmd_state &
2534                                             FCAL_CMD_IN_TRANSPORT) &&
2535                                             (!(fpkt->fcal_cmd_state &
2536                                             FCAL_CMD_COMPLETE))) {
2537                                                 /* a command to be reset */
2538                                                 pkt = cmd->cmd_pkt;
2539                                                 pkt->pkt_reason = CMD_RESET;
2540                                                 pkt->pkt_statistics |=
2541                                                     STAT_BUS_RESET;
2542                                                 cmd->cmd_state = SF_STATE_IDLE;
2543                                                 mutex_exit(&cmd->
2544                                                     cmd_abort_mutex);
2545                                                 mutex_exit(&target->
2546                                                     sft_pkt_mutex);
2547                                                 if (pkt->pkt_comp != NULL) {
2548                                                         (*pkt->pkt_comp)(pkt);
2549                                                 }
2550                                                 mutex_enter(&target->
2551                                                     sft_pkt_mutex);
2552                                                 cmd = target->sft_pkt_head;
2553                                         } else {
2554                                                 mutex_exit(&cmd->
2555                                                     cmd_abort_mutex);
2556                                                 /* get next command */
2557                                                 cmd = cmd->cmd_forw;
2558                                         }
2559                                 }
2560                                 mutex_exit(&target->sft_pkt_mutex);
2561                                 target = target->sft_next_lun;
2562                         }
2563                 }
2564 
2565                 /*
2566                  * drain this instance's queue of pending commands, resetting
2567                  * all of them
2568                  */
2569                 mutex_enter(&sf->sf_mutex);
2570                 cmd = sf->sf_pkt_head;
2571                 sf->sf_pkt_head = NULL;
2572                 mutex_exit(&sf->sf_mutex);
2573 
2574                 while (cmd != NULL) {
2575                         pkt = cmd->cmd_pkt;
2576                         cmd = cmd->cmd_next;
2577                         pkt->pkt_reason = CMD_RESET;
2578                         pkt->pkt_statistics |= STAT_BUS_RESET;
2579                         if (pkt->pkt_comp != NULL) {
2580                                 (*pkt->pkt_comp)(pkt);
2581                         }
2582                 }
2583                 break;
2584         }
2585 
2586         default:
2587                 break;
2588         }
2589 }
2590 
2591 
2592 /*
2593  * called to send a PLOGI (N_port login) or LOGO ELS request to a
2594  * destination ID, returning TRUE upon success, else returning FALSE
2595  */
2596 static int
2597 sf_login(struct sf *sf, uchar_t els_code, uchar_t dest_id, uint_t arg1,
2598     int lip_cnt)
2599 {
2600         struct la_els_logi      *logi;
2601         struct  sf_els_hdr      *privp;
2602 
2603 
2604         if (sf_els_alloc(sf, dest_id, sizeof (struct sf_els_hdr),
2605             sizeof (union sf_els_cmd), sizeof (union sf_els_rsp),
2606             (caddr_t *)&privp, (caddr_t *)&logi) == NULL) {
2607                 sf_log(sf, CE_WARN, "Cannot allocate PLOGI for target %x "
2608                     "due to DVMA shortage.\n", sf_alpa_to_switch[dest_id]);
2609                 return (FALSE);
2610         }
2611 
2612         privp->lip_cnt = lip_cnt;
2613         if (els_code == LA_ELS_PLOGI) {
2614                 bcopy((caddr_t)sf->sf_sochandle->fcal_loginparms,
2615                     (caddr_t)&logi->common_service, sizeof (struct la_els_logi)
2616                     - 4);
2617                 bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
2618                     (caddr_t)&logi->nport_ww_name, sizeof (la_wwn_t));
2619                 bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn,
2620                     (caddr_t)&logi->node_ww_name, sizeof (la_wwn_t));
2621                 bzero((caddr_t)&logi->reserved, 16);
2622         } else if (els_code == LA_ELS_LOGO) {
2623                 bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
2624                     (caddr_t)&(((struct la_els_logo *)logi)->nport_ww_name), 8);
2625                 ((struct la_els_logo    *)logi)->reserved = 0;
2626                 ((struct la_els_logo    *)logi)->nport_id[0] = 0;
2627                 ((struct la_els_logo    *)logi)->nport_id[1] = 0;
2628                 ((struct la_els_logo    *)logi)->nport_id[2] = arg1;
2629         }
2630 
2631         privp->els_code = els_code;
2632         logi->ls_code = els_code;
2633         logi->mbz[0] = 0;
2634         logi->mbz[1] = 0;
2635         logi->mbz[2] = 0;
2636 
2637         privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2638         return (sf_els_transport(sf, privp));
2639 }
2640 
2641 
2642 /*
2643  * send an ELS IU via the transport,
2644  * returning TRUE upon success, else returning FALSE
2645  */
2646 static int
2647 sf_els_transport(struct sf *sf, struct sf_els_hdr *privp)
2648 {
2649         struct fcal_packet *fpkt = privp->fpkt;
2650 
2651 
2652         (void) ddi_dma_sync(privp->cmd_dma_handle, (off_t)0, (size_t)0,
2653             DDI_DMA_SYNC_FORDEV);
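             /* link this request onto the head of the outstanding ELS list */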
2654         privp->prev = NULL;
2655         mutex_enter(&sf->sf_mutex);
2656         privp->next = sf->sf_els_list;
2657         if (sf->sf_els_list != NULL) {
2658                 sf->sf_els_list->prev = privp;
2659         }
2660         sf->sf_els_list = privp;
2661         mutex_exit(&sf->sf_mutex);
2662 
2663         /* call the transport to send a packet */
2664         if (soc_transport(sf->sf_sochandle, fpkt, FCAL_NOSLEEP,
2665             CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) {
2666                 mutex_enter(&sf->sf_mutex);
2667                 if (privp->prev != NULL) {
2668                         privp->prev->next = privp->next;
2669                 }
2670                 if (privp->next != NULL) {
2671                         privp->next->prev = privp->prev;
2672                 }
2673                 if (sf->sf_els_list == privp) {
2674                         sf->sf_els_list = privp->next;
2675                 }
2676                 mutex_exit(&sf->sf_mutex);
2677                 sf_els_free(fpkt);
2678                 return (FALSE);                 /* failure */
2679         }
2680         return (TRUE);                          /* success */
2681 }
2682 
2683 
2684 /*
2685  * called as the pkt_comp routine for ELS FC packets
2686  */
2687 static void
2688 sf_els_callback(struct fcal_packet *fpkt)
2689 {
2690         struct sf_els_hdr *privp = fpkt->fcal_pkt_private;
2691         struct sf *sf = privp->sf;
2692         struct sf *tsf;
2693         int tgt_id;
2694         struct la_els_logi *ptr = (struct la_els_logi *)privp->rsp;
2695         struct la_els_adisc *adisc = (struct la_els_adisc *)ptr;
2696         struct  sf_target *target;
2697         short   ncmds;
2698         short   free_pkt = TRUE;
2699 
2700 
2701         /*
2702          * we've received an ELS callback for a request we sent earlier
2703          */
2704 
2705         /* take the current packet off of the queue */
2706         mutex_enter(&sf->sf_mutex);
2707         if (privp->timeout == SF_INVALID_TIMEOUT) {
2708                 mutex_exit(&sf->sf_mutex);
2709                 return;
2710         }
2711         if (privp->prev != NULL) {
2712                 privp->prev->next = privp->next;
2713         }
2714         if (privp->next != NULL) {
2715                 privp->next->prev = privp->prev;
2716         }
2717         if (sf->sf_els_list == privp) {
2718                 sf->sf_els_list = privp->next;
2719         }
2720         privp->prev = privp->next = NULL;
2721         mutex_exit(&sf->sf_mutex);
2722 
2723         /* get # pkts in this callback */
2724         ncmds = fpkt->fcal_ncmds;
2725         ASSERT(ncmds >= 0);
2726         mutex_enter(&sf->sf_cmd_mutex);
2727         sf->sf_ncmds = ncmds;
2728         mutex_exit(&sf->sf_cmd_mutex);
2729 
2730         /* sync idea of memory */
2731         (void) ddi_dma_sync(privp->rsp_dma_handle, (off_t)0, (size_t)0,
2732             DDI_DMA_SYNC_FORKERNEL);
2733 
2734         /* was this an OK ACC msg ?? */
2735         if ((fpkt->fcal_pkt_status == FCAL_STATUS_OK) &&
2736             (ptr->ls_code == LA_ELS_ACC)) {
2737 
2738                 /*
2739                  * this was an OK ACC pkt
2740                  */
2741 
2742                 switch (privp->els_code) {
2743                 case LA_ELS_PLOGI:
2744                         /*
2745                          * was able to do an N_port login
2746                          */
2747                         SF_DEBUG(2, (sf, CE_CONT,
2748                             "!PLOGI to al_pa %x succeeded, wwn %x%x\n",
2749                             privp->dest_nport_id,
2750                             *((int *)&ptr->nport_ww_name.raw_wwn[0]),
2751                             *((int *)&ptr->nport_ww_name.raw_wwn[4])));
2752                         /* try to do a process login */
2753                         if (!sf_do_prli(sf, privp, ptr)) {
2754                                 free_pkt = FALSE;
2755                                 goto fail;      /* PRLI failed */
2756                         }
2757                         break;
2758                 case LA_ELS_PRLI:
2759                         /*
2760                          * was able to do a process login
2761                          */
2762                         SF_DEBUG(2, (sf, CE_CONT,
2763                             "!PRLI to al_pa %x succeeded\n",
2764                             privp->dest_nport_id));
2765                         /* try to do address discovery */
2766                         if (sf_do_adisc(sf, privp) != 1) {
2767                                 free_pkt = FALSE;
2768                                 goto fail;      /* ADISC failed */
2769                         }
2770                         break;
2771                 case LA_ELS_ADISC:
2772                         /*
2773                          * found a target via ADISC
2774                          */
2775 
2776                         SF_DEBUG(2, (sf, CE_CONT,
2777                             "!ADISC to al_pa %x succeeded\n",
2778                             privp->dest_nport_id));
2779 
2780                         /* create the target info */
2781                         if ((target = sf_create_target(sf, privp,
2782                             sf_alpa_to_switch[(uchar_t)adisc->hard_address],
2783                             (int64_t)0))
2784                             == NULL) {
2785                                 goto fail;      /* can't create target */
2786                         }
2787 
2788                         /*
2789                          * ensure address discovered matches what we thought
2790                          * it would be
2791                          */
2792                         if ((uchar_t)adisc->hard_address !=
2793                             privp->dest_nport_id) {
2794                                 sf_log(sf, CE_WARN,
2795                                     "target 0x%x, AL-PA 0x%x and "
2796                                     "hard address 0x%x don't match\n",
2797                                     sf_alpa_to_switch[
2798                                     (uchar_t)privp->dest_nport_id],
2799                                     privp->dest_nport_id,
2800                                     (uchar_t)adisc->hard_address);
2801                                 mutex_enter(&sf->sf_mutex);
2802                                 sf_offline_target(sf, target);
2803                                 mutex_exit(&sf->sf_mutex);
2804                                 goto fail;      /* addr doesn't match */
2805                         }
2806                         /*
2807                          * get the LUN list from the target via REPORT_LUNS
2808                          */
2809                         if (!sf_do_reportlun(sf, privp, target)) {
2810                                 mutex_enter(&sf->sf_mutex);
2811                                 sf_offline_target(sf, target);
2812                                 mutex_exit(&sf->sf_mutex);
2813                                 free_pkt = FALSE;
2814                                 goto fail;      /* REPORT_LUNS failed */
2815                         }
2816                         break;
2817                 default:
2818                         SF_DEBUG(2, (sf, CE_CONT,
2819                             "!ELS %x to al_pa %x succeeded\n",
2820                             privp->els_code, privp->dest_nport_id));
2821                         sf_els_free(fpkt);
2822                         break;
2823                 }
2824 
2825         } else {
2826 
2827                 /*
2828                  * oh oh -- this was not an OK ACC packet
2829                  */
2830 
2831                 /* get target ID from dest loop address */
2832                 tgt_id = sf_alpa_to_switch[(uchar_t)privp->dest_nport_id];
2833 
2834                 /* keep track of failures */
2835                 sf->sf_stats.tstats[tgt_id].els_failures++;
2836                 if (++(privp->retries) < sf_els_retries &&
2837                     fpkt->fcal_pkt_status != FCAL_STATUS_OPEN_FAIL) {
2838                         if (fpkt->fcal_pkt_status ==
2839                             FCAL_STATUS_MAX_XCHG_EXCEEDED)  {
2840                                 tsf = sf->sf_sibling;
2841                                 if (tsf != NULL) {
2842                                         mutex_enter(&tsf->sf_cmd_mutex);
2843                                         tsf->sf_flag = 1;
2844                                         tsf->sf_throttle = SF_DECR_DELTA;
2845                                         mutex_exit(&tsf->sf_cmd_mutex);
2846                                 }
2847                         }
2848                         privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2849                         privp->prev = NULL;
2850 
2851                         mutex_enter(&sf->sf_mutex);
2852 
2853                         if (privp->lip_cnt == sf->sf_lip_cnt) {
2854                                 SF_DEBUG(1, (sf, CE_WARN,
2855                                     "!ELS %x to al_pa %x failed, retrying",
2856                                     privp->els_code, privp->dest_nport_id));
2857                                 privp->next = sf->sf_els_list;
2858                                 if (sf->sf_els_list != NULL) {
2859                                         sf->sf_els_list->prev = privp;
2860                                 }
2861 
2862                                 sf->sf_els_list = privp;
2863 
2864                                 mutex_exit(&sf->sf_mutex);
2865                                 /* device busy?  wait a bit ... */
2866                                 if (fpkt->fcal_pkt_status ==
2867                                     FCAL_STATUS_MAX_XCHG_EXCEEDED)  {
2868                                         privp->delayed_retry = 1;
2869                                         return;
2870                                 }
2871                                 /* call the transport to send a pkt */
2872                                 if (soc_transport(sf->sf_sochandle, fpkt,
2873                                     FCAL_NOSLEEP, CQ_REQUEST_1) !=
2874                                     FCAL_TRANSPORT_SUCCESS) {
2875                                         mutex_enter(&sf->sf_mutex);
2876                                         if (privp->prev != NULL) {
2877                                                 privp->prev->next =
2878                                                     privp->next;
2879                                         }
2880                                         if (privp->next != NULL) {
2881                                                 privp->next->prev =
2882                                                     privp->prev;
2883                                         }
2884                                         if (sf->sf_els_list == privp) {
2885                                                 sf->sf_els_list = privp->next;
2886                                         }
2887                                         mutex_exit(&sf->sf_mutex);
2888                                         goto fail;
2889                                 } else
2890                                         return;
2891                         } else {
2892                                 mutex_exit(&sf->sf_mutex);
2893                                 goto fail;
2894                         }
2895                 } else {
2896 #ifdef  DEBUG
2897                         if (fpkt->fcal_pkt_status != 0x36 || sfdebug > 4) {
2898                         SF_DEBUG(2, (sf, CE_NOTE, "ELS %x to al_pa %x failed",
2899                             privp->els_code, privp->dest_nport_id));
2900                         if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
2901                                 SF_DEBUG(2, (sf, CE_NOTE,
2902                                     "els reply code = %x", ptr->ls_code));
2903                                 if (ptr->ls_code == LA_ELS_RJT)
2904                                         SF_DEBUG(1, (sf, CE_CONT,
2905                                             "LS_RJT reason = %x\n",
2906                                             *(((uint_t *)ptr) + 1)));
2907                         } else
2908                                 SF_DEBUG(2, (sf, CE_NOTE,
2909                                     "fc packet status = %x",
2910                                     fpkt->fcal_pkt_status));
2911                         }
2912 #endif
2913                         goto fail;
2914                 }
2915         }
2916         return;                                 /* success */
2917 fail:
2918         mutex_enter(&sf->sf_mutex);
2919         if (sf->sf_lip_cnt == privp->lip_cnt) {
2920                 sf->sf_device_count--;
2921                 ASSERT(sf->sf_device_count >= 0);
2922                 if (sf->sf_device_count == 0) {
2923                         sf_finish_init(sf, privp->lip_cnt);
2924                 }
2925         }
2926         mutex_exit(&sf->sf_mutex);
2927         if (free_pkt) {
2928                 sf_els_free(fpkt);
2929         }
2930 }
2931 
2932 
2933 /*
2934  * send a PRLI (process login) ELS IU via the transport,
2935  * returning TRUE upon success, else returning FALSE
2936  */
2937 static int
2938 sf_do_prli(struct sf *sf, struct sf_els_hdr *privp, struct la_els_logi *ptr)
2939 {
2940         struct la_els_prli      *prli = (struct la_els_prli *)privp->cmd;
2941         struct fcp_prli         *fprli;
2942         struct  fcal_packet     *fpkt = privp->fpkt;
2943 
2944 
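             /*
              * build the PRLI payload: a single FCP service-parameter page
              * requesting an image pair, with this port as initiator only
              */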
2945         fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
2946             sizeof (struct la_els_prli);
2947         privp->els_code = LA_ELS_PRLI;
2948         fprli = (struct fcp_prli *)prli->service_params;
2949         prli->ls_code = LA_ELS_PRLI;
2950         prli->page_length = 0x10;
2951         prli->payload_length = sizeof (struct la_els_prli);
2952         fprli->type = 0x08;             /* FC-4 type: SCSI-FCP (no define) */
2953         fprli->resvd1 = 0;
2954         fprli->orig_process_assoc_valid = 0;
2955         fprli->resp_process_assoc_valid = 0;
2956         fprli->establish_image_pair = 1;
2957         fprli->resvd2 = 0;
2958         fprli->resvd3 = 0;
2959         fprli->data_overlay_allowed = 0;
2960         fprli->initiator_fn = 1;
2961         fprli->target_fn = 0;
2962         fprli->cmd_data_mixed = 0;
2963         fprli->data_resp_mixed = 0;
2964         fprli->read_xfer_rdy_disabled = 1;
2965         fprli->write_xfer_rdy_disabled = 0;
2966 
2967         bcopy((caddr_t)&ptr->nport_ww_name, (caddr_t)&privp->port_wwn,
2968             sizeof (privp->port_wwn));
2969         bcopy((caddr_t)&ptr->node_ww_name, (caddr_t)&privp->node_wwn,
2970             sizeof (privp->node_wwn));
2971 
2972         privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2973         return (sf_els_transport(sf, privp));
2974 }
2975 
2976 
2977 /*
2978  * send an ADISC (address discovery) ELS IU via the transport,
2979  * returning TRUE upon success, else returning FALSE
2980  */
2981 static int
2982 sf_do_adisc(struct sf *sf, struct sf_els_hdr *privp)
2983 {
2984         struct la_els_adisc     *adisc = (struct la_els_adisc *)privp->cmd;
2985         struct  fcal_packet     *fpkt = privp->fpkt;
2986 
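             /* build the ADISC payload using our own port/node WWNs and AL_PA */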
2987         privp->els_code = LA_ELS_ADISC;
2988         adisc->ls_code = LA_ELS_ADISC;
2989         adisc->mbz[0] = 0;
2990         adisc->mbz[1] = 0;
2991         adisc->mbz[2] = 0;
2992         adisc->hard_address = 0;        /* not asserting a hard address */
2993         fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
2994             sizeof (struct la_els_adisc);
2995         bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
2996             (caddr_t)&adisc->port_wwn, sizeof (adisc->port_wwn));
2997         bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn,
2998             (caddr_t)&adisc->node_wwn, sizeof (adisc->node_wwn));
2999         adisc->nport_id = sf->sf_al_pa;
3000 
3001         privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
3002         return (sf_els_transport(sf, privp));
3003 }
3004 
3005 
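     /*
      * allocate an fcal packet for an ELS exchange: the packet itself, its
      * private header, and DMA-able command and response buffers, filling
      * in the SOC+ request and FC frame headers; returns the packet upon
      * success, else NULL
      */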
3006 static struct fcal_packet *
3007 sf_els_alloc(struct sf *sf, uchar_t dest_id, int priv_size, int cmd_size,
3008     int rsp_size, caddr_t *rprivp, caddr_t *cmd_buf)
3009 {
3010         struct  fcal_packet     *fpkt;
3011         ddi_dma_cookie_t        pcookie;
3012         ddi_dma_cookie_t        rcookie;
3013         struct  sf_els_hdr      *privp;
3014         ddi_dma_handle_t        cmd_dma_handle = NULL;
3015         ddi_dma_handle_t        rsp_dma_handle = NULL;
3016         ddi_acc_handle_t        cmd_acc_handle = NULL;
3017         ddi_acc_handle_t        rsp_acc_handle = NULL;
3018         size_t                  real_size;
3019         uint_t                  ccount;
3020         fc_frame_header_t       *hp;
3021         int                     cmd_bound = FALSE, rsp_bound = FALSE;
3022         caddr_t                 cmd = NULL;
3023         caddr_t                 rsp = NULL;
3024 
3025         if ((fpkt = (struct fcal_packet *)kmem_zalloc(
3026             sizeof (struct fcal_packet), KM_NOSLEEP)) == NULL) {
3027                 SF_DEBUG(1, (sf, CE_WARN,
3028                     "Could not allocate fcal_packet for ELS\n"));
3029                 return (NULL);
3030         }
3031 
3032         if ((privp = (struct sf_els_hdr *)kmem_zalloc(priv_size,
3033             KM_NOSLEEP)) == NULL) {
3034                 SF_DEBUG(1, (sf, CE_WARN,
3035                     "Could not allocate sf_els_hdr for ELS\n"));
3036                 goto fail;
3037         }
3038 
3039         privp->size = priv_size;
3040         fpkt->fcal_pkt_private = (caddr_t)privp;
3041 
3042         if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3043             DDI_DMA_DONTWAIT, NULL, &cmd_dma_handle) != DDI_SUCCESS) {
3044                 SF_DEBUG(1, (sf, CE_WARN,
3045                     "Could not allocate DMA handle for ELS\n"));
3046                 goto fail;
3047         }
3048 
3049         if (ddi_dma_mem_alloc(cmd_dma_handle, cmd_size,
3050             sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3051             DDI_DMA_DONTWAIT, NULL, &cmd,
3052             &real_size, &cmd_acc_handle) != DDI_SUCCESS) {
3053                 SF_DEBUG(1, (sf, CE_WARN,
3054                     "Could not allocate DMA memory for ELS\n"));
3055                 goto fail;
3056         }
3057 
3058         if (real_size < cmd_size) {
3059                 SF_DEBUG(1, (sf, CE_WARN,
3060                     "DMA memory too small for ELS\n"));
3061                 goto fail;
3062         }
3063 
3064         if (ddi_dma_addr_bind_handle(cmd_dma_handle, NULL,
3065             cmd, real_size, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
3066             DDI_DMA_DONTWAIT, NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) {
3067                 SF_DEBUG(1, (sf, CE_WARN,
3068                     "Could not bind DMA memory for ELS\n"));
3069                 goto fail;
3070         }
3071         cmd_bound = TRUE;
3072 
3073         if (ccount != 1) {
3074                 SF_DEBUG(1, (sf, CE_WARN,
3075                     "Wrong cookie count for ELS\n"));
3076                 goto fail;
3077         }
3078 
3079         if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3080             DDI_DMA_DONTWAIT, NULL, &rsp_dma_handle) != DDI_SUCCESS) {
3081                 SF_DEBUG(1, (sf, CE_WARN,
3082                     "Could not allocate DMA handle for ELS rsp\n"));
3083                 goto fail;
3084         }
3085         if (ddi_dma_mem_alloc(rsp_dma_handle, rsp_size,
3086             sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3087             DDI_DMA_DONTWAIT, NULL, &rsp,
3088             &real_size, &rsp_acc_handle) != DDI_SUCCESS) {
3089                 SF_DEBUG(1, (sf, CE_WARN,
3090                     "Could not allocate DMA memory for ELS rsp\n"));
3091                 goto fail;
3092         }
3093 
3094         if (real_size < rsp_size) {
3095                 SF_DEBUG(1, (sf, CE_WARN,
3096                     "DMA memory too small for ELS rsp\n"));
3097                 goto fail;
3098         }
3099 
3100         if (ddi_dma_addr_bind_handle(rsp_dma_handle, NULL,
3101             rsp, real_size, DDI_DMA_READ | DDI_DMA_CONSISTENT,
3102             DDI_DMA_DONTWAIT, NULL, &rcookie, &ccount) != DDI_DMA_MAPPED) {
3103                 SF_DEBUG(1, (sf, CE_WARN,
3104                     "Could not bind DMA memory for ELS rsp\n"));
3105                 goto fail;
3106         }
3107         rsp_bound = TRUE;
3108 
3109         if (ccount != 1) {
3110                 SF_DEBUG(1, (sf, CE_WARN,
3111                     "Wrong cookie count for ELS rsp\n"));
3112                 goto fail;
3113         }
3114 
3115         privp->cmd = cmd;
3116         privp->sf = sf;
3117         privp->cmd_dma_handle = cmd_dma_handle;
3118         privp->cmd_acc_handle = cmd_acc_handle;
3119         privp->rsp = rsp;
3120         privp->rsp_dma_handle = rsp_dma_handle;
3121         privp->rsp_acc_handle = rsp_acc_handle;
3122         privp->dest_nport_id = dest_id;
3123         privp->fpkt = fpkt;
3124 
3125         fpkt->fcal_pkt_cookie = sf->sf_socp;
3126         fpkt->fcal_pkt_comp = sf_els_callback;
3127         fpkt->fcal_magic = FCALP_MAGIC;
3128         fpkt->fcal_pkt_flags = 0;
3129         fpkt->fcal_socal_request.sr_soc_hdr.sh_flags =
3130             (ushort_t)(SOC_FC_HEADER | sf->sf_sochandle->fcal_portno);
3131         fpkt->fcal_socal_request.sr_soc_hdr.sh_class = 3;
3132         fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 2;
3133         fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = cmd_size;
3134         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_count = 1;
3135         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_flags = 0;
3136         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_seqno = 0;
3137         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_SIMPLE;
3138         fpkt->fcal_socal_request.sr_dataseg[0].fc_base = (uint32_t)
3139             pcookie.dmac_address;
3140         fpkt->fcal_socal_request.sr_dataseg[0].fc_count = cmd_size;
3141         fpkt->fcal_socal_request.sr_dataseg[1].fc_base = (uint32_t)
3142             rcookie.dmac_address;
3143         fpkt->fcal_socal_request.sr_dataseg[1].fc_count = rsp_size;
3144 
3145         /* Fill in the Fibre Channel frame header */
3146         hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
3147         hp->r_ctl = R_CTL_ELS_REQ;
3148         hp->d_id = dest_id;
3149         hp->s_id = sf->sf_al_pa;
3150         hp->type = TYPE_EXTENDED_LS;
3151         hp->reserved1 = 0;
3152         hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
3153         hp->seq_id = 0;
3154         hp->df_ctl  = 0;
3155         hp->seq_cnt = 0;
3156         hp->ox_id = 0xffff;
3157         hp->rx_id = 0xffff;
3158         hp->ro = 0;
3159 
3160         *rprivp = (caddr_t)privp;
3161         *cmd_buf = cmd;
3162         return (fpkt);
3163 
3164 fail:
3165         if (cmd_dma_handle != NULL) {
3166                 if (cmd_bound) {
3167                         (void) ddi_dma_unbind_handle(cmd_dma_handle);
3168                 }
3169                 ddi_dma_free_handle(&cmd_dma_handle);
3170                 privp->cmd_dma_handle = NULL;
3171         }
3172         if (rsp_dma_handle != NULL) {
3173                 if (rsp_bound) {
3174                         (void) ddi_dma_unbind_handle(rsp_dma_handle);
3175                 }
3176                 ddi_dma_free_handle(&rsp_dma_handle);
3177                 privp->rsp_dma_handle = NULL;
3178         }
             /*
              * any cmd/rsp DMA memory not yet attached to privp would be
              * missed by sf_els_free(), so free it here
              */
             if (cmd != NULL) {
                     ddi_dma_mem_free(&cmd_acc_handle);
             }
             if (rsp != NULL) {
                     ddi_dma_mem_free(&rsp_acc_handle);
             }
3179         sf_els_free(fpkt);
3180         return (NULL);
3181 }
3182 
3183 
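     /*
      * free an ELS packet, along with all of its DMA resources
      */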
3184 static void
3185 sf_els_free(struct fcal_packet *fpkt)
3186 {
3187         struct  sf_els_hdr      *privp = fpkt->fcal_pkt_private;
3188 
3189         if (privp != NULL) {
3190                 if (privp->cmd_dma_handle != NULL) {
3191                         (void) ddi_dma_unbind_handle(privp->cmd_dma_handle);
3192                         ddi_dma_free_handle(&privp->cmd_dma_handle);
3193                 }
3194                 if (privp->cmd != NULL) {
3195                         ddi_dma_mem_free(&privp->cmd_acc_handle);
3196                 }
3197 
3198                 if (privp->rsp_dma_handle != NULL) {
3199                         (void) ddi_dma_unbind_handle(privp->rsp_dma_handle);
3200                         ddi_dma_free_handle(&privp->rsp_dma_handle);
3201                 }
3202 
3203                 if (privp->rsp != NULL) {
3204                         ddi_dma_mem_free(&privp->rsp_acc_handle);
3205                 }
3206                 if (privp->data_dma_handle) {
3207                         (void) ddi_dma_unbind_handle(privp->data_dma_handle);
3208                         ddi_dma_free_handle(&privp->data_dma_handle);
3209                 }
3210                 if (privp->data_buf) {
3211                         ddi_dma_mem_free(&privp->data_acc_handle);
3212                 }
3213                 kmem_free(privp, privp->size);
3214         }
3215         kmem_free(fpkt, sizeof (struct fcal_packet));
3216 }
3217 
3218 
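     /*
      * create (or revive) a target entry for the given target number and
      * LUN, chaining LUNs != 0 off of the LUN 0 entry; returns a pointer
      * to the target upon success, else NULL
      */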
3219 static struct sf_target *
3220 sf_create_target(struct sf *sf, struct sf_els_hdr *privp, int tnum, int64_t lun)
3221 {
3222         struct sf_target *target, *ntarget, *otarget, *ptarget;
3223         int hash;
3224 #ifdef RAID_LUNS
3225         int64_t orig_lun = lun;
3226 
3227         /* XXXX Work around SCSA limitations. */
3228         lun = *((short *)&lun);
3229 #endif
3230         ntarget = kmem_zalloc(sizeof (struct sf_target), KM_NOSLEEP);
3231         mutex_enter(&sf->sf_mutex);
3232         if (sf->sf_lip_cnt != privp->lip_cnt) {
3233                 mutex_exit(&sf->sf_mutex);
3234                 if (ntarget != NULL)
3235                         kmem_free(ntarget, sizeof (struct sf_target));
3236                 return (NULL);
3237         }
3238 
3239         target = sf_lookup_target(sf, privp->port_wwn, lun);
3240         if (lun != 0) {
3241                 /*
3242                  * Since LUNs != 0 are queued up after LUN == 0, find LUN == 0
3243                  * and enqueue the new LUN.
3244                  */
3245                 if ((ptarget = sf_lookup_target(sf, privp->port_wwn,
3246                     (int64_t)0)) == NULL) {
3247                         /*
3248                          * Yeep -- no LUN 0?
3249                          */
3250                         mutex_exit(&sf->sf_mutex);
3251                         sf_log(sf, CE_WARN, "target 0x%x "
3252                             "lun %" PRIx64 ": No LUN 0\n", tnum, lun);
3253                         if (ntarget != NULL)
3254                                 kmem_free(ntarget, sizeof (struct sf_target));
3255                         return (NULL);
3256                 }
3257                 mutex_enter(&ptarget->sft_mutex);
3258                 if (target != NULL && ptarget->sft_lip_cnt == sf->sf_lip_cnt &&
3259                     ptarget->sft_state & SF_TARGET_OFFLINE) {
3260                         /* LUN 0 already finished, duplicate its state */
3261                         mutex_exit(&ptarget->sft_mutex);
3262                         sf_offline_target(sf, target);
3263                         mutex_exit(&sf->sf_mutex);
3264                         if (ntarget != NULL)
3265                                 kmem_free(ntarget, sizeof (struct sf_target));
3266                         return (target);
3267                 } else if (target != NULL) {
3268                         /*
3269                          * LUN 0 online or not examined yet.
3270                          * Try to bring the LUN back online
3271                          */
3272                         mutex_exit(&ptarget->sft_mutex);
3273                         mutex_enter(&target->sft_mutex);
3274                         target->sft_lip_cnt = privp->lip_cnt;
3275                         target->sft_state |= SF_TARGET_BUSY;
3276                         target->sft_state &= ~(SF_TARGET_OFFLINE|
3277                             SF_TARGET_MARK);
3278                         target->sft_al_pa = (uchar_t)privp->dest_nport_id;
3279                         target->sft_hard_address = sf_switch_to_alpa[tnum];
3280                         mutex_exit(&target->sft_mutex);
3281                         mutex_exit(&sf->sf_mutex);
3282                         if (ntarget != NULL)
3283                                 kmem_free(ntarget, sizeof (struct sf_target));
3284                         return (target);
3285                 }
3286                 mutex_exit(&ptarget->sft_mutex);
3287                 if (ntarget == NULL) {
3288                         mutex_exit(&sf->sf_mutex);
3289                         return (NULL);
3290                 }
3291                 /* Initialize new target structure */
3292                 bcopy((caddr_t)&privp->node_wwn,
3293                     (caddr_t)&ntarget->sft_node_wwn, sizeof (privp->node_wwn));
3294                 bcopy((caddr_t)&privp->port_wwn,
3295                     (caddr_t)&ntarget->sft_port_wwn, sizeof (privp->port_wwn));
3296                 ntarget->sft_lun.l = lun;
3297 #ifdef RAID_LUNS
3298                 ntarget->sft_lun.l = orig_lun;
3299                 ntarget->sft_raid_lun = (uint_t)lun;
3300 #endif
3301                 mutex_init(&ntarget->sft_mutex, NULL, MUTEX_DRIVER, NULL);
3302                 mutex_init(&ntarget->sft_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
3303                 /* Don't let anyone use this till we finish up init. */
3304                 mutex_enter(&ntarget->sft_mutex);
3305                 mutex_enter(&ntarget->sft_pkt_mutex);
3306 
3307                 hash = SF_HASH(privp->port_wwn, lun);
3308                 ntarget->sft_next = sf->sf_wwn_lists[hash];
3309                 sf->sf_wwn_lists[hash] = ntarget;
3310 
3311                 ntarget->sft_lip_cnt = privp->lip_cnt;
3312                 ntarget->sft_al_pa = (uchar_t)privp->dest_nport_id;
3313                 ntarget->sft_hard_address = sf_switch_to_alpa[tnum];
3314                 ntarget->sft_device_type = DTYPE_UNKNOWN;
3315                 ntarget->sft_state = SF_TARGET_BUSY;
3316                 ntarget->sft_pkt_head = (struct sf_pkt *)&ntarget->
3317                     sft_pkt_head;
3318                 ntarget->sft_pkt_tail = (struct sf_pkt *)&ntarget->
3319                     sft_pkt_head;
3320 
3321                 mutex_enter(&ptarget->sft_mutex);
3322                 /* find the end of LUN 0's LUN list and append the new LUN */
3323                 for (target = ptarget; target->sft_next_lun;
3324                     target = target->sft_next_lun) {
3325                         otarget = target->sft_next_lun;
3326                 }
3327                 ntarget->sft_next_lun = target->sft_next_lun;
3328                 target->sft_next_lun = ntarget;
3329                 mutex_exit(&ptarget->sft_mutex);
3330                 mutex_exit(&ntarget->sft_pkt_mutex);
3331                 mutex_exit(&ntarget->sft_mutex);
3332                 mutex_exit(&sf->sf_mutex);
3333                 return (ntarget);
3334 
3335         }
3336         if (target != NULL && target->sft_lip_cnt == sf->sf_lip_cnt) {
3337                 /* It's been touched this LIP -- duplicate WWNs */
3338                 sf_offline_target(sf, target); /* And all the baby targets */
3339                 mutex_exit(&sf->sf_mutex);
3340                 sf_log(sf, CE_WARN, "target 0x%x, duplicate port wwns\n",
3341                     tnum);
3342                 if (ntarget != NULL) {
3343                         kmem_free(ntarget, sizeof (struct sf_target));
3344                 }
3345                 return (NULL);
3346         }
3347 
3348         if ((otarget = sf->sf_targets[tnum]) != NULL) {
3349                 /* Someone else is in our slot */
3350                 mutex_enter(&otarget->sft_mutex);
3351                 if (otarget->sft_lip_cnt == sf->sf_lip_cnt) {
3352                         mutex_exit(&otarget->sft_mutex);
3353                         sf_offline_target(sf, otarget);
3354                         if (target != NULL)
3355                                 sf_offline_target(sf, target);
3356                         mutex_exit(&sf->sf_mutex);
3357                         sf_log(sf, CE_WARN,
3358                             "target 0x%x, duplicate switch settings\n", tnum);
3359                         if (ntarget != NULL)
3360                                 kmem_free(ntarget, sizeof (struct sf_target));
3361                         return (NULL);
3362                 }
3363                 mutex_exit(&otarget->sft_mutex);
3364                 if (bcmp((caddr_t)&privp->port_wwn, (caddr_t)&otarget->
3365                     sft_port_wwn, sizeof (privp->port_wwn))) {
3366                         sf_offline_target(sf, otarget);
3367                         mutex_exit(&sf->sf_mutex);
3368                         sf_log(sf, CE_WARN, "wwn changed on target 0x%x\n",
3369                             tnum);
3370                         bzero((caddr_t)&sf->sf_stats.tstats[tnum],
3371                             sizeof (struct sf_target_stats));
3372                         mutex_enter(&sf->sf_mutex);
3373                 }
3374         }
3375 
3376         sf->sf_targets[tnum] = target;
3377         if ((target = sf->sf_targets[tnum]) == NULL) {
3378                 if (ntarget == NULL) {
3379                         mutex_exit(&sf->sf_mutex);
3380                         return (NULL);
3381                 }
3382                 bcopy((caddr_t)&privp->node_wwn,
3383                     (caddr_t)&ntarget->sft_node_wwn, sizeof (privp->node_wwn));
3384                 bcopy((caddr_t)&privp->port_wwn,
3385                     (caddr_t)&ntarget->sft_port_wwn, sizeof (privp->port_wwn));
3386                 ntarget->sft_lun.l = lun;
3387 #ifdef RAID_LUNS
3388                 ntarget->sft_lun.l = orig_lun;
3389                 ntarget->sft_raid_lun = (uint_t)lun;
3390 #endif
3391                 mutex_init(&ntarget->sft_mutex, NULL, MUTEX_DRIVER, NULL);
3392                 mutex_init(&ntarget->sft_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
3393                 mutex_enter(&ntarget->sft_mutex);
3394                 mutex_enter(&ntarget->sft_pkt_mutex);
3395                 hash = SF_HASH(privp->port_wwn, lun); /* lun 0 */
3396                 ntarget->sft_next = sf->sf_wwn_lists[hash];
3397                 sf->sf_wwn_lists[hash] = ntarget;
3398 
3399                 target = ntarget;
3400                 target->sft_lip_cnt = privp->lip_cnt;
3401                 target->sft_al_pa = (uchar_t)privp->dest_nport_id;
3402                 target->sft_hard_address = sf_switch_to_alpa[tnum];
3403                 target->sft_device_type = DTYPE_UNKNOWN;
3404                 target->sft_state = SF_TARGET_BUSY;
3405                 target->sft_pkt_head = (struct sf_pkt *)&target->
3406                     sft_pkt_head;
3407                 target->sft_pkt_tail = (struct sf_pkt *)&target->
3408                     sft_pkt_head;
3409                 sf->sf_targets[tnum] = target;
3410                 mutex_exit(&ntarget->sft_mutex);
3411                 mutex_exit(&ntarget->sft_pkt_mutex);
3412                 mutex_exit(&sf->sf_mutex);
3413         } else {
3414                 mutex_enter(&target->sft_mutex);
3415                 target->sft_lip_cnt = privp->lip_cnt;
3416                 target->sft_state |= SF_TARGET_BUSY;
3417                 target->sft_state &= ~(SF_TARGET_OFFLINE|SF_TARGET_MARK);
3418                 target->sft_al_pa = (uchar_t)privp->dest_nport_id;
3419                 target->sft_hard_address = sf_switch_to_alpa[tnum];
3420                 mutex_exit(&target->sft_mutex);
3421                 mutex_exit(&sf->sf_mutex);
3422                 if (ntarget != NULL)
3423                         kmem_free(ntarget, sizeof (struct sf_target));
3424         }
3425         return (target);
3426 }
3427 
3428 
3429 /*
3430  * find the target for a given sf instance
3431  */
3432 /* ARGSUSED */
3433 static struct sf_target *
3434 #ifdef RAID_LUNS
3435 sf_lookup_target(struct sf *sf, uchar_t *wwn, int lun)
3436 #else
3437 sf_lookup_target(struct sf *sf, uchar_t *wwn, int64_t lun)
3438 #endif
3439 {
3440         int hash;
3441         struct sf_target *target;
3442 
3443         ASSERT(mutex_owned(&sf->sf_mutex));
3444         hash = SF_HASH(wwn, lun);
3445 
3446         target = sf->sf_wwn_lists[hash];
3447         while (target != NULL) {
3448 
3449 #ifndef RAID_LUNS
3450                 if (bcmp((caddr_t)wwn, (caddr_t)&target->sft_port_wwn,
3451                     sizeof (target->sft_port_wwn)) == 0 &&
3452                         target->sft_lun.l == lun)
3453                         break;
3454 #else
3455                 if (bcmp((caddr_t)wwn, (caddr_t)&target->sft_port_wwn,
3456                     sizeof (target->sft_port_wwn)) == 0 &&
3457                         target->sft_raid_lun == lun)
3458                         break;
3459 #endif
3460                 target = target->sft_next;
3461         }
3462 
3463         return (target);
3464 }
3465 
3466 
3467 /*
3468  * Send out a REPORT_LUNS command.
3469  */
3470 static int
3471 sf_do_reportlun(struct sf *sf, struct sf_els_hdr *privp,
3472     struct sf_target *target)
3473 {
3474         struct  fcal_packet     *fpkt = privp->fpkt;
3475         ddi_dma_cookie_t        pcookie;
3476         ddi_dma_handle_t        lun_dma_handle = NULL;
3477         ddi_acc_handle_t        lun_acc_handle;
3478         uint_t                  ccount;
3479         size_t                  real_size;
3480         caddr_t                 lun_buf = NULL;
3481         int                     handle_bound = 0;
3482         fc_frame_header_t       *hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
3483         struct fcp_cmd          *reportlun = (struct fcp_cmd *)privp->cmd;
3484         char                    *msg = "Transport";
3485 
3486         if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3487             DDI_DMA_DONTWAIT, NULL, &lun_dma_handle) != DDI_SUCCESS) {
3488                 msg = "ddi_dma_alloc_handle()";
3489                 goto fail;
3490         }
3491 
3492         if (ddi_dma_mem_alloc(lun_dma_handle, REPORT_LUNS_SIZE,
3493             sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3494             DDI_DMA_DONTWAIT, NULL, &lun_buf,
3495             &real_size, &lun_acc_handle) != DDI_SUCCESS) {
3496                 msg = "ddi_dma_mem_alloc()";
3497                 goto fail;
3498         }
3499 
3500         if (real_size < REPORT_LUNS_SIZE) {
3501                 msg = "DMA mem < REPORT_LUNS_SIZE";
3502                 goto fail;
3503         }
3504 
3505         if (ddi_dma_addr_bind_handle(lun_dma_handle, NULL,
3506             lun_buf, real_size, DDI_DMA_READ |
3507             DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
3508             NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) {
3509                 msg = "ddi_dma_addr_bind_handle()";
3510                 goto fail;
3511         }
3512         handle_bound = 1;
3513 
3514         if (ccount != 1) {
3515                 msg = "ccount != 1";
3516                 goto fail;
3517         }
3518         privp->els_code = 0;
3519         privp->target = target;
3520         privp->data_dma_handle = lun_dma_handle;
3521         privp->data_acc_handle = lun_acc_handle;
3522         privp->data_buf = lun_buf;
3523 
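             /* reuse the ELS packet as an FCP read that returns the LUN list */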
3524         fpkt->fcal_pkt_comp = sf_reportlun_callback;
3525         fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3;
3526         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_IO_READ;
3527         fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
3528             sizeof (struct fcp_cmd);
3529         fpkt->fcal_socal_request.sr_dataseg[2].fc_base =
3530             (uint32_t)pcookie.dmac_address;
3531         fpkt->fcal_socal_request.sr_dataseg[2].fc_count = pcookie.dmac_size;
3532         fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = pcookie.dmac_size;
3533         hp->r_ctl = R_CTL_COMMAND;
3534         hp->type = TYPE_SCSI_FCP;
3535         bzero((caddr_t)reportlun, sizeof (struct fcp_cmd));
3536         ((union scsi_cdb *)reportlun->fcp_cdb)->scc_cmd = SCMD_REPORT_LUNS;
3537         /* Now set the buffer size.  If DDI gave us extra, that's O.K. */
3538         ((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count0 =
3539             (real_size&0x0ff);
3540         ((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count1 =
3541             (real_size>>8)&0x0ff;
3542         ((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count2 =
3543             (real_size>>16)&0x0ff;
3544         ((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count3 =
3545             (real_size>>24)&0x0ff;
3546         reportlun->fcp_cntl.cntl_read_data = 1;
3547         reportlun->fcp_cntl.cntl_write_data = 0;
3548         reportlun->fcp_data_len = pcookie.dmac_size;
3549         reportlun->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
3550 
3551         (void) ddi_dma_sync(lun_dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);
3552         /* We know he's there, so this should be fast */
3553         privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
3554         if (sf_els_transport(sf, privp) == 1)
3555                 return (1);
3556 
3557 fail:
3558         sf_log(sf, CE_WARN,
3559             "%s failure for REPORTLUN to target 0x%x\n",
3560             msg, sf_alpa_to_switch[privp->dest_nport_id]);
3561         sf_els_free(fpkt);
3562         if (lun_dma_handle != NULL) {
3563                 if (handle_bound)
3564                         (void) ddi_dma_unbind_handle(lun_dma_handle);
3565                 ddi_dma_free_handle(&lun_dma_handle);
3566         }
3567         if (lun_buf != NULL) {
3568                 ddi_dma_mem_free(&lun_acc_handle);
3569         }
3570         return (0);
3571 }
3572 
3573 /*
3574  * Handle the results of a REPORT_LUNS command:
3575  *      Create additional targets if necessary
3576  *      Initiate INQUIRYs on all LUNs.
3577  */
3578 static void
3579 sf_reportlun_callback(struct fcal_packet *fpkt)
3580 {
3581         struct sf_els_hdr *privp = (struct sf_els_hdr *)fpkt->
3582             fcal_pkt_private;
3583         struct scsi_report_luns *ptr =
3584             (struct scsi_report_luns *)privp->data_buf;
3585         struct sf *sf = privp->sf;
3586         struct sf_target *target = privp->target;
3587         struct fcp_rsp *rsp = NULL;
3588         int delayed_retry = 0;
3589         int tid = sf_alpa_to_switch[target->sft_hard_address];
3590         int i, free_pkt = 1;
3591         short   ncmds;
3592 
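             /* take this packet off of the outstanding ELS list */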
3593         mutex_enter(&sf->sf_mutex);
3594         /* an invalid timeout means this packet has already timed out */
3595         if (privp->timeout == SF_INVALID_TIMEOUT) {
3596                 mutex_exit(&sf->sf_mutex);
3597                 return;
3598         }
3599         if (privp->prev)
3600                 privp->prev->next = privp->next;
3601         if (privp->next)
3602                 privp->next->prev = privp->prev;
3603         if (sf->sf_els_list == privp)
3604                 sf->sf_els_list = privp->next;
3605         privp->prev = privp->next = NULL;
3606         mutex_exit(&sf->sf_mutex);
3607         ncmds = fpkt->fcal_ncmds;
3608         ASSERT(ncmds >= 0);
3609         mutex_enter(&sf->sf_cmd_mutex);
3610         sf->sf_ncmds = ncmds;
3611         mutex_exit(&sf->sf_cmd_mutex);
3612 
3613         if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
3614                 (void) ddi_dma_sync(privp->rsp_dma_handle, 0,
3615                     0, DDI_DMA_SYNC_FORKERNEL);
3616 
3617                 rsp = (struct fcp_rsp *)privp->rsp;
3618         }
3619         SF_DEBUG(1, (sf, CE_CONT,
3620             "!REPORTLUN to al_pa %x pkt status %x scsi status %x\n",
3621             privp->dest_nport_id,
3622             fpkt->fcal_pkt_status,
3623             rsp ? rsp->fcp_u.fcp_status.scsi_status : 0));
3624 
3625         /* See if target simply does not support REPORT_LUNS. */
3626         if (rsp && rsp->fcp_u.fcp_status.scsi_status == STATUS_CHECK &&
3627             rsp->fcp_u.fcp_status.sense_len_set &&
3628             rsp->fcp_sense_len >=
3629             offsetof(struct scsi_extended_sense, es_qual_code)) {
3630                 struct scsi_extended_sense *sense;
3631                 sense = (struct scsi_extended_sense *)
3632                     ((caddr_t)rsp + sizeof (struct fcp_rsp)
3633                     + rsp->fcp_response_len);
3634                 if (sense->es_key == KEY_ILLEGAL_REQUEST) {
3635                         if (sense->es_add_code == 0x20) {
3636                                 /* Fake LUN 0 */
3637                                 SF_DEBUG(1, (sf, CE_CONT,
3638                                     "!REPORTLUN Faking good "
3639                                     "completion for alpa %x\n",
3640                                     privp->dest_nport_id));
3641                                 ptr->lun_list_len = FCP_LUN_SIZE;
3642                                 ptr->lun[0] = 0;
3643                                 rsp->fcp_u.fcp_status.scsi_status =
3644                                     STATUS_GOOD;
3645                         } else if (sense->es_add_code == 0x25) {
3646                                 SF_DEBUG(1, (sf, CE_CONT,
3647                                     "!REPORTLUN device alpa %x "
3648                                     "key %x code %x\n",
3649                                     privp->dest_nport_id,
3650                                     sense->es_key, sense->es_add_code));
3651                                 goto fail;
3652                         }
3653                 } else if (sense->es_key ==
3654                     KEY_UNIT_ATTENTION &&
3655                     sense->es_add_code == 0x29) {
3656                         SF_DEBUG(1, (sf, CE_CONT,
3657                             "!REPORTLUN device alpa %x was reset\n",
3658                             privp->dest_nport_id));
3659                 } else {
3660                         SF_DEBUG(1, (sf, CE_CONT,
3661                             "!REPORTLUN device alpa %x "
3662                             "key %x code %x\n",
3663                             privp->dest_nport_id,
3664                             sense->es_key, sense->es_add_code));
3665 /* XXXXXX The following is here to handle broken targets -- remove it later */
3666                         if (sf_reportlun_forever &&
3667                             sense->es_key == KEY_UNIT_ATTENTION)
3668                                 goto retry;
3669 /* XXXXXX */
3670                         if (sense->es_key == KEY_NOT_READY)
3671                                 delayed_retry = 1;
3672                 }
3673         }
3674 
3675         if (rsp && rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD) {
3676                 struct fcp_rsp_info *bep;
3677 
3678                 bep = (struct fcp_rsp_info *)(&rsp->
3679                     fcp_response_len + 1);
3680                 if (!rsp->fcp_u.fcp_status.rsp_len_set ||
3681                     bep->rsp_code == FCP_NO_FAILURE) {
3682                         (void) ddi_dma_sync(privp->data_dma_handle,
3683                             0, 0, DDI_DMA_SYNC_FORKERNEL);
3684 
3685                         /* Convert from # of bytes to # of 8-byte LUNs */
3686                         ptr->lun_list_len = ptr->lun_list_len >> 3;
3687                         SF_DEBUG(2, (sf, CE_CONT,
3688                             "!REPORTLUN to al_pa %x succeeded: %d LUNs\n",
3689                             privp->dest_nport_id, ptr->lun_list_len));
3690                         if (!ptr->lun_list_len) {
3691                                 /* No LUNs? Ya gotta be kidding... */
3692                                 sf_log(sf, CE_WARN,
3693                                     "SCSI violation -- "
3694                                     "target 0x%x reports no LUNs\n",
3695                                     sf_alpa_to_switch[
3696                                     privp->dest_nport_id]);
3697                                 ptr->lun_list_len = 1;
3698                                 ptr->lun[0] = 0;
3699                         }
3700 
3701                         mutex_enter(&sf->sf_mutex);
3702                         if (sf->sf_lip_cnt == privp->lip_cnt) {
3703                                 sf->sf_device_count += ptr->lun_list_len - 1;
3704                         }
3705 
3706                         mutex_exit(&sf->sf_mutex);
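                             /*
                              * walk the LUN list, creating a target for each
                              * LUN != 0 and starting an INQUIRY to each one
                              */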
3707                         for (i = 0; i < ptr->lun_list_len && privp->lip_cnt ==
3708                             sf->sf_lip_cnt; i++) {
3709                                 struct sf_els_hdr *nprivp;
3710                                 struct fcal_packet *nfpkt;
3711 
3712                                 /* LUN 0 is already in `target' */
3713                                 if (ptr->lun[i] != 0) {
3714                                         target = sf_create_target(sf,
3715                                             privp, tid, ptr->lun[i]);
3716                                 }
3717                                 nprivp = NULL;
3718                                 nfpkt = NULL;
3719                                 if (target) {
3720                                         nfpkt = sf_els_alloc(sf,
3721                                             target->sft_al_pa,
3722                                             sizeof (struct sf_els_hdr),
3723                                             sizeof (union sf_els_cmd),
3724                                             sizeof (union sf_els_rsp),
3725                                             (caddr_t *)&nprivp,
3726                                             (caddr_t *)&rsp);
3727                                         if (nprivp)
3728                                                 nprivp->lip_cnt =
3729                                                     privp->lip_cnt;
3730                                 }
3731                                 if (nfpkt && nprivp &&
3732                                     (sf_do_inquiry(sf, nprivp, target) ==
3733                                     0)) {
3734                                         mutex_enter(&sf->sf_mutex);
3735                                         if (sf->sf_lip_cnt == privp->
3736                                             lip_cnt) {
3737                                                 sf->sf_device_count--;
3738                                         }
3739                                         sf_offline_target(sf, target);
3740                                         mutex_exit(&sf->sf_mutex);
3741                                 }
3742                         }
3743                         sf_els_free(fpkt);
3744                         return;
3745                 } else {
3746                         SF_DEBUG(1, (sf, CE_CONT,
3747                             "!REPORTLUN al_pa %x fcp failure, "
3748                             "fcp_rsp_code %x scsi status %x\n",
3749                             privp->dest_nport_id, bep->rsp_code,
3750                             rsp ? rsp->fcp_u.fcp_status.scsi_status : 0));
3751                         goto fail;
3752                 }
3753         }
3754         if (rsp && ((rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) ||
3755             (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL))) {
3756                 delayed_retry = 1;
3757         }
3758 
3759         if (++(privp->retries) < sf_els_retries ||
3760             (delayed_retry && privp->retries < SF_BSY_RETRIES)) {
3761 /* XXXXXX The following is here to handle broken targets -- remove it later */
3762 retry:
3763 /* XXXXXX */
3764                 if (delayed_retry) {
3765                         privp->retries--;
3766                         privp->timeout = sf_watchdog_time + SF_BSY_TIMEOUT;
3767                         privp->delayed_retry = 1;
3768                 } else {
3769                         privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
3770                 }
3771 
3772                 privp->prev = NULL;
3773                 mutex_enter(&sf->sf_mutex);
3774                 if (privp->lip_cnt == sf->sf_lip_cnt) {
3775                         if (!delayed_retry)
3776                                 SF_DEBUG(1, (sf, CE_WARN,
3777                                     "!REPORTLUN to al_pa %x failed, retrying\n",
3778                                     privp->dest_nport_id));
3779                         privp->next = sf->sf_els_list;
3780                         if (sf->sf_els_list != NULL)
3781                                 sf->sf_els_list->prev = privp;
3782                         sf->sf_els_list = privp;
3783                         mutex_exit(&sf->sf_mutex);
3784                         if (!delayed_retry && soc_transport(sf->sf_sochandle,
3785                             fpkt, FCAL_NOSLEEP, CQ_REQUEST_1) !=
3786                             FCAL_TRANSPORT_SUCCESS) {
3787                                 mutex_enter(&sf->sf_mutex);
3788                                 if (privp->prev)
3789                                         privp->prev->next = privp->next;
3790                                 if (privp->next)
3791                                         privp->next->prev = privp->prev;
3792                                 if (sf->sf_els_list == privp)
3793                                         sf->sf_els_list = privp->next;
3794                                 mutex_exit(&sf->sf_mutex);
3795                                 goto fail;
3796                         } else
3797                                 return;
3798                 } else {
3799                         mutex_exit(&sf->sf_mutex);
3800                 }
3801         } else {
3802 fail:
3803 
3804                 /* REPORT_LUN failed -- try inquiry */
3805                 if (sf_do_inquiry(sf, privp, target) != 0) {
3806                         return;
3807                 } else {
3808                         free_pkt = 0;
3809                 }
3810                 mutex_enter(&sf->sf_mutex);
3811                 if (sf->sf_lip_cnt == privp->lip_cnt) {
3812                         sf_log(sf, CE_WARN,
3813                             "!REPORTLUN to target 0x%x failed\n",
3814                             sf_alpa_to_switch[privp->dest_nport_id]);
3815                         sf_offline_target(sf, target);
3816                         sf->sf_device_count--;
3817                         ASSERT(sf->sf_device_count >= 0);
3818                         if (sf->sf_device_count == 0)
3819                                 sf_finish_init(sf, privp->lip_cnt);
3820                 }
3821                 mutex_exit(&sf->sf_mutex);
3822         }
3823         if (free_pkt) {
3824                 sf_els_free(fpkt);
3825         }
3826 }
3827 
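     /*
      * allocate a DMA buffer and send a SCSI INQUIRY (as an FCP command)
      * to the given target: returns FALSE if the command cannot be set
      * up, otherwise the return value of sf_els_transport()
      */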
3828 static int
3829 sf_do_inquiry(struct sf *sf, struct sf_els_hdr *privp,
3830     struct sf_target *target)
3831 {
3832         struct  fcal_packet     *fpkt = privp->fpkt;
3833         ddi_dma_cookie_t        pcookie;
3834         ddi_dma_handle_t        inq_dma_handle = NULL;
3835         ddi_acc_handle_t        inq_acc_handle;
3836         uint_t                  ccount;
3837         size_t                  real_size;
3838         caddr_t                 inq_buf = NULL;
3839         int                     handle_bound = FALSE;
3840         fc_frame_header_t *hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
3841         struct fcp_cmd          *inq = (struct fcp_cmd *)privp->cmd;
3842         char                    *msg = "Transport";
3843 
3844 
3845         if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3846             DDI_DMA_DONTWAIT, NULL, &inq_dma_handle) != DDI_SUCCESS) {
3847                 msg = "ddi_dma_alloc_handle()";
3848                 goto fail;
3849         }
3850 
3851         if (ddi_dma_mem_alloc(inq_dma_handle, SUN_INQSIZE,
3852             sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3853             DDI_DMA_DONTWAIT, NULL, &inq_buf,
3854             &real_size, &inq_acc_handle) != DDI_SUCCESS) {
3855                 msg = "ddi_dma_mem_alloc()";
3856                 goto fail;
3857         }
3858 
3859         if (real_size < SUN_INQSIZE) {
3860                 msg = "DMA mem < inquiry size";
3861                 goto fail;
3862         }
3863 
3864         if (ddi_dma_addr_bind_handle(inq_dma_handle, NULL,
3865             inq_buf, real_size, DDI_DMA_READ | DDI_DMA_CONSISTENT,
3866             DDI_DMA_DONTWAIT, NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) {
3867                 msg = "ddi_dma_addr_bind_handle()";
3868                 goto fail;
3869         }
3870         handle_bound = TRUE;
3871 
3872         if (ccount != 1) {
3873                 msg = "ccount != 1";
3874                 goto fail;
3875         }
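             /* hand the DMA resources to this command and build the FCP INQUIRY */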
3876         privp->els_code = 0;                 /* not an ELS command */
3877         privp->target = target;
3878         privp->data_dma_handle = inq_dma_handle;
3879         privp->data_acc_handle = inq_acc_handle;
3880         privp->data_buf = inq_buf;
3881         fpkt->fcal_pkt_comp = sf_inq_callback;
3882         fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3;
3883         fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_IO_READ;
3884         fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
3885             sizeof (struct fcp_cmd);
3886         fpkt->fcal_socal_request.sr_dataseg[2].fc_base =
3887             (uint32_t)pcookie.dmac_address;
3888         fpkt->fcal_socal_request.sr_dataseg[2].fc_count = pcookie.dmac_size;
3889         fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = pcookie.dmac_size;
3890         hp->r_ctl = R_CTL_COMMAND;
3891         hp->type = TYPE_SCSI_FCP;
3892         bzero((caddr_t)inq, sizeof (struct fcp_cmd));
3893         ((union scsi_cdb *)inq->fcp_cdb)->scc_cmd = SCMD_INQUIRY;
3894         ((union scsi_cdb *)inq->fcp_cdb)->g0_count0 = SUN_INQSIZE;
3895         bcopy((caddr_t)&target->sft_lun.b, (caddr_t)&inq->fcp_ent_addr,
3896             FCP_LUN_SIZE);
3897         inq->fcp_cntl.cntl_read_data = 1;
3898         inq->fcp_cntl.cntl_write_data = 0;
3899         inq->fcp_data_len = pcookie.dmac_size;
3900         inq->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
3901 
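             /* sync the data buffer for the device, set a timeout, and send it */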
3902         (void) ddi_dma_sync(inq_dma_handle, (off_t)0, (size_t)0,
3903             DDI_DMA_SYNC_FORDEV);
3904         privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
3905         SF_DEBUG(5, (sf, CE_WARN,
3906             "!Sending INQUIRY to al_pa %x lun %" PRIx64 "\n",
3907             privp->dest_nport_id,
3908             SCSA_LUN(target)));
3909         return (sf_els_transport(sf, privp));
3910 
3911 fail:
3912         sf_log(sf, CE_WARN,
3913             "%s failure for INQUIRY to target 0x%x\n",
3914             msg, sf_alpa_to_switch[privp->dest_nport_id]);
3915         sf_els_free(fpkt);
3916         if (inq_dma_handle != NULL) {
3917                 if (handle_bound) {
3918                         (void) ddi_dma_unbind_handle(inq_dma_handle);
3919                 }
3920                 ddi_dma_free_handle(&inq_dma_handle);
3921         }
3922         if (inq_buf != NULL) {
3923                 ddi_dma_mem_free(&inq_acc_handle);
3924         }
3925         return (FALSE);
3926 }
3927 
3928 
3929 /*
3930  * called as the pkt_comp routine for INQ packets
3931  */
3932 static void
3933 sf_inq_callback(struct fcal_packet *fpkt)
3934 {
3935         struct sf_els_hdr *privp = (struct sf_els_hdr *)fpkt->
3936             fcal_pkt_private;
3937         struct scsi_inquiry *prt = (struct scsi_inquiry *)privp->data_buf;
3938         struct sf *sf = privp->sf;
3939         struct sf *tsf;
3940         struct sf_target *target = privp->target;
3941         struct fcp_rsp *rsp;
3942         int delayed_retry = FALSE;
3943         short   ncmds;
3944 
3945 
3946         mutex_enter(&sf->sf_mutex);
3947         /* use as temporary state variable */
3948         if (privp->timeout == SF_INVALID_TIMEOUT) {
3949                 mutex_exit(&sf->sf_mutex);
3950                 return;
3951         }
3952         if (privp->prev != NULL) {
3953                 privp->prev->next = privp->next;
3954         }
3955         if (privp->next != NULL) {
3956                 privp->next->prev = privp->prev;
3957         }
3958         if (sf->sf_els_list == privp) {
3959                 sf->sf_els_list = privp->next;
3960         }
3961         privp->prev = privp->next = NULL;
3962         mutex_exit(&sf->sf_mutex);
3963         ncmds = fpkt->fcal_ncmds;
3964         ASSERT(ncmds >= 0);
3965         mutex_enter(&sf->sf_cmd_mutex);
3966         sf->sf_ncmds = ncmds;
3967         mutex_exit(&sf->sf_cmd_mutex);
3968 
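             /* if the transport delivered the command, examine the FCP response */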
3969         if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
3970 
3971                 (void) ddi_dma_sync(privp->rsp_dma_handle, (off_t)0,
3972                     (size_t)0, DDI_DMA_SYNC_FORKERNEL);
3973 
3974                 rsp = (struct fcp_rsp *)privp->rsp;
3975                 SF_DEBUG(2, (sf, CE_CONT,
3976                     "!INQUIRY to al_pa %x scsi status %x",
3977                     privp->dest_nport_id, rsp->fcp_u.fcp_status.scsi_status));
3978 
3979                 if ((rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD) &&
3980                     !rsp->fcp_u.fcp_status.resid_over &&
3981                     (!rsp->fcp_u.fcp_status.resid_under ||
3982                     ((SUN_INQSIZE - rsp->fcp_resid) >= SUN_MIN_INQLEN))) {
3983                         struct fcp_rsp_info *bep;
3984 
3985                         bep = (struct fcp_rsp_info *)(&rsp->
3986                             fcp_response_len + 1);
3987 
3988                         if (!rsp->fcp_u.fcp_status.rsp_len_set ||
3989                             (bep->rsp_code == FCP_NO_FAILURE)) {
3990 
3991                                 SF_DEBUG(2, (sf, CE_CONT,
3992                                     "!INQUIRY to al_pa %x lun %" PRIx64
3993                                     " succeeded\n",
3994                                     privp->dest_nport_id, SCSA_LUN(target)));
3995 
3996                                 (void) ddi_dma_sync(privp->data_dma_handle,
3997                                     (off_t)0, (size_t)0,
3998                                     DDI_DMA_SYNC_FORKERNEL);
3999 
4000                                 mutex_enter(&sf->sf_mutex);
4001 
4002                                 if (sf->sf_lip_cnt == privp->lip_cnt) {
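                                              /* no new LIP; save the inquiry data */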
4003                                         mutex_enter(&target->sft_mutex);
4004                                         target->sft_device_type =
4005                                             prt->inq_dtype;
4006                                         bcopy(prt, &target->sft_inq,
4007                                             sizeof (*prt));
4008                                         mutex_exit(&target->sft_mutex);
4009                                         sf->sf_device_count--;
4010                                         ASSERT(sf->sf_device_count >= 0);
4011                                         if (sf->sf_device_count == 0) {
4012                                                 sf_finish_init(sf,
4013                                                     privp->lip_cnt);
4014                                         }
4015                                 }
4016                                 mutex_exit(&sf->sf_mutex);
4017                                 sf_els_free(fpkt);
4018                                 return;
4019                         }
4020                 } else if ((rsp->fcp_u.fcp_status.scsi_status ==
4021                     STATUS_BUSY) ||
4022                     (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL) ||
4023                     (rsp->fcp_u.fcp_status.scsi_status == STATUS_CHECK)) {
4024                         delayed_retry = TRUE;
4025                 }
4026         } else {
4027                 SF_DEBUG(2, (sf, CE_CONT, "!INQUIRY to al_pa %x fc status %x",
4028                     privp->dest_nport_id, fpkt->fcal_pkt_status));
4029         }
4030 
4031         if (++(privp->retries) < sf_els_retries ||
4032             (delayed_retry && privp->retries < SF_BSY_RETRIES)) {
4033                 if (fpkt->fcal_pkt_status == FCAL_STATUS_MAX_XCHG_EXCEEDED)  {
4034                         tsf = sf->sf_sibling;
4035                         if (tsf != NULL) {
4036                                 mutex_enter(&tsf->sf_cmd_mutex);
4037                                 tsf->sf_flag = 1;
4038                                 tsf->sf_throttle = SF_DECR_DELTA;
4039                                 mutex_exit(&tsf->sf_cmd_mutex);
4040                         }
4041                         delayed_retry = 1;
4042                 }
4043                 if (delayed_retry) {
4044                         privp->retries--;
4045                         privp->timeout = sf_watchdog_time + SF_BSY_TIMEOUT;
4046                         privp->delayed_retry = TRUE;
4047                 } else {
4048                         privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
4049                 }
4050 
4051                 privp->prev = NULL;
4052                 mutex_enter(&sf->sf_mutex);
4053                 if (privp->lip_cnt == sf->sf_lip_cnt) {
4054                         if (!delayed_retry) {
4055                                 SF_DEBUG(1, (sf, CE_WARN,
4056                                     "INQUIRY to al_pa %x failed, retrying",
4057                                     privp->dest_nport_id));
4058                         }
4059                         privp->next = sf->sf_els_list;
4060                         if (sf->sf_els_list != NULL) {
4061                                 sf->sf_els_list->prev = privp;
4062                         }
4063                         sf->sf_els_list = privp;
4064                         mutex_exit(&sf->sf_mutex);
4065                         /* if not delayed call transport to send a pkt */
4066                         if (!delayed_retry &&
4067                             (soc_transport(sf->sf_sochandle, fpkt,
4068                             FCAL_NOSLEEP, CQ_REQUEST_1) !=
4069                             FCAL_TRANSPORT_SUCCESS)) {
4070                                 mutex_enter(&sf->sf_mutex);
4071                                 if (privp->prev != NULL) {
4072                                         privp->prev->next = privp->next;
4073                                 }
4074                                 if (privp->next != NULL) {
4075                                         privp->next->prev = privp->prev;
4076                                 }
4077                                 if (sf->sf_els_list == privp) {
4078                                         sf->sf_els_list = privp->next;
4079                                 }
4080                                 mutex_exit(&sf->sf_mutex);
4081                                 goto fail;
4082                         }
4083                         return;
4084                 }
4085                 mutex_exit(&sf->sf_mutex);
4086         } else {
4087 fail:
4088                 mutex_enter(&sf->sf_mutex);
4089                 if (sf->sf_lip_cnt == privp->lip_cnt) {
4090                         sf_offline_target(sf, target);
4091                         sf_log(sf, CE_NOTE,
4092                             "INQUIRY to target 0x%x lun %" PRIx64 " failed. "
4093                             "Retry Count: %d\n",
4094                             sf_alpa_to_switch[privp->dest_nport_id],
4095                             SCSA_LUN(target),
4096                             privp->retries);
4097                         sf->sf_device_count--;
4098                         ASSERT(sf->sf_device_count >= 0);
4099                         if (sf->sf_device_count == 0) {
4100                                 sf_finish_init(sf, privp->lip_cnt);
4101                         }
4102                 }
4103                 mutex_exit(&sf->sf_mutex);
4104         }
4105         sf_els_free(fpkt);
4106 }
4107 
4108 
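     /*
      * called with sf_mutex held once the device count for this LIP
      * reaches zero: scan all WWN hash queues, take marked targets
      * offline, create/online devinfo nodes for the rest, and finally
      * mark the HBA online
      */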
4109 static void
4110 sf_finish_init(struct sf *sf, int lip_cnt)
4111 {
4112         int                     i;              /* loop index */
4113         int                     cflag;
4114         struct sf_target        *target;        /* current target */
4115         dev_info_t              *dip;
4116         struct sf_hp_elem       *elem;          /* hotplug element created */
4117 
4118         SF_DEBUG(1, (sf, CE_WARN, "!sf_finish_init\n"));
4119         ASSERT(mutex_owned(&sf->sf_mutex));
4120 
4121         /* scan all hash queues */
4122         for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
4123                 target = sf->sf_wwn_lists[i];
4124                 while (target != NULL) {
4125                         mutex_enter(&target->sft_mutex);
4126 
4127                         /* see if target is not offline */
4128                         if ((target->sft_state & SF_TARGET_OFFLINE)) {
4129                                 /*
4130                                  * target already offline
4131                                  */
4132                                 mutex_exit(&target->sft_mutex);
4133                                 goto next_entry;
4134                         }
4135 
4136                         /*
4137                          * target is not already offline -- see if it has
4138                          * already been marked as ready to go offline
4139                          */
4140                         if (target->sft_state & SF_TARGET_MARK) {
4141                                 /*
4142                                  * target already marked, so take it offline
4143                                  */
4144                                 mutex_exit(&target->sft_mutex);
4145                                 sf_offline_target(sf, target);
4146                                 goto next_entry;
4147                         }
4148 
4149                         /* clear target busy flag */
4150                         target->sft_state &= ~SF_TARGET_BUSY;
4151 
4152                         /* is target init not yet done ?? */
4153                         cflag = !(target->sft_state & SF_TARGET_INIT_DONE);
4154 
4155                         /* get pointer to target dip */
4156                         dip = target->sft_dip;
4157 
4158                         mutex_exit(&target->sft_mutex);
4159                         mutex_exit(&sf->sf_mutex);
4160 
4161                         if (cflag && (dip == NULL)) {
4162                                 /*
4163                                  * target init not yet done &&
4164                                  * devinfo not yet created
4165                                  */
4166                                 sf_create_devinfo(sf, target, lip_cnt);
4167                                 mutex_enter(&sf->sf_mutex);
4168                                 goto next_entry;
4169                         }
4170 
4171                         /*
4172                          * target init already done || devinfo already created
4173                          */
4174                         ASSERT(dip != NULL);
4175                         if (!sf_create_props(dip, target, lip_cnt)) {
4176                                 /* a problem creating properties */
4177                                 mutex_enter(&sf->sf_mutex);
4178                                 goto next_entry;
4179                         }
4180 
4181                         /* create a new element for the hotplug list */
4182                         if ((elem = kmem_zalloc(sizeof (struct sf_hp_elem),
4183                             KM_NOSLEEP)) != NULL) {
4184 
4185                                 /* fill in the new element */
4186                                 elem->dip = dip;
4187                                 elem->target = target;
4188                                 elem->what = SF_ONLINE;
4189 
4190                                 /* add the new element into the hotplug list */
4191                                 mutex_enter(&sf->sf_hp_daemon_mutex);
4192                                 if (sf->sf_hp_elem_tail != NULL) {
4193                                         sf->sf_hp_elem_tail->next = elem;
4194                                         sf->sf_hp_elem_tail = elem;
4195                                 } else {
4196                                         /* this is the first element in list */
4197                                         sf->sf_hp_elem_head =
4198                                             sf->sf_hp_elem_tail =
4199                                             elem;
4200                                 }
4201                                 cv_signal(&sf->sf_hp_daemon_cv);
4202                                 mutex_exit(&sf->sf_hp_daemon_mutex);
4203                         } else {
4204                                 /* could not allocate memory for element ?? */
4205                                 (void) ndi_devi_online_async(dip, 0);
4206                         }
4207 
4208                         mutex_enter(&sf->sf_mutex);
4209 
4210 next_entry:
4211                         /* ensure no new LIPs have occurred */
4212                         if (sf->sf_lip_cnt != lip_cnt) {
4213                                 return;
4214                         }
4215                         target = target->sft_next;
4216                 }
4217 
4218                 /* done scanning all targets in this queue */
4219         }
4220 
4221         /* done with all hash queues */
4222 
4223         sf->sf_state = SF_STATE_ONLINE;
4224         sf->sf_online_timer = 0;
4225 }
4226 
4227 
4228 /*
4229  * create devinfo node
4230  */
4231 static void
4232 sf_create_devinfo(struct sf *sf, struct sf_target *target, int lip_cnt)
4233 {
4234         dev_info_t              *cdip = NULL;
4235         char                    *nname = NULL;
4236         char                    **compatible = NULL;
4237         int                     ncompatible;
4238         struct scsi_inquiry     *inq = &target->sft_inq;
4239         char                    *scsi_binding_set;
4240 
4241         /* get the 'scsi-binding-set' property */
4242         if (ddi_prop_lookup_string(DDI_DEV_T_ANY, sf->sf_dip,
4243             DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
4244             &scsi_binding_set) != DDI_PROP_SUCCESS)
4245                 scsi_binding_set = NULL;
4246 
4247         /* determine the node name and compatible */
4248         scsi_hba_nodename_compatible_get(inq, scsi_binding_set,
4249             inq->inq_dtype, NULL, &nname, &compatible, &ncompatible);
4250         if (scsi_binding_set)
4251                 ddi_prop_free(scsi_binding_set);
4252 
4253         /* if nodename can't be determined then print a message and skip it */
4254         if (nname == NULL) {
4255 #ifndef RAID_LUNS
4256                 sf_log(sf, CE_WARN, "%s%d: no driver for device "
4257                     "@w%02x%02x%02x%02x%02x%02x%02x%02x,%x\n"
4258                     "    compatible: %s",
4259                     ddi_driver_name(sf->sf_dip), ddi_get_instance(sf->sf_dip),
4260                     target->sft_port_wwn[0], target->sft_port_wwn[1],
4261                     target->sft_port_wwn[2], target->sft_port_wwn[3],
4262                     target->sft_port_wwn[4], target->sft_port_wwn[5],
4263                     target->sft_port_wwn[6], target->sft_port_wwn[7],
4264                     target->sft_lun.l, *compatible);
4265 #else
4266                 sf_log(sf, CE_WARN, "%s%d: no driver for device "
4267                     "@w%02x%02x%02x%02x%02x%02x%02x%02x,%x\n"
4268                     "    compatible: %s",
4269                     ddi_driver_name(sf->sf_dip), ddi_get_instance(sf->sf_dip),
4270                     target->sft_port_wwn[0], target->sft_port_wwn[1],
4271                     target->sft_port_wwn[2], target->sft_port_wwn[3],
4272                     target->sft_port_wwn[4], target->sft_port_wwn[5],
4273                     target->sft_port_wwn[6], target->sft_port_wwn[7],
4274                     target->sft_raid_lun, *compatible);
4275 #endif
4276                 goto fail;
4277         }
4278 
4279         /* allocate the node */
4280         if (ndi_devi_alloc(sf->sf_dip, nname,
4281             DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
4282                 goto fail;
4283         }
4284 
4285         /* decorate the node with compatible */
4286         if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
4287             "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
4288                 goto fail;
4289         }
4290 
4291         /* add addressing properties to the node */
4292         if (sf_create_props(cdip, target, lip_cnt) != 1) {
4293                 goto fail;
4294         }
4295 
4296         mutex_enter(&target->sft_mutex);
4297         if (target->sft_dip != NULL) {
4298                 mutex_exit(&target->sft_mutex);
4299                 goto fail;
4300         }
4301         target->sft_dip = cdip;
4302         mutex_exit(&target->sft_mutex);
4303 
4304         if (ndi_devi_online_async(cdip, 0) != DDI_SUCCESS) {
4305                 goto fail;
4306         }
4307 
4308         scsi_hba_nodename_compatible_free(nname, compatible);
4309         return;
4310 
4311 fail:
4312         scsi_hba_nodename_compatible_free(nname, compatible);
4313         if (cdip != NULL) {
4314                 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP);
4315                 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP);
4316                 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, LIP_CNT_PROP);
4317                 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, TARGET_PROP);
4318                 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, LUN_PROP);
4319                 if (ndi_devi_free(cdip) != NDI_SUCCESS) {
4320                         sf_log(sf, CE_WARN, "ndi_devi_free failed\n");
4321                 } else {
4322                         mutex_enter(&target->sft_mutex);
4323                         if (cdip == target->sft_dip) {
4324                                 target->sft_dip = NULL;
4325                         }
4326                         mutex_exit(&target->sft_mutex);
4327                 }
4328         }
4329 }
4330 
4331 /*
4332  * create required properties, returning TRUE iff we succeed, else
4333  * returning FALSE
4334  */
4335 static int
4336 sf_create_props(dev_info_t *cdip, struct sf_target *target, int lip_cnt)
4337 {
4338         int tgt_id = sf_alpa_to_switch[target->sft_al_pa];
4339 
4340 
4341         if (ndi_prop_update_byte_array(DDI_DEV_T_NONE,
4342             cdip, NODE_WWN_PROP, target->sft_node_wwn, FC_WWN_SIZE) !=
4343             DDI_PROP_SUCCESS) {
4344                 return (FALSE);
4345         }
4346 
4347         if (ndi_prop_update_byte_array(DDI_DEV_T_NONE,
4348             cdip, PORT_WWN_PROP, target->sft_port_wwn, FC_WWN_SIZE) !=
4349             DDI_PROP_SUCCESS) {
4350                 return (FALSE);
4351         }
4352 
4353         if (ndi_prop_update_int(DDI_DEV_T_NONE,
4354             cdip, LIP_CNT_PROP, lip_cnt) != DDI_PROP_SUCCESS) {
4355                 return (FALSE);
4356         }
4357 
4358         if (ndi_prop_update_int(DDI_DEV_T_NONE,
4359             cdip, TARGET_PROP, tgt_id) != DDI_PROP_SUCCESS) {
4360                 return (FALSE);
4361         }
4362 
4363 #ifndef RAID_LUNS
4364         if (ndi_prop_update_int(DDI_DEV_T_NONE,
4365             cdip, LUN_PROP, target->sft_lun.l) != DDI_PROP_SUCCESS) {
4366                 return (FALSE);
4367         }
4368 #else
4369         if (ndi_prop_update_int(DDI_DEV_T_NONE,
4370             cdip, LUN_PROP, target->sft_raid_lun) != DDI_PROP_SUCCESS) {
4371                 return (FALSE);
4372         }
4373 #endif
4374 
4375         return (TRUE);
4376 }
4377 
4378 
4379 /*
4380  * called by the transport to offline a target
4381  */
4382 /* ARGSUSED */
4383 static void
4384 sf_offline_target(struct sf *sf, struct sf_target *target)
4385 {
4386         dev_info_t *dip;
4387         struct sf_target *next_target = NULL;
4388         struct sf_hp_elem       *elem;
4389 
4390         ASSERT(mutex_owned(&sf->sf_mutex));
4391 
4392         if (sf_core && (sf_core & SF_CORE_OFFLINE_TARGET)) {
4393                 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
4394                 sf_core = 0;
4395         }
4396 
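             /*
              * offline this target and, if it is LUN 0, every other LUN
              * chained behind it
              */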
4397         while (target != NULL) {
4398                 sf_log(sf, CE_NOTE,
4399                     "!target 0x%x al_pa 0x%x lun %" PRIx64 " offlined\n",
4400                     sf_alpa_to_switch[target->sft_al_pa],
4401                     target->sft_al_pa, SCSA_LUN(target));
4402                 mutex_enter(&target->sft_mutex);
4403                 target->sft_state &= ~(SF_TARGET_BUSY|SF_TARGET_MARK);
4404                 target->sft_state |= SF_TARGET_OFFLINE;
4405                 mutex_exit(&target->sft_mutex);
4406                 mutex_exit(&sf->sf_mutex);
4407 
4408                 /* XXXX if this is LUN 0, offline all other LUNs */
4409                 if (next_target || target->sft_lun.l == 0)
4410                         next_target = target->sft_next_lun;
4411 
4412                 /* abort all cmds for this target */
4413                 sf_abort_all(sf, target, FALSE, sf->sf_lip_cnt, FALSE);
4414 
4415                 mutex_enter(&sf->sf_mutex);
4416                 mutex_enter(&target->sft_mutex);
4417                 if (target->sft_state & SF_TARGET_INIT_DONE) {
4418                         dip = target->sft_dip;
4419                         mutex_exit(&target->sft_mutex);
4420                         mutex_exit(&sf->sf_mutex);
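                             /*
                              * remove the target property and run any
                              * FCAL_REMOVE_EVENT callbacks for this node
                              */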
4421                         (void) ndi_prop_remove(DDI_DEV_T_NONE, dip,
4422                             TARGET_PROP);
4423                         (void) ndi_event_retrieve_cookie(sf->sf_event_hdl,
4424                             dip, FCAL_REMOVE_EVENT, &sf_remove_eid,
4425                             NDI_EVENT_NOPASS);
4426                         (void) ndi_event_run_callbacks(sf->sf_event_hdl,
4427                             target->sft_dip, sf_remove_eid, NULL);
4428 
4429                         elem = kmem_zalloc(sizeof (struct sf_hp_elem),
4430                             KM_NOSLEEP);
4431                         if (elem != NULL) {
4432                                 elem->dip = dip;
4433                                 elem->target = target;
4434                                 elem->what = SF_OFFLINE;
4435                                 mutex_enter(&sf->sf_hp_daemon_mutex);
4436                                 if (sf->sf_hp_elem_tail != NULL) {
4437                                         sf->sf_hp_elem_tail->next = elem;
4438                                         sf->sf_hp_elem_tail = elem;
4439                                 } else {
4440                                         sf->sf_hp_elem_head =
4441                                             sf->sf_hp_elem_tail =
4442                                             elem;
4443                                 }
4444                                 cv_signal(&sf->sf_hp_daemon_cv);
4445                                 mutex_exit(&sf->sf_hp_daemon_mutex);
4446                         } else {
4447                                 /* don't do NDI_DEVI_REMOVE for now */
4448                                 if (ndi_devi_offline(dip, 0) != NDI_SUCCESS) {
4449                                         SF_DEBUG(1, (sf, CE_WARN,
4450                                             "target %x lun %" PRIx64 ", "
4451                                             "device offline failed",
4452                                             sf_alpa_to_switch[target->
4453                                             sft_al_pa],
4454                                             SCSA_LUN(target)));
4455                                 } else {
4456                                         SF_DEBUG(1, (sf, CE_NOTE,
4457                                             "target %x, lun %" PRIx64 ", "
4458                                             "device offline succeeded\n",
4459                                             sf_alpa_to_switch[target->
4460                                             sft_al_pa],
4461                                             SCSA_LUN(target)));
4462                                 }
4463                         }
4464                         mutex_enter(&sf->sf_mutex);
4465                 } else {
4466                         mutex_exit(&target->sft_mutex);
4467                 }
4468                 target = next_target;
4469         }
4470 }
4471 
4472 
4473 /*
4474  * routine to get/set a capability
4475  *
4476  * returning:
4477  *      1 (TRUE)        boolean capability is true (on get)
4478  *      0 (FALSE)       invalid capability, can't set capability (on set),
4479  *                      or boolean capability is false (on get)
4480  *      -1 (UNDEFINED)  can't find capability (SCSA) or unsupported capability
4481  *      3               when getting SCSI version number
4482  *      AL_PA           when getting port initiator ID
4483  */
4484 static int
4485 sf_commoncap(struct scsi_address *ap, char *cap,
4486     int val, int tgtonly, int doset)
4487 {
4488         struct sf *sf = ADDR2SF(ap);
4489         int cidx;
4490         int rval = FALSE;
4491 
4492 
4493         if (cap == NULL) {
4494                 SF_DEBUG(3, (sf, CE_WARN, "sf_commoncap: invalid arg"));
4495                 return (rval);
4496         }
4497 
4498         /* get index of capability string */
4499         if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
4500                 /* can't find capability */
4501                 return (UNDEFINED);
4502         }
4503 
4504         if (doset) {
4505                 /*
4506                  * Process setcap request.
4507                  */
4508 
4509                 /*
4510                  * At present, we can only set binary (0/1) values
4511                  */
4512                 switch (cidx) {
4513                 case SCSI_CAP_ARQ:      /* can't set this capability */
4514                         break;
4515                 default:
4516                         SF_DEBUG(3, (sf, CE_WARN,
4517                             "sf_setcap: unsupported %d", cidx));
4518                         rval = UNDEFINED;
4519                         break;
4520                 }
4521 
4522                 SF_DEBUG(4, (sf, CE_NOTE,
4523                     "set cap: cap=%s,val=0x%x,tgtonly=0x%x"
4524                     ",doset=0x%x,rval=%d\n",
4525                     cap, val, tgtonly, doset, rval));
4526 
4527         } else {
4528                 /*
4529                  * Process getcap request.
4530                  */
4531                 switch (cidx) {
4532                 case SCSI_CAP_DMA_MAX:
4533                         break;          /* don't have this capability */
4534                 case SCSI_CAP_INITIATOR_ID:
4535                         rval = sf->sf_al_pa;
4536                         break;
4537                 case SCSI_CAP_ARQ:
4538                         rval = TRUE;    /* do have this capability */
4539                         break;
4540                 case SCSI_CAP_RESET_NOTIFICATION:
4541                 case SCSI_CAP_TAGGED_QING:
4542                         rval = TRUE;    /* do have this capability */
4543                         break;
4544                 case SCSI_CAP_SCSI_VERSION:
4545                         rval = 3;
4546                         break;
4547                 case SCSI_CAP_INTERCONNECT_TYPE:
4548                         rval = INTERCONNECT_FIBRE;
4549                         break;
4550                 default:
4551                         SF_DEBUG(4, (sf, CE_WARN,
4552                             "sf_scsi_getcap: unsupported"));
4553                         rval = UNDEFINED;
4554                         break;
4555                 }
4556                 SF_DEBUG(4, (sf, CE_NOTE,
4557                     "get cap: cap=%s,val=0x%x,tgtonly=0x%x,"
4558                     "doset=0x%x,rval=%d\n",
4559                     cap, val, tgtonly, doset, rval));
4560         }
4561 
4562         return (rval);
4563 }
4564 
4565 
4566 /*
4567  * called by the transport to get a capability
4568  */
4569 static int
4570 sf_getcap(struct scsi_address *ap, char *cap, int whom)
4571 {
4572         return (sf_commoncap(ap, cap, 0, whom, FALSE));
4573 }
4574 
4575 
4576 /*
4577  * called by the transport to set a capability
4578  */
4579 static int
4580 sf_setcap(struct scsi_address *ap, char *cap, int value, int whom)
4581 {
4582         return (sf_commoncap(ap, cap, value, whom, TRUE));
4583 }
4584 
4585 
4586 /*
4587  * called by the transport to abort a target
4588  */
4589 static int
4590 sf_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
4591 {
4592         struct sf *sf = ADDR2SF(ap);
4593         struct sf_target *target = ADDR2TARGET(ap);
4594         struct sf_pkt *cmd, *ncmd, *pcmd;
4595         struct fcal_packet *fpkt;
4596         int     rval = 0, t, my_rval = FALSE;
4597         int     old_target_state;
4598         int     lip_cnt;
4599         int     tgt_id;
4600         fc_frame_header_t       *hp;
4601         int     deferred_destroy;
4602 
4603         deferred_destroy = 0;
4604 
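             /*
              * a non-NULL pkt means abort just that command; a NULL pkt
              * means abort the whole target via an FCP Abort Task Set
              */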
4605         if (pkt != NULL) {
4606                 cmd = PKT2CMD(pkt);
4607                 fpkt = cmd->cmd_fp_pkt;
4608                 SF_DEBUG(2, (sf, CE_NOTE, "sf_abort packet %p\n",
4609                     (void *)fpkt));
4610                 pcmd = NULL;
4611                 mutex_enter(&sf->sf_cmd_mutex);
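                     /* see if the command is still on the overflow queue */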
4612                 ncmd = sf->sf_pkt_head;
4613                 while (ncmd != NULL) {
4614                         if (ncmd == cmd) {
4615                                 if (pcmd != NULL) {
4616                                         pcmd->cmd_next = cmd->cmd_next;
4617                                 } else {
4618                                         sf->sf_pkt_head = cmd->cmd_next;
4619                                 }
4620                                 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
4621                                 cmd->cmd_state = SF_STATE_IDLE;
4622                                 pkt->pkt_reason = CMD_ABORTED;
4623                                 pkt->pkt_statistics |= STAT_ABORTED;
4624                                 my_rval = TRUE;
4625                                 break;
4626                         } else {
4627                                 pcmd = ncmd;
4628                                 ncmd = ncmd->cmd_next;
4629                         }
4630                 }
4631                 mutex_exit(&sf->sf_cmd_mutex);
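                     /*
                      * not found on the queue -- if the command has been
                      * issued, ask the SOC+ to abort it
                      */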
4632                 if (ncmd == NULL) {
4633                         mutex_enter(&cmd->cmd_abort_mutex);
4634                         if (cmd->cmd_state == SF_STATE_ISSUED) {
4635                                 cmd->cmd_state = SF_STATE_ABORTING;
4636                                 cmd->cmd_timeout = sf_watchdog_time + 20;
4637                                 mutex_exit(&cmd->cmd_abort_mutex);
4638                                 /* call transport to abort command */
4639                                 if (((rval = soc_abort(sf->sf_sochandle,
4640                                     sf->sf_socp, sf->sf_sochandle->fcal_portno,
4641                                     fpkt, 1)) == FCAL_ABORTED) ||
4642                                     (rval == FCAL_ABORT_FAILED)) {
4643                                         my_rval = TRUE;
4644                                         pkt->pkt_reason = CMD_ABORTED;
4645                                         pkt->pkt_statistics |= STAT_ABORTED;
4646                                         cmd->cmd_state = SF_STATE_IDLE;
4647                                 } else if (rval == FCAL_BAD_ABORT) {
4648                                         cmd->cmd_timeout = sf_watchdog_time
4649                                             + 20;
4650                                         my_rval = FALSE;
4651                                 } else {
4652                                         SF_DEBUG(1, (sf, CE_NOTE,
4653                                             "Command Abort failed\n"));
4654                                 }
4655                         } else {
4656                                 mutex_exit(&cmd->cmd_abort_mutex);
4657                         }
4658                 }
4659         } else {
4660                 SF_DEBUG(2, (sf, CE_NOTE, "sf_abort target\n"));
4661                 mutex_enter(&sf->sf_mutex);
4662                 lip_cnt = sf->sf_lip_cnt;
4663                 mutex_enter(&target->sft_mutex);
4664                 if (target->sft_state & (SF_TARGET_BUSY |
4665                     SF_TARGET_OFFLINE)) {
4666                         mutex_exit(&target->sft_mutex);
4667                         return (rval);
4668                 }
4669                 old_target_state = target->sft_state;
4670                 target->sft_state |= SF_TARGET_BUSY;
4671                 mutex_exit(&target->sft_mutex);
4672                 mutex_exit(&sf->sf_mutex);
4673 
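                     /* allocate a packet to carry the Abort Task Set request */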
4674                 if ((pkt = sf_scsi_init_pkt(ap, NULL, NULL, 0,
4675                     0, 0, 0, NULL, 0)) != NULL) {
4676 
4677                         cmd = PKT2CMD(pkt);
4678                         cmd->cmd_block->fcp_cntl.cntl_abort_tsk = 1;
4679                         cmd->cmd_fp_pkt->fcal_pkt_comp = NULL;
4680                         cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
4681 
4682                         /* prepare the packet for transport */
4683                         if (sf_prepare_pkt(sf, cmd, target) == TRAN_ACCEPT) {
4684 
4685                                 cmd->cmd_state = SF_STATE_ISSUED;
4686                                 /*
4687                                  * call transport to send a pkt polled
4688                                  *
4689                                  * if that fails call the transport to abort it
4690                                  */
4691                                 if (soc_transport_poll(sf->sf_sochandle,
4692                                     cmd->cmd_fp_pkt, SF_ABORT_TIMEOUT,
4693                                     CQ_REQUEST_1) == FCAL_TRANSPORT_SUCCESS) {
4694                                         (void) ddi_dma_sync(
4695                                             cmd->cmd_cr_pool->rsp_dma_handle,
4696                                             (off_t)
4697                                             ((caddr_t)cmd->cmd_rsp_block -
4698                                             cmd->cmd_cr_pool->rsp_base),
4699                                             FCP_MAX_RSP_IU_SIZE,
4700                                             DDI_DMA_SYNC_FORKERNEL);
4701                                         if (((struct fcp_rsp_info *)
4702                                             (&cmd->cmd_rsp_block->
4703                                             fcp_response_len + 1))->
4704                                             rsp_code == FCP_NO_FAILURE) {
4705                                                 /* abort cmds for this targ */
4706                                                 sf_abort_all(sf, target, TRUE,
4707                                                     lip_cnt, TRUE);
4708                                         } else {
4709                                                 hp = &cmd->cmd_fp_pkt->
4710                                                     fcal_socal_request.
4711                                                     sr_fc_frame_hdr;
4712                                                 tgt_id = sf_alpa_to_switch[
4713                                                     (uchar_t)hp->d_id];
4714                                                 sf->sf_stats.tstats[tgt_id].
4715                                                     task_mgmt_failures++;
4716                                                 SF_DEBUG(1, (sf, CE_NOTE,
4717                                                     "Target %d Abort Task "
4718                                                     "Set failed\n", hp->d_id));
4719                                         }
4720                                 } else {
4721                                         mutex_enter(&cmd->cmd_abort_mutex);
4722                                         if (cmd->cmd_state == SF_STATE_ISSUED) {
4723                                         cmd->cmd_state = SF_STATE_ABORTING;
4724                                         cmd->cmd_timeout = sf_watchdog_time
4725                                             + 20;
4726                                         mutex_exit(&cmd->cmd_abort_mutex);
4727                                         if ((t = soc_abort(sf->sf_sochandle,
4728                                             sf->sf_socp, sf->sf_sochandle->
4729                                             fcal_portno, cmd->cmd_fp_pkt, 1)) !=
4730                                             FCAL_ABORTED &&
4731                                             (t != FCAL_ABORT_FAILED)) {
4732                                                 sf_log(sf, CE_NOTE,
4733                                                     "sf_abort failed, "
4734                                                     "initiating LIP\n");
4735                                                 sf_force_lip(sf);
4736                                                 deferred_destroy = 1;
4737                                         }
4738                                         } else {
4739                                         mutex_exit(&cmd->cmd_abort_mutex);
4740                                         }
4741                                 }
4742                         }
4743                         if (!deferred_destroy) {
4744                                 cmd->cmd_fp_pkt->fcal_pkt_comp =
4745                                     sf_cmd_callback;
4746                                 cmd->cmd_block->fcp_cntl.cntl_abort_tsk = 0;
4747                                 sf_scsi_destroy_pkt(ap, pkt);
4748                                 my_rval = TRUE;
4749                         }
4750                 }
4751                 mutex_enter(&sf->sf_mutex);
4752                 if (lip_cnt == sf->sf_lip_cnt) {
4753                         mutex_enter(&target->sft_mutex);
4754                         target->sft_state = old_target_state;
4755                         mutex_exit(&target->sft_mutex);
4756                 }
4757                 mutex_exit(&sf->sf_mutex);
4758         }
4759         return (my_rval);
4760 }
4761 
4762 
4763 /*
4764  * called by the transport and internally to reset a target
4765  */
4766 static int
4767 sf_reset(struct scsi_address *ap, int level)
4768 {
4769         struct scsi_pkt *pkt;
4770         struct fcal_packet *fpkt;
4771         struct sf *sf = ADDR2SF(ap);
4772         struct sf_target *target = ADDR2TARGET(ap), *ntarget;
4773         struct sf_pkt *cmd;
4774         int     rval = FALSE, t;
4775         int     lip_cnt;
4776         int     tgt_id, ret;
4777         fc_frame_header_t       *hp;
4778         int     deferred_destroy;
4779 
4780         /* We don't support RESET_LUN yet. */
4781         if (level == RESET_TARGET) {
4782                 struct sf_reset_list *p;
4783 
4784                 if ((p = kmem_alloc(sizeof (struct sf_reset_list), KM_NOSLEEP))
4785                     == NULL)
4786                         return (rval);
4787 
4788                 SF_DEBUG(2, (sf, CE_NOTE, "sf_reset target\n"));
4789                 mutex_enter(&sf->sf_mutex);
4790                 /* All target resets go to LUN 0 */
4791                 if (target->sft_lun.l) {
4792                         target = sf_lookup_target(sf, target->sft_port_wwn, 0);
4793                 }
4794                 mutex_enter(&target->sft_mutex);
4795                 if (target->sft_state & (SF_TARGET_BUSY |
4796                     SF_TARGET_OFFLINE)) {
4797                         mutex_exit(&target->sft_mutex);
4798                         mutex_exit(&sf->sf_mutex);
4799                         kmem_free(p, sizeof (struct sf_reset_list));
4800                         return (rval);
4801                 }
4802                 lip_cnt = sf->sf_lip_cnt;
4803                 target->sft_state |= SF_TARGET_BUSY;
4804                 for (ntarget = target->sft_next_lun;
4805                     ntarget;
4806                     ntarget = ntarget->sft_next_lun) {
4807                         mutex_enter(&ntarget->sft_mutex);
4808                         /*
4809                          * XXXX If we supported RESET_LUN we should check here
4810                          * to see if any LUNs were being reset and somehow fail
4811                          * that operation.
4812                          */
4813                         ntarget->sft_state |= SF_TARGET_BUSY;
4814                         mutex_exit(&ntarget->sft_mutex);
4815                 }
4816                 mutex_exit(&target->sft_mutex);
4817                 mutex_exit(&sf->sf_mutex);
4818 
4819                 deferred_destroy = 0;
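                     /* allocate a packet to carry the Target Reset request */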
4820                 if ((pkt = sf_scsi_init_pkt(ap, NULL, NULL, 0,
4821                     0, 0, 0, NULL, 0)) != NULL) {
4822                         cmd = PKT2CMD(pkt);
4823                         cmd->cmd_block->fcp_cntl.cntl_reset = 1;
4824                         cmd->cmd_fp_pkt->fcal_pkt_comp = NULL;
4825                         cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
4826 
4827                         /* prepare the packet for transport */
4828                         if (sf_prepare_pkt(sf, cmd, target) == TRAN_ACCEPT) {
4829                                 /* call transport to send a pkt polled */
4830                                 cmd->cmd_state = SF_STATE_ISSUED;
4831                                 if ((ret = soc_transport_poll(sf->sf_sochandle,
4832                                     cmd->cmd_fp_pkt, SF_ABORT_TIMEOUT,
4833                                     CQ_REQUEST_1)) == FCAL_TRANSPORT_SUCCESS) {
4834                                         (void) ddi_dma_sync(cmd->cmd_cr_pool->
4835                                             rsp_dma_handle, (caddr_t)cmd->
4836                                             cmd_rsp_block - cmd->cmd_cr_pool->
4837                                             rsp_base, FCP_MAX_RSP_IU_SIZE,
4838                                             DDI_DMA_SYNC_FORKERNEL);
4839                                         fpkt = cmd->cmd_fp_pkt;
4840                                         if ((fpkt->fcal_pkt_status ==
4841                                             FCAL_STATUS_OK) &&
4842                                             (((struct fcp_rsp_info *)
4843                                             (&cmd->cmd_rsp_block->
4844                                             fcp_response_len + 1))->
4845                                             rsp_code == FCP_NO_FAILURE)) {
4846                                                 sf_log(sf, CE_NOTE,
4847                                                     "!sf%d: Target 0x%x Reset "
4848                                                     "successful\n",
4849                                                     ddi_get_instance(
4850                                                     sf->sf_dip),
4851                                                     sf_alpa_to_switch[
4852                                                     target->sft_al_pa]);
4853                                                 rval = TRUE;
4854                                         } else {
4855                                                 hp = &cmd->cmd_fp_pkt->
4856                                                     fcal_socal_request.
4857                                                     sr_fc_frame_hdr;
4858                                                 tgt_id = sf_alpa_to_switch[
4859                                                     (uchar_t)hp->d_id];
4860                                                 sf->sf_stats.tstats[tgt_id].
4861                                                     task_mgmt_failures++;
4862                                                 sf_log(sf, CE_NOTE,
4863                                                     "!sf%d: Target 0x%x "
4864                                                     "Reset failed. "
4865                                                     "Status code 0x%x "
4866                                                     "Resp code 0x%x\n",
4867                                                     ddi_get_instance(
4868                                                     sf->sf_dip),
4869                                                     tgt_id,
4870                                                     fpkt->fcal_pkt_status,
4871                                                     ((struct fcp_rsp_info *)
4872                                                     (&cmd->cmd_rsp_block->
4873                                                     fcp_response_len + 1))->
4874                                                     rsp_code);
4875                                         }
4876                                 } else {
4877                                         sf_log(sf, CE_NOTE, "!sf%d: Target "
4878                                             "0x%x Reset Failed. Ret=%x\n",
4879                                             ddi_get_instance(sf->sf_dip),
4880                                             sf_alpa_to_switch[
4881                                             target->sft_al_pa], ret);
4882                                         mutex_enter(&cmd->cmd_abort_mutex);
4883                                         if (cmd->cmd_state == SF_STATE_ISSUED) {
4884                                         /* call the transport to abort a cmd */
4885                                         cmd->cmd_timeout = sf_watchdog_time
4886                                             + 20;
4887                                         cmd->cmd_state = SF_STATE_ABORTING;
4888                                         mutex_exit(&cmd->cmd_abort_mutex);
4889                                         if (((t = soc_abort(sf->sf_sochandle,
4890                                             sf->sf_socp,
4891                                             sf->sf_sochandle->fcal_portno,
4892                                             cmd->cmd_fp_pkt, 1)) !=
4893                                             FCAL_ABORTED) &&
4894                                             (t != FCAL_ABORT_FAILED)) {
4895                                                 sf_log(sf, CE_NOTE,
4896                                                     "!sf%d: Target 0x%x Reset "
4897                                                     "failed. Abort Failed, "
4898                                                     "forcing LIP\n",
4899                                                     ddi_get_instance(
4900                                                     sf->sf_dip),
4901                                                     sf_alpa_to_switch[
4902                                                     target->sft_al_pa]);
4903                                                 sf_force_lip(sf);
4904                                                 rval = TRUE;
4905                                                 deferred_destroy = 1;
4906                                         }
4907                                         } else {
4908                                                 mutex_exit
4909                                                     (&cmd->cmd_abort_mutex);
4910                                         }
4911                                 }
4912                         }
4913                         /*
4914                          * Defer releasing the packet if the abort returned
4915                          * BAD_ABORT or timed out, because there is a
4916                          * possibility that the ucode might return it.
4917                          * We wait for at least 20s and let it be released
4918                          * by the sf_watch thread
4919                          */
4920                         if (!deferred_destroy) {
4921                                 cmd->cmd_block->fcp_cntl.cntl_reset = 0;
4922                                 cmd->cmd_fp_pkt->fcal_pkt_comp =
4923                                     sf_cmd_callback;
4924                                 cmd->cmd_state = SF_STATE_IDLE;
4925                                 /* for cache */
4926                                 sf_scsi_destroy_pkt(ap, pkt);
4927                         }
4928                 } else {
4929                         cmn_err(CE_WARN, "!sf%d: Target 0x%x Reset Failed. "
4930                             "Resource allocation error.\n",
4931                             ddi_get_instance(sf->sf_dip),
4932                             sf_alpa_to_switch[target->sft_al_pa]);
4933                 }
4934                 mutex_enter(&sf->sf_mutex);
4935                 if ((rval == TRUE) && (lip_cnt == sf->sf_lip_cnt)) {
4936                         p->target = target;
4937                         p->lip_cnt = lip_cnt;
4938                         p->timeout = ddi_get_lbolt() +
4939                             drv_usectohz(SF_TARGET_RESET_DELAY);
4940                         p->next = sf->sf_reset_list;
4941                         sf->sf_reset_list = p;
4942                         mutex_exit(&sf->sf_mutex);
4943                         mutex_enter(&sf_global_mutex);
4944                         if (sf_reset_timeout_id == 0) {
4945                                 sf_reset_timeout_id = timeout(
4946                                     sf_check_reset_delay, NULL,
4947                                     drv_usectohz(SF_TARGET_RESET_DELAY));
4948                         }
4949                         mutex_exit(&sf_global_mutex);
4950                 } else {
4951                         if (lip_cnt == sf->sf_lip_cnt) {
4952                                 mutex_enter(&target->sft_mutex);
4953                                 target->sft_state &= ~SF_TARGET_BUSY;
4954                                 for (ntarget = target->sft_next_lun;
4955                                     ntarget;
4956                                     ntarget = ntarget->sft_next_lun) {
4957                                         mutex_enter(&ntarget->sft_mutex);
4958                                         ntarget->sft_state &= ~SF_TARGET_BUSY;
4959                                         mutex_exit(&ntarget->sft_mutex);
4960                                 }
4961                                 mutex_exit(&target->sft_mutex);
4962                         }
4963                         mutex_exit(&sf->sf_mutex);
4964                         kmem_free(p, sizeof (struct sf_reset_list));
4965                 }
4966         } else {
4967                 mutex_enter(&sf->sf_mutex);
4968                 if ((sf->sf_state == SF_STATE_OFFLINE) &&
4969                     (sf_watchdog_time < sf->sf_timer)) {
4970                         /*
4971                          * We are currently in a lip, so let this one
4972                          * finish before forcing another one.
4973                          */
4974                         mutex_exit(&sf->sf_mutex);
4975                         return (TRUE);
4976                 }
4977                 mutex_exit(&sf->sf_mutex);
4978                 sf_log(sf, CE_NOTE, "!sf:Target driver initiated lip\n");
4979                 sf_force_lip(sf);
4980                 rval = TRUE;
4981         }
4982         return (rval);
4983 }
4984 
4985 
4986 /*
4987  * abort all commands for a target
4988  *
4989  * if try_abort is set then try to abort each outstanding command via the
4990  * transport; if abort is set the commands complete as aborted, else as reset
4991  */
4992 static void
4993 sf_abort_all(struct sf *sf, struct sf_target *target, int abort, int
4994     lip_cnt, int try_abort)
4995 {
4996         struct sf_target *ntarget;
4997         struct sf_pkt *cmd, *head = NULL, *tail = NULL, *pcmd = NULL, *tcmd;
4998         struct fcal_packet *fpkt;
4999         struct scsi_pkt *pkt;
5000         int rval = FCAL_ABORTED;
5001 
5002         /*
5003          * First pull all commands for all LUNs on this target out of the
5004          * overflow list.  We can tell it's the same target by comparing
5005          * the node WWN.
5006          */
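             /*
              * (Here the "overflow list" means the sf_pkt_head/sf_pkt_tail
              * queue that sf_start() uses to hold commands while the
              * instance is throttling or the target is busy.)
              */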
5007         mutex_enter(&sf->sf_mutex);
5008         if (lip_cnt == sf->sf_lip_cnt) {
5009                 mutex_enter(&sf->sf_cmd_mutex);
5010                 cmd = sf->sf_pkt_head;
5011                 while (cmd != NULL) {
5012                         ntarget = ADDR2TARGET(&cmd->cmd_pkt->
5013                             pkt_address);
5014                         if (ntarget == target) {
5015                                 if (pcmd != NULL)
5016                                         pcmd->cmd_next = cmd->cmd_next;
5017                                 else
5018                                         sf->sf_pkt_head = cmd->cmd_next;
5019                                 if (sf->sf_pkt_tail == cmd) {
5020                                         sf->sf_pkt_tail = pcmd;
5021                                         if (pcmd != NULL)
5022                                                 pcmd->cmd_next = NULL;
5023                                 }
5024                                 tcmd = cmd->cmd_next;
5025                                 if (head == NULL) {
5026                                         head = cmd;
5027                                         tail = cmd;
5028                                 } else {
5029                                         tail->cmd_next = cmd;
5030                                         tail = cmd;
5031                                 }
5032                                 cmd->cmd_next = NULL;
5033                                 cmd = tcmd;
5034                         } else {
5035                                 pcmd = cmd;
5036                                 cmd = cmd->cmd_next;
5037                         }
5038                 }
5039                 mutex_exit(&sf->sf_cmd_mutex);
5040         }
5041         mutex_exit(&sf->sf_mutex);
5042 
5043         /*
5044          * Now complete all the commands on our list.  In the process,
5045          * the completion routine may take the commands off the target
5046          * lists.
5047          */
5048         cmd = head;
5049         while (cmd != NULL) {
5050                 pkt = cmd->cmd_pkt;
5051                 if (abort) {
5052                         pkt->pkt_reason = CMD_ABORTED;
5053                         pkt->pkt_statistics |= STAT_ABORTED;
5054                 } else {
5055                         pkt->pkt_reason = CMD_RESET;
5056                         pkt->pkt_statistics |= STAT_DEV_RESET;
5057                 }
5058                 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
5059                 cmd->cmd_state = SF_STATE_IDLE;
5060                 cmd = cmd->cmd_next;
5061                 /*
5062                  * call the packet completion routine only for
5063                  * non-polled commands. Ignore the polled commands as
5064                  * they time out and will be handled differently.
5065                  */
5066                 if ((pkt->pkt_comp) && !(pkt->pkt_flags & FLAG_NOINTR))
5067                         (*pkt->pkt_comp)(pkt);
5068 
5069         }
5070 
5071         /*
5072          * Finally, get all outstanding commands for each LUN, abort them
5073          * if they've been issued, and call the completion routine.
5074          * For the case where sf_offline_target is called from sf_watch
5075          * due to an Offline Timeout, it is quite possible that the soc+
5076          * ucode is hosed and therefore cannot return the commands.
5077          * Clean up all the issued commands as well.
5078          * Try_abort will be false only if sf_abort_all is coming from
5079          * sf_target_offline.
5080          */
5081 
5082         if (try_abort || sf->sf_state == SF_STATE_OFFLINE) {
5083                 mutex_enter(&target->sft_pkt_mutex);
5084                 cmd = tcmd = target->sft_pkt_head;
5085                 while (cmd != (struct sf_pkt *)&target->sft_pkt_head) {
5086                         fpkt = cmd->cmd_fp_pkt;
5087                         pkt = cmd->cmd_pkt;
5088                         mutex_enter(&cmd->cmd_abort_mutex);
5089                         if ((cmd->cmd_state == SF_STATE_ISSUED) &&
5090                             (fpkt->fcal_cmd_state &
5091                             FCAL_CMD_IN_TRANSPORT) &&
5092                             ((fpkt->fcal_cmd_state & FCAL_CMD_COMPLETE) ==
5093                             0) && !(pkt->pkt_flags & FLAG_NOINTR)) {
5094                                 cmd->cmd_state = SF_STATE_ABORTING;
5095                                 cmd->cmd_timeout = sf_watchdog_time +
5096                                     cmd->cmd_pkt->pkt_time + 20;
5097                                 mutex_exit(&cmd->cmd_abort_mutex);
5098                                 mutex_exit(&target->sft_pkt_mutex);
5099                                 if (try_abort) {
5100                                         /* call the transport to abort a pkt */
5101                                         rval = soc_abort(sf->sf_sochandle,
5102                                             sf->sf_socp,
5103                                             sf->sf_sochandle->fcal_portno,
5104                                             fpkt, 1);
5105                                 }
5106                                 if ((rval == FCAL_ABORTED) ||
5107                                     (rval == FCAL_ABORT_FAILED)) {
5108                                         if (abort) {
5109                                                 pkt->pkt_reason = CMD_ABORTED;
5110                                                 pkt->pkt_statistics |=
5111                                                     STAT_ABORTED;
5112                                         } else {
5113                                                 pkt->pkt_reason = CMD_RESET;
5114                                                 pkt->pkt_statistics |=
5115                                                     STAT_DEV_RESET;
5116                                         }
5117                                         cmd->cmd_state = SF_STATE_IDLE;
5118                                         if (pkt->pkt_comp)
5119                                                 (*pkt->pkt_comp)(pkt);
5120                                 }
5121                                 mutex_enter(&sf->sf_mutex);
5122                                 if (lip_cnt != sf->sf_lip_cnt) {
5123                                         mutex_exit(&sf->sf_mutex);
5124                                         return;
5125                                 }
5126                                 mutex_exit(&sf->sf_mutex);
5127                                 mutex_enter(&target->sft_pkt_mutex);
5128                                 cmd = target->sft_pkt_head;
5129                         } else {
5130                                 mutex_exit(&cmd->cmd_abort_mutex);
5131                                 cmd = cmd->cmd_forw;
5132                         }
5133                 }
5134                 mutex_exit(&target->sft_pkt_mutex);
5135         }
5136 }
5137 
5138 
5139 /*
5140  * called by the transport to start a packet
5141  */
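     /*
      * Note (added for clarity): an HBA driver ordinarily installs this
      * routine as its tran_start(9E) entry point when it sets up its
      * scsi_hba_tran_t during attach, e.g.
      *
      *     tran->tran_start = sf_start;
      *
      * SCSA then calls it for every scsi_pkt a target driver submits
      * through scsi_transport(9F).
      */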
5142 static int
5143 sf_start(struct scsi_address *ap, struct scsi_pkt *pkt)
5144 {
5145         struct sf *sf = ADDR2SF(ap);
5146         struct sf_target *target = ADDR2TARGET(ap);
5147         struct sf_pkt *cmd = PKT2CMD(pkt);
5148         int rval;
5149 
5150 
5151         SF_DEBUG(6, (sf, CE_NOTE, "sf_start\n"));
5152 
5153         if (cmd->cmd_state == SF_STATE_ISSUED) {
5154                 cmn_err(CE_PANIC, "sf: issuing packet twice 0x%p\n",
5155                     (void *)cmd);
5156         }
5157 
5158         /* prepare the packet for transport */
5159         if ((rval = sf_prepare_pkt(sf, cmd, target)) != TRAN_ACCEPT) {
5160                 return (rval);
5161         }
5162 
5163         if (target->sft_state & (SF_TARGET_BUSY|SF_TARGET_OFFLINE)) {
5164                 if (target->sft_state & SF_TARGET_OFFLINE) {
5165                         return (TRAN_FATAL_ERROR);
5166                 }
5167                 if (pkt->pkt_flags & FLAG_NOINTR) {
5168                         return (TRAN_BUSY);
5169                 }
5170                 mutex_enter(&sf->sf_cmd_mutex);
5171                 sf->sf_use_lock = TRUE;
5172                 goto enque;
5173         }
5174 
5175 
5176         /* if no interrupts then do polled I/O */
5177         if (pkt->pkt_flags & FLAG_NOINTR) {
5178                 return (sf_dopoll(sf, cmd));
5179         }
5180 
5181         /* regular interrupt-driven I/O */
5182 
5183         if (!sf->sf_use_lock) {
5184 
5185                 /* locking not needed */
5186 
5187                 cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
5188                     sf_watchdog_time + cmd->cmd_pkt->pkt_time : 0;
5189                 cmd->cmd_state = SF_STATE_ISSUED;
5190 
5191                 /* call the transport to send a pkt */
5192                 if (soc_transport(sf->sf_sochandle, cmd->cmd_fp_pkt,
5193                     FCAL_NOSLEEP, CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) {
5194                         cmd->cmd_state = SF_STATE_IDLE;
5195                         return (TRAN_BADPKT);
5196                 }
5197                 return (TRAN_ACCEPT);
5198         }
5199 
5200         /* regular I/O using locking */
5201 
5202         mutex_enter(&sf->sf_cmd_mutex);
5203         if ((sf->sf_ncmds >= sf->sf_throttle) ||
5204             (sf->sf_pkt_head != NULL)) {
5205 enque:
5206                 /*
5207                  * either we're throttling back or there are already commands
5208                  * on the queue, so enqueue this one for later
5209                  */
5210                 cmd->cmd_flags |= CFLAG_IN_QUEUE;
5211                 if (sf->sf_pkt_head != NULL) {
5212                         /* add to the queue */
5213                         sf->sf_pkt_tail->cmd_next = cmd;
5214                         cmd->cmd_next = NULL;
5215                         sf->sf_pkt_tail = cmd;
5216                 } else {
5217                         /* this is the first entry in the queue */
5218                         sf->sf_pkt_head = sf->sf_pkt_tail = cmd;
5219                         cmd->cmd_next = NULL;
5220                 }
5221                 mutex_exit(&sf->sf_cmd_mutex);
5222                 return (TRAN_ACCEPT);
5223         }
5224 
5225         /*
5226          * start this packet now
5227          */
5228 
5229         /* still have cmd mutex */
5230         return (sf_start_internal(sf, cmd));
5231 }
5232 
5233 
5234 /*
5235  * internal routine to start a packet now
5236  *
5237  * enter with the cmd mutex held and leave with it released
5238  */
5239 static int
5240 sf_start_internal(struct sf *sf, struct sf_pkt *cmd)
5241 {
5242         /* we have the cmd mutex */
5243         sf->sf_ncmds++;
5244         mutex_exit(&sf->sf_cmd_mutex);
5245 
5246         ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
5247         SF_DEBUG(6, (sf, CE_NOTE, "sf_start_internal\n"));
5248 
5249         cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ? sf_watchdog_time +
5250             cmd->cmd_pkt->pkt_time : 0;
5251         cmd->cmd_state = SF_STATE_ISSUED;
5252 
5253         /* call transport to send the pkt */
5254         if (soc_transport(sf->sf_sochandle, cmd->cmd_fp_pkt, FCAL_NOSLEEP,
5255             CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) {
5256                 cmd->cmd_state = SF_STATE_IDLE;
5257                 mutex_enter(&sf->sf_cmd_mutex);
5258                 sf->sf_ncmds--;
5259                 mutex_exit(&sf->sf_cmd_mutex);
5260                 return (TRAN_BADPKT);
5261         }
5262         return (TRAN_ACCEPT);
5263 }
5264 
5265 
5266 /*
5267  * prepare a packet for transport
5268  */
5269 static int
5270 sf_prepare_pkt(struct sf *sf, struct sf_pkt *cmd, struct sf_target *target)
5271 {
5272         struct fcp_cmd *fcmd = cmd->cmd_block;
5273 
5274 /* XXXX Need to set the LUN ? */
5275         bcopy((caddr_t)&target->sft_lun.b,
5276             (caddr_t)&fcmd->fcp_ent_addr,
5277             FCP_LUN_SIZE);
5278         cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
5279         cmd->cmd_pkt->pkt_state = 0;
5280         cmd->cmd_pkt->pkt_statistics = 0;
5281 
5282 
5283         if ((cmd->cmd_pkt->pkt_comp == NULL) &&
5284             ((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0)) {
5285                 return (TRAN_BADPKT);
5286         }
5287 
5288         /* invalidate imp field(s) of rsp block */
5289         cmd->cmd_rsp_block->fcp_u.i_fcp_status = SF_BAD_DMA_MAGIC;
5290 
5291         /* set up amt of I/O to do */
5292         if (cmd->cmd_flags & CFLAG_DMAVALID) {
5293                 cmd->cmd_pkt->pkt_resid = cmd->cmd_dmacount;
5294                 if (cmd->cmd_flags & CFLAG_CMDIOPB) {
5295                         (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
5296                             DDI_DMA_SYNC_FORDEV);
5297                 }
5298         } else {
5299                 cmd->cmd_pkt->pkt_resid = 0;
5300         }
5301 
5302         /* set up the Tagged Queuing type */
5303         if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
5304                 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
5305         } else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
5306                 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
5307         }
5308 
5309         /*
5310          * Sync the cmd segment
5311          */
5312         (void) ddi_dma_sync(cmd->cmd_cr_pool->cmd_dma_handle,
5313             (caddr_t)fcmd - cmd->cmd_cr_pool->cmd_base,
5314             sizeof (struct fcp_cmd), DDI_DMA_SYNC_FORDEV);
5315 
5316         sf_fill_ids(sf, cmd, target);
5317         return (TRAN_ACCEPT);
5318 }
5319 
5320 
5321 /*
5322  * fill in packet hdr source and destination IDs and hdr byte count
5323  */
5324 static void
5325 sf_fill_ids(struct sf *sf, struct sf_pkt *cmd, struct sf_target *target)
5326 {
5327         struct fcal_packet *fpkt = cmd->cmd_fp_pkt;
5328         fc_frame_header_t       *hp;
5329 
5330 
5331         hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
5332         hp->d_id = target->sft_al_pa;
5333         hp->s_id = sf->sf_al_pa;
5334         fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
5335             cmd->cmd_dmacookie.dmac_size;
5336 }
5337 
5338 
5339 /*
5340  * do polled I/O using transport
5341  */
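     /*
      * Note (added for clarity): polled (FLAG_NOINTR) packets bypass the
      * interrupt-driven completion path.  soc_transport_poll() is asked to
      * spin for pkt_time seconds (or SF_POLL_TIMEOUT if pkt_time is zero),
      * and on success sf_cmd_callback() is invoked directly from this
      * routine rather than from interrupt context.
      */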
5342 static int
5343 sf_dopoll(struct sf *sf, struct sf_pkt *cmd)
5344 {
5345         int timeout;
5346         int rval;
5347 
5348 
5349         mutex_enter(&sf->sf_cmd_mutex);
5350         sf->sf_ncmds++;
5351         mutex_exit(&sf->sf_cmd_mutex);
5352 
5353         timeout = cmd->cmd_pkt->pkt_time ? cmd->cmd_pkt->pkt_time
5354             : SF_POLL_TIMEOUT;
5355         cmd->cmd_timeout = 0;
5356         cmd->cmd_fp_pkt->fcal_pkt_comp = NULL;
5357         cmd->cmd_state = SF_STATE_ISSUED;
5358 
5359         /* call transport to send a pkt polled */
5360         rval = soc_transport_poll(sf->sf_sochandle, cmd->cmd_fp_pkt,
5361             timeout*1000000, CQ_REQUEST_1);
5362         mutex_enter(&cmd->cmd_abort_mutex);
5363         cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback;
5364         if (rval != FCAL_TRANSPORT_SUCCESS) {
5365                 if (rval == FCAL_TRANSPORT_TIMEOUT) {
5366                         cmd->cmd_state = SF_STATE_ABORTING;
5367                         mutex_exit(&cmd->cmd_abort_mutex);
5368                         (void) sf_target_timeout(sf, cmd);
5369                 } else {
5370                         mutex_exit(&cmd->cmd_abort_mutex);
5371                 }
5372                 cmd->cmd_state = SF_STATE_IDLE;
5373                 cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback;
5374                 mutex_enter(&sf->sf_cmd_mutex);
5375                 sf->sf_ncmds--;
5376                 mutex_exit(&sf->sf_cmd_mutex);
5377                 return (TRAN_BADPKT);
5378         }
5379         mutex_exit(&cmd->cmd_abort_mutex);
5380         cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback;
5381         sf_cmd_callback(cmd->cmd_fp_pkt);
5382         return (TRAN_ACCEPT);
5383 }
5384 
5385 
5386 /* a shortcut for defining debug messages below */
5387 #ifdef  DEBUG
5388 #define SF_DMSG1(s)             msg1 = s
5389 #else
5390 #define SF_DMSG1(s)             /* do nothing */
5391 #endif
5392 
5393 
5394 /*
5395  * the pkt_comp callback for command packets
5396  */
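     /*
      * Summary (added for clarity): when the FCAL status is OK the FCP
      * response IU is synced in and examined for protocol errors, SCSI
      * status, sense data and residuals; any other FCAL status is mapped
      * onto an appropriate pkt_reason.  In both cases the outstanding
      * command count and throttle state are updated before the target
      * driver's completion routine is called.
      */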
5397 static void
5398 sf_cmd_callback(struct fcal_packet *fpkt)
5399 {
5400         struct sf_pkt *cmd = (struct sf_pkt *)fpkt->fcal_pkt_private;
5401         struct scsi_pkt *pkt = cmd->cmd_pkt;
5402         struct sf *sf = ADDR2SF(&pkt->pkt_address);
5403         struct sf_target *target = ADDR2TARGET(&pkt->pkt_address);
5404         struct fcp_rsp *rsp;
5405         char *msg1 = NULL;
5406         char *msg2 = NULL;
5407         short ncmds;
5408         int tgt_id;
5409         int good_scsi_status = TRUE;
5410 
5411 
5412 
5413         if (cmd->cmd_state == SF_STATE_IDLE) {
5414                 cmn_err(CE_PANIC, "sf: completing idle packet 0x%p\n",
5415                     (void *)cmd);
5416         }
5417 
5418         mutex_enter(&cmd->cmd_abort_mutex);
5419         if (cmd->cmd_state == SF_STATE_ABORTING) {
5420                 /* cmd already being aborted -- nothing to do */
5421                 mutex_exit(&cmd->cmd_abort_mutex);
5422                 return;
5423         }
5424 
5425         cmd->cmd_state = SF_STATE_IDLE;
5426         mutex_exit(&cmd->cmd_abort_mutex);
5427 
5428         if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
5429 
5430                 (void) ddi_dma_sync(cmd->cmd_cr_pool->rsp_dma_handle,
5431                     (caddr_t)cmd->cmd_rsp_block - cmd->cmd_cr_pool->rsp_base,
5432                     FCP_MAX_RSP_IU_SIZE, DDI_DMA_SYNC_FORKERNEL);
5433 
5434                 rsp = (struct fcp_rsp *)cmd->cmd_rsp_block;
5435 
5436                 if (rsp->fcp_u.i_fcp_status == SF_BAD_DMA_MAGIC) {
5437 
5438                         if (sf_core && (sf_core & SF_CORE_BAD_DMA)) {
5439                                 sf_token = (int *)(uintptr_t)
5440                                     fpkt->fcal_socal_request.\
5441                                     sr_soc_hdr.sh_request_token;
5442                                 (void) soc_take_core(sf->sf_sochandle,
5443                                     sf->sf_socp);
5444                         }
5445 
5446                         pkt->pkt_reason = CMD_INCOMPLETE;
5447                         pkt->pkt_state = STATE_GOT_BUS;
5448                         pkt->pkt_statistics |= STAT_ABORTED;
5449 
5450                 } else {
5451 
5452                         pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
5453                             STATE_SENT_CMD | STATE_GOT_STATUS;
5454                         pkt->pkt_resid = 0;
5455                         if (cmd->cmd_flags & CFLAG_DMAVALID) {
5456                                 pkt->pkt_state |= STATE_XFERRED_DATA;
5457                         }
5458 
5459                         if ((pkt->pkt_scbp != NULL) &&
5460                             ((*(pkt->pkt_scbp) =
5461                             rsp->fcp_u.fcp_status.scsi_status)
5462                             != STATUS_GOOD)) {
5463                                 good_scsi_status = FALSE;
5464                         /*
5465                          * The next two checks make sure that if there
5466                          * is no sense data or a valid response and
5467                          * the command came back with check condition,
5468                          * the command should be retried
5469                          */
5470                                 if (!rsp->fcp_u.fcp_status.rsp_len_set &&
5471                                     !rsp->fcp_u.fcp_status.sense_len_set) {
5472                                         pkt->pkt_state &= ~STATE_XFERRED_DATA;
5473                                         pkt->pkt_resid = cmd->cmd_dmacount;
5474                                 }
5475                         }
5476 
5477                         if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
5478                             (pkt->pkt_state & STATE_XFERRED_DATA)) {
5479                                 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0,
5480                                     (uint_t)0, DDI_DMA_SYNC_FORCPU);
5481                         }
5482                         /*
5483                          * Update the transfer resid, if appropriate
5484                          */
5485                         if (rsp->fcp_u.fcp_status.resid_over ||
5486                             rsp->fcp_u.fcp_status.resid_under)
5487                                 pkt->pkt_resid = rsp->fcp_resid;
5488 
5489                         /*
5490                          * Check to see if the SCSI command failed.
5491                          *
5492                          */
5493 
5494                         /*
5495                          * First see if we got an FCP protocol error.
5496                          */
5497                         if (rsp->fcp_u.fcp_status.rsp_len_set) {
5498                                 struct fcp_rsp_info *bep;
5499 
5500                                 bep = (struct fcp_rsp_info *)
5501                                     (&rsp->fcp_response_len + 1);
5502                                 if (bep->rsp_code != FCP_NO_FAILURE) {
5503                                         pkt->pkt_reason = CMD_TRAN_ERR;
5504                                         tgt_id = pkt->pkt_address.a_target;
5505                                         switch (bep->rsp_code) {
5506                                         case FCP_CMND_INVALID:
5507                                                 SF_DMSG1("FCP_RSP FCP_CMND "
5508                                                     "fields invalid");
5509                                                 break;
5510                                         case FCP_TASK_MGMT_NOT_SUPPTD:
5511                                                 SF_DMSG1("FCP_RSP Task "
5512                                                     "Management Function "
5513                                                     "Not Supported");
5514                                                 break;
5515                                         case FCP_TASK_MGMT_FAILED:
5516                                                 SF_DMSG1("FCP_RSP Task "
5517                                                     "Management Function "
5518                                                     "Failed");
5519                                                 sf->sf_stats.tstats[tgt_id].
5520                                                     task_mgmt_failures++;
5521                                                 break;
5522                                         case FCP_DATA_RO_MISMATCH:
5523                                                 SF_DMSG1("FCP_RSP FCP_DATA RO "
5524                                                     "mismatch with "
5525                                                     "FCP_XFER_RDY DATA_RO");
5526                                                 sf->sf_stats.tstats[tgt_id].
5527                                                     data_ro_mismatches++;
5528                                                 break;
5529                                         case FCP_DL_LEN_MISMATCH:
5530                                                 SF_DMSG1("FCP_RSP FCP_DATA "
5531                                                     "length "
5532                                                     "different than BURST_LEN");
5533                                                 sf->sf_stats.tstats[tgt_id].
5534                                                     dl_len_mismatches++;
5535                                                 break;
5536                                         default:
5537                                                 SF_DMSG1("FCP_RSP invalid "
5538                                                     "RSP_CODE");
5539                                                 break;
5540                                         }
5541                                 }
5542                         }
5543 
5544                         /*
5545                          * See if we got a SCSI error with sense data
5546                          */
5547                         if (rsp->fcp_u.fcp_status.sense_len_set) {
5548                                 uchar_t rqlen = min(rsp->fcp_sense_len,
5549                                     sizeof (struct scsi_extended_sense));
5550                                 caddr_t sense = (caddr_t)rsp +
5551                                     sizeof (struct fcp_rsp) +
5552                                     rsp->fcp_response_len;
5553                                 struct scsi_arq_status *arq;
5554                                 struct scsi_extended_sense *sensep =
5555                                     (struct scsi_extended_sense *)sense;
5556 
5557                                 if (rsp->fcp_u.fcp_status.scsi_status !=
5558                                     STATUS_GOOD) {
5559                                 if (rsp->fcp_u.fcp_status.scsi_status
5560                                     == STATUS_CHECK) {
5561                                         if (sensep->es_key ==
5562                                             KEY_RECOVERABLE_ERROR)
5563                                                 good_scsi_status = 1;
5564                                         if (sensep->es_key ==
5565                                             KEY_UNIT_ATTENTION &&
5566                                             sensep->es_add_code == 0x3f &&
5567                                             sensep->es_qual_code == 0x0e) {
5568                                                 /* REPORT_LUNS_HAS_CHANGED */
5569                                                 sf_log(sf, CE_NOTE,
5570                                                 "!REPORT_LUNS_HAS_CHANGED\n");
5571                                                 sf_force_lip(sf);
5572                                         }
5573                                 }
5574                                 }
5575 
5576                                 if ((pkt->pkt_scbp != NULL) &&
5577                                     (cmd->cmd_scblen >=
5578                                         sizeof (struct scsi_arq_status))) {
5579 
5580                                 pkt->pkt_state |= STATE_ARQ_DONE;
5581 
5582                                 arq = (struct scsi_arq_status *)pkt->pkt_scbp;
5583                                 /*
5584                                  * copy out sense information
5585                                  */
5586                                 bcopy(sense, (caddr_t)&arq->sts_sensedata,
5587                                     rqlen);
5588                                 arq->sts_rqpkt_resid =
5589                                     sizeof (struct scsi_extended_sense) -
5590                                         rqlen;
5591                                 *((uchar_t *)&arq->sts_rqpkt_status) =
5592                                     STATUS_GOOD;
5593                                 arq->sts_rqpkt_reason = 0;
5594                                 arq->sts_rqpkt_statistics = 0;
5595                                 arq->sts_rqpkt_state = STATE_GOT_BUS |
5596                                     STATE_GOT_TARGET | STATE_SENT_CMD |
5597                                     STATE_GOT_STATUS | STATE_ARQ_DONE |
5598                                     STATE_XFERRED_DATA;
5599                             }
5600                                 target->sft_alive = TRUE;
5601                         }
5602 
5603                         /*
5604                          * The firmware returns the number of bytes actually
5605                          * transferred into/out of the host. Compare this with
5606                          * what we asked for; if it differs, we may have lost frames.
5607                          */
5608                         if ((pkt->pkt_reason == 0) && (pkt->pkt_resid == 0) &&
5609                             (good_scsi_status) &&
5610                             (pkt->pkt_state & STATE_XFERRED_DATA) &&
5611                             (!(cmd->cmd_flags & CFLAG_CMDIOPB)) &&
5612                             (target->sft_device_type != DTYPE_ESI)) {
5613                                 int byte_cnt =
5614                                     fpkt->fcal_socal_request.
5615                                     sr_soc_hdr.sh_byte_cnt;
5616                                 if (cmd->cmd_flags & CFLAG_DMASEND) {
5617                                         if (byte_cnt != 0) {
5618                                         sf_log(sf, CE_NOTE,
5619                                             "!sf_cmd_callback: Lost Frame: "
5620                                             "(write) received 0x%x expected"
5621                                             " 0x%x target 0x%x\n",
5622                                             byte_cnt, cmd->cmd_dmacount,
5623                                             sf_alpa_to_switch[
5624                                             target->sft_al_pa]);
5625                                         pkt->pkt_reason = CMD_INCOMPLETE;
5626                                         pkt->pkt_statistics |= STAT_ABORTED;
5627                                         }
5628                                 } else if (byte_cnt < cmd->cmd_dmacount) {
5629                                         sf_log(sf, CE_NOTE,
5630                                             "!sf_cmd_callback: "
5631                                             "Lost Frame: (read) "
5632                                             "received 0x%x expected 0x%x "
5633                                             "target 0x%x\n", byte_cnt,
5634                                             cmd->cmd_dmacount,
5635                                             sf_alpa_to_switch[
5636                                             target->sft_al_pa]);
5637                                         pkt->pkt_reason = CMD_INCOMPLETE;
5638                                         pkt->pkt_statistics |= STAT_ABORTED;
5639                                 }
5640                         }
5641                 }
5642 
5643         } else {
5644 
5645                 /* pkt status was not ok */
5646 
5647                 switch (fpkt->fcal_pkt_status) {
5648 
5649                 case FCAL_STATUS_ERR_OFFLINE:
5650                         SF_DMSG1("Fibre Channel Offline");
5651                         mutex_enter(&target->sft_mutex);
5652                         if (!(target->sft_state & SF_TARGET_OFFLINE)) {
5653                                 target->sft_state |= (SF_TARGET_BUSY
5654                                     | SF_TARGET_MARK);
5655                         }
5656                         mutex_exit(&target->sft_mutex);
5657                         (void) ndi_event_retrieve_cookie(sf->sf_event_hdl,
5658                             target->sft_dip, FCAL_REMOVE_EVENT,
5659                             &sf_remove_eid, NDI_EVENT_NOPASS);
5660                         (void) ndi_event_run_callbacks(sf->sf_event_hdl,
5661                             target->sft_dip, sf_remove_eid, NULL);
5662                         pkt->pkt_reason = CMD_TRAN_ERR;
5663                         pkt->pkt_statistics |= STAT_BUS_RESET;
5664                         break;
5665 
5666                 case FCAL_STATUS_MAX_XCHG_EXCEEDED:
5667                         sf_throttle(sf);
5668                         sf->sf_use_lock = TRUE;
5669                         pkt->pkt_reason = CMD_TRAN_ERR;
5670                         pkt->pkt_state = STATE_GOT_BUS;
5671                         pkt->pkt_statistics |= STAT_ABORTED;
5672                         break;
5673 
5674                 case FCAL_STATUS_TIMEOUT:
5675                         SF_DMSG1("Fibre Channel Timeout");
5676                         pkt->pkt_reason = CMD_TIMEOUT;
5677                         break;
5678 
5679                 case FCAL_STATUS_ERR_OVERRUN:
5680                         SF_DMSG1("CMD_DATA_OVR");
5681                         pkt->pkt_reason = CMD_DATA_OVR;
5682                         break;
5683 
5684                 case FCAL_STATUS_UNKNOWN_CQ_TYPE:
5685                         SF_DMSG1("Unknown CQ type");
5686                         pkt->pkt_reason = CMD_TRAN_ERR;
5687                         break;
5688 
5689                 case FCAL_STATUS_BAD_SEG_CNT:
5690                         SF_DMSG1("Bad SEG CNT");
5691                         pkt->pkt_reason = CMD_TRAN_ERR;
5692                         break;
5693 
5694                 case FCAL_STATUS_BAD_XID:
5695                         SF_DMSG1("Fibre Channel Invalid X_ID");
5696                         pkt->pkt_reason = CMD_TRAN_ERR;
5697                         break;
5698 
5699                 case FCAL_STATUS_XCHG_BUSY:
5700                         SF_DMSG1("Fibre Channel Exchange Busy");
5701                         pkt->pkt_reason = CMD_TRAN_ERR;
5702                         break;
5703 
5704                 case FCAL_STATUS_INSUFFICIENT_CQES:
5705                         SF_DMSG1("Insufficient CQEs");
5706                         pkt->pkt_reason = CMD_TRAN_ERR;
5707                         break;
5708 
5709                 case FCAL_STATUS_ALLOC_FAIL:
5710                         SF_DMSG1("ALLOC FAIL");
5711                         pkt->pkt_reason = CMD_TRAN_ERR;
5712                         break;
5713 
5714                 case FCAL_STATUS_BAD_SID:
5715                         SF_DMSG1("Fibre Channel Invalid S_ID");
5716                         pkt->pkt_reason = CMD_TRAN_ERR;
5717                         break;
5718 
5719                 case FCAL_STATUS_INCOMPLETE_DMA_ERR:
5720                         if (sf_core && (sf_core & SF_CORE_INCOMPLETE_DMA)) {
5721                                 sf_token = (int *)(uintptr_t)
5722                                     fpkt->fcal_socal_request.\
5723                                     sr_soc_hdr.sh_request_token;
5724                                 (void) soc_take_core(sf->sf_sochandle,
5725                                     sf->sf_socp);
5726                                 sf_core = 0;
5727                         }
5728                         msg2 =
5729                         "INCOMPLETE DMA XFER due to bad SOC+ card, replace HBA";
5730                         pkt->pkt_reason = CMD_INCOMPLETE;
5731                         pkt->pkt_state = STATE_GOT_BUS;
5732                         pkt->pkt_statistics |= STAT_ABORTED;
5733                         break;
5734 
5735                 case FCAL_STATUS_CRC_ERR:
5736                         msg2 = "Fibre Channel CRC Error on frames";
5737                         pkt->pkt_reason = CMD_INCOMPLETE;
5738                         pkt->pkt_state = STATE_GOT_BUS;
5739                         pkt->pkt_statistics |= STAT_ABORTED;
5740                         break;
5741 
5742                 case FCAL_STATUS_NO_SEQ_INIT:
5743                         SF_DMSG1("Fibre Channel Seq Init Error");
5744                         pkt->pkt_reason = CMD_TRAN_ERR;
5745                         break;
5746 
5747                 case  FCAL_STATUS_OPEN_FAIL:
5748                         pkt->pkt_reason = CMD_TRAN_ERR;
5749                         SF_DMSG1("Fibre Channel Open Failure");
5750                         if ((target->sft_state & (SF_TARGET_BUSY |
5751                             SF_TARGET_MARK | SF_TARGET_OFFLINE)) == 0) {
5752                                 sf_log(sf, CE_NOTE,
5753                                     "!Open failure to target 0x%x "
5754                                     "forcing LIP\n",
5755                                     sf_alpa_to_switch[target->sft_al_pa]);
5756                                 sf_force_lip(sf);
5757                         }
5758                         break;
5759 
5760 
5761                 case FCAL_STATUS_ONLINE_TIMEOUT:
5762                         SF_DMSG1("Fibre Channel Online Timeout");
5763                         pkt->pkt_reason = CMD_TRAN_ERR;
5764                         break;
5765 
5766                 default:
5767                         SF_DMSG1("Unknown FC Status");
5768                         pkt->pkt_reason = CMD_TRAN_ERR;
5769                         break;
5770                 }
5771         }
5772 
5773 #ifdef  DEBUG
5774         /*
5775          * msg1 will be non-NULL if we've detected some sort of error
5776          */
5777         if (msg1 != NULL && sfdebug >= 4) {
5778                 sf_log(sf, CE_WARN,
5779                     "!Transport error on cmd=0x%p target=0x%x:  %s\n",
5780                     (void *)fpkt, pkt->pkt_address.a_target, msg1);
5781         }
5782 #endif
5783 
5784         if (msg2 != NULL) {
5785                 sf_log(sf, CE_WARN, "!Transport error on target=0x%x:  %s\n",
5786                     pkt->pkt_address.a_target, msg2);
5787         }
5788 
5789         ncmds = fpkt->fcal_ncmds;
5790         ASSERT(ncmds >= 0);
5791         if (ncmds >= (sf->sf_throttle - SF_HI_CMD_DELTA)) {
5792 #ifdef DEBUG
5793                 if (!sf->sf_use_lock) {
5794                         SF_DEBUG(4, (sf, CE_NOTE, "use lock flag on\n"));
5795                 }
5796 #endif
5797                 sf->sf_use_lock = TRUE;
5798         }
5799 
5800         mutex_enter(&sf->sf_cmd_mutex);
5801         sf->sf_ncmds = ncmds;
5802         sf_throttle_start(sf);
5803         mutex_exit(&sf->sf_cmd_mutex);
5804 
5805         if (!msg1 && !msg2)
5806                 SF_DEBUG(6, (sf, CE_NOTE, "Completing pkt 0x%p\n",
5807                     (void *)pkt));
5808         if (pkt->pkt_comp != NULL) {
5809                 (*pkt->pkt_comp)(pkt);
5810         }
5811 }
5812 
5813 #undef  SF_DMSG1
5814 
5815 
5816 
5817 /*
5818  * start throttling for this instance
5819  */
5820 static void
5821 sf_throttle_start(struct sf *sf)
5822 {
5823         struct sf_pkt *cmd, *prev_cmd = NULL;
5824         struct scsi_pkt *pkt;
5825         struct sf_target *target;
5826 
5827 
5828         ASSERT(mutex_owned(&sf->sf_cmd_mutex));
5829 
5830         cmd = sf->sf_pkt_head;
5831         while ((cmd != NULL) &&
5832             (sf->sf_state == SF_STATE_ONLINE) &&
5833             (sf->sf_ncmds < sf->sf_throttle)) {
5834 
5835                 pkt = CMD2PKT(cmd);
5836 
5837                 target = ADDR2TARGET(&pkt->pkt_address);
5838                 if (target->sft_state & SF_TARGET_BUSY) {
5839                         /* this command is busy -- go to next */
5840                         ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
5841                         prev_cmd = cmd;
5842                         cmd = cmd->cmd_next;
5843                         continue;
5844                 }
5845 
5846                 ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
5847 
5848                 /* this cmd not busy and not issued */
5849 
5850                 /* remove this packet from the queue */
5851                 if (sf->sf_pkt_head == cmd) {
5852                         /* this was the first packet */
5853                         sf->sf_pkt_head = cmd->cmd_next;
5854                 } else if (sf->sf_pkt_tail == cmd) {
5855                         /* this was the last packet */
5856                         sf->sf_pkt_tail = prev_cmd;
5857                         if (prev_cmd != NULL) {
5858                                 prev_cmd->cmd_next = NULL;
5859                         }
5860                 } else {
5861                         /* some packet in the middle of the queue */
5862                         ASSERT(prev_cmd != NULL);
5863                         prev_cmd->cmd_next = cmd->cmd_next;
5864                 }
5865                 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
5866 
5867                 if (target->sft_state & SF_TARGET_OFFLINE) {
5868                         mutex_exit(&sf->sf_cmd_mutex);
5869                         pkt->pkt_reason = CMD_TRAN_ERR;
5870                         if (pkt->pkt_comp != NULL) {
5871                                 (*pkt->pkt_comp)(cmd->cmd_pkt);
5872                         }
5873                 } else {
5874                         sf_fill_ids(sf, cmd, target);
5875                         if (sf_start_internal(sf, cmd) != TRAN_ACCEPT) {
5876                                 pkt->pkt_reason = CMD_TRAN_ERR;
5877                                 if (pkt->pkt_comp != NULL) {
5878                                         (*pkt->pkt_comp)(cmd->cmd_pkt);
5879                                 }
5880                         }
5881                 }
5882                 mutex_enter(&sf->sf_cmd_mutex);
5883                 cmd = sf->sf_pkt_head;
5884                 prev_cmd = NULL;
5885         }
5886 }
5887 
5888 
5889 /*
5890  * called when the max exchange value is exceeded to throttle back commands
5891  */
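     /*
      * Illustrative arithmetic (hypothetical numbers): with fcal_cmdmax of
      * 256 and 200 commands outstanding the throttle drops to 128
      * (cmdmax / 2); with 40 outstanding (assuming SF_DECR_DELTA is
      * smaller) it drops to 40 - SF_DECR_DELTA; with fewer than
      * SF_DECR_DELTA outstanding it floors at SF_DECR_DELTA.  sf_watch()
      * later raises the throttle again in SF_INCR_DELTA steps once the
      * overload condition has cleared.
      */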
5892 static void
5893 sf_throttle(struct sf *sf)
5894 {
5895         int cmdmax = sf->sf_sochandle->fcal_cmdmax;
5896 
5897 
5898         mutex_enter(&sf->sf_cmd_mutex);
5899 
5900         sf->sf_flag = TRUE;
5901 
5902         if (sf->sf_ncmds > (cmdmax / 2)) {
5903                 sf->sf_throttle = cmdmax / 2;
5904         } else {
5905                 if (sf->sf_ncmds > SF_DECR_DELTA) {
5906                         sf->sf_throttle = sf->sf_ncmds - SF_DECR_DELTA;
5907                 } else {
5908                         /*
5909                          * This case is just a safeguard; it should not really
5910                          * happen (ncmds < SF_DECR_DELTA and MAX_EXCHG exceeded).
5911                          */
5912                         sf->sf_throttle = SF_DECR_DELTA;
5913                 }
5914         }
5915         mutex_exit(&sf->sf_cmd_mutex);
5916 
5917         sf = sf->sf_sibling;
5918         if (sf != NULL) {
5919                 mutex_enter(&sf->sf_cmd_mutex);
5920                 sf->sf_flag = TRUE;
5921                 if (sf->sf_ncmds >= (cmdmax / 2)) {
5922                         sf->sf_throttle = cmdmax / 2;
5923                 } else {
5924                         if (sf->sf_ncmds > SF_DECR_DELTA) {
5925                                 sf->sf_throttle = sf->sf_ncmds - SF_DECR_DELTA;
5926                         } else {
5927                                 sf->sf_throttle = SF_DECR_DELTA;
5928                         }
5929                 }
5930 
5931                 mutex_exit(&sf->sf_cmd_mutex);
5932         }
5933 }
5934 
5935 
5936 /*
5937  * sf watchdog routine, called for a timeout
5938  */
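     /*
      * Note (added for clarity): this routine runs off timeout(9F) and
      * reschedules itself every sf_watchdog_tick.  For each instance it
      * adjusts the command throttle, restarts queued commands, trims the
      * command/response pool, times out ELS frames and stuck commands,
      * and services the online/offline timers.  The sf_watch_running flag
      * and sf_watch_cv allow other threads to wait for a pass to finish.
      */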
5939 /*ARGSUSED*/
5940 static void
5941 sf_watch(void *arg)
5942 {
5943         struct sf *sf;
5944         struct sf_els_hdr       *privp;
5945         static int count = 0, pscan_count = 0;
5946         int cmdmax, i, mescount = 0;
5947         struct sf_target *target;
5948 
5949 
5950         sf_watchdog_time += sf_watchdog_timeout;
5951         count++;
5952         pscan_count++;
5953 
5954         mutex_enter(&sf_global_mutex);
5955         sf_watch_running = 1;
5956         for (sf = sf_head; sf != NULL; sf = sf->sf_next) {
5957 
5958                 mutex_exit(&sf_global_mutex);
5959 
5960                 /* disable throttling while we're suspended */
5961                 mutex_enter(&sf->sf_mutex);
5962                 if (sf->sf_state & SF_STATE_SUSPENDED) {
5963                         mutex_exit(&sf->sf_mutex);
5964                         SF_DEBUG(1, (sf, CE_CONT,
5965                             "sf_watch, sf%d:throttle disabled "
5966                             "due to DDI_SUSPEND\n",
5967                             ddi_get_instance(sf->sf_dip)));
5968                         mutex_enter(&sf_global_mutex);
5969                         continue;
5970                 }
5971                 mutex_exit(&sf->sf_mutex);
5972 
5973                 cmdmax = sf->sf_sochandle->fcal_cmdmax;
5974 
5975                 if (sf->sf_take_core) {
5976                         (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
5977                 }
5978 
5979                 mutex_enter(&sf->sf_cmd_mutex);
5980 
5981                 if (!sf->sf_flag) {
5982                         if (sf->sf_throttle < (cmdmax / 2)) {
5983                                 sf->sf_throttle = cmdmax / 2;
5984                         } else if ((sf->sf_throttle += SF_INCR_DELTA) >
5985                             cmdmax) {
5986                                 sf->sf_throttle = cmdmax;
5987                         }
5988                 } else {
5989                         sf->sf_flag = FALSE;
5990                 }
5991 
5992                 sf->sf_ncmds_exp_avg = (sf->sf_ncmds + sf->sf_ncmds_exp_avg)
5993                     >> 2;
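                     /*
                      * Note: this is a decaying average rather than a true
                      * mean; at a sustained load of n outstanding commands it
                      * settles near n / 3 (x = (n + x) / 4 implies x = n / 3),
                      * which is the figure the pool-trimming check below
                      * compares against.
                      */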
5994                 if ((sf->sf_ncmds <= (sf->sf_throttle - SF_LO_CMD_DELTA)) &&
5995                     (sf->sf_pkt_head == NULL)) {
5996 #ifdef DEBUG
5997                         if (sf->sf_use_lock) {
5998                                 SF_DEBUG(4, (sf, CE_NOTE,
5999                                     "use lock flag off\n"));
6000                         }
6001 #endif
6002                         sf->sf_use_lock = FALSE;
6003                 }
6004 
6005                 if (sf->sf_state == SF_STATE_ONLINE && sf->sf_pkt_head &&
6006                     sf->sf_ncmds < sf->sf_throttle) {
6007                         sf_throttle_start(sf);
6008                 }
6009 
6010                 mutex_exit(&sf->sf_cmd_mutex);
6011 
6012                 if (pscan_count >= sf_pool_scan_cnt) {
6013                         if (sf->sf_ncmds_exp_avg < (sf->sf_cr_pool_cnt <<
6014                             SF_LOG2_ELEMS_IN_POOL) - SF_FREE_CR_EPSILON) {
6015                                 sf_crpool_free(sf);
6016                         }
6017                 }
6018                 mutex_enter(&sf->sf_mutex);
6019 
6020                 privp = sf->sf_els_list;
6021                 while (privp != NULL) {
6022                         if (privp->timeout < sf_watchdog_time) {
6023                                 /* timeout this command */
6024                                 privp = sf_els_timeout(sf, privp);
6025                         } else if ((privp->timeout == SF_INVALID_TIMEOUT) &&
6026                             (privp->lip_cnt != sf->sf_lip_cnt)) {
6027                                 if (privp->prev != NULL) {
6028                                         privp->prev->next = privp->next;
6029                                 }
6030                                 if (sf->sf_els_list == privp) {
6031                                         sf->sf_els_list = privp->next;
6032                                 }
6033                                 if (privp->next != NULL) {
6034                                         privp->next->prev = privp->prev;
6035                                 }
6036                                 mutex_exit(&sf->sf_mutex);
6037                                 sf_els_free(privp->fpkt);
6038                                 mutex_enter(&sf->sf_mutex);
6039                                 privp = sf->sf_els_list;
6040                         } else {
6041                                 privp = privp->next;
6042                         }
6043                 }
6044 
6045                 if (sf->sf_online_timer && sf->sf_online_timer <
6046                     sf_watchdog_time) {
6047                         for (i = 0; i < sf_max_targets; i++) {
6048                                 target = sf->sf_targets[i];
6049                                 if (target != NULL) {
6050                                         if (!mescount && target->sft_state &
6051                                             SF_TARGET_BUSY) {
6052                                                 sf_log(sf, CE_WARN, "!Loop "
6053                                                     "Unstable: Failed to bring "
6054                                                     "Loop Online\n");
6055                                                 mescount = 1;
6056                                         }
6057                                         target->sft_state |= SF_TARGET_MARK;
6058                                 }
6059                         }
6060                         sf_finish_init(sf, sf->sf_lip_cnt);
6061                         sf->sf_state = SF_STATE_INIT;
6062                         sf->sf_online_timer = 0;
6063                 }
6064 
6065                 if (sf->sf_state == SF_STATE_ONLINE) {
6066                         mutex_exit(&sf->sf_mutex);
6067                         if (count >= sf_pkt_scan_cnt) {
6068                                 sf_check_targets(sf);
6069                         }
6070                 } else if ((sf->sf_state == SF_STATE_OFFLINE) &&
6071                     (sf->sf_timer < sf_watchdog_time)) {
6072                         for (i = 0; i < sf_max_targets; i++) {
6073                                 target = sf->sf_targets[i];
6074                                 if ((target != NULL) &&
6075                                     (target->sft_state &
6076                                     SF_TARGET_BUSY)) {
6077                                         sf_log(sf, CE_WARN,
6078                                             "!Offline Timeout\n");
6079                                         if (sf_core && (sf_core &
6080                                             SF_CORE_OFFLINE_TIMEOUT)) {
6081                                                 (void) soc_take_core(
6082                                                     sf->sf_sochandle,
6083                                                     sf->sf_socp);
6084                                                 sf_core = 0;
6085                                         }
6086                                         break;
6087                                 }
6088                         }
6089                         sf_finish_init(sf, sf->sf_lip_cnt);
6090                         sf->sf_state = SF_STATE_INIT;
6091                         mutex_exit(&sf->sf_mutex);
6092                 } else {
6093                         mutex_exit(&sf->sf_mutex);
6094                 }
6095                 mutex_enter(&sf_global_mutex);
6096         }
6097         mutex_exit(&sf_global_mutex);
6098         if (count >= sf_pkt_scan_cnt) {
6099                 count = 0;
6100         }
6101         if (pscan_count >= sf_pool_scan_cnt) {
6102                 pscan_count = 0;
6103         }
6104 
6105         /* reschedule the watchdog timeout */
6106         sf_watchdog_id = timeout(sf_watch, (caddr_t)0, sf_watchdog_tick);
6107 
6108         /* signal waiting thread */
6109         mutex_enter(&sf_global_mutex);
6110         sf_watch_running = 0;
6111         cv_broadcast(&sf_watch_cv);
6112         mutex_exit(&sf_global_mutex);
6113 }
6114 
6115 
6116 /*
6117  * called during a timeout to check targets
6118  */
6119 static void
6120 sf_check_targets(struct sf *sf)
6121 {
6122         struct sf_target *target;
6123         int i;
6124         struct sf_pkt *cmd;
6125         struct scsi_pkt *pkt;
6126         int lip_cnt;
6127 
6128         mutex_enter(&sf->sf_mutex);
6129         lip_cnt = sf->sf_lip_cnt;
6130         mutex_exit(&sf->sf_mutex);
6131 
6132         /* scan all possible targets */
6133         for (i = 0; i < sf_max_targets; i++) {
6134                 target = sf->sf_targets[i];
6135                 while (target != NULL) {
6136                         mutex_enter(&target->sft_pkt_mutex);
6137                         if (target->sft_alive && target->sft_scan_count !=
6138                             sf_target_scan_cnt) {
6139                                 target->sft_alive = 0;
6140                                 target->sft_scan_count++;
6141                                 mutex_exit(&target->sft_pkt_mutex);
6142                                 return;
6143                         }
6144                         target->sft_alive = 0;
6145                         target->sft_scan_count = 0;
6146                         cmd = target->sft_pkt_head;
6147                         while (cmd != (struct sf_pkt *)&target->sft_pkt_head) {
6148                                 mutex_enter(&cmd->cmd_abort_mutex);
6149                                 if (cmd->cmd_state == SF_STATE_ISSUED &&
6150                                     ((cmd->cmd_timeout && sf_watchdog_time >
6151 #ifdef  DEBUG
6152                                     cmd->cmd_timeout) || sf_abort_flag)) {
6153                                         sf_abort_flag = 0;
6154 #else
6155                                         cmd->cmd_timeout))) {
6156 #endif
6157                                         cmd->cmd_timeout = 0;
6158         /* prevent reset from getting at this packet */
6159                                         cmd->cmd_state = SF_STATE_ABORTING;
6160                                         mutex_exit(&cmd->cmd_abort_mutex);
6161                                         mutex_exit(&target->sft_pkt_mutex);
6162                                         sf->sf_stats.tstats[i].timeouts++;
6163                                         if (sf_target_timeout(sf, cmd))
6164                                                 return;
6165                                         else {
6166                                                 if (lip_cnt != sf->sf_lip_cnt) {
6167                                                         return;
6168                                                 } else {
6169                                                         mutex_enter(&target->
6170                                                             sft_pkt_mutex);
6171                                                         cmd = target->
6172                                                             sft_pkt_head;
6173                                                 }
6174                                         }
6175         /*
6176          * If the abort and LIP fail, a reset will be carried out,
6177          * but the reset will ignore this packet.  We have waited at least
6178          * 20 seconds after the initial timeout.  Now, complete it here.
6179          * This also takes care of spurious bad aborts.
6180          */
6181                                 } else if ((cmd->cmd_state ==
6182                                     SF_STATE_ABORTING) && (cmd->cmd_timeout
6183                                     <= sf_watchdog_time)) {
6184                                         cmd->cmd_state = SF_STATE_IDLE;
6185                                         mutex_exit(&cmd->cmd_abort_mutex);
6186                                         mutex_exit(&target->sft_pkt_mutex);
6187                                         SF_DEBUG(1, (sf, CE_NOTE,
6188                                             "Command 0x%p to sft 0x%p"
6189                                             " delayed release\n",
6190                                             (void *)cmd, (void *)target));
6191                                         pkt = cmd->cmd_pkt;
6192                                         pkt->pkt_statistics |=
6193                                             (STAT_TIMEOUT|STAT_ABORTED);
6194                                         pkt->pkt_reason = CMD_TIMEOUT;
6195                                         if (pkt->pkt_comp) {
6196                                                 scsi_hba_pkt_comp(pkt);
6197                                         /* handle deferred_destroy case */
6198                                         } else {
6199                                                 if ((cmd->cmd_block->fcp_cntl.
6200                                                     cntl_reset == 1) ||
6201                                                     (cmd->cmd_block->
6202                                                     fcp_cntl.cntl_abort_tsk ==
6203                                                     1)) {
6204                                                         cmd->cmd_block->
6205                                                             fcp_cntl.
6206                                                             cntl_reset = 0;
6207                                                         cmd->cmd_block->
6208                                                             fcp_cntl.
6209                                                             cntl_abort_tsk = 0;
6210                                                         cmd->cmd_fp_pkt->
6211                                                             fcal_pkt_comp =
6212                                                             sf_cmd_callback;
6213                                                         /* for cache */
6214                                                         sf_scsi_destroy_pkt
6215                                                             (&pkt->pkt_address,
6216                                                             pkt);
6217                                                 }
6218                                         }
6219                                         mutex_enter(&target->sft_pkt_mutex);
6220                                         cmd = target->sft_pkt_head;
6221                                 } else {
6222                                         mutex_exit(&cmd->cmd_abort_mutex);
6223                                         cmd = cmd->cmd_forw;
6224                                 }
6225                         }
6226                         mutex_exit(&target->sft_pkt_mutex);
6227                         target = target->sft_next_lun;
6228                 }
6229         }
6230 }
6231 
6232 
6233 /*
6234  * a command to a target has timed out
6235  * return TRUE if the command abort failed or timed out, else return FALSE
6236  */
6237 static int
6238 sf_target_timeout(struct sf *sf, struct sf_pkt *cmd)
6239 {
6240         int rval;
6241         struct scsi_pkt *pkt;
6242         struct fcal_packet *fpkt;
6243         int tgt_id;
6244         int retval = FALSE;
6245 
6246 
6247         SF_DEBUG(1, (sf, CE_NOTE, "Command 0x%p to target %x timed out\n",
6248             (void *)cmd->cmd_fp_pkt, cmd->cmd_pkt->pkt_address.a_target));
6249 
6250         fpkt = cmd->cmd_fp_pkt;
6251 
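             /*
              * if requested via the sf_core debug mask, save the request
              * token and take a socal core dump before trying the abort
              */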
6252         if (sf_core && (sf_core & SF_CORE_CMD_TIMEOUT)) {
6253                 sf_token = (int *)(uintptr_t)
6254                     fpkt->fcal_socal_request.sr_soc_hdr.\
6255                     sh_request_token;
6256                 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6257                 sf_core = 0;
6258         }
6259 
6260         /* call the transport to abort a command */
6261         rval = soc_abort(sf->sf_sochandle, sf->sf_socp,
6262             sf->sf_sochandle->fcal_portno, fpkt, 1);
6263 
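             /*
              * act on the abort result: complete the packet, extend the
              * timeout, report failure to the caller, or force a LIP
              */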
6264         switch (rval) {
6265         case FCAL_ABORTED:
6266                 SF_DEBUG(1, (sf, CE_NOTE, "Command Abort succeeded\n"));
6267                 pkt = cmd->cmd_pkt;
6268                 cmd->cmd_state = SF_STATE_IDLE;
6269                 pkt->pkt_statistics |= (STAT_TIMEOUT|STAT_ABORTED);
6270                 pkt->pkt_reason = CMD_TIMEOUT;
6271                 if (pkt->pkt_comp != NULL) {
6272                         (*pkt->pkt_comp)(pkt);
6273                 }
6274                 break;                          /* success */
6275 
6276         case FCAL_ABORT_FAILED:
6277                 SF_DEBUG(1, (sf, CE_NOTE, "Command Abort failed at target\n"));
6278                 pkt = cmd->cmd_pkt;
6279                 cmd->cmd_state = SF_STATE_IDLE;
6280                 pkt->pkt_reason = CMD_TIMEOUT;
6281                 pkt->pkt_statistics |= STAT_TIMEOUT;
6282                 tgt_id = pkt->pkt_address.a_target;
6283                 sf->sf_stats.tstats[tgt_id].abts_failures++;
6284                 if (pkt->pkt_comp != NULL) {
6285                         (*pkt->pkt_comp)(pkt);
6286                 }
6287                 break;
6288 
6289         case FCAL_BAD_ABORT:
6290                 if (sf_core && (sf_core & SF_CORE_BAD_ABORT)) {
6291                         sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6292                             sr_soc_hdr.sh_request_token;
6293                         (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6294                         sf_core = 0;
6295                 }
6296                 SF_DEBUG(1, (sf, CE_NOTE, "Command Abort bad abort\n"));
6297                 cmd->cmd_timeout = sf_watchdog_time + cmd->cmd_pkt->pkt_time
6298                     + 20;
6299                 break;
6300 
6301         case FCAL_TIMEOUT:
6302                 retval = TRUE;
6303                 break;
6304 
6305         default:
6306                 pkt = cmd->cmd_pkt;
6307                 tgt_id = pkt->pkt_address.a_target;
6308                 sf_log(sf, CE_WARN, "Command Abort failed target 0x%x, "
6309                     "forcing a LIP\n", tgt_id);
6310                 if (sf_core && (sf_core & SF_CORE_ABORT_TIMEOUT)) {
6311                         sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6312                             sr_soc_hdr.sh_request_token;
6313                         (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6314                         sf_core = 0;
6315                 }
6316                 sf_force_lip(sf);
6317                 retval = TRUE;
6318                 break;
6319         }
6320 
6321         return (retval);
6322 }
6323 
6324 
6325 /*
6326  * an ELS command has timed out
6327  * return the next ELS header to be processed, or NULL
6328  */
6329 static struct sf_els_hdr *
6330 sf_els_timeout(struct sf *sf, struct sf_els_hdr *privp)
6331 {
6332         struct fcal_packet *fpkt;
6333         int rval, dflag, timeout = SF_ELS_TIMEOUT;
6334         uint_t lip_cnt = privp->lip_cnt;
6335         uchar_t els_code = privp->els_code;
6336         struct sf_target *target = privp->target;
6337         char what[64];
6338 
6339         fpkt = privp->fpkt;
6340         dflag = privp->delayed_retry;
6341         /* use as temporary state variable */
6342         privp->timeout = SF_INVALID_TIMEOUT;
6343         mutex_exit(&sf->sf_mutex);
6344 
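             /*
              * identify what timed out by the completion routine attached
              * to the fcal packet: an ELS, a REPORT_LUNS, or an INQUIRY
              */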
6345         if (privp->fpkt->fcal_pkt_comp == sf_els_callback) {
6346                 /*
6347                  * take socal core if required. Timeouts for IB and hosts
6348                  * are not very interesting, so we take socal core only
6349                  * if the timeout is *not* for an IB or host.
6350                  */
6351                 if (sf_core && (sf_core & SF_CORE_ELS_TIMEOUT) &&
6352                     ((sf_alpa_to_switch[privp->dest_nport_id] &
6353                     0x0d) != 0x0d) && ((privp->dest_nport_id != 1) &&
6354                     (privp->dest_nport_id != 2) &&
6355                     (privp->dest_nport_id != 4) &&
6356                     (privp->dest_nport_id != 8) &&
6357                     (privp->dest_nport_id != 0xf))) {
6358                         sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6359                             sr_soc_hdr.sh_request_token;
6360                         (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6361                         sf_core = 0;
6362                 }
6363                 (void) sprintf(what, "ELS 0x%x", privp->els_code);
6364         } else if (privp->fpkt->fcal_pkt_comp == sf_reportlun_callback) {
6365                 if (sf_core && (sf_core & SF_CORE_REPORTLUN_TIMEOUT)) {
6366                         sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6367                             sr_soc_hdr.sh_request_token;
6368                         (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6369                         sf_core = 0;
6370                 }
6371                 timeout = SF_FCP_TIMEOUT;
6372                 (void) sprintf(what, "REPORT_LUNS");
6373         } else if (privp->fpkt->fcal_pkt_comp == sf_inq_callback) {
6374                 if (sf_core && (sf_core & SF_CORE_INQUIRY_TIMEOUT)) {
6375                         sf_token = (int *)(uintptr_t)
6376                             fpkt->fcal_socal_request.\
6377                             sr_soc_hdr.sh_request_token;
6378                         (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6379                         sf_core = 0;
6380                 }
6381                 timeout = SF_FCP_TIMEOUT;
6382                 (void) sprintf(what, "INQUIRY to LUN 0x%lx",
6383                     (long)SCSA_LUN(target));
6384         } else {
6385                 (void) sprintf(what, "UNKNOWN OPERATION");
6386         }
6387 
6388         if (dflag) {
6389                 /* delayed retry */
6390                 SF_DEBUG(2, (sf, CE_CONT,
6391                     "!sf%d: %s to target %x delayed retry\n",
6392                     ddi_get_instance(sf->sf_dip), what,
6393                     sf_alpa_to_switch[privp->dest_nport_id]));
6394                 privp->delayed_retry = FALSE;
6395                 goto try_again;
6396         }
6397 
6398         sf_log(sf, CE_NOTE, "!%s to target 0x%x alpa 0x%x timed out\n",
6399             what, sf_alpa_to_switch[privp->dest_nport_id],
6400             privp->dest_nport_id);
6401 
6402         rval = soc_abort(sf->sf_sochandle, sf->sf_socp, sf->sf_sochandle
6403             ->fcal_portno, fpkt, 1);
6404         if (rval == FCAL_ABORTED || rval == FCAL_ABORT_FAILED) {
6405                 SF_DEBUG(1, (sf, CE_NOTE, "!%s abort to al_pa %x succeeded\n",
6406                     what, privp->dest_nport_id));
6407 try_again:
6408 
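                 /*
                  * common retry path: unlink this ELS from the pending
                  * list, then retry it, give up on the target, or defer
                  * to a new LIP, depending on retry and lip counts
                  */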
6409                 mutex_enter(&sf->sf_mutex);
6410                 if (privp->prev != NULL) {
6411                         privp->prev->next = privp->next;
6412                 }
6413                 if (sf->sf_els_list == privp) {
6414                         sf->sf_els_list = privp->next;
6415                 }
6416                 if (privp->next != NULL) {
6417                         privp->next->prev = privp->prev;
6418                 }
6419                 privp->prev = privp->next = NULL;
6420                 if (lip_cnt == sf->sf_lip_cnt) {
6421                         privp->timeout = sf_watchdog_time + timeout;
6422                         if ((++(privp->retries) < sf_els_retries) ||
6423                             (dflag && (privp->retries < SF_BSY_RETRIES))) {
6424                                 mutex_exit(&sf->sf_mutex);
6425                                 sf_log(sf, CE_NOTE,
6426                                     "!%s to target 0x%x retrying\n",
6427                                     what,
6428                                     sf_alpa_to_switch[privp->dest_nport_id]);
6429                                 if (sf_els_transport(sf, privp) == 1) {
6430                                         mutex_enter(&sf->sf_mutex);
6431                                         return (sf->sf_els_list); /* success */
6432                                 }
6433                                 mutex_enter(&sf->sf_mutex);
6434                                 fpkt = NULL;
6435                         }
6436                         if ((lip_cnt == sf->sf_lip_cnt) &&
6437                             (els_code != LA_ELS_LOGO)) {
6438                                 if (target != NULL) {
6439                                         sf_offline_target(sf, target);
6440                                 }
6441                                 if (sf->sf_lip_cnt == lip_cnt) {
6442                                         sf->sf_device_count--;
6443                                         ASSERT(sf->sf_device_count >= 0);
6444                                         if (sf->sf_device_count == 0) {
6445                                                 sf_finish_init(sf,
6446                                                     sf->sf_lip_cnt);
6447                                         }
6448                                 }
6449                         }
6450                         privp = sf->sf_els_list;
6451                         mutex_exit(&sf->sf_mutex);
6452                         if (fpkt != NULL) {
6453                                 sf_els_free(fpkt);
6454                         }
6455                 } else {
6456                         mutex_exit(&sf->sf_mutex);
6457                         sf_els_free(privp->fpkt);
6458                         privp = NULL;
6459                 }
6460         } else {
6461                 if (sf_core && (sf_core & SF_CORE_ELS_FAILED)) {
6462                         sf_token = (int *)(uintptr_t)
6463                             fpkt->fcal_socal_request.\
6464                             sr_soc_hdr.sh_request_token;
6465                         (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6466                         sf_core = 0;
6467                 }
6468                 sf_log(sf, CE_NOTE, "%s abort to target 0x%x failed. "
6469                     "status=0x%x, forcing LIP\n", what,
6470                     sf_alpa_to_switch[privp->dest_nport_id], rval);
6471                 privp = NULL;
6472                 if (sf->sf_lip_cnt == lip_cnt) {
6473                         sf_force_lip(sf);
6474                 }
6475         }
6476 
6477         mutex_enter(&sf->sf_mutex);
6478         return (privp);
6479 }
6480 
6481 
6482 /*
6483  * called by timeout() when a target-reset delay may have expired
6484  */
6485 /*ARGSUSED*/
6486 static void
6487 sf_check_reset_delay(void *arg)
6488 {
6489         struct sf *sf;
6490         struct sf_target *target;
6491         struct sf_reset_list *rp, *tp;
6492         uint_t lip_cnt, reset_timeout_flag = FALSE;
6493         clock_t lb;
6494 
6495         lb = ddi_get_lbolt();
6496 
6497         mutex_enter(&sf_global_mutex);
6498 
6499         sf_reset_timeout_id = 0;
6500 
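             /*
              * walk every sf instance, scanning its list of delayed target
              * resets for entries whose delay has expired
              */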
6501         for (sf = sf_head; sf != NULL; sf = sf->sf_next) {
6502 
6503                 mutex_exit(&sf_global_mutex);
6504                 mutex_enter(&sf->sf_mutex);
6505 
6506                 /* is this type cast needed? */
6507                 tp = (struct sf_reset_list *)&sf->sf_reset_list;
6508 
6509                 rp = sf->sf_reset_list;
6510                 while (rp != NULL) {
6511                         if (((rp->timeout - lb) < 0) &&
6512                             (rp->lip_cnt == sf->sf_lip_cnt)) {
6513                                 tp->next = rp->next;
6514                                 mutex_exit(&sf->sf_mutex);
6515                                 target = rp->target;
6516                                 lip_cnt = rp->lip_cnt;
6517                                 kmem_free(rp, sizeof (struct sf_reset_list));
6518                                 /* abort all cmds for this target */
6519                                 while (target) {
6520                                         sf_abort_all(sf, target, FALSE,
6521                                             lip_cnt, TRUE);
6522                                         mutex_enter(&target->sft_mutex);
6523                                         if (lip_cnt == sf->sf_lip_cnt) {
6524                                                 target->sft_state &=
6525                                                     ~SF_TARGET_BUSY;
6526                                         }
6527                                         mutex_exit(&target->sft_mutex);
6528                                         target = target->sft_next_lun;
6529                                 }
6530                                 mutex_enter(&sf->sf_mutex);
6531                                 tp = (struct sf_reset_list *)
6532                                     &sf->sf_reset_list;
6533                                 rp = sf->sf_reset_list;
6534                                 lb = ddi_get_lbolt();
6535                         } else if (rp->lip_cnt != sf->sf_lip_cnt) {
6536                                 tp->next = rp->next;
6537                                 kmem_free(rp, sizeof (struct sf_reset_list));
6538                                 rp = tp->next;
6539                         } else {
6540                                 reset_timeout_flag = TRUE;
6541                                 tp = rp;
6542                                 rp = rp->next;
6543                         }
6544                 }
6545                 mutex_exit(&sf->sf_mutex);
6546                 mutex_enter(&sf_global_mutex);
6547         }
6548 
6549         if (reset_timeout_flag && (sf_reset_timeout_id == 0)) {
6550                 sf_reset_timeout_id = timeout(sf_check_reset_delay,
6551                     NULL, drv_usectohz(SF_TARGET_RESET_DELAY));
6552         }
6553 
6554         mutex_exit(&sf_global_mutex);
6555 }
6556 
6557 
6558 /*
6559  * called to "reset the bus", i.e. force loop initialization (and address
6560  * re-negotiation)
6561  */
6562 static void
6563 sf_force_lip(struct sf *sf)
6564 {
6565         int i;
6566         struct sf_target *target;
6567 
6568 
6569         /* disable restart of lip if we're suspended */
6570         mutex_enter(&sf->sf_mutex);
6571         if (sf->sf_state & SF_STATE_SUSPENDED) {
6572                 mutex_exit(&sf->sf_mutex);
6573                 SF_DEBUG(1, (sf, CE_CONT,
6574                     "sf_force_lip, sf%d: lip restart disabled "
6575                     "due to DDI_SUSPEND\n",
6576                     ddi_get_instance(sf->sf_dip)));
6577                 return;
6578         }
6579 
6580         sf_log(sf, CE_NOTE, "Forcing lip\n");
6581 
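             /* mark every target that isn't offline as busy during the LIP */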
6582         for (i = 0; i < sf_max_targets; i++) {
6583                 target = sf->sf_targets[i];
6584                 while (target != NULL) {
6585                         mutex_enter(&target->sft_mutex);
6586                         if (!(target->sft_state & SF_TARGET_OFFLINE))
6587                                 target->sft_state |= SF_TARGET_BUSY;
6588                         mutex_exit(&target->sft_mutex);
6589                         target = target->sft_next_lun;
6590                 }
6591         }
6592 
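             /*
              * bump the lip count, arm the offline timer, and take the
              * whole loop offline
              */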
6593         sf->sf_lip_cnt++;
6594         sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
6595         sf->sf_state = SF_STATE_OFFLINE;
6596         mutex_exit(&sf->sf_mutex);
6597         sf->sf_stats.lip_count++;            /* no mutex for this? */
6598 
6599 #ifdef DEBUG
6600         /* are we allowing LIPs ?? */
6601         if (sf_lip_flag != 0) {
6602 #endif
6603                 /* call the transport to force loop initialization */
6604                 if (((i = soc_force_lip(sf->sf_sochandle, sf->sf_socp,
6605                     sf->sf_sochandle->fcal_portno, 1,
6606                     FCAL_FORCE_LIP)) != FCAL_SUCCESS) &&
6607                     (i != FCAL_TIMEOUT)) {
6608                         /* force LIP failed */
6609                         if (sf_core && (sf_core & SF_CORE_LIP_FAILED)) {
6610                                 (void) soc_take_core(sf->sf_sochandle,
6611                                     sf->sf_socp);
6612                                 sf_core = 0;
6613                         }
6614 #ifdef DEBUG
6615                         /* are we allowing reset after LIP failed ?? */
6616                         if (sf_reset_flag != 0) {
6617 #endif
6618                                 /* restart socal after resetting it */
6619                                 sf_log(sf, CE_NOTE,
6620                                     "!Force lip failed, status code 0x%x."
6621                                     " Resetting\n", i);
6622                                 /* call transport to force a reset */
6623                                 soc_force_reset(sf->sf_sochandle, sf->sf_socp,
6624                                     sf->sf_sochandle->fcal_portno, 1);
6625 #ifdef  DEBUG
6626                         }
6627 #endif
6628                 }
6629 #ifdef  DEBUG
6630         }
6631 #endif
6632 }
6633 
6634 
6635 /*
6636  * called by the transport when an unsolicited ELS is received
6637  */
6638 static void
6639 sf_unsol_els_callback(void *arg, soc_response_t *srp, caddr_t payload)
6640 {
6641         struct sf *sf = (struct sf *)arg;
6642         els_payload_t   *els = (els_payload_t *)payload;
6643         struct la_els_rjt *rsp;
6644         int     i, tgt_id;
6645         uchar_t dest_id;
6646         struct fcal_packet *fpkt;
6647         fc_frame_header_t *hp;
6648         struct sf_els_hdr *privp;
6649 
6650 
6651         if ((els == NULL) || ((i = srp->sr_soc_hdr.sh_byte_cnt) == 0)) {
6652                 return;
6653         }
6654 
6655         if (i > SOC_CQE_PAYLOAD) {
6656                 i = SOC_CQE_PAYLOAD;
6657         }
6658 
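             /*
              * the frame's S_ID is the sender's AL_PA; map it to a
              * target index
              */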
6659         dest_id = (uchar_t)srp->sr_fc_frame_hdr.s_id;
6660         tgt_id = sf_alpa_to_switch[dest_id];
6661 
6662         switch (els->els_cmd.c.ls_command) {
6663 
6664         case LA_ELS_LOGO:
6665                 /*
6666                  * logout received -- log the fact
6667                  */
6668                 sf->sf_stats.tstats[tgt_id].logouts_recvd++;
6669                 sf_log(sf, CE_NOTE, "!LOGO recvd from target %x, %s\n",
6670                     tgt_id,
6671                     sf_lip_on_plogo ? "Forcing LIP...." : "");
6672                 if (sf_lip_on_plogo) {
6673                         sf_force_lip(sf);
6674                 }
6675                 break;
6676 
6677         default:  /* includes LA_ELS_PLOGI */
6678                 /*
6679                  * something besides a logout received -- we don't handle
6680                  * this, so send back a reject saying it's unsupported
6681                  */
6682 
6683                 sf_log(sf, CE_NOTE, "!ELS 0x%x recvd from target 0x%x\n",
6684                     els->els_cmd.c.ls_command, tgt_id);
6685 
6686 
6687                 /* allocate room for a response */
6688                 if (sf_els_alloc(sf, dest_id, sizeof (struct sf_els_hdr),
6689                     sizeof (struct la_els_rjt), sizeof (union sf_els_rsp),
6690                     (caddr_t *)&privp, (caddr_t *)&rsp) == NULL) {
6691                         break;
6692                 }
6693 
6694                 fpkt = privp->fpkt;
6695 
6696                 /* fill in pkt header */
6697                 hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
6698                 hp->r_ctl = R_CTL_ELS_RSP;
6699                 hp->f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
6700                 hp->ox_id = srp->sr_fc_frame_hdr.ox_id;
6701                 hp->rx_id = srp->sr_fc_frame_hdr.rx_id;
6702                 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
6703                     CQ_TYPE_OUTBOUND;
6704 
6705                 fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 1;
6706 
6707                 /* fill in response */
6708                 rsp->ls_code = LA_ELS_RJT;   /* reject this ELS */
6709                 rsp->mbz[0] = 0;
6710                 rsp->mbz[1] = 0;
6711                 rsp->mbz[2] = 0;
6712                 ((struct la_els_logi *)privp->rsp)->ls_code = LA_ELS_ACC;
6713                 *((int *)&rsp->reserved) = 0;
6714                 rsp->reason_code = RJT_UNSUPPORTED;
6715                 privp->retries = sf_els_retries;
6716                 privp->els_code = LA_ELS_RJT;
6717                 privp->timeout = (unsigned)0xffffffff;
6718                 (void) sf_els_transport(sf, privp);
6719                 break;
6720         }
6721 }
6722 
6723 
6724 /*
6725  * Error logging, printing, and debug print routines
6726  */
6727 
6728 /*PRINTFLIKE3*/
6729 static void
6730 sf_log(struct sf *sf, int level, const char *fmt, ...)
6731 {
6732         char buf[256];
6733         dev_info_t *dip;
6734         va_list ap;
6735 
6736         if (sf != NULL) {
6737                 dip = sf->sf_dip;
6738         } else {
6739                 dip = NULL;
6740         }
6741 
6742         va_start(ap, fmt);
6743         (void) vsnprintf(buf, sizeof (buf), fmt, ap);
6744         va_end(ap);
6745         scsi_log(dip, "sf", level, buf);
6746 }
6747 
6748 
6749 /*
6750  * called to get some sf kstats -- return 0 on success else return errno
6751  */
6752 static int
6753 sf_kstat_update(kstat_t *ksp, int rw)
6754 {
6755         struct sf *sf;
6756 
6757         if (rw == KSTAT_WRITE) {
6758                 /* can't write */
6759                 return (EACCES);
6760         }
6761 
6762         sf = ksp->ks_private;
6763         sf->sf_stats.ncmds = sf->sf_ncmds;
6764         sf->sf_stats.throttle_limit = sf->sf_throttle;
6765         sf->sf_stats.cr_pool_size = sf->sf_cr_pool_cnt;
6766 
6767         return (0);                             /* success */
6768 }
6769 
6770 
6771 /*
6772  * Unix Entry Points
6773  */
6774 
6775 /*
6776  * driver entry point for opens on control device
6777  */
6778 /* ARGSUSED */
6779 static int
6780 sf_open(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
6781 {
6782         dev_t dev = *dev_p;
6783         struct sf *sf;
6784 
6785 
6786         /* just ensure soft state exists for this device */
6787         sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev)));
6788         if (sf == NULL) {
6789                 return (ENXIO);
6790         }
6791 
6792         ++(sf->sf_check_n_close);
6793 
6794         return (0);
6795 }
6796 
6797 
6798 /*
6799  * driver entry point for last close on control device
6800  */
6801 /* ARGSUSED */
6802 static int
6803 sf_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
6804 {
6805         struct sf *sf;
6806 
6807         sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev)));
6808         if (sf == NULL) {
6809                 return (ENXIO);
6810         }
6811 
6812         if (!sf->sf_check_n_close) { /* if this flag is zero */
6813                 cmn_err(CE_WARN, "sf%d: trying to close unopened instance",
6814                     SF_MINOR2INST(getminor(dev)));
6815                 return (ENODEV);
6816         } else {
6817                 --(sf->sf_check_n_close);
6818         }
6819         return (0);
6820 }
6821 
6822 
6823 /*
6824  * driver entry point for sf ioctl commands
6825  */
6826 /* ARGSUSED */
6827 static int
6828 sf_ioctl(dev_t dev,
6829     int cmd, intptr_t arg, int mode, cred_t *cred_p, int *rval_p)
6830 {
6831         struct sf *sf;
6832         struct sf_target *target;
6833         uchar_t al_pa;
6834         struct sf_al_map map;
6835         int cnt, i;
6836         int     retval;                         /* return value */
6837         struct devctl_iocdata *dcp;
6838         dev_info_t *cdip;
6839         struct scsi_address ap;
6840         scsi_hba_tran_t *tran;
6841 
6842 
6843         sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev)));
6844         if (sf == NULL) {
6845                 return (ENXIO);
6846         }
6847 
6848         /* handle all ioctls */
6849         switch (cmd) {
6850 
6851         /*
6852          * We can use the generic implementation for these ioctls
6853          */
6854         case DEVCTL_DEVICE_GETSTATE:
6855         case DEVCTL_DEVICE_ONLINE:
6856         case DEVCTL_DEVICE_OFFLINE:
6857         case DEVCTL_BUS_GETSTATE:
6858                 return (ndi_devctl_ioctl(sf->sf_dip, cmd, arg, mode, 0));
6859 
6860         /*
6861          * return FC map
6862          */
6863         case SFIOCGMAP:
6864                 if ((sf->sf_lilp_map->lilp_magic != FCAL_LILP_MAGIC &&
6865                     sf->sf_lilp_map->lilp_magic != FCAL_BADLILP_MAGIC) ||
6866                     sf->sf_state != SF_STATE_ONLINE) {
6867                         retval = ENOENT;
6868                         goto dun;
6869                 }
6870                 mutex_enter(&sf->sf_mutex);
6871                 if (sf->sf_lilp_map->lilp_magic == FCAL_BADLILP_MAGIC) {
6872                         int i, j = 0;
6873 
6874                         /* Need to generate a fake lilp map */
6875                         for (i = 0; i < sf_max_targets; i++) {
6876                                 if (sf->sf_targets[i])
6877                                         sf->sf_lilp_map->lilp_alpalist[j++] =
6878                                             sf->sf_targets[i]->
6879                                             sft_hard_address;
6880                         }
6881                         sf->sf_lilp_map->lilp_length = (uchar_t)j;
6882                 }
6883                 cnt = sf->sf_lilp_map->lilp_length;
6884                 map.sf_count = (short)cnt;
6885                 bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn,
6886                     (caddr_t)&map.sf_hba_addr.sf_node_wwn,
6887                     sizeof (la_wwn_t));
6888                 bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
6889                     (caddr_t)&map.sf_hba_addr.sf_port_wwn,
6890                     sizeof (la_wwn_t));
6891                 map.sf_hba_addr.sf_al_pa = sf->sf_al_pa;
6892                 map.sf_hba_addr.sf_hard_address = 0;
6893                 map.sf_hba_addr.sf_inq_dtype = DTYPE_UNKNOWN;
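                 /*
                  * fill in one entry per AL_PA in the lilp map: our own
                  * WWNs for our AL_PA, a known target's info where we
                  * have it, zeroed WWNs otherwise
                  */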
6894                 for (i = 0; i < cnt; i++) {
6895                         al_pa = sf->sf_lilp_map->lilp_alpalist[i];
6896                         map.sf_addr_pair[i].sf_al_pa = al_pa;
6897                         if (al_pa == sf->sf_al_pa) {
6898                                 (void) bcopy((caddr_t)&sf->sf_sochandle
6899                                     ->fcal_n_wwn, (caddr_t)&map.
6900                                     sf_addr_pair[i].sf_node_wwn,
6901                                     sizeof (la_wwn_t));
6902                                 (void) bcopy((caddr_t)&sf->sf_sochandle
6903                                     ->fcal_p_wwn, (caddr_t)&map.
6904                                     sf_addr_pair[i].sf_port_wwn,
6905                                     sizeof (la_wwn_t));
6906                                 map.sf_addr_pair[i].sf_hard_address =
6907                                     al_pa;
6908                                 map.sf_addr_pair[i].sf_inq_dtype =
6909                                     DTYPE_PROCESSOR;
6910                                 continue;
6911                         }
6912                         target = sf->sf_targets[sf_alpa_to_switch[
6913                             al_pa]];
6914                         if (target != NULL) {
6915                                 mutex_enter(&target->sft_mutex);
6916                                 if (!(target->sft_state &
6917                                     (SF_TARGET_OFFLINE |
6918                                     SF_TARGET_BUSY))) {
6919                                         bcopy((caddr_t)&target->
6920                                             sft_node_wwn,
6921                                             (caddr_t)&map.sf_addr_pair
6922                                             [i].sf_node_wwn,
6923                                             sizeof (la_wwn_t));
6924                                         bcopy((caddr_t)&target->
6925                                             sft_port_wwn,
6926                                             (caddr_t)&map.sf_addr_pair
6927                                             [i].sf_port_wwn,
6928                                             sizeof (la_wwn_t));
6929                                         map.sf_addr_pair[i].
6930                                             sf_hard_address
6931                                             = target->sft_hard_address;
6932                                         map.sf_addr_pair[i].
6933                                             sf_inq_dtype
6934                                             = target->sft_device_type;
6935                                         mutex_exit(&target->sft_mutex);
6936                                         continue;
6937                                 }
6938                                 mutex_exit(&target->sft_mutex);
6939                         }
6940                         bzero((caddr_t)&map.sf_addr_pair[i].
6941                             sf_node_wwn, sizeof (la_wwn_t));
6942                         bzero((caddr_t)&map.sf_addr_pair[i].
6943                             sf_port_wwn, sizeof (la_wwn_t));
6944                         map.sf_addr_pair[i].sf_inq_dtype =
6945                             DTYPE_UNKNOWN;
6946                 }
6947                 mutex_exit(&sf->sf_mutex);
6948                 if (ddi_copyout((caddr_t)&map, (caddr_t)arg,
6949                     sizeof (struct sf_al_map), mode) != 0) {
6950                         retval = EFAULT;
6951                         goto dun;
6952                 }
6953                 break;
6954 
6955         /*
6956          * handle device control ioctls
6957          */
6958         case DEVCTL_DEVICE_RESET:
6959                 if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS) {
6960                         retval = EFAULT;
6961                         goto dun;
6962                 }
6963                 if ((ndi_dc_getname(dcp) == NULL) ||
6964                     (ndi_dc_getaddr(dcp) == NULL)) {
6965                         ndi_dc_freehdl(dcp);
6966                         retval = EINVAL;
6967                         goto dun;
6968                 }
6969                 cdip = ndi_devi_find(sf->sf_dip,
6970                     ndi_dc_getname(dcp), ndi_dc_getaddr(dcp));
6971                 ndi_dc_freehdl(dcp);
6972 
6973                 if (cdip == NULL) {
6974                         retval = ENXIO;
6975                         goto dun;
6976                 }
6977 
6978                 if ((target = sf_get_target_from_dip(sf, cdip)) == NULL) {
6979                         retval = ENXIO;
6980                         goto dun;
6981                 }
6982                 mutex_enter(&target->sft_mutex);
6983                 if (!(target->sft_state & SF_TARGET_INIT_DONE)) {
6984                         mutex_exit(&target->sft_mutex);
6985                         retval = ENXIO;
6986                         goto dun;
6987                 }
6988 
6989                 /* This is ugly */
6990                 tran = kmem_zalloc(scsi_hba_tran_size(), KM_SLEEP);
6991                 bcopy(target->sft_tran, tran, scsi_hba_tran_size());
6992                 mutex_exit(&target->sft_mutex);
6993                 ap.a_hba_tran = tran;
6994                 ap.a_target = sf_alpa_to_switch[target->sft_al_pa];
6995                 if (sf_reset(&ap, RESET_TARGET) == FALSE) {
6996                         retval = EIO;
6997                 } else {
6998                         retval = 0;
6999                 }
7000                 kmem_free(tran, scsi_hba_tran_size());
7001                 goto dun;
7002 
7003         case DEVCTL_BUS_QUIESCE:
7004         case DEVCTL_BUS_UNQUIESCE:
7005                 retval = ENOTSUP;
7006                 goto dun;
7007 
7008         case DEVCTL_BUS_RESET:
7009         case DEVCTL_BUS_RESETALL:
7010                 sf_force_lip(sf);
7011                 break;
7012 
7013         default:
7014                 retval = ENOTTY;
7015                 goto dun;
7016         }
7017 
7018         retval = 0;                             /* success */
7019 
7020 dun:
7021         return (retval);
7022 }
7023 
7024 
7025 /*
7026  * get the target given a DIP
7027  */
7028 static struct sf_target *
7029 sf_get_target_from_dip(struct sf *sf, dev_info_t *dip)
7030 {
7031         int i;
7032         struct sf_target *target;
7033 
7034 
7035         /* scan each hash queue for the DIP in question */
7036         for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
7037                 target = sf->sf_wwn_lists[i];
7038                 while (target != NULL) {
7039                         if (target->sft_dip == dip) {
7040                                 return (target); /* success: target found */
7041                         }
7042                         target = target->sft_next;
7043                 }
7044         }
7045         return (NULL);                          /* failure: target not found */
7046 }
7047 
7048 
7049 /*
7050  * called by the transport to get an event cookie
7051  */
7052 static int
7053 sf_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
7054     ddi_eventcookie_t *event_cookiep)
7055 {
7056         struct sf *sf;
7057 
7058         sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip));
7059         if (sf == NULL) {
7060                 /* can't find instance for this device */
7061                 return (DDI_FAILURE);
7062         }
7063 
7064         return (ndi_event_retrieve_cookie(sf->sf_event_hdl, rdip, name,
7065             event_cookiep, NDI_EVENT_NOPASS));
7066 
7067 }
7068 
7069 
7070 /*
7071  * called by the transport to add an event callback
7072  */
7073 static int
7074 sf_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
7075     ddi_eventcookie_t eventid, void (*callback)(dev_info_t *dip,
7076     ddi_eventcookie_t event, void *arg, void *impl_data), void *arg,
7077     ddi_callback_id_t *cb_id)
7078 {
7079         struct sf *sf;
7080 
7081         sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip));
7082         if (sf == NULL) {
7083                 /* can't find instance for this device */
7084                 return (DDI_FAILURE);
7085         }
7086 
7087         return (ndi_event_add_callback(sf->sf_event_hdl, rdip,
7088             eventid, callback, arg, NDI_SLEEP, cb_id));
7089 
7090 }
7091 
7092 
7093 /*
7094  * called by the transport to remove an event callback
7095  */
7096 static int
7097 sf_bus_remove_eventcall(dev_info_t *devi, ddi_callback_id_t cb_id)
7098 {
7099         struct sf *sf;
7100 
7101         sf = ddi_get_soft_state(sf_state, ddi_get_instance(devi));
7102         if (sf == NULL) {
7103                 /* can't find instance for this device */
7104                 return (DDI_FAILURE);
7105         }
7106 
7107         return (ndi_event_remove_callback(sf->sf_event_hdl, cb_id));
7108 }
7109 
7110 
7111 /*
7112  * called by the transport to post an event
7113  */
7114 static int
7115 sf_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
7116     ddi_eventcookie_t eventid, void *impldata)
7117 {
7118         ddi_eventcookie_t remove_cookie, cookie;
7119 
7120         struct sf *sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip));
7121 
7122         if (sf == NULL) {
7123                 /* no sf instance for this device */
7124                 return (NDI_FAILURE);
7125         }
7126 
7127         /* is this a remove event ?? */
7128         remove_cookie = ndi_event_tag_to_cookie(sf->sf_event_hdl,
7129             SF_EVENT_TAG_REMOVE);
7130 
7131         if (remove_cookie == eventid) {
7132                 struct sf_target *target;
7133                 /* handle remove event */
7134 
7135                 /* get the target for this event */
7136                 if ((target = sf_get_target_from_dip(sf, rdip)) != NULL) {
7137                         /*
7138                          * clear device info for this target and mark as
7139                          * not done
7140                          */
7141                         mutex_enter(&target->sft_mutex);
7142                         target->sft_dip = NULL;
7143                         target->sft_state &= ~SF_TARGET_INIT_DONE;
7144                         mutex_exit(&target->sft_mutex);
7145                         return (NDI_SUCCESS); /* event handled */
7146                 }
7147 
7148                 /* no target for this event */
7149                 return (NDI_FAILURE);
7150         }
7151 
7152         /* an insertion event */
7153         if (ndi_busop_get_eventcookie(dip, rdip, FCAL_INSERT_EVENT, &cookie)
7154             != NDI_SUCCESS) {
7155                 return (NDI_FAILURE);
7156         }
7157 
7158         return (ndi_post_event(dip, rdip, cookie, impldata));
7159 }
7160 
7161 
7162 /*
7163  * the sf hotplug daemon, one thread per sf instance
7164  */
7165 static void
7166 sf_hp_daemon(void *arg)
7167 {
7168         struct sf *sf = (struct sf *)arg;
7169         struct sf_hp_elem *elem;
7170         struct sf_target *target;
7171         int tgt_id;
7172         callb_cpr_t cprinfo;
7173 
7174         CALLB_CPR_INIT(&cprinfo, &sf->sf_hp_daemon_mutex,
7175             callb_generic_cpr, "sf_hp_daemon");
7176 
7177         mutex_enter(&sf->sf_hp_daemon_mutex);
7178 
7179         do {
7180                 while (sf->sf_hp_elem_head != NULL) {
7181 
7182                         /* save ptr to head of list */
7183                         elem = sf->sf_hp_elem_head;
7184 
7185                         /* take element off of list */
7186                         if (sf->sf_hp_elem_head == sf->sf_hp_elem_tail) {
7187                                 /* element only one in list -- list now empty */
7188                                 sf->sf_hp_elem_head = NULL;
7189                                 sf->sf_hp_elem_tail = NULL;
7190                         } else {
7191                                 /* remove element from head of list */
7192                                 sf->sf_hp_elem_head = sf->sf_hp_elem_head->next;
7193                         }
7194 
7195                         mutex_exit(&sf->sf_hp_daemon_mutex);
7196 
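                             /*
                              * do the online/offline work without
                              * holding the hotplug daemon mutex
                              */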
7197                         switch (elem->what) {
7198                         case SF_ONLINE:
7199                                 /* online this target */
7200                                 target = elem->target;
7201                                 (void) ndi_devi_online(elem->dip, 0);
7202                                 (void) ndi_event_retrieve_cookie(
7203                                     sf->sf_event_hdl,
7204                                     target->sft_dip, FCAL_INSERT_EVENT,
7205                                     &sf_insert_eid, NDI_EVENT_NOPASS);
7206                                 (void) ndi_event_run_callbacks(sf->sf_event_hdl,
7207                                     target->sft_dip, sf_insert_eid, NULL);
7208                                 break;
7209                         case SF_OFFLINE:
7210                                 /* offline this target */
7211                                 target = elem->target;
7212                                 tgt_id = sf_alpa_to_switch[target->sft_al_pa];
7213                                 /* don't do NDI_DEVI_REMOVE for now */
7214                                 if (ndi_devi_offline(elem->dip, 0) !=
7215                                     NDI_SUCCESS) {
7216                                         SF_DEBUG(1, (sf, CE_WARN, "target %x, "
7217                                             "device offline failed", tgt_id));
7218                                 } else {
7219                                         SF_DEBUG(1, (sf, CE_NOTE, "target %x, "
7220                                             "device offline succeeded\n",
7221                                             tgt_id));
7222                                 }
7223                                 break;
7224                         }
7225                         kmem_free(elem, sizeof (struct sf_hp_elem));
7226                         mutex_enter(&sf->sf_hp_daemon_mutex);
7227                 }
7228 
7229                 /* if exit is not already signaled */
7230                 if (sf->sf_hp_exit == 0) {
7231                         /* wait to be signaled by work or exit */
7232                         CALLB_CPR_SAFE_BEGIN(&cprinfo);
7233                         cv_wait(&sf->sf_hp_daemon_cv, &sf->sf_hp_daemon_mutex);
7234                         CALLB_CPR_SAFE_END(&cprinfo, &sf->sf_hp_daemon_mutex);
7235                 }
7236         } while (sf->sf_hp_exit == 0);
7237 
7238         /* sf_hp_daemon_mutex is dropped by CALLB_CPR_EXIT */
7239         CALLB_CPR_EXIT(&cprinfo);
7240         thread_exit();                  /* no more hotplug thread */
7241         /* NOTREACHED */
7242 }