Print this page
3373 gcc >= 4.5 concerns about offsetof()
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/sun/io/scsi/adapters/sf.c
+++ new/usr/src/uts/sun/io/scsi/adapters/sf.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
25 25 */
26 26
27 27 /*
28 28 * sf - Solaris Fibre Channel driver
29 29 *
30 30 * This module implements some of the Fibre Channel FC-4 layer, converting
31 31 * from FC frames to SCSI and back. (Note: no sequence management is done
32 32 * here, though.)
33 33 */
34 34
35 35 #if defined(lint) && !defined(DEBUG)
36 36 #define DEBUG 1
37 37 #endif
38 38
39 39 /*
40 40 * XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
41 41 * Need to use the ugly RAID LUN mappings in FCP Annex D
42 42 * to prevent SCSA from barfing. This *REALLY* needs to
43 43 * be addressed by the standards committee.
44 44 */
45 45 #define RAID_LUNS 1
46 46
47 47 #ifdef DEBUG
48 48 static int sfdebug = 0;
49 49 #include <sys/debug.h>
↓ open down ↓ |
49 lines elided |
↑ open up ↑ |
50 50
51 51 #define SF_DEBUG(level, args) \
52 52 if (sfdebug >= (level)) sf_log args
53 53 #else
54 54 #define SF_DEBUG(level, args)
55 55 #endif
56 56
57 57 static int sf_bus_config_debug = 0;
58 58
59 59 /* Why do I have to do this? */
60 -#define offsetof(s, m) (size_t)(&(((s *)0)->m))
60 +#if defined(__GNUC__)
61 +#define offsetof(s, m) __builtin_offsetof(s, m)
62 +#else
63 +#define offsetof(s, m) ((size_t)(&(((s *)0)->m)))
64 +#endif
61 65
62 66 #include <sys/scsi/scsi.h>
63 67 #include <sys/fc4/fcal.h>
64 68 #include <sys/fc4/fcp.h>
65 69 #include <sys/fc4/fcal_linkapp.h>
66 70 #include <sys/socal_cq_defs.h>
67 71 #include <sys/fc4/fcal_transport.h>
68 72 #include <sys/fc4/fcio.h>
69 73 #include <sys/scsi/adapters/sfvar.h>
70 74 #include <sys/scsi/impl/scsi_reset_notify.h>
71 75 #include <sys/stat.h>
72 76 #include <sys/varargs.h>
73 77 #include <sys/var.h>
74 78 #include <sys/thread.h>
75 79 #include <sys/proc.h>
76 80 #include <sys/kstat.h>
77 81 #include <sys/devctl.h>
78 82 #include <sys/scsi/targets/ses.h>
79 83 #include <sys/callb.h>
80 84
81 85 static int sf_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
82 86 static int sf_attach(dev_info_t *, ddi_attach_cmd_t);
83 87 static int sf_detach(dev_info_t *, ddi_detach_cmd_t);
84 88 static void sf_softstate_unlink(struct sf *);
85 89 static int sf_scsi_bus_config(dev_info_t *parent, uint_t flag,
86 90 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
87 91 static int sf_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
88 92 ddi_bus_config_op_t op, void *arg);
89 93 static int sf_scsi_tgt_init(dev_info_t *, dev_info_t *,
90 94 scsi_hba_tran_t *, struct scsi_device *);
91 95 static void sf_scsi_tgt_free(dev_info_t *, dev_info_t *,
92 96 scsi_hba_tran_t *, struct scsi_device *);
93 97 static int sf_pkt_alloc_extern(struct sf *, struct sf_pkt *,
94 98 int, int, int);
95 99 static void sf_pkt_destroy_extern(struct sf *, struct sf_pkt *);
96 100 static struct scsi_pkt *sf_scsi_init_pkt(struct scsi_address *,
97 101 struct scsi_pkt *, struct buf *, int, int, int, int, int (*)(), caddr_t);
98 102 static void sf_scsi_destroy_pkt(struct scsi_address *, struct scsi_pkt *);
99 103 static void sf_scsi_dmafree(struct scsi_address *, struct scsi_pkt *);
100 104 static void sf_scsi_sync_pkt(struct scsi_address *, struct scsi_pkt *);
101 105 static int sf_scsi_reset_notify(struct scsi_address *, int,
102 106 void (*)(caddr_t), caddr_t);
103 107 static int sf_scsi_get_name(struct scsi_device *, char *, int);
104 108 static int sf_scsi_get_bus_addr(struct scsi_device *, char *, int);
105 109 static int sf_add_cr_pool(struct sf *);
106 110 static int sf_cr_alloc(struct sf *, struct sf_pkt *, int (*)());
107 111 static void sf_cr_free(struct sf_cr_pool *, struct sf_pkt *);
108 112 static void sf_crpool_free(struct sf *);
109 113 static int sf_kmem_cache_constructor(void *, void *, int);
110 114 static void sf_kmem_cache_destructor(void *, void *);
111 115 static void sf_statec_callback(void *, int);
112 116 static int sf_login(struct sf *, uchar_t, uchar_t, uint_t, int);
113 117 static int sf_els_transport(struct sf *, struct sf_els_hdr *);
114 118 static void sf_els_callback(struct fcal_packet *);
115 119 static int sf_do_prli(struct sf *, struct sf_els_hdr *, struct la_els_logi *);
116 120 static int sf_do_adisc(struct sf *, struct sf_els_hdr *);
117 121 static int sf_do_reportlun(struct sf *, struct sf_els_hdr *,
118 122 struct sf_target *);
119 123 static void sf_reportlun_callback(struct fcal_packet *);
120 124 static int sf_do_inquiry(struct sf *, struct sf_els_hdr *,
121 125 struct sf_target *);
122 126 static void sf_inq_callback(struct fcal_packet *);
123 127 static struct fcal_packet *sf_els_alloc(struct sf *, uchar_t, int, int,
124 128 int, caddr_t *, caddr_t *);
125 129 static void sf_els_free(struct fcal_packet *);
126 130 static struct sf_target *sf_create_target(struct sf *,
127 131 struct sf_els_hdr *, int, int64_t);
128 132 #ifdef RAID_LUNS
129 133 static struct sf_target *sf_lookup_target(struct sf *, uchar_t *, int);
130 134 #else
131 135 static struct sf_target *sf_lookup_target(struct sf *, uchar_t *, int64_t);
132 136 #endif
133 137 static void sf_finish_init(struct sf *, int);
134 138 static void sf_offline_target(struct sf *, struct sf_target *);
135 139 static void sf_create_devinfo(struct sf *, struct sf_target *, int);
136 140 static int sf_create_props(dev_info_t *, struct sf_target *, int);
137 141 static int sf_commoncap(struct scsi_address *, char *, int, int, int);
138 142 static int sf_getcap(struct scsi_address *, char *, int);
139 143 static int sf_setcap(struct scsi_address *, char *, int, int);
140 144 static int sf_abort(struct scsi_address *, struct scsi_pkt *);
141 145 static int sf_reset(struct scsi_address *, int);
142 146 static void sf_abort_all(struct sf *, struct sf_target *, int, int, int);
143 147 static int sf_start(struct scsi_address *, struct scsi_pkt *);
144 148 static int sf_start_internal(struct sf *, struct sf_pkt *);
145 149 static void sf_fill_ids(struct sf *, struct sf_pkt *, struct sf_target *);
146 150 static int sf_prepare_pkt(struct sf *, struct sf_pkt *, struct sf_target *);
147 151 static int sf_dopoll(struct sf *, struct sf_pkt *);
148 152 static void sf_cmd_callback(struct fcal_packet *);
149 153 static void sf_throttle(struct sf *);
150 154 static void sf_watch(void *);
151 155 static void sf_throttle_start(struct sf *);
152 156 static void sf_check_targets(struct sf *);
153 157 static void sf_check_reset_delay(void *);
154 158 static int sf_target_timeout(struct sf *, struct sf_pkt *);
155 159 static void sf_force_lip(struct sf *);
156 160 static void sf_unsol_els_callback(void *, soc_response_t *, caddr_t);
157 161 static struct sf_els_hdr *sf_els_timeout(struct sf *, struct sf_els_hdr *);
158 162 /*PRINTFLIKE3*/
159 163 static void sf_log(struct sf *, int, const char *, ...);
160 164 static int sf_kstat_update(kstat_t *, int);
161 165 static int sf_open(dev_t *, int, int, cred_t *);
162 166 static int sf_close(dev_t, int, int, cred_t *);
163 167 static int sf_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
164 168 static struct sf_target *sf_get_target_from_dip(struct sf *, dev_info_t *);
165 169 static int sf_bus_get_eventcookie(dev_info_t *, dev_info_t *, char *,
166 170 ddi_eventcookie_t *);
167 171 static int sf_bus_add_eventcall(dev_info_t *, dev_info_t *,
168 172 ddi_eventcookie_t, void (*)(), void *, ddi_callback_id_t *cb_id);
169 173 static int sf_bus_remove_eventcall(dev_info_t *devi, ddi_callback_id_t cb_id);
170 174 static int sf_bus_post_event(dev_info_t *, dev_info_t *,
171 175 ddi_eventcookie_t, void *);
172 176
173 177 static void sf_hp_daemon(void *);
174 178
/*
 * this is required to be able to supply a control node
 * where ioctls can be executed
 *
 * Only open/close/ioctl are implemented (for the "devctl" and "fc"
 * minor nodes created in sf_attach()); all data-path entry points are
 * nodev since I/O goes through SCSA, not this node.
 */
struct cb_ops sf_cb_ops = {
	sf_open,			/* open */
	sf_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	sf_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* poll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_MP | D_NEW | D_HOTPLUG	/* driver flags */

};
197 201
/*
 * autoconfiguration routines.
 *
 * Device operations vector handed to mod_install() via modldrv below.
 * Note quiesce is ddi_quiesce_not_supported, i.e. this driver opts out
 * of fast-reboot quiescing.
 */
static struct dev_ops sf_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt */
	sf_info,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	sf_attach,		/* attach */
	sf_detach,		/* detach */
	nodev,			/* reset */
	&sf_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	NULL,			/* power management */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};
215 219
/* Name and version strings; sf_version is also reported via kstats. */
#define	SF_NAME	"FC-AL FCP Nexus Driver"	/* Name of the module. */
static	char	sf_version[] = "1.72 08/19/2008"; /* version of the module */

/* module-linkage glue consumed by _init()/_fini()/_info() below */
static struct modldrv modldrv = {
	&mod_driverops, /* Type of module. This one is a driver */
	SF_NAME,
	&sf_ops,	/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};
228 232
229 233 /* XXXXXX The following is here to handle broken targets -- remove it later */
230 234 static int sf_reportlun_forever = 0;
231 235 /* XXXXXX */
232 236 static int sf_lip_on_plogo = 0;
233 237 static int sf_els_retries = SF_ELS_RETRIES;
234 238 static struct sf *sf_head = NULL;
235 239 static int sf_target_scan_cnt = 4;
236 240 static int sf_pkt_scan_cnt = 5;
237 241 static int sf_pool_scan_cnt = 1800;
238 242 static void *sf_state = NULL;
239 243 static int sf_watchdog_init = 0;
240 244 static int sf_watchdog_time = 0;
241 245 static int sf_watchdog_timeout = 1;
242 246 static int sf_watchdog_tick;
243 247 static int sf_watch_running = 0;
244 248 static timeout_id_t sf_watchdog_id;
245 249 static timeout_id_t sf_reset_timeout_id;
246 250 static int sf_max_targets = SF_MAX_TARGETS;
247 251 static kmutex_t sf_global_mutex;
248 252 static int sf_core = 0;
249 253 int *sf_token = NULL; /* Must not be static or lint complains. */
250 254 static kcondvar_t sf_watch_cv;
251 255 extern pri_t minclsyspri;
252 256 static ddi_eventcookie_t sf_insert_eid;
253 257 static ddi_eventcookie_t sf_remove_eid;
254 258
/*
 * NDI event definitions for target insert/remove notification; a copy of
 * this table is bound per-instance in sf_attach() via ndi_event_bind_set().
 * Note the differing event priority levels (EPL_KERNEL vs EPL_INTERRUPT).
 */
static ndi_event_definition_t	sf_event_defs[] = {
	{ SF_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL, 0 },
	{ SF_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT, 0 }
};

/* number of entries in sf_event_defs[] */
#define	SF_N_NDI_EVENTS	\
	(sizeof (sf_event_defs) / sizeof (ndi_event_definition_t))
262 266
263 267 #ifdef DEBUG
264 268 static int sf_lip_flag = 1; /* bool: to allow LIPs */
265 269 static int sf_reset_flag = 1; /* bool: to allow reset after LIP */
266 270 static int sf_abort_flag = 0; /* bool: to do just one abort */
267 271 #endif
268 272
269 273 extern int64_t ddi_get_lbolt64(void);
270 274
/*
 * for converting between target number (switch) and hard address/AL_PA
 *
 * sf_switch_to_alpa[]: 126 entries; index is the target/switch number,
 * value is the corresponding hard address (AL_PA).
 * sf_alpa_to_switch[]: 240 entries; index is the AL_PA (0x00-0xef),
 * value is the target/switch number.  Invalid AL_PAs map to 0x00 --
 * note 0x00 is also the legitimate mapping for AL_PA 0xef (target 0),
 * so callers presumably validate the AL_PA separately.
 */
static uchar_t sf_switch_to_alpa[] = {
	0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
	0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
	0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
	0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
	0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
	0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
	0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
	0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
};

static uchar_t sf_alpa_to_switch[] = {
	0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
	0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
	0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
	0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
	0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
	0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
	0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
	0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
	0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
	0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
	0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
	0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
	0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
	0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
	0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
	0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
	0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
	0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
316 320
317 321 /*
318 322 * these macros call the proper transport-layer function given
319 323 * a particular transport
320 324 */
321 325 #define soc_transport(a, b, c, d) (*a->fcal_ops->fcal_transport)(b, c, d)
322 326 #define soc_transport_poll(a, b, c, d)\
323 327 (*a->fcal_ops->fcal_transport_poll)(b, c, d)
324 328 #define soc_get_lilp_map(a, b, c, d, e)\
325 329 (*a->fcal_ops->fcal_lilp_map)(b, c, d, e)
326 330 #define soc_force_lip(a, b, c, d, e)\
327 331 (*a->fcal_ops->fcal_force_lip)(b, c, d, e)
328 332 #define soc_abort(a, b, c, d, e)\
329 333 (*a->fcal_ops->fcal_abort_cmd)(b, c, d, e)
330 334 #define soc_force_reset(a, b, c, d)\
331 335 (*a->fcal_ops->fcal_force_reset)(b, c, d)
332 336 #define soc_add_ulp(a, b, c, d, e, f, g, h)\
333 337 (*a->fcal_ops->fcal_add_ulp)(b, c, d, e, f, g, h)
334 338 #define soc_remove_ulp(a, b, c, d, e)\
335 339 (*a->fcal_ops->fcal_remove_ulp)(b, c, d, e)
336 340 #define soc_take_core(a, b) (*a->fcal_ops->fcal_take_core)(b)
337 341
338 342
339 343 /* power management property defines (should be in a common include file?) */
340 344 #define PM_HARDWARE_STATE_PROP "pm-hardware-state"
341 345 #define PM_NEEDS_SUSPEND_RESUME "needs-suspend-resume"
342 346
343 347
344 348 /* node properties */
345 349 #define NODE_WWN_PROP "node-wwn"
346 350 #define PORT_WWN_PROP "port-wwn"
347 351 #define LIP_CNT_PROP "lip-count"
348 352 #define TARGET_PROP "target"
349 353 #define LUN_PROP "lun"
350 354
351 355
352 356 /*
353 357 * initialize this driver and install this module
354 358 */
355 359 int
356 360 _init(void)
357 361 {
358 362 int i;
359 363
360 364 i = ddi_soft_state_init(&sf_state, sizeof (struct sf),
361 365 SF_INIT_ITEMS);
362 366 if (i != 0)
363 367 return (i);
364 368
365 369 if ((i = scsi_hba_init(&modlinkage)) != 0) {
366 370 ddi_soft_state_fini(&sf_state);
367 371 return (i);
368 372 }
369 373
370 374 mutex_init(&sf_global_mutex, NULL, MUTEX_DRIVER, NULL);
371 375 sf_watch_running = 0;
372 376 cv_init(&sf_watch_cv, NULL, CV_DRIVER, NULL);
373 377
374 378 if ((i = mod_install(&modlinkage)) != 0) {
375 379 mutex_destroy(&sf_global_mutex);
376 380 cv_destroy(&sf_watch_cv);
377 381 scsi_hba_fini(&modlinkage);
378 382 ddi_soft_state_fini(&sf_state);
379 383 return (i);
380 384 }
381 385
382 386 return (i);
383 387 }
384 388
385 389
386 390 /*
387 391 * remove this driver module from the system
388 392 */
389 393 int
390 394 _fini(void)
391 395 {
392 396 int i;
393 397
394 398 if ((i = mod_remove(&modlinkage)) == 0) {
395 399 scsi_hba_fini(&modlinkage);
396 400 mutex_destroy(&sf_global_mutex);
397 401 cv_destroy(&sf_watch_cv);
398 402 ddi_soft_state_fini(&sf_state);
399 403 }
400 404 return (i);
401 405 }
402 406
403 407
/* report module information (modinfo); pure delegation to the framework */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
409 413
410 414 /*
411 415 * Given the device number return the devinfo pointer or instance
412 416 */
413 417 /*ARGSUSED*/
414 418 static int
415 419 sf_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
416 420 {
417 421 int instance = SF_MINOR2INST(getminor((dev_t)arg));
418 422 struct sf *sf;
419 423
420 424 switch (infocmd) {
421 425 case DDI_INFO_DEVT2DEVINFO:
422 426 sf = ddi_get_soft_state(sf_state, instance);
423 427 if (sf != NULL)
424 428 *result = sf->sf_dip;
425 429 else {
426 430 *result = NULL;
427 431 return (DDI_FAILURE);
428 432 }
429 433 break;
430 434
431 435 case DDI_INFO_DEVT2INSTANCE:
432 436 *result = (void *)(uintptr_t)instance;
433 437 break;
434 438 default:
435 439 return (DDI_FAILURE);
436 440 }
437 441 return (DDI_SUCCESS);
438 442 }
439 443
/*
 * either attach or resume this driver
 *
 * DDI_ATTACH: allocate and initialize all per-instance state (cmd/rsp
 * pool, packet kmem cache, LILP-map DMA resources, mutexes/cvs, minor
 * nodes, SCSA tran, NDI event set, kstats, hotplug daemon thread), link
 * the instance onto the global list, register as a ULP with the FC-AL
 * transport and force a LIP to bring the loop up.
 *
 * DDI_RESUME: clear SF_STATE_SUSPENDED, re-register with the transport
 * and force loop initialization; nothing is reallocated.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.  On an attach failure, control
 * jumps to the 'fail' label, which releases whatever was acquired so
 * far (tracked via mutex_initted/handle_bound and NULL checks).
 */
static int
sf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	int mutex_initted = FALSE;	/* TRUE once all mutexes/cvs exist */
	uint_t ccount;
	size_t i, real_size;
	struct fcal_transport *handle;
	char buf[64];
	struct sf *sf, *tsf;
	scsi_hba_tran_t *tran = NULL;
	int handle_bound = FALSE;	/* TRUE once lilp DMA handle bound */
	kthread_t *tp;


	switch ((int)cmd) {

	case DDI_RESUME:

		/*
		 * we've previously been SF_STATE_OFFLINEd by a DDI_SUSPEND,
		 * so time to undo that and get going again by forcing a
		 * lip
		 */

		instance = ddi_get_instance(dip);

		sf = ddi_get_soft_state(sf_state, instance);
		SF_DEBUG(2, (sf, CE_CONT,
		    "sf_attach: DDI_RESUME for sf%d\n", instance));
		if (sf == NULL) {
			cmn_err(CE_WARN, "sf%d: bad soft state", instance);
			return (DDI_FAILURE);
		}

		/*
		 * clear suspended flag so that normal operations can resume
		 */
		mutex_enter(&sf->sf_mutex);
		sf->sf_state &= ~SF_STATE_SUSPENDED;
		mutex_exit(&sf->sf_mutex);

		/*
		 * force a login by setting our state to offline
		 */
		sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
		sf->sf_state = SF_STATE_OFFLINE;

		/*
		 * call transport routine to register state change and
		 * ELS callback routines (to register us as a ULP)
		 */
		soc_add_ulp(sf->sf_sochandle, sf->sf_socp,
		    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP,
		    sf_statec_callback, sf_unsol_els_callback, NULL, sf);

		/*
		 * call transport routine to force loop initialization
		 */
		(void) soc_force_lip(sf->sf_sochandle, sf->sf_socp,
		    sf->sf_sochandle->fcal_portno, 0, FCAL_NO_LIP);

		/*
		 * increment watchdog init flag, setting watchdog timeout
		 * if we are the first (since somebody has to do it)
		 */
		mutex_enter(&sf_global_mutex);
		if (!sf_watchdog_init++) {
			mutex_exit(&sf_global_mutex);
			sf_watchdog_id = timeout(sf_watch,
			    (caddr_t)0, sf_watchdog_tick);
		} else {
			mutex_exit(&sf_global_mutex);
		}

		return (DDI_SUCCESS);

	case DDI_ATTACH:

		/*
		 * this instance attaching for the first time
		 */

		instance = ddi_get_instance(dip);

		if (ddi_soft_state_zalloc(sf_state, instance) !=
		    DDI_SUCCESS) {
			cmn_err(CE_WARN, "sf%d: failed to allocate soft state",
			    instance);
			return (DDI_FAILURE);
		}

		sf = ddi_get_soft_state(sf_state, instance);
		SF_DEBUG(4, (sf, CE_CONT,
		    "sf_attach: DDI_ATTACH for sf%d\n", instance));
		if (sf == NULL) {
			/* this shouldn't happen since we just allocated it */
			cmn_err(CE_WARN, "sf%d: bad soft state", instance);
			return (DDI_FAILURE);
		}

		/*
		 * from this point on, if there's an error, we must de-allocate
		 * soft state before returning DDI_FAILURE
		 */

		if ((handle = ddi_get_parent_data(dip)) == NULL) {
			cmn_err(CE_WARN,
			    "sf%d: failed to obtain transport handle",
			    instance);
			goto fail;
		}

		/* fill in our soft state structure */
		sf->sf_dip = dip;
		sf->sf_state = SF_STATE_INIT;
		sf->sf_throttle = handle->fcal_cmdmax;
		sf->sf_sochandle = handle;
		sf->sf_socp = handle->fcal_handle;
		sf->sf_check_n_close = 0;

		/* create a command/response buffer pool for this instance */
		if (sf_add_cr_pool(sf) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "sf%d: failed to allocate command/response pool",
			    instance);
			goto fail;
		}

		/* create a a cache for this instance */
		(void) sprintf(buf, "sf%d_cache", instance);
		sf->sf_pkt_cache = kmem_cache_create(buf,
		    sizeof (fcal_packet_t) + sizeof (struct sf_pkt) +
		    scsi_pkt_size(), 8,
		    sf_kmem_cache_constructor, sf_kmem_cache_destructor,
		    NULL, NULL, NULL, 0);
		if (sf->sf_pkt_cache == NULL) {
			cmn_err(CE_WARN, "sf%d: failed to allocate kmem cache",
			    instance);
			goto fail;
		}

		/* set up a handle and allocate memory for DMA */
		if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->
		    fcal_dmaattr, DDI_DMA_DONTWAIT, NULL, &sf->
		    sf_lilp_dmahandle) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "sf%d: failed to allocate dma handle for lilp map",
			    instance);
			goto fail;
		}
		/* +1 so the map can be rounded up to an even address below */
		i = sizeof (struct fcal_lilp_map) + 1;
		if (ddi_dma_mem_alloc(sf->sf_lilp_dmahandle,
		    i, sf->sf_sochandle->
		    fcal_accattr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
		    (caddr_t *)&sf->sf_lilp_map, &real_size,
		    &sf->sf_lilp_acchandle) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "sf%d: failed to allocate lilp map",
			    instance);
			goto fail;
		}
		if (real_size < i) {
			/* no error message ??? */
			goto fail;		/* trouble allocating memory */
		}

		/*
		 * set up the address for the DMA transfers (getting a cookie)
		 */
		if (ddi_dma_addr_bind_handle(sf->sf_lilp_dmahandle, NULL,
		    (caddr_t)sf->sf_lilp_map, real_size,
		    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
		    &sf->sf_lilp_dmacookie, &ccount) != DDI_DMA_MAPPED) {
			cmn_err(CE_WARN,
			    "sf%d: failed to bind dma handle for lilp map",
			    instance);
			goto fail;
		}
		handle_bound = TRUE;
		/* ensure only one cookie was allocated */
		if (ccount != 1) {
			goto fail;
		}

		/* ensure LILP map and DMA cookie addresses are even?? */
		sf->sf_lilp_map = (struct fcal_lilp_map *)(((uintptr_t)sf->
		    sf_lilp_map + 1) & ~1);
		sf->sf_lilp_dmacookie.dmac_address = (sf->
		    sf_lilp_dmacookie.dmac_address + 1) & ~1;

		/* set up all of our mutexes and condition variables */
		mutex_init(&sf->sf_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&sf->sf_cmd_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&sf->sf_cr_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&sf->sf_hp_daemon_mutex, NULL, MUTEX_DRIVER, NULL);
		cv_init(&sf->sf_cr_cv, NULL, CV_DRIVER, NULL);
		cv_init(&sf->sf_hp_daemon_cv, NULL, CV_DRIVER, NULL);

		mutex_initted = TRUE;

		/* create our devctl minor node */
		if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
		    SF_INST2DEVCTL_MINOR(instance),
		    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "sf%d: ddi_create_minor_node failed"
			    " for devctl", instance);
			goto fail;
		}

		/* create fc minor node */
		if (ddi_create_minor_node(dip, "fc", S_IFCHR,
		    SF_INST2FC_MINOR(instance), DDI_NT_FC_ATTACHMENT_POINT,
		    0) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "sf%d: ddi_create_minor_node failed"
			    " for fc", instance);
			goto fail;
		}
		/* allocate a SCSI transport structure */
		tran = scsi_hba_tran_alloc(dip, 0);
		if (tran == NULL) {
			/* remove all minor nodes created */
			ddi_remove_minor_node(dip, NULL);
			cmn_err(CE_WARN, "sf%d: scsi_hba_tran_alloc failed",
			    instance);
			goto fail;
		}

		/* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
		scsi_size_clean(dip);		/* SCSI_SIZE_CLEAN_VERIFY ok */

		/* save ptr to new transport structure and fill it in */
		sf->sf_tran = tran;

		tran->tran_hba_private		= sf;
		tran->tran_tgt_private		= NULL;
		tran->tran_tgt_init		= sf_scsi_tgt_init;
		tran->tran_tgt_probe		= NULL;
		tran->tran_tgt_free		= sf_scsi_tgt_free;

		tran->tran_start		= sf_start;
		tran->tran_abort		= sf_abort;
		tran->tran_reset		= sf_reset;
		tran->tran_getcap		= sf_getcap;
		tran->tran_setcap		= sf_setcap;
		tran->tran_init_pkt		= sf_scsi_init_pkt;
		tran->tran_destroy_pkt		= sf_scsi_destroy_pkt;
		tran->tran_dmafree		= sf_scsi_dmafree;
		tran->tran_sync_pkt		= sf_scsi_sync_pkt;
		tran->tran_reset_notify		= sf_scsi_reset_notify;

		/*
		 * register event notification routines with scsa
		 */
		tran->tran_get_eventcookie	= sf_bus_get_eventcookie;
		tran->tran_add_eventcall	= sf_bus_add_eventcall;
		tran->tran_remove_eventcall	= sf_bus_remove_eventcall;
		tran->tran_post_event		= sf_bus_post_event;

		/*
		 * register bus configure/unconfigure
		 */
		tran->tran_bus_config		= sf_scsi_bus_config;
		tran->tran_bus_unconfig		= sf_scsi_bus_unconfig;

		/*
		 * allocate an ndi event handle
		 */
		sf->sf_event_defs = (ndi_event_definition_t *)
		    kmem_zalloc(sizeof (sf_event_defs), KM_SLEEP);

		bcopy(sf_event_defs, sf->sf_event_defs,
		    sizeof (sf_event_defs));

		(void) ndi_event_alloc_hdl(dip, NULL,
		    &sf->sf_event_hdl, NDI_SLEEP);

		sf->sf_events.ndi_events_version = NDI_EVENTS_REV1;
		sf->sf_events.ndi_n_events = SF_N_NDI_EVENTS;
		sf->sf_events.ndi_event_defs = sf->sf_event_defs;

		if (ndi_event_bind_set(sf->sf_event_hdl,
		    &sf->sf_events, NDI_SLEEP) != NDI_SUCCESS) {
			goto fail;
		}

		tran->tran_get_name		= sf_scsi_get_name;
		tran->tran_get_bus_addr		= sf_scsi_get_bus_addr;

		/* setup and attach SCSI hba transport */
		if (scsi_hba_attach_setup(dip, sf->sf_sochandle->
		    fcal_dmaattr, tran, SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "sf%d: scsi_hba_attach_setup failed",
			    instance);
			goto fail;
		}

		/* set up kstats */
		if ((sf->sf_ksp = kstat_create("sf", instance, "statistics",
		    "controller", KSTAT_TYPE_RAW, sizeof (struct sf_stats),
		    KSTAT_FLAG_VIRTUAL)) == NULL) {
			/* non-fatal: we attach fine without kstats */
			cmn_err(CE_WARN, "sf%d: failed to create kstat",
			    instance);
		} else {
			sf->sf_stats.version = 2;
			(void) sprintf(sf->sf_stats.drvr_name,
			    "%s: %s", SF_NAME, sf_version);
			sf->sf_ksp->ks_data = (void *)&sf->sf_stats;
			sf->sf_ksp->ks_private = sf;
			sf->sf_ksp->ks_update = sf_kstat_update;
			kstat_install(sf->sf_ksp);
		}

		/* create the hotplug thread */
		mutex_enter(&sf->sf_hp_daemon_mutex);
		tp = thread_create(NULL, 0,
		    (void (*)())sf_hp_daemon, sf, 0, &p0, TS_RUN, minclsyspri);
		sf->sf_hp_tid = tp->t_did;
		mutex_exit(&sf->sf_hp_daemon_mutex);

		/* add this soft state instance to the head of the list */
		mutex_enter(&sf_global_mutex);
		sf->sf_next = sf_head;
		tsf = sf_head;
		sf_head = sf;

		/*
		 * find entry in list that has the same FC-AL handle (if any)
		 */
		while (tsf != NULL) {
			if (tsf->sf_socp == sf->sf_socp) {
				break;	/* found matching entry */
			}
			tsf = tsf->sf_next;
		}

		if (tsf != NULL) {
			/* if we found a matching entry keep track of it */
			sf->sf_sibling = tsf;
		}

		/*
		 * increment watchdog init flag, setting watchdog timeout
		 * if we are the first (since somebody has to do it)
		 * (sf_global_mutex is still held from the list insert above)
		 */
		if (!sf_watchdog_init++) {
			mutex_exit(&sf_global_mutex);
			sf_watchdog_tick = sf_watchdog_timeout *
			    drv_usectohz(1000000);
			sf_watchdog_id = timeout(sf_watch,
			    NULL, sf_watchdog_tick);
		} else {
			mutex_exit(&sf_global_mutex);
		}

		if (tsf != NULL) {
			/*
			 * set up matching entry to be our sibling
			 */
			mutex_enter(&tsf->sf_mutex);
			tsf->sf_sibling = sf;
			mutex_exit(&tsf->sf_mutex);
		}

		/*
		 * create this property so that PM code knows we want
		 * to be suspended at PM time
		 */
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
		    PM_HARDWARE_STATE_PROP, PM_NEEDS_SUSPEND_RESUME);

		/* log the fact that we have a new device */
		ddi_report_dev(dip);

		/*
		 * force a login by setting our state to offline
		 */
		sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
		sf->sf_state = SF_STATE_OFFLINE;

		/*
		 * call transport routine to register state change and
		 * ELS callback routines (to register us as a ULP)
		 */
		soc_add_ulp(sf->sf_sochandle, sf->sf_socp,
		    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP,
		    sf_statec_callback, sf_unsol_els_callback, NULL, sf);

		/*
		 * call transport routine to force loop initialization
		 */
		(void) soc_force_lip(sf->sf_sochandle, sf->sf_socp,
		    sf->sf_sochandle->fcal_portno, 0, FCAL_NO_LIP);
		sf->sf_reset_time = ddi_get_lbolt64();
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

fail:
	cmn_err(CE_WARN, "sf%d: failed to attach", instance);

	/*
	 * Unbind and free event set
	 */
	if (sf->sf_event_hdl) {
		(void) ndi_event_unbind_set(sf->sf_event_hdl,
		    &sf->sf_events, NDI_SLEEP);
		(void) ndi_event_free_hdl(sf->sf_event_hdl);
	}

	if (sf->sf_event_defs) {
		kmem_free(sf->sf_event_defs, sizeof (sf_event_defs));
	}

	if (sf->sf_tran != NULL) {
		scsi_hba_tran_free(sf->sf_tran);
	}
	while (sf->sf_cr_pool != NULL) {
		sf_crpool_free(sf);
	}
	if (sf->sf_lilp_dmahandle != NULL) {
		if (handle_bound) {
			(void) ddi_dma_unbind_handle(sf->sf_lilp_dmahandle);
		}
		ddi_dma_free_handle(&sf->sf_lilp_dmahandle);
	}
	if (sf->sf_pkt_cache != NULL) {
		kmem_cache_destroy(sf->sf_pkt_cache);
	}
	if (sf->sf_lilp_map != NULL) {
		ddi_dma_mem_free(&sf->sf_lilp_acchandle);
	}
	if (sf->sf_ksp != NULL) {
		kstat_delete(sf->sf_ksp);
	}
	if (mutex_initted) {
		mutex_destroy(&sf->sf_mutex);
		mutex_destroy(&sf->sf_cmd_mutex);
		mutex_destroy(&sf->sf_cr_mutex);
		mutex_destroy(&sf->sf_hp_daemon_mutex);
		cv_destroy(&sf->sf_cr_cv);
		cv_destroy(&sf->sf_hp_daemon_cv);
	}
	mutex_enter(&sf_global_mutex);

	/*
	 * kill off the watchdog if we are the last instance
	 *
	 * NOTE(review): sf_watchdog_init is decremented here even when the
	 * failure occurred before this instance incremented it (any goto
	 * fail prior to the "!sf_watchdog_init++" above), which looks like
	 * it could unbalance the counter -- confirm before relying on it.
	 */
	if (!--sf_watchdog_init) {
		timeout_id_t tid = sf_watchdog_id;
		mutex_exit(&sf_global_mutex);
		(void) untimeout(tid);
	} else {
		mutex_exit(&sf_global_mutex);
	}

	ddi_soft_state_free(sf_state, instance);

	if (tran != NULL) {
		/* remove all minor nodes */
		ddi_remove_minor_node(dip, NULL);
	}

	return (DDI_FAILURE);
}
909 913
910 914
/*
 * sf_detach() - suspend (DDI_SUSPEND) or tear down (DDI_DETACH) an
 * instance of this driver.
 *
 * Returns DDI_SUCCESS on completion, else DDI_FAILURE (bad soft state
 * or unknown command).
 */
/* ARGSUSED */
static int
sf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct sf *sf;
	int instance;
	int i;
	struct sf_target *target;
	timeout_id_t tid;



	/* NO OTHER THREADS ARE RUNNING */

	instance = ddi_get_instance(dip);

	if ((sf = ddi_get_soft_state(sf_state, instance)) == NULL) {
		cmn_err(CE_WARN, "sf_detach, sf%d: bad soft state", instance);
		return (DDI_FAILURE);
	}

	switch (cmd) {

	case DDI_SUSPEND:
		/*
		 * suspend our instance
		 */

		SF_DEBUG(2, (sf, CE_CONT,
		    "sf_detach: DDI_SUSPEND for sf%d\n", instance));
		/*
		 * There is a race condition in socal where while doing
		 * callbacks if a ULP removes it self from the callback list
		 * the for loop in socal may panic as cblist is junk and
		 * while trying to get cblist->next the system will panic.
		 */

		/* call transport to remove our unregister our callbacks */
		soc_remove_ulp(sf->sf_sochandle, sf->sf_socp,
		    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP, sf);

		/*
		 * begin process of clearing outstanding commands
		 * by issuing a lip
		 */
		sf_force_lip(sf);

		/*
		 * toggle the device OFFLINE in order to cause
		 * outstanding commands to drain
		 */
		mutex_enter(&sf->sf_mutex);
		/*
		 * bump the LIP generation count so responses belonging to
		 * the previous generation can be recognized as stale
		 */
		sf->sf_lip_cnt++;
		sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
		sf->sf_state = (SF_STATE_OFFLINE | SF_STATE_SUSPENDED);
		/* mark every target (and each of its LUNs) busy/marked */
		for (i = 0; i < sf_max_targets; i++) {
			target = sf->sf_targets[i];
			if (target != NULL) {
				struct sf_target *ntarget;

				mutex_enter(&target->sft_mutex);
				if (!(target->sft_state & SF_TARGET_OFFLINE)) {
					target->sft_state |=
					    (SF_TARGET_BUSY | SF_TARGET_MARK);
				}
				/* do this for all LUNs as well */
				for (ntarget = target->sft_next_lun;
				    ntarget;
				    ntarget = ntarget->sft_next_lun) {
					mutex_enter(&ntarget->sft_mutex);
					if (!(ntarget->sft_state &
					    SF_TARGET_OFFLINE)) {
						ntarget->sft_state |=
						    (SF_TARGET_BUSY |
						    SF_TARGET_MARK);
					}
					mutex_exit(&ntarget->sft_mutex);
				}
				mutex_exit(&target->sft_mutex);
			}
		}
		mutex_exit(&sf->sf_mutex);
		mutex_enter(&sf_global_mutex);

		/*
		 * kill off the watchdog if we are the last instance
		 */
		if (!--sf_watchdog_init) {
			tid = sf_watchdog_id;
			/* drop the lock before untimeout to avoid deadlock */
			mutex_exit(&sf_global_mutex);
			(void) untimeout(tid);
		} else {
			mutex_exit(&sf_global_mutex);
		}

		return (DDI_SUCCESS);

	case DDI_DETACH:
		/*
		 * detach this instance
		 */

		SF_DEBUG(2, (sf, CE_CONT,
		    "sf_detach: DDI_DETACH for sf%d\n", instance));

		/* remove this "sf" from the list of sf softstates */
		sf_softstate_unlink(sf);

		/*
		 * prior to taking any DDI_DETACH actions, toggle the
		 * device OFFLINE in order to cause outstanding
		 * commands to drain
		 */
		mutex_enter(&sf->sf_mutex);
		sf->sf_lip_cnt++;
		sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
		sf->sf_state = SF_STATE_OFFLINE;
		for (i = 0; i < sf_max_targets; i++) {
			target = sf->sf_targets[i];
			if (target != NULL) {
				struct sf_target *ntarget;

				mutex_enter(&target->sft_mutex);
				if (!(target->sft_state & SF_TARGET_OFFLINE)) {
					target->sft_state |=
					    (SF_TARGET_BUSY | SF_TARGET_MARK);
				}
				/* do this for all LUNs as well */
				for (ntarget = target->sft_next_lun;
				    ntarget;
				    ntarget = ntarget->sft_next_lun) {
					mutex_enter(&ntarget->sft_mutex);
					if (!(ntarget->sft_state &
					    SF_TARGET_OFFLINE)) {
						ntarget->sft_state |=
						    (SF_TARGET_BUSY |
						    SF_TARGET_MARK);
					}
					mutex_exit(&ntarget->sft_mutex);
				}
				mutex_exit(&target->sft_mutex);
			}
		}
		mutex_exit(&sf->sf_mutex);

		/* call transport to remove and unregister our callbacks */
		soc_remove_ulp(sf->sf_sochandle, sf->sf_socp,
		    sf->sf_sochandle->fcal_portno, TYPE_SCSI_FCP, sf);

		/*
		 * kill off the watchdog if we are the last instance
		 */
		mutex_enter(&sf_global_mutex);
		if (!--sf_watchdog_init) {
			tid = sf_watchdog_id;
			mutex_exit(&sf_global_mutex);
			(void) untimeout(tid);
		} else {
			mutex_exit(&sf_global_mutex);
		}

		/* signal sf_hp_daemon() to exit and wait for exit */
		mutex_enter(&sf->sf_hp_daemon_mutex);
		ASSERT(sf->sf_hp_tid);
		sf->sf_hp_exit = 1;		/* flag exit */
		cv_signal(&sf->sf_hp_daemon_cv);
		mutex_exit(&sf->sf_hp_daemon_mutex);
		thread_join(sf->sf_hp_tid);	/* wait for hotplug to exit */

		/*
		 * Unbind and free event set
		 */
		if (sf->sf_event_hdl) {
			(void) ndi_event_unbind_set(sf->sf_event_hdl,
			    &sf->sf_events, NDI_SLEEP);
			(void) ndi_event_free_hdl(sf->sf_event_hdl);
		}

		if (sf->sf_event_defs) {
			kmem_free(sf->sf_event_defs, sizeof (sf_event_defs));
		}

		/* detach this instance of the HBA driver */
		(void) scsi_hba_detach(dip);
		scsi_hba_tran_free(sf->sf_tran);

		/* deallocate/unbind DMA handle for lilp map */
		if (sf->sf_lilp_map != NULL) {
			(void) ddi_dma_unbind_handle(sf->sf_lilp_dmahandle);
			if (sf->sf_lilp_dmahandle != NULL) {
				ddi_dma_free_handle(&sf->sf_lilp_dmahandle);
			}
			ddi_dma_mem_free(&sf->sf_lilp_acchandle);
		}

		/*
		 * the kmem cache must be destroyed before free'ing
		 * up the crpools
		 *
		 * our finagle of "ntot" and "nfree"
		 * causes an ASSERT failure in "sf_cr_free()"
		 * if the kmem cache is free'd after invoking
		 * "sf_crpool_free()".
		 */
		kmem_cache_destroy(sf->sf_pkt_cache);

		SF_DEBUG(2, (sf, CE_CONT,
		    "sf_detach: sf_crpool_free() for instance 0x%x\n",
		    instance));
		while (sf->sf_cr_pool != NULL) {
			/*
			 * set ntot to nfree for this particular entry
			 *
			 * this causes sf_crpool_free() to update
			 * the cr_pool list when deallocating this entry
			 */
			sf->sf_cr_pool->ntot = sf->sf_cr_pool->nfree;
			sf_crpool_free(sf);
		}

		/*
		 * now that the cr_pool's are gone it's safe
		 * to destroy all softstate mutex's and cv's
		 */
		mutex_destroy(&sf->sf_mutex);
		mutex_destroy(&sf->sf_cmd_mutex);
		mutex_destroy(&sf->sf_cr_mutex);
		mutex_destroy(&sf->sf_hp_daemon_mutex);
		cv_destroy(&sf->sf_cr_cv);
		cv_destroy(&sf->sf_hp_daemon_cv);

		/* remove all minor nodes from the device tree */
		ddi_remove_minor_node(dip, NULL);

		/* remove properties created during attach() */
		ddi_prop_remove_all(dip);

		/* remove kstat's if present */
		if (sf->sf_ksp != NULL) {
			kstat_delete(sf->sf_ksp);
		}

		SF_DEBUG(2, (sf, CE_CONT,
		    "sf_detach: ddi_soft_state_free() for instance 0x%x\n",
		    instance));
		ddi_soft_state_free(sf_state, instance);
		return (DDI_SUCCESS);

	default:
		SF_DEBUG(2, (sf, CE_CONT, "sf_detach: sf%d unknown cmd %x\n",
		    instance, (int)cmd));
		return (DDI_FAILURE);
	}
}
1164 1168
1165 1169
1166 1170 /*
1167 1171 * sf_softstate_unlink() - remove an sf instance from the list of softstates
1168 1172 */
1169 1173 static void
1170 1174 sf_softstate_unlink(struct sf *sf)
1171 1175 {
1172 1176 struct sf *sf_ptr;
1173 1177 struct sf *sf_found_sibling;
1174 1178 struct sf *sf_reposition = NULL;
1175 1179
1176 1180
1177 1181 mutex_enter(&sf_global_mutex);
1178 1182 while (sf_watch_running) {
1179 1183 /* Busy working the list -- wait */
1180 1184 cv_wait(&sf_watch_cv, &sf_global_mutex);
1181 1185 }
1182 1186 if ((sf_found_sibling = sf->sf_sibling) != NULL) {
1183 1187 /*
1184 1188 * we have a sibling so NULL out its reference to us
1185 1189 */
1186 1190 mutex_enter(&sf_found_sibling->sf_mutex);
1187 1191 sf_found_sibling->sf_sibling = NULL;
1188 1192 mutex_exit(&sf_found_sibling->sf_mutex);
1189 1193 }
1190 1194
1191 1195 /* remove our instance from the global list */
1192 1196 if (sf == sf_head) {
1193 1197 /* we were at at head of the list */
1194 1198 sf_head = sf->sf_next;
1195 1199 } else {
1196 1200 /* find us in the list */
1197 1201 for (sf_ptr = sf_head;
1198 1202 sf_ptr != NULL;
1199 1203 sf_ptr = sf_ptr->sf_next) {
1200 1204 if (sf_ptr == sf) {
1201 1205 break;
1202 1206 }
1203 1207 /* remember this place */
1204 1208 sf_reposition = sf_ptr;
1205 1209 }
1206 1210 ASSERT(sf_ptr == sf);
1207 1211 ASSERT(sf_reposition != NULL);
1208 1212
1209 1213 sf_reposition->sf_next = sf_ptr->sf_next;
1210 1214 }
1211 1215 mutex_exit(&sf_global_mutex);
1212 1216 }
1213 1217
1214 1218
1215 1219 static int
1216 1220 sf_scsi_bus_config(dev_info_t *parent, uint_t flag,
1217 1221 ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
1218 1222 {
1219 1223 int64_t reset_delay;
1220 1224 struct sf *sf;
1221 1225
1222 1226 sf = ddi_get_soft_state(sf_state, ddi_get_instance(parent));
1223 1227 ASSERT(sf);
1224 1228
1225 1229 reset_delay = (int64_t)(USEC_TO_TICK(SF_INIT_WAIT_TIMEOUT)) -
1226 1230 (ddi_get_lbolt64() - sf->sf_reset_time);
1227 1231 if (reset_delay < 0)
1228 1232 reset_delay = 0;
1229 1233
1230 1234 if (sf_bus_config_debug)
1231 1235 flag |= NDI_DEVI_DEBUG;
1232 1236
1233 1237 return (ndi_busop_bus_config(parent, flag, op,
1234 1238 arg, childp, (clock_t)reset_delay));
1235 1239 }
1236 1240
1237 1241 static int
1238 1242 sf_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
1239 1243 ddi_bus_config_op_t op, void *arg)
1240 1244 {
1241 1245 if (sf_bus_config_debug)
1242 1246 flag |= NDI_DEVI_DEBUG;
1243 1247
1244 1248 return (ndi_busop_bus_unconfig(parent, flag, op, arg));
1245 1249 }
1246 1250
1247 1251
1248 1252 /*
1249 1253 * called by transport to initialize a SCSI target
1250 1254 */
/*
 * sf_scsi_tgt_init() - called by the transport to initialize a SCSI
 * target.
 *
 * Validates the target ID, reads the port-wwn, lip-count and lun
 * properties from the child node, looks up the matching sf_target,
 * and links it to the HBA transport handle.
 *
 * Returns DDI_SUCCESS on success, DDI_NOT_WELL_FORMED for stub nodes
 * that should be ignored, or DDI_FAILURE otherwise.
 */
/* ARGSUSED */
static int
sf_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
#ifdef RAID_LUNS
	int lun;
#else
	int64_t lun;
#endif
	struct sf_target *target;
	struct sf *sf = (struct sf *)hba_tran->tran_hba_private;
	int i, t_len;
	unsigned int lip_cnt;
	unsigned char wwn[FC_WWN_SIZE];


	/* get and validate our SCSI target ID */
	i = sd->sd_address.a_target;
	if (i >= sf_max_targets) {
		return (DDI_NOT_WELL_FORMED);
	}

	/* get our port WWN property */
	t_len = sizeof (wwn);
	if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, PORT_WWN_PROP,
	    (caddr_t)&wwn, &t_len) != DDI_SUCCESS) {
		/* no port WWN property - ignore the OBP stub node */
		return (DDI_NOT_WELL_FORMED);
	}

	/* get our LIP count property */
	t_len = sizeof (lip_cnt);
	if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, LIP_CNT_PROP,
	    (caddr_t)&lip_cnt, &t_len) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	/* and our LUN property */
	t_len = sizeof (lun);
	if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "lun",
	    (caddr_t)&lun, &t_len) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* find the target structure for this instance */
	mutex_enter(&sf->sf_mutex);
	if ((target = sf_lookup_target(sf, wwn, lun)) == NULL) {
		mutex_exit(&sf->sf_mutex);
		return (DDI_FAILURE);
	}

	mutex_enter(&target->sft_mutex);
	/*
	 * accept the node only if its lip-cnt property matches the
	 * current LIP generation (i.e. it is not stale) and the target
	 * has not already been initialized
	 */
	if ((sf->sf_lip_cnt == lip_cnt) && !(target->sft_state
	    & SF_TARGET_INIT_DONE)) {
		/*
		 * set links between HBA transport and target structures
		 * and set done flag
		 */
		hba_tran->tran_tgt_private = target;
		target->sft_tran = hba_tran;
		target->sft_state |= SF_TARGET_INIT_DONE;
	} else {
		/* already initialized ?? */
		mutex_exit(&target->sft_mutex);
		mutex_exit(&sf->sf_mutex);
		return (DDI_FAILURE);
	}
	mutex_exit(&target->sft_mutex);
	mutex_exit(&sf->sf_mutex);

	return (DDI_SUCCESS);
}
1326 1330
1327 1331
1328 1332 /*
1329 1333 * called by transport to free a target
1330 1334 */
1331 1335 /* ARGSUSED */
1332 1336 static void
1333 1337 sf_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1334 1338 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1335 1339 {
1336 1340 struct sf_target *target = hba_tran->tran_tgt_private;
1337 1341
1338 1342 if (target != NULL) {
1339 1343 mutex_enter(&target->sft_mutex);
1340 1344 target->sft_tran = NULL;
1341 1345 target->sft_state &= ~SF_TARGET_INIT_DONE;
1342 1346 mutex_exit(&target->sft_mutex);
1343 1347 }
1344 1348 }
1345 1349
1346 1350
1347 1351 /*
1348 1352 * allocator for non-std size cdb/pkt_private/status -- return TRUE iff
1349 1353 * success, else return FALSE
1350 1354 */
1351 1355 /*ARGSUSED*/
1352 1356 static int
1353 1357 sf_pkt_alloc_extern(struct sf *sf, struct sf_pkt *cmd,
1354 1358 int tgtlen, int statuslen, int kf)
1355 1359 {
1356 1360 caddr_t scbp, tgt;
1357 1361 int failure = FALSE;
1358 1362 struct scsi_pkt *pkt = CMD2PKT(cmd);
1359 1363
1360 1364
1361 1365 tgt = scbp = NULL;
1362 1366
1363 1367 if (tgtlen > PKT_PRIV_LEN) {
1364 1368 if ((tgt = kmem_zalloc(tgtlen, kf)) == NULL) {
1365 1369 failure = TRUE;
1366 1370 } else {
1367 1371 cmd->cmd_flags |= CFLAG_PRIVEXTERN;
1368 1372 pkt->pkt_private = tgt;
1369 1373 }
1370 1374 }
1371 1375 if (statuslen > EXTCMDS_STATUS_SIZE) {
1372 1376 if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
1373 1377 failure = TRUE;
1374 1378 } else {
1375 1379 cmd->cmd_flags |= CFLAG_SCBEXTERN;
1376 1380 pkt->pkt_scbp = (opaque_t)scbp;
1377 1381 }
1378 1382 }
1379 1383 if (failure) {
1380 1384 sf_pkt_destroy_extern(sf, cmd);
1381 1385 }
1382 1386 return (failure);
1383 1387 }
1384 1388
1385 1389
1386 1390 /*
1387 1391 * deallocator for non-std size cdb/pkt_private/status
1388 1392 */
1389 1393 static void
1390 1394 sf_pkt_destroy_extern(struct sf *sf, struct sf_pkt *cmd)
1391 1395 {
1392 1396 struct scsi_pkt *pkt = CMD2PKT(cmd);
1393 1397
1394 1398 if (cmd->cmd_flags & CFLAG_FREE) {
1395 1399 cmn_err(CE_PANIC,
1396 1400 "sf_scsi_impl_pktfree: freeing free packet");
1397 1401 _NOTE(NOT_REACHED)
1398 1402 /* NOTREACHED */
1399 1403 }
1400 1404 if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
1401 1405 kmem_free((caddr_t)pkt->pkt_scbp,
1402 1406 (size_t)cmd->cmd_scblen);
1403 1407 }
1404 1408 if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
1405 1409 kmem_free((caddr_t)pkt->pkt_private,
1406 1410 (size_t)cmd->cmd_privlen);
1407 1411 }
1408 1412
1409 1413 cmd->cmd_flags = CFLAG_FREE;
1410 1414 kmem_cache_free(sf->sf_pkt_cache, (void *)cmd);
1411 1415 }
1412 1416
1413 1417
1414 1418 /*
1415 1419 * create or initialize a SCSI packet -- called internally and
1416 1420 * by the transport
1417 1421 */
/*
 * sf_scsi_init_pkt() - create or initialize a SCSI packet -- called
 * internally and by the transport.
 *
 * Two-step operation per the SCSA contract: if pkt is NULL, allocate
 * and initialize a new packet (sf_pkt, fcal_packet, and cmd/response
 * buffers); then, if a buffer with data is supplied, allocate and bind
 * DMA resources for the transfer.  Returns the scsi_pkt, or NULL on
 * any failure.
 */
static struct scsi_pkt *
sf_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(), caddr_t arg)
{
	int kf;
	int failure = FALSE;
	struct sf_pkt *cmd;
	struct sf *sf = ADDR2SF(ap);
	struct sf_target *target = ADDR2TARGET(ap);
	struct sf_pkt *new_cmd = NULL;
	struct fcal_packet *fpkt;
	fc_frame_header_t *hp;
	struct fcp_cmd *fcmd;


	/*
	 * If we've already allocated a pkt once,
	 * this request is for dma allocation only.
	 */
	if (pkt == NULL) {

		/*
		 * First step of sf_scsi_init_pkt:  pkt allocation
		 */
		if (cmdlen > FCP_CDB_SIZE) {
			return (NULL);
		}

		kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;

		if ((cmd = kmem_cache_alloc(sf->sf_pkt_cache, kf)) != NULL) {
			/*
			 * Selective zeroing of the pkt.
			 */

			cmd->cmd_flags = 0;
			cmd->cmd_forw = 0;
			cmd->cmd_back = 0;
			cmd->cmd_next = 0;
			/*
			 * the scsi_pkt and fcal_packet are carved out of
			 * the same cache object, laid out directly after
			 * the sf_pkt itself
			 */
			cmd->cmd_pkt = (struct scsi_pkt *)((char *)cmd +
			    sizeof (struct sf_pkt) + sizeof (struct
			    fcal_packet));
			cmd->cmd_fp_pkt = (struct fcal_packet *)((char *)cmd +
			    sizeof (struct sf_pkt));
			cmd->cmd_fp_pkt->fcal_pkt_private = (opaque_t)cmd;
			cmd->cmd_state = SF_STATE_IDLE;
			cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
			cmd->cmd_pkt->pkt_scbp = (opaque_t)cmd->cmd_scsi_scb;
			cmd->cmd_pkt->pkt_comp = NULL;
			cmd->cmd_pkt->pkt_flags = 0;
			cmd->cmd_pkt->pkt_time = 0;
			cmd->cmd_pkt->pkt_resid = 0;
			cmd->cmd_pkt->pkt_reason = 0;
			cmd->cmd_cdblen = (uchar_t)cmdlen;
			cmd->cmd_scblen = statuslen;
			cmd->cmd_privlen = tgtlen;
			cmd->cmd_pkt->pkt_address = *ap;

			/* zero pkt_private */
			/*
			 * NOTE(review): the (int *) cast is a no-op on
			 * this assignment statement -- presumably a lint
			 * artifact
			 */
			(int *)(cmd->cmd_pkt->pkt_private =
			    cmd->cmd_pkt_private);
			bzero((caddr_t)cmd->cmd_pkt->pkt_private,
			    PKT_PRIV_LEN);
		} else {
			failure = TRUE;
		}

		if (failure ||
		    (tgtlen > PKT_PRIV_LEN) ||
		    (statuslen > EXTCMDS_STATUS_SIZE)) {
			if (!failure) {
				/* need to allocate more space */
				failure = sf_pkt_alloc_extern(sf, cmd,
				    tgtlen, statuslen, kf);
			}
			if (failure) {
				return (NULL);
			}
		}

		fpkt = cmd->cmd_fp_pkt;
		if (cmd->cmd_block == NULL) {

			/* allocate cmd/response pool buffers */
			if (sf_cr_alloc(sf, cmd, callback) == DDI_FAILURE) {
				sf_pkt_destroy_extern(sf, cmd);
				return (NULL);
			}

			/* fill in the FC-AL packet */
			fpkt->fcal_pkt_cookie = sf->sf_socp;
			fpkt->fcal_pkt_comp = sf_cmd_callback;
			fpkt->fcal_pkt_flags = 0;
			fpkt->fcal_magic = FCALP_MAGIC;
			fpkt->fcal_socal_request.sr_soc_hdr.sh_flags =
			    (ushort_t)(SOC_FC_HEADER |
			    sf->sf_sochandle->fcal_portno);
			fpkt->fcal_socal_request.sr_soc_hdr.sh_class = 3;
			fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_count = 1;
			fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_flags = 0;
			fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_seqno = 0;
			/* dataseg[0]: the FCP command itself */
			fpkt->fcal_socal_request.sr_dataseg[0].fc_base =
			    (uint32_t)cmd->cmd_dmac;
			fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
			    sizeof (struct fcp_cmd);
			/* dataseg[1]: the FCP response IU */
			fpkt->fcal_socal_request.sr_dataseg[1].fc_base =
			    (uint32_t)cmd->cmd_rsp_dmac;
			fpkt->fcal_socal_request.sr_dataseg[1].fc_count =
			    FCP_MAX_RSP_IU_SIZE;

			/* Fill in the Fabric Channel Header */
			hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
			hp->r_ctl = R_CTL_COMMAND;
			hp->type = TYPE_SCSI_FCP;
			hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
			hp->reserved1 = 0;
			hp->seq_id = 0;
			hp->df_ctl = 0;
			hp->seq_cnt = 0;
			hp->ox_id = 0xffff;
			hp->rx_id = 0xffff;
			hp->ro = 0;

			/* Establish the LUN */
			bcopy((caddr_t)&target->sft_lun.b,
			    (caddr_t)&cmd->cmd_block->fcp_ent_addr,
			    FCP_LUN_SIZE);
			*((int32_t *)&cmd->cmd_block->fcp_cntl) = 0;
		}
		cmd->cmd_pkt->pkt_cdbp = cmd->cmd_block->fcp_cdb;

		mutex_enter(&target->sft_pkt_mutex);

		/* append to the target's doubly-linked packet list */
		target->sft_pkt_tail->cmd_forw = cmd;
		cmd->cmd_back = target->sft_pkt_tail;
		cmd->cmd_forw = (struct sf_pkt *)&target->sft_pkt_head;
		target->sft_pkt_tail = cmd;

		mutex_exit(&target->sft_pkt_mutex);
		new_cmd = cmd;		/* for later cleanup if needed */
	} else {
		/* pkt already exists -- just a request for DMA allocation */
		cmd = PKT2CMD(pkt);
		fpkt = cmd->cmd_fp_pkt;
	}

	/* zero cdb (bzero is too slow) */
	bzero((caddr_t)cmd->cmd_pkt->pkt_cdbp, cmdlen);

	/*
	 * Second step of sf_scsi_init_pkt:  dma allocation
	 * Set up dma info
	 */
	if ((bp != NULL) && (bp->b_bcount != 0)) {
		int cmd_flags, dma_flags;
		int rval = 0;
		uint_t dmacookie_count;

		/* there is a buffer and some data to transfer */

		/* set up command and DMA flags */
		cmd_flags = cmd->cmd_flags;
		if (bp->b_flags & B_READ) {
			/* a read */
			cmd_flags &= ~CFLAG_DMASEND;
			dma_flags = DDI_DMA_READ;
		} else {
			/* a write */
			cmd_flags |= CFLAG_DMASEND;
			dma_flags = DDI_DMA_WRITE;
		}
		if (flags & PKT_CONSISTENT) {
			cmd_flags |= CFLAG_CMDIOPB;
			dma_flags |= DDI_DMA_CONSISTENT;
		}

		/* ensure we have a DMA handle */
		if (cmd->cmd_dmahandle == NULL) {
			rval = ddi_dma_alloc_handle(sf->sf_dip,
			    sf->sf_sochandle->fcal_dmaattr, callback, arg,
			    &cmd->cmd_dmahandle);
		}

		if (rval == 0) {
			/* bind our DMA handle to our buffer */
			rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
			    dma_flags, callback, arg, &cmd->cmd_dmacookie,
			    &dmacookie_count);
		}

		if (rval != 0) {
			/* DMA failure */
			SF_DEBUG(2, (sf, CE_CONT, "ddi_dma_buf.. failed\n"));
			switch (rval) {
			case DDI_DMA_NORESOURCES:
				bioerror(bp, 0);
				break;
			case DDI_DMA_BADATTR:
			case DDI_DMA_NOMAPPING:
				bioerror(bp, EFAULT);
				break;
			case DDI_DMA_TOOBIG:
			default:
				bioerror(bp, EINVAL);
				break;
			}
			/* clear valid flag */
			cmd->cmd_flags = cmd_flags & ~CFLAG_DMAVALID;
			if (new_cmd != NULL) {
				/* destroy packet if we just created it */
				sf_scsi_destroy_pkt(ap, new_cmd->cmd_pkt);
			}
			return (NULL);
		}

		/*
		 * assumes the DMA attributes restrict the bind to a
		 * single cookie -- TODO confirm against fcal_dmaattr
		 */
		ASSERT(dmacookie_count == 1);
		/* set up amt to transfer and set valid flag */
		cmd->cmd_dmacount = bp->b_bcount;
		cmd->cmd_flags = cmd_flags | CFLAG_DMAVALID;

		ASSERT(cmd->cmd_dmahandle != NULL);
	}

	/* set up FC-AL packet */
	fcmd = cmd->cmd_block;

	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		if (cmd->cmd_flags & CFLAG_DMASEND) {
			/* DMA write */
			fcmd->fcp_cntl.cntl_read_data = 0;
			fcmd->fcp_cntl.cntl_write_data = 1;
			fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
			    CQ_TYPE_IO_WRITE;
		} else {
			/* DMA read */
			fcmd->fcp_cntl.cntl_read_data = 1;
			fcmd->fcp_cntl.cntl_write_data = 0;
			fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
			    CQ_TYPE_IO_READ;
		}
		/* dataseg[2]: the data transfer itself */
		fpkt->fcal_socal_request.sr_dataseg[2].fc_base =
		    (uint32_t)cmd->cmd_dmacookie.dmac_address;
		fpkt->fcal_socal_request.sr_dataseg[2].fc_count =
		    cmd->cmd_dmacookie.dmac_size;
		fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3;
		fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
		    cmd->cmd_dmacookie.dmac_size;
		fcmd->fcp_data_len = cmd->cmd_dmacookie.dmac_size;
	} else {
		/* not a read or write */
		fcmd->fcp_cntl.cntl_read_data = 0;
		fcmd->fcp_cntl.cntl_write_data = 0;
		fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_SIMPLE;
		fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 2;
		fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
		    sizeof (struct fcp_cmd);
		fcmd->fcp_data_len = 0;
	}
	fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;

	return (cmd->cmd_pkt);
}
1681 1685
1682 1686
1683 1687 /*
1684 1688 * destroy a SCSI packet -- called internally and by the transport
1685 1689 */
1686 1690 static void
1687 1691 sf_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1688 1692 {
1689 1693 struct sf_pkt *cmd = PKT2CMD(pkt);
1690 1694 struct sf *sf = ADDR2SF(ap);
1691 1695 struct sf_target *target = ADDR2TARGET(ap);
1692 1696 struct fcal_packet *fpkt = cmd->cmd_fp_pkt;
1693 1697
1694 1698
1695 1699 if (cmd->cmd_flags & CFLAG_DMAVALID) {
1696 1700 /* DMA was set up -- clean up */
1697 1701 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
1698 1702 cmd->cmd_flags ^= CFLAG_DMAVALID;
1699 1703 }
1700 1704
1701 1705 /* take this packet off the doubly-linked list */
1702 1706 mutex_enter(&target->sft_pkt_mutex);
1703 1707 cmd->cmd_back->cmd_forw = cmd->cmd_forw;
1704 1708 cmd->cmd_forw->cmd_back = cmd->cmd_back;
1705 1709 mutex_exit(&target->sft_pkt_mutex);
1706 1710
1707 1711 fpkt->fcal_pkt_flags = 0;
1708 1712 /* free the packet */
1709 1713 if ((cmd->cmd_flags &
1710 1714 (CFLAG_FREE | CFLAG_PRIVEXTERN | CFLAG_SCBEXTERN)) == 0) {
1711 1715 /* just a regular packet */
1712 1716 ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
1713 1717 cmd->cmd_flags = CFLAG_FREE;
1714 1718 kmem_cache_free(sf->sf_pkt_cache, (void *)cmd);
1715 1719 } else {
1716 1720 /* a packet with extra memory */
1717 1721 sf_pkt_destroy_extern(sf, cmd);
1718 1722 }
1719 1723 }
1720 1724
1721 1725
1722 1726 /*
1723 1727 * called by transport to unbind DMA handle
1724 1728 */
1725 1729 /* ARGSUSED */
1726 1730 static void
1727 1731 sf_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
1728 1732 {
1729 1733 struct sf_pkt *cmd = PKT2CMD(pkt);
1730 1734
1731 1735
1732 1736 if (cmd->cmd_flags & CFLAG_DMAVALID) {
1733 1737 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
1734 1738 cmd->cmd_flags ^= CFLAG_DMAVALID;
1735 1739 }
1736 1740
1737 1741 }
1738 1742
1739 1743
1740 1744 /*
1741 1745 * called by transport to synchronize CPU and I/O views of memory
1742 1746 */
1743 1747 /* ARGSUSED */
1744 1748 static void
1745 1749 sf_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1746 1750 {
1747 1751 struct sf_pkt *cmd = PKT2CMD(pkt);
1748 1752
1749 1753
1750 1754 if (cmd->cmd_flags & CFLAG_DMAVALID) {
1751 1755 if (ddi_dma_sync(cmd->cmd_dmahandle, (off_t)0, (size_t)0,
1752 1756 (cmd->cmd_flags & CFLAG_DMASEND) ?
1753 1757 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU) !=
1754 1758 DDI_SUCCESS) {
1755 1759 cmn_err(CE_WARN, "sf: sync pkt failed");
1756 1760 }
1757 1761 }
1758 1762 }
1759 1763
1760 1764
1761 1765 /*
1762 1766 * routine for reset notification setup, to register or cancel. -- called
1763 1767 * by transport
1764 1768 */
1765 1769 static int
1766 1770 sf_scsi_reset_notify(struct scsi_address *ap, int flag,
1767 1771 void (*callback)(caddr_t), caddr_t arg)
1768 1772 {
1769 1773 struct sf *sf = ADDR2SF(ap);
1770 1774
1771 1775 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
1772 1776 &sf->sf_mutex, &sf->sf_reset_notify_listf));
1773 1777 }
1774 1778
1775 1779
1776 1780 /*
1777 1781 * called by transport to get port WWN property (except sun4u)
1778 1782 */
1779 1783 /* ARGSUSED */
1780 1784 static int
1781 1785 sf_scsi_get_name(struct scsi_device *sd, char *name, int len)
1782 1786 {
1783 1787 char tbuf[(FC_WWN_SIZE*2)+1];
1784 1788 unsigned char wwn[FC_WWN_SIZE];
1785 1789 int i, lun;
1786 1790 dev_info_t *tgt_dip;
1787 1791
1788 1792 tgt_dip = sd->sd_dev;
1789 1793 i = sizeof (wwn);
1790 1794 if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1791 1795 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, PORT_WWN_PROP,
1792 1796 (caddr_t)&wwn, &i) != DDI_SUCCESS) {
1793 1797 name[0] = '\0';
1794 1798 return (0);
1795 1799 }
1796 1800 i = sizeof (lun);
1797 1801 if (ddi_prop_op(DDI_DEV_T_ANY, tgt_dip, PROP_LEN_AND_VAL_BUF,
1798 1802 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "lun",
1799 1803 (caddr_t)&lun, &i) != DDI_SUCCESS) {
1800 1804 name[0] = '\0';
1801 1805 return (0);
1802 1806 }
1803 1807 for (i = 0; i < FC_WWN_SIZE; i++)
1804 1808 (void) sprintf(&tbuf[i << 1], "%02x", wwn[i]);
1805 1809 (void) sprintf(name, "w%s,%x", tbuf, lun);
1806 1810 return (1);
1807 1811 }
1808 1812
1809 1813
1810 1814 /*
1811 1815 * called by transport to get target soft AL-PA (except sun4u)
1812 1816 */
1813 1817 /* ARGSUSED */
1814 1818 static int
1815 1819 sf_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
1816 1820 {
1817 1821 struct sf_target *target = ADDR2TARGET(&sd->sd_address);
1818 1822
1819 1823 if (target == NULL)
1820 1824 return (0);
1821 1825
1822 1826 (void) sprintf(name, "%x", target->sft_al_pa);
1823 1827 return (1);
1824 1828 }
1825 1829
1826 1830
/*
 * add to the command/response buffer pool for this sf instance
 *
 * Allocates one sf_cr_pool: a DMA-able chunk of fcp_cmd command buffers
 * plus a matching chunk of FCP response buffers, binds each chunk for
 * DMA (each must produce exactly one cookie), threads the command
 * buffers into a free list -- each free element also records its paired
 * response buffer and the DMA addresses of both -- and pushes the new
 * pool on the front of sf->sf_cr_pool.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE after releasing any partially
 * completed allocation.  Non-blocking (KM_NOSLEEP / DDI_DMA_DONTWAIT
 * throughout), so callers may hold locks that forbid sleeping.
 */
static int
sf_add_cr_pool(struct sf *sf)
{
	int		cmd_buf_size;
	size_t		real_cmd_buf_size;
	int		rsp_buf_size;
	size_t		real_rsp_buf_size;
	uint_t		i, ccount;
	struct sf_cr_pool	*ptr;
	struct sf_cr_free_elem	*cptr;
	caddr_t		dptr, eptr;
	ddi_dma_cookie_t	cmd_cookie;
	ddi_dma_cookie_t	rsp_cookie;
	int		cmd_bound = FALSE, rsp_bound = FALSE;


	/* allocate room for the pool */
	/* zeroed, so the fail: path can tell which resources were created */
	if ((ptr = kmem_zalloc(sizeof (struct sf_cr_pool), KM_NOSLEEP)) ==
	    NULL) {
		return (DDI_FAILURE);
	}

	/* allocate a DMA handle for the command pool */
	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
	    DDI_DMA_DONTWAIT, NULL, &ptr->cmd_dma_handle) != DDI_SUCCESS) {
		goto fail;
	}

	/*
	 * Get a piece of memory in which to put commands
	 */
	/* size is rounded up so the buffer can be 8-byte aligned below */
	cmd_buf_size = (sizeof (struct fcp_cmd) * SF_ELEMS_IN_POOL + 7) & ~7;
	if (ddi_dma_mem_alloc(ptr->cmd_dma_handle, cmd_buf_size,
	    sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&ptr->cmd_base,
	    &real_cmd_buf_size, &ptr->cmd_acc_handle) != DDI_SUCCESS) {
		goto fail;
	}

	/* bind the DMA handle to an address */
	if (ddi_dma_addr_bind_handle(ptr->cmd_dma_handle, NULL,
	    ptr->cmd_base, real_cmd_buf_size,
	    DDI_DMA_WRITE | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
	    NULL, &cmd_cookie, &ccount) != DDI_DMA_MAPPED) {
		goto fail;
	}
	cmd_bound = TRUE;
	/* ensure only one cookie was allocated */
	/* (the dmac_address arithmetic below assumes one flat window) */
	if (ccount != 1) {
		goto fail;
	}

	/* allocate a DMA handle for the response pool */
	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
	    DDI_DMA_DONTWAIT, NULL, &ptr->rsp_dma_handle) != DDI_SUCCESS) {
		goto fail;
	}

	/*
	 * Get a piece of memory in which to put responses
	 */
	rsp_buf_size = FCP_MAX_RSP_IU_SIZE * SF_ELEMS_IN_POOL;
	if (ddi_dma_mem_alloc(ptr->rsp_dma_handle, rsp_buf_size,
	    sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&ptr->rsp_base,
	    &real_rsp_buf_size, &ptr->rsp_acc_handle) != DDI_SUCCESS) {
		goto fail;
	}

	/* bind the DMA handle to an address */
	if (ddi_dma_addr_bind_handle(ptr->rsp_dma_handle, NULL,
	    ptr->rsp_base, real_rsp_buf_size,
	    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
	    NULL, &rsp_cookie, &ccount) != DDI_DMA_MAPPED) {
		goto fail;
	}
	rsp_bound = TRUE;
	/* ensure only one cookie was allocated */
	if (ccount != 1) {
		goto fail;
	}

	/*
	 * Generate a (cmd/rsp structure) free list
	 */
	/* ensure ptr points to start of long word (8-byte block) */
	dptr = (caddr_t)((uintptr_t)(ptr->cmd_base) + 7 & ~7);
	/* keep track of actual size after moving pointer */
	real_cmd_buf_size -= (dptr - ptr->cmd_base);
	eptr = ptr->rsp_base;

	/* set actual total number of entries */
	/* bounded by whichever chunk the DDI gave us less usable room in */
	ptr->ntot = min((real_cmd_buf_size / sizeof (struct fcp_cmd)),
	    (real_rsp_buf_size / FCP_MAX_RSP_IU_SIZE));
	ptr->nfree = ptr->ntot;
	ptr->free = (struct sf_cr_free_elem *)ptr->cmd_base;
	ptr->sf = sf;

	/* set up DMA for each pair of entries */
	/* each cmd buffer doubles as a free-list element while unused */
	i = 0;
	while (i < ptr->ntot) {
		cptr = (struct sf_cr_free_elem *)dptr;
		dptr += sizeof (struct fcp_cmd);

		cptr->next = (struct sf_cr_free_elem *)dptr;
		cptr->rsp = eptr;

		cptr->cmd_dmac = cmd_cookie.dmac_address +
		    (uint32_t)((caddr_t)cptr - ptr->cmd_base);

		cptr->rsp_dmac = rsp_cookie.dmac_address +
		    (uint32_t)((caddr_t)eptr - ptr->rsp_base);

		eptr += FCP_MAX_RSP_IU_SIZE;
		i++;
	}

	/* terminate the list */
	/*
	 * NOTE(review): assumes ntot >= 1 so cptr was set by the loop
	 * above; with SF_ELEMS_IN_POOL-sized chunks that always holds.
	 */
	cptr->next = NULL;

	/* add this list at front of current one */
	mutex_enter(&sf->sf_cr_mutex);
	ptr->next = sf->sf_cr_pool;
	sf->sf_cr_pool = ptr;
	sf->sf_cr_pool_cnt++;
	mutex_exit(&sf->sf_cr_mutex);

	return (DDI_SUCCESS);

fail:
	/* we failed so clean up */
	/* unbind before freeing handles; unzeroed fields were never set */
	if (ptr->cmd_dma_handle != NULL) {
		if (cmd_bound) {
			(void) ddi_dma_unbind_handle(ptr->cmd_dma_handle);
		}
		ddi_dma_free_handle(&ptr->cmd_dma_handle);
	}

	if (ptr->rsp_dma_handle != NULL) {
		if (rsp_bound) {
			(void) ddi_dma_unbind_handle(ptr->rsp_dma_handle);
		}
		ddi_dma_free_handle(&ptr->rsp_dma_handle);
	}

	if (ptr->cmd_base != NULL) {
		ddi_dma_mem_free(&ptr->cmd_acc_handle);
	}

	if (ptr->rsp_base != NULL) {
		ddi_dma_mem_free(&ptr->rsp_acc_handle);
	}

	kmem_free((caddr_t)ptr, sizeof (struct sf_cr_pool));
	return (DDI_FAILURE);
}
1986 1990
1987 1991
/*
 * allocate a command/response buffer from the pool, allocating more
 * in the pool as needed
 *
 * On success fills in cmd's cmd_block/cmd_rsp_block pointers, the two
 * DMA addresses, and cmd_cr_pool (needed later by sf_cr_free), and
 * returns DDI_SUCCESS; otherwise returns DDI_FAILURE.
 *
 * func selects blocking behavior: SLEEP_FUNC means wait (cv_wait on
 * sf_cr_cv) while another thread is growing the pool; anything else
 * fails immediately in that case.  Only one thread grows the pool at
 * a time, gated by sf_cr_flag under sf_cr_mutex.
 */
static int
sf_cr_alloc(struct sf *sf, struct sf_pkt *cmd, int (*func)())
{
	struct sf_cr_pool	*ptr;
	struct sf_cr_free_elem	*cptr;


	mutex_enter(&sf->sf_cr_mutex);

try_again:

	/* find a free buffer in the existing pool */
	/* note: nfree is decremented here, before the element is popped */
	ptr = sf->sf_cr_pool;
	while (ptr != NULL) {
		if (ptr->nfree != 0) {
			ptr->nfree--;
			break;
		} else {
			ptr = ptr->next;
		}
	}

	/* did we find a free buffer ? */
	if (ptr != NULL) {
		/* we found a free buffer -- take it off the free list */
		cptr = ptr->free;
		ptr->free = cptr->next;
		mutex_exit(&sf->sf_cr_mutex);
		/* set up the command to use the buffer pair */
		cmd->cmd_block = (struct fcp_cmd *)cptr;
		cmd->cmd_dmac = cptr->cmd_dmac;
		cmd->cmd_rsp_dmac = cptr->rsp_dmac;
		cmd->cmd_rsp_block = (struct fcp_rsp *)cptr->rsp;
		cmd->cmd_cr_pool = ptr;
		return (DDI_SUCCESS);		/* success */
	}

	/* no free buffer available -- can we allocate more ? */
	if (sf->sf_cr_pool_cnt < SF_CR_POOL_MAX) {
		/* we need to allocate more buffer pairs */
		if (sf->sf_cr_flag) {
			/* somebody already allocating for this instance */
			if (func == SLEEP_FUNC) {
				/* user wants to wait */
				cv_wait(&sf->sf_cr_cv, &sf->sf_cr_mutex);
				/* we've been woken so go try again */
				goto try_again;
			}
			/* user does not want to wait */
			mutex_exit(&sf->sf_cr_mutex);
			sf->sf_stats.cralloc_failures++;
			return (DDI_FAILURE);	/* give up */
		}
		/* set flag saying we're allocating */
		sf->sf_cr_flag = 1;
		mutex_exit(&sf->sf_cr_mutex);
		/* add to our pool */
		/* sf_add_cr_pool() is called without sf_cr_mutex held */
		if (sf_add_cr_pool(sf) != DDI_SUCCESS) {
			/* couldn't add to our pool for some reason */
			mutex_enter(&sf->sf_cr_mutex);
			sf->sf_cr_flag = 0;
			cv_broadcast(&sf->sf_cr_cv);
			mutex_exit(&sf->sf_cr_mutex);
			sf->sf_stats.cralloc_failures++;
			return (DDI_FAILURE);	/* give up */
		}
		/*
		 * clear flag saying we're allocating and tell all other
		 * that care
		 */
		mutex_enter(&sf->sf_cr_mutex);
		sf->sf_cr_flag = 0;
		cv_broadcast(&sf->sf_cr_cv);
		/* now that we have more buffers try again */
		goto try_again;
	}

	/* we don't have room to allocate any more buffers */
	mutex_exit(&sf->sf_cr_mutex);
	sf->sf_stats.cralloc_failures++;
	return (DDI_FAILURE);			/* give up */
}
2074 2078
2075 2079
2076 2080 /*
2077 2081 * free a cmd/response buffer pair in our pool
2078 2082 */
2079 2083 static void
2080 2084 sf_cr_free(struct sf_cr_pool *cp, struct sf_pkt *cmd)
2081 2085 {
2082 2086 struct sf *sf = cp->sf;
2083 2087 struct sf_cr_free_elem *elem;
2084 2088
2085 2089 elem = (struct sf_cr_free_elem *)cmd->cmd_block;
2086 2090 elem->rsp = (caddr_t)cmd->cmd_rsp_block;
2087 2091 elem->cmd_dmac = cmd->cmd_dmac;
2088 2092 elem->rsp_dmac = cmd->cmd_rsp_dmac;
2089 2093
2090 2094 mutex_enter(&sf->sf_cr_mutex);
2091 2095 cp->nfree++;
2092 2096 ASSERT(cp->nfree <= cp->ntot);
2093 2097
2094 2098 elem->next = cp->free;
2095 2099 cp->free = elem;
2096 2100 mutex_exit(&sf->sf_cr_mutex);
2097 2101 }
2098 2102
2099 2103
/*
 * free our pool of cmd/response buffers
 *
 * Scans the pool list for a pool with every element free (nfree ==
 * ntot), unlinks it, and releases all of its DMA resources and memory.
 * Note that at most ONE such pool is freed per call -- the function
 * returns after the first hit, and returns silently if no pool is
 * completely idle.
 */
static void
sf_crpool_free(struct sf *sf)
{
	struct sf_cr_pool	*cp, *prev;

	prev = NULL;
	mutex_enter(&sf->sf_cr_mutex);
	cp = sf->sf_cr_pool;
	while (cp != NULL) {
		if (cp->nfree == cp->ntot) {
			/* unlink this pool from the list */
			if (prev != NULL) {
				prev->next = cp->next;
			} else {
				sf->sf_cr_pool = cp->next;
			}
			sf->sf_cr_pool_cnt--;
			/*
			 * safe to drop the lock before teardown: cp is no
			 * longer reachable from the instance's pool list
			 */
			mutex_exit(&sf->sf_cr_mutex);

			(void) ddi_dma_unbind_handle(cp->cmd_dma_handle);
			ddi_dma_free_handle(&cp->cmd_dma_handle);
			(void) ddi_dma_unbind_handle(cp->rsp_dma_handle);
			ddi_dma_free_handle(&cp->rsp_dma_handle);
			ddi_dma_mem_free(&cp->cmd_acc_handle);
			ddi_dma_mem_free(&cp->rsp_acc_handle);
			kmem_free((caddr_t)cp, sizeof (struct sf_cr_pool));
			return;
		}
		prev = cp;
		cp = cp->next;
	}
	mutex_exit(&sf->sf_cr_mutex);
}
2135 2139
2136 2140
2137 2141 /* ARGSUSED */
2138 2142 static int
2139 2143 sf_kmem_cache_constructor(void *buf, void *arg, int size)
2140 2144 {
2141 2145 struct sf_pkt *cmd = buf;
2142 2146
2143 2147 mutex_init(&cmd->cmd_abort_mutex, NULL, MUTEX_DRIVER, NULL);
2144 2148 cmd->cmd_block = NULL;
2145 2149 cmd->cmd_dmahandle = NULL;
2146 2150 return (0);
2147 2151 }
2148 2152
2149 2153
2150 2154 /* ARGSUSED */
2151 2155 static void
2152 2156 sf_kmem_cache_destructor(void *buf, void *size)
2153 2157 {
2154 2158 struct sf_pkt *cmd = buf;
2155 2159
2156 2160 if (cmd->cmd_dmahandle != NULL) {
2157 2161 ddi_dma_free_handle(&cmd->cmd_dmahandle);
2158 2162 }
2159 2163
2160 2164 if (cmd->cmd_block != NULL) {
2161 2165 sf_cr_free(cmd->cmd_cr_pool, cmd);
2162 2166 }
2163 2167 mutex_destroy(&cmd->cmd_abort_mutex);
2164 2168 }
2165 2169
2166 2170
/*
 * called by transport when a state change occurs
 *
 * Handles three loop events:
 *
 *   FCAL_STATUS_LOOP_ONLINE -- mark all known targets busy, fetch a
 *	fresh LILP map from the soc+ transport, and PLOGI into every
 *	other port in the map;
 *
 *   FCAL_STATUS_ERR_OFFLINE -- mark all targets busy and take the
 *	instance offline until the loop comes back (or times out);
 *
 *   FCAL_STATE_RESET -- run NDI remove-event callbacks for online
 *	targets, free all ELS requests still in transport, and complete
 *	every outstanding SCSI packet with CMD_RESET.
 *
 * Called without sf_mutex held; acquires/drops sf_mutex and per-target
 * mutexes internally, deliberately dropping them around callbacks.
 */
static void
sf_statec_callback(void *arg, int msg)
{
	struct sf *sf = (struct sf *)arg;
	struct sf_target	*target;
	int i;
	struct sf_pkt *cmd;
	struct scsi_pkt *pkt;



	switch (msg) {

	case FCAL_STATUS_LOOP_ONLINE: {
		uchar_t		al_pa;		/* to save AL-PA */
		int		ret;		/* ret value from getmap */
		int		lip_cnt;	/* to save current count */
		int		cnt;		/* map length */

		/*
		 * the loop has gone online
		 */
		SF_DEBUG(1, (sf, CE_CONT, "sf%d: loop online\n",
		    ddi_get_instance(sf->sf_dip)));
		mutex_enter(&sf->sf_mutex);
		sf->sf_lip_cnt++;
		sf->sf_state = SF_STATE_ONLINING;
		mutex_exit(&sf->sf_mutex);

		/* scan each target hash queue */
		for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
			target = sf->sf_wwn_lists[i];
			while (target != NULL) {
				/*
				 * foreach target, if it's not offline then
				 * mark it as busy
				 */
				mutex_enter(&target->sft_mutex);
				if (!(target->sft_state & SF_TARGET_OFFLINE))
					target->sft_state |= (SF_TARGET_BUSY
					    | SF_TARGET_MARK);
#ifdef DEBUG
				/*
				 * for debugging, print out info on any
				 * pending commands (left hanging)
				 */
				cmd = target->sft_pkt_head;
				while (cmd != (struct sf_pkt *)&target->
				    sft_pkt_head) {
					if (cmd->cmd_state ==
					    SF_STATE_ISSUED) {
						SF_DEBUG(1, (sf, CE_CONT,
						    "cmd 0x%p pending "
						    "after lip\n",
						    (void *)cmd->cmd_fp_pkt));
					}
					cmd = cmd->cmd_forw;
				}
#endif
				mutex_exit(&target->sft_mutex);
				target = target->sft_next;
			}
		}

		/*
		 * since the loop has just gone online get a new map from
		 * the transport
		 */
		if ((ret = soc_get_lilp_map(sf->sf_sochandle, sf->sf_socp,
		    sf->sf_sochandle->fcal_portno, (uint32_t)sf->
		    sf_lilp_dmacookie.dmac_address, 1)) != FCAL_SUCCESS) {
			/* optionally take a soc+ core dump for debugging */
			if (sf_core && (sf_core & SF_CORE_LILP_FAILED)) {
				(void) soc_take_core(sf->sf_sochandle,
				    sf->sf_socp);
				sf_core = 0;
			}
			sf_log(sf, CE_WARN,
			    "!soc lilp map failed status=0x%x\n", ret);
			mutex_enter(&sf->sf_mutex);
			sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
			sf->sf_lip_cnt++;
			sf->sf_state = SF_STATE_OFFLINE;
			mutex_exit(&sf->sf_mutex);
			return;
		}

		/* ensure consistent view of DMA memory */
		(void) ddi_dma_sync(sf->sf_lilp_dmahandle, (off_t)0, (size_t)0,
		    DDI_DMA_SYNC_FORKERNEL);

		/* how many entries in map ? */
		cnt = sf->sf_lilp_map->lilp_length;
		if (cnt >= SF_MAX_LILP_ENTRIES) {
			sf_log(sf, CE_WARN, "invalid lilp map\n");
			return;
		}

		mutex_enter(&sf->sf_mutex);
		/* map includes our own entry, hence cnt - 1 devices */
		sf->sf_device_count = cnt - 1;
		sf->sf_al_pa = sf->sf_lilp_map->lilp_myalpa;
		lip_cnt = sf->sf_lip_cnt;
		al_pa = sf->sf_al_pa;

		SF_DEBUG(1, (sf, CE_CONT,
		    "!lilp map has %d entries, al_pa is %x\n", cnt, al_pa));

		/*
		 * since the last entry of the map may be mine (common) check
		 * for that, and if it is we have one less entry to look at
		 */
		if (sf->sf_lilp_map->lilp_alpalist[cnt-1] == al_pa) {
			cnt--;
		}
		/* If we didn't get a valid loop map enable all targets */
		if (sf->sf_lilp_map->lilp_magic == FCAL_BADLILP_MAGIC) {
			for (i = 0; i < sizeof (sf_switch_to_alpa); i++)
				sf->sf_lilp_map->lilp_alpalist[i] =
				    sf_switch_to_alpa[i];
			cnt = i;
			sf->sf_device_count = cnt - 1;
		}
		if (sf->sf_device_count == 0) {
			/* nobody else on the loop -- we're done onlining */
			sf_finish_init(sf, lip_cnt);
			mutex_exit(&sf->sf_mutex);
			break;
		}
		mutex_exit(&sf->sf_mutex);

		SF_DEBUG(2, (sf, CE_WARN,
		    "!statec_callback: starting with %d targets\n",
		    sf->sf_device_count));

		/* scan loop map, logging into all ports (except mine) */
		for (i = 0; i < cnt; i++) {
			SF_DEBUG(1, (sf, CE_CONT,
			    "!lilp map entry %d = %x,%x\n", i,
			    sf->sf_lilp_map->lilp_alpalist[i],
			    sf_alpa_to_switch[
			    sf->sf_lilp_map->lilp_alpalist[i]]));
			/* is this entry for somebody else ? */
			if (sf->sf_lilp_map->lilp_alpalist[i] != al_pa) {
				/* do a PLOGI to this port */
				if (!sf_login(sf, LA_ELS_PLOGI,
				    sf->sf_lilp_map->lilp_alpalist[i],
				    sf->sf_lilp_map->lilp_alpalist[cnt-1],
				    lip_cnt)) {
					/* a problem logging in */
					mutex_enter(&sf->sf_mutex);
					if (lip_cnt == sf->sf_lip_cnt) {
						/*
						 * problem not from a new LIP
						 */
						sf->sf_device_count--;
						ASSERT(sf->sf_device_count
						    >= 0);
						if (sf->sf_device_count == 0) {
							sf_finish_init(sf,
							    lip_cnt);
						}
					}
					mutex_exit(&sf->sf_mutex);
				}
			}
		}
		break;
	}

	case FCAL_STATUS_ERR_OFFLINE:
		/*
		 * loop has gone offline due to an error
		 */
		SF_DEBUG(1, (sf, CE_CONT, "sf%d: loop offline\n",
		    ddi_get_instance(sf->sf_dip)));
		mutex_enter(&sf->sf_mutex);
		sf->sf_lip_cnt++;
		sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
		if (!sf->sf_online_timer) {
			sf->sf_online_timer = sf_watchdog_time +
			    SF_ONLINE_TIMEOUT;
		}
		/*
		 * if we are suspended, preserve the SF_STATE_SUSPENDED flag,
		 * since throttling logic in sf_watch() depends on
		 * preservation of this flag while device is suspended
		 */
		if (sf->sf_state & SF_STATE_SUSPENDED) {
			sf->sf_state |= SF_STATE_OFFLINE;
			SF_DEBUG(1, (sf, CE_CONT,
			    "sf_statec_callback, sf%d: "
			    "got FCAL_STATE_OFFLINE during DDI_SUSPEND\n",
			    ddi_get_instance(sf->sf_dip)));
		} else {
			sf->sf_state = SF_STATE_OFFLINE;
		}

		/* scan each possible target on the loop */
		/* (sf_mutex is held across this whole scan) */
		for (i = 0; i < sf_max_targets; i++) {
			target = sf->sf_targets[i];
			while (target != NULL) {
				mutex_enter(&target->sft_mutex);
				if (!(target->sft_state & SF_TARGET_OFFLINE))
					target->sft_state |= (SF_TARGET_BUSY
					    | SF_TARGET_MARK);
				mutex_exit(&target->sft_mutex);
				target = target->sft_next_lun;
			}
		}
		mutex_exit(&sf->sf_mutex);
		break;

	case FCAL_STATE_RESET: {
		struct sf_els_hdr	*privp;	/* ptr to private list */
		struct sf_els_hdr	*tmpp1;	/* tmp prev hdr ptr */
		struct sf_els_hdr	*tmpp2;	/* tmp next hdr ptr */
		struct sf_els_hdr	*head;	/* to save our private list */
		struct fcal_packet	*fpkt;	/* ptr to pkt in hdr */

		/*
		 * a transport reset
		 */
		SF_DEBUG(1, (sf, CE_CONT, "!sf%d: soc reset\n",
		    ddi_get_instance(sf->sf_dip)));
		tmpp1 = head = NULL;
		mutex_enter(&sf->sf_mutex);
		sf->sf_lip_cnt++;
		sf->sf_timer = sf_watchdog_time + SF_RESET_TIMEOUT;
		/*
		 * if we are suspended, preserve the SF_STATE_SUSPENDED flag,
		 * since throttling logic in sf_watch() depends on
		 * preservation of this flag while device is suspended
		 */
		if (sf->sf_state & SF_STATE_SUSPENDED) {
			sf->sf_state |= SF_STATE_OFFLINE;
			SF_DEBUG(1, (sf, CE_CONT,
			    "sf_statec_callback, sf%d: "
			    "got FCAL_STATE_RESET during DDI_SUSPEND\n",
			    ddi_get_instance(sf->sf_dip)));
		} else {
			sf->sf_state = SF_STATE_OFFLINE;
		}

		/*
		 * scan each possible target on the loop, looking for targets
		 * that need callbacks ran
		 */
		for (i = 0; i < sf_max_targets; i++) {
			target = sf->sf_targets[i];
			while (target != NULL) {
				if (!(target->sft_state & SF_TARGET_OFFLINE)) {
					target->sft_state |= (SF_TARGET_BUSY
					    | SF_TARGET_MARK);
					mutex_exit(&sf->sf_mutex);
					/*
					 * run remove event callbacks for lun
					 *
					 * We have a nasty race condition here
					 * 'cause we're dropping this mutex to
					 * run the callback and expect the
					 * linked list to be the same.
					 */
					(void) ndi_event_retrieve_cookie(
					    sf->sf_event_hdl, target->sft_dip,
					    FCAL_REMOVE_EVENT, &sf_remove_eid,
					    NDI_EVENT_NOPASS);
					(void) ndi_event_run_callbacks(
					    sf->sf_event_hdl,
					    target->sft_dip,
					    sf_remove_eid, NULL);
					mutex_enter(&sf->sf_mutex);
				}
				target = target->sft_next_lun;
			}
		}

		/*
		 * scan for ELS commands that are in transport, not complete,
		 * and have a valid timeout, building a private list
		 */
		privp = sf->sf_els_list;
		while (privp != NULL) {
			fpkt = privp->fpkt;
			if ((fpkt->fcal_cmd_state & FCAL_CMD_IN_TRANSPORT) &&
			    (!(fpkt->fcal_cmd_state & FCAL_CMD_COMPLETE)) &&
			    (privp->timeout != SF_INVALID_TIMEOUT)) {
				/*
				 * cmd in transport && not complete &&
				 * timeout valid
				 *
				 * move this entry from ELS input list to our
				 * private list
				 */

				tmpp2 = privp->next; /* save ptr to next */

				/* push this on private list head */
				privp->next = head;
				head = privp;

				/* remove this entry from input list */
				if (tmpp1 != NULL) {
					/*
					 * remove this entry from somewhere in
					 * the middle of the list
					 */
					tmpp1->next = tmpp2;
					if (tmpp2 != NULL) {
						tmpp2->prev = tmpp1;
					}
				} else {
					/*
					 * remove this entry from the head
					 * of the list
					 */
					sf->sf_els_list = tmpp2;
					if (tmpp2 != NULL) {
						tmpp2->prev = NULL;
					}
				}
				privp = tmpp2;	/* skip to next entry */
			} else {
				tmpp1 = privp;	/* save ptr to prev entry */
				privp = privp->next; /* skip to next entry */
			}
		}

		mutex_exit(&sf->sf_mutex);

		/*
		 * foreach cmd in our list free the ELS packet associated
		 * with it
		 */
		privp = head;
		while (privp != NULL) {
			fpkt = privp->fpkt;
			/* advance before the free -- privp lives in fpkt */
			privp = privp->next;
			sf_els_free(fpkt);
		}

		/*
		 * scan for commands from each possible target
		 */
		for (i = 0; i < sf_max_targets; i++) {
			target = sf->sf_targets[i];
			while (target != NULL) {
				/*
				 * scan all active commands for this target,
				 * looking for commands that have been issued,
				 * are in transport, and are not yet complete
				 * (so we can terminate them because of the
				 * reset)
				 */
				mutex_enter(&target->sft_pkt_mutex);
				cmd = target->sft_pkt_head;
				while (cmd != (struct sf_pkt *)&target->
				    sft_pkt_head) {
					fpkt = cmd->cmd_fp_pkt;
					mutex_enter(&cmd->cmd_abort_mutex);
					if ((cmd->cmd_state ==
					    SF_STATE_ISSUED) &&
					    (fpkt->fcal_cmd_state &
					    FCAL_CMD_IN_TRANSPORT) &&
					    (!(fpkt->fcal_cmd_state &
					    FCAL_CMD_COMPLETE))) {
						/* a command to be reset */
						pkt = cmd->cmd_pkt;
						pkt->pkt_reason = CMD_RESET;
						pkt->pkt_statistics |=
						    STAT_BUS_RESET;
						cmd->cmd_state = SF_STATE_IDLE;
						mutex_exit(&cmd->
						    cmd_abort_mutex);
						/*
						 * drop the list lock around
						 * the completion callback,
						 * then restart the scan from
						 * the head since the list may
						 * have changed meanwhile
						 */
						mutex_exit(&target->
						    sft_pkt_mutex);
						if (pkt->pkt_comp != NULL) {
							(*pkt->pkt_comp)(pkt);
						}
						mutex_enter(&target->
						    sft_pkt_mutex);
						cmd = target->sft_pkt_head;
					} else {
						mutex_exit(&cmd->
						    cmd_abort_mutex);
						/* get next command */
						cmd = cmd->cmd_forw;
					}
				}
				mutex_exit(&target->sft_pkt_mutex);
				target = target->sft_next_lun;
			}
		}

		/*
		 * get packet queue for this target, resetting all remaining
		 * commands
		 */
		mutex_enter(&sf->sf_mutex);
		cmd = sf->sf_pkt_head;
		sf->sf_pkt_head = NULL;
		mutex_exit(&sf->sf_mutex);

		while (cmd != NULL) {
			pkt = cmd->cmd_pkt;
			cmd = cmd->cmd_next;
			pkt->pkt_reason = CMD_RESET;
			pkt->pkt_statistics |= STAT_BUS_RESET;
			if (pkt->pkt_comp != NULL) {
				(*pkt->pkt_comp)(pkt);
			}
		}
		break;
	}

	default:
		break;
	}
}
2586 2590
2587 2591
/*
 * called to send a PLOGI (N_port login) ELS request to a destination ID,
 * returning TRUE upon success, else returning FALSE
 *
 * Despite the name this also builds LOGO payloads: els_code selects
 * LA_ELS_PLOGI or LA_ELS_LOGO, and for LOGO the logi buffer is
 * reinterpreted as a struct la_els_logo with arg1 as the low byte of
 * the N_port ID.  lip_cnt is recorded so stale responses from before a
 * newer LIP can be ignored.  The request is sent via
 * sf_els_transport() with an SF_ELS_TIMEOUT deadline.
 */
static int
sf_login(struct sf *sf, uchar_t els_code, uchar_t dest_id, uint_t arg1,
    int lip_cnt)
{
	struct la_els_logi	*logi;
	struct sf_els_hdr	*privp;


	/* allocate the header, command, and response buffers as one unit */
	if (sf_els_alloc(sf, dest_id, sizeof (struct sf_els_hdr),
	    sizeof (union sf_els_cmd), sizeof (union sf_els_rsp),
	    (caddr_t *)&privp, (caddr_t *)&logi) == NULL) {
		sf_log(sf, CE_WARN, "Cannot allocate PLOGI for target %x "
		    "due to DVMA shortage.\n", sf_alpa_to_switch[dest_id]);
		return (FALSE);
	}

	privp->lip_cnt = lip_cnt;
	if (els_code == LA_ELS_PLOGI) {
		/*
		 * copy our login parameters into the payload; the "- 4"
		 * presumably skips the leading 4-byte ls_code/mbz word of
		 * la_els_logi -- NOTE(review): better expressed with
		 * offsetof, confirm against the struct layout
		 */
		bcopy((caddr_t)sf->sf_sochandle->fcal_loginparms,
		    (caddr_t)&logi->common_service, sizeof (struct la_els_logi)
		    - 4);
		bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
		    (caddr_t)&logi->nport_ww_name, sizeof (la_wwn_t));
		bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn,
		    (caddr_t)&logi->node_ww_name, sizeof (la_wwn_t));
		bzero((caddr_t)&logi->reserved, 16);
	} else if (els_code == LA_ELS_LOGO) {
		/* build a LOGO payload in the same buffer */
		bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
		    (caddr_t)&(((struct la_els_logo *)logi)->nport_ww_name), 8);
		((struct la_els_logo *)logi)->reserved = 0;
		((struct la_els_logo *)logi)->nport_id[0] = 0;
		((struct la_els_logo *)logi)->nport_id[1] = 0;
		((struct la_els_logo *)logi)->nport_id[2] = arg1;
	}

	/* ls_code/mbz share the same offset in both payload layouts */
	privp->els_code = els_code;
	logi->ls_code = els_code;
	logi->mbz[0] = 0;
	logi->mbz[1] = 0;
	logi->mbz[2] = 0;

	privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
	return (sf_els_transport(sf, privp));
}
2636 2640
2637 2641
2638 2642 /*
2639 2643 * send an ELS IU via the transport,
2640 2644 * returning TRUE upon success, else returning FALSE
2641 2645 */
2642 2646 static int
2643 2647 sf_els_transport(struct sf *sf, struct sf_els_hdr *privp)
2644 2648 {
2645 2649 struct fcal_packet *fpkt = privp->fpkt;
2646 2650
2647 2651
2648 2652 (void) ddi_dma_sync(privp->cmd_dma_handle, (off_t)0, (size_t)0,
2649 2653 DDI_DMA_SYNC_FORDEV);
2650 2654 privp->prev = NULL;
2651 2655 mutex_enter(&sf->sf_mutex);
2652 2656 privp->next = sf->sf_els_list;
2653 2657 if (sf->sf_els_list != NULL) {
2654 2658 sf->sf_els_list->prev = privp;
2655 2659 }
2656 2660 sf->sf_els_list = privp;
2657 2661 mutex_exit(&sf->sf_mutex);
2658 2662
2659 2663 /* call the transport to send a packet */
2660 2664 if (soc_transport(sf->sf_sochandle, fpkt, FCAL_NOSLEEP,
2661 2665 CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) {
2662 2666 mutex_enter(&sf->sf_mutex);
2663 2667 if (privp->prev != NULL) {
2664 2668 privp->prev->next = privp->next;
2665 2669 }
2666 2670 if (privp->next != NULL) {
2667 2671 privp->next->prev = privp->prev;
2668 2672 }
2669 2673 if (sf->sf_els_list == privp) {
2670 2674 sf->sf_els_list = privp->next;
2671 2675 }
2672 2676 mutex_exit(&sf->sf_mutex);
2673 2677 sf_els_free(fpkt);
2674 2678 return (FALSE); /* failure */
2675 2679 }
2676 2680 return (TRUE); /* success */
2677 2681 }
2678 2682
2679 2683
2680 2684 /*
2681 2685 * called as the pkt_comp routine for ELS FC packets
2682 2686 */
2683 2687 static void
2684 2688 sf_els_callback(struct fcal_packet *fpkt)
2685 2689 {
2686 2690 struct sf_els_hdr *privp = fpkt->fcal_pkt_private;
2687 2691 struct sf *sf = privp->sf;
2688 2692 struct sf *tsf;
2689 2693 int tgt_id;
2690 2694 struct la_els_logi *ptr = (struct la_els_logi *)privp->rsp;
2691 2695 struct la_els_adisc *adisc = (struct la_els_adisc *)ptr;
2692 2696 struct sf_target *target;
2693 2697 short ncmds;
2694 2698 short free_pkt = TRUE;
2695 2699
2696 2700
2697 2701 /*
2698 2702 * we've received an ELS callback, i.e. an ELS packet has arrived
2699 2703 */
2700 2704
2701 2705 /* take the current packet off of the queue */
2702 2706 mutex_enter(&sf->sf_mutex);
2703 2707 if (privp->timeout == SF_INVALID_TIMEOUT) {
2704 2708 mutex_exit(&sf->sf_mutex);
2705 2709 return;
2706 2710 }
2707 2711 if (privp->prev != NULL) {
2708 2712 privp->prev->next = privp->next;
2709 2713 }
2710 2714 if (privp->next != NULL) {
2711 2715 privp->next->prev = privp->prev;
2712 2716 }
2713 2717 if (sf->sf_els_list == privp) {
2714 2718 sf->sf_els_list = privp->next;
2715 2719 }
2716 2720 privp->prev = privp->next = NULL;
2717 2721 mutex_exit(&sf->sf_mutex);
2718 2722
2719 2723 /* get # pkts in this callback */
2720 2724 ncmds = fpkt->fcal_ncmds;
2721 2725 ASSERT(ncmds >= 0);
2722 2726 mutex_enter(&sf->sf_cmd_mutex);
2723 2727 sf->sf_ncmds = ncmds;
2724 2728 mutex_exit(&sf->sf_cmd_mutex);
2725 2729
2726 2730 /* sync idea of memory */
2727 2731 (void) ddi_dma_sync(privp->rsp_dma_handle, (off_t)0, (size_t)0,
2728 2732 DDI_DMA_SYNC_FORKERNEL);
2729 2733
2730 2734 /* was this an OK ACC msg ?? */
2731 2735 if ((fpkt->fcal_pkt_status == FCAL_STATUS_OK) &&
2732 2736 (ptr->ls_code == LA_ELS_ACC)) {
2733 2737
2734 2738 /*
2735 2739 * this was an OK ACC pkt
2736 2740 */
2737 2741
2738 2742 switch (privp->els_code) {
2739 2743 case LA_ELS_PLOGI:
2740 2744 /*
2741 2745 * was able to to an N_port login
2742 2746 */
2743 2747 SF_DEBUG(2, (sf, CE_CONT,
2744 2748 "!PLOGI to al_pa %x succeeded, wwn %x%x\n",
2745 2749 privp->dest_nport_id,
2746 2750 *((int *)&ptr->nport_ww_name.raw_wwn[0]),
2747 2751 *((int *)&ptr->nport_ww_name.raw_wwn[4])));
2748 2752 /* try to do a process login */
2749 2753 if (!sf_do_prli(sf, privp, ptr)) {
2750 2754 free_pkt = FALSE;
2751 2755 goto fail; /* PRLI failed */
2752 2756 }
2753 2757 break;
2754 2758 case LA_ELS_PRLI:
2755 2759 /*
2756 2760 * was able to do a process login
2757 2761 */
2758 2762 SF_DEBUG(2, (sf, CE_CONT,
2759 2763 "!PRLI to al_pa %x succeeded\n",
2760 2764 privp->dest_nport_id));
2761 2765 /* try to do address discovery */
2762 2766 if (sf_do_adisc(sf, privp) != 1) {
2763 2767 free_pkt = FALSE;
2764 2768 goto fail; /* ADISC failed */
2765 2769 }
2766 2770 break;
2767 2771 case LA_ELS_ADISC:
2768 2772 /*
2769 2773 * found a target via ADISC
2770 2774 */
2771 2775
2772 2776 SF_DEBUG(2, (sf, CE_CONT,
2773 2777 "!ADISC to al_pa %x succeeded\n",
2774 2778 privp->dest_nport_id));
2775 2779
2776 2780 /* create the target info */
2777 2781 if ((target = sf_create_target(sf, privp,
2778 2782 sf_alpa_to_switch[(uchar_t)adisc->hard_address],
2779 2783 (int64_t)0))
2780 2784 == NULL) {
2781 2785 goto fail; /* can't create target */
2782 2786 }
2783 2787
2784 2788 /*
2785 2789 * ensure address discovered matches what we thought
2786 2790 * it would be
2787 2791 */
2788 2792 if ((uchar_t)adisc->hard_address !=
2789 2793 privp->dest_nport_id) {
2790 2794 sf_log(sf, CE_WARN,
2791 2795 "target 0x%x, AL-PA 0x%x and "
2792 2796 "hard address 0x%x don't match\n",
2793 2797 sf_alpa_to_switch[
2794 2798 (uchar_t)privp->dest_nport_id],
2795 2799 privp->dest_nport_id,
2796 2800 (uchar_t)adisc->hard_address);
2797 2801 mutex_enter(&sf->sf_mutex);
2798 2802 sf_offline_target(sf, target);
2799 2803 mutex_exit(&sf->sf_mutex);
2800 2804 goto fail; /* addr doesn't match */
2801 2805 }
2802 2806 /*
2803 2807 * get inquiry data from the target
2804 2808 */
2805 2809 if (!sf_do_reportlun(sf, privp, target)) {
2806 2810 mutex_enter(&sf->sf_mutex);
2807 2811 sf_offline_target(sf, target);
2808 2812 mutex_exit(&sf->sf_mutex);
2809 2813 free_pkt = FALSE;
2810 2814 goto fail; /* inquiry failed */
2811 2815 }
2812 2816 break;
2813 2817 default:
2814 2818 SF_DEBUG(2, (sf, CE_CONT,
2815 2819 "!ELS %x to al_pa %x succeeded\n",
2816 2820 privp->els_code, privp->dest_nport_id));
2817 2821 sf_els_free(fpkt);
2818 2822 break;
2819 2823 }
2820 2824
2821 2825 } else {
2822 2826
2823 2827 /*
2824 2828 * oh oh -- this was not an OK ACC packet
2825 2829 */
2826 2830
2827 2831 /* get target ID from dest loop address */
2828 2832 tgt_id = sf_alpa_to_switch[(uchar_t)privp->dest_nport_id];
2829 2833
2830 2834 /* keep track of failures */
2831 2835 sf->sf_stats.tstats[tgt_id].els_failures++;
2832 2836 if (++(privp->retries) < sf_els_retries &&
2833 2837 fpkt->fcal_pkt_status != FCAL_STATUS_OPEN_FAIL) {
2834 2838 if (fpkt->fcal_pkt_status ==
2835 2839 FCAL_STATUS_MAX_XCHG_EXCEEDED) {
2836 2840 tsf = sf->sf_sibling;
2837 2841 if (tsf != NULL) {
2838 2842 mutex_enter(&tsf->sf_cmd_mutex);
2839 2843 tsf->sf_flag = 1;
2840 2844 tsf->sf_throttle = SF_DECR_DELTA;
2841 2845 mutex_exit(&tsf->sf_cmd_mutex);
2842 2846 }
2843 2847 }
2844 2848 privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2845 2849 privp->prev = NULL;
2846 2850
2847 2851 mutex_enter(&sf->sf_mutex);
2848 2852
2849 2853 if (privp->lip_cnt == sf->sf_lip_cnt) {
2850 2854 SF_DEBUG(1, (sf, CE_WARN,
2851 2855 "!ELS %x to al_pa %x failed, retrying",
2852 2856 privp->els_code, privp->dest_nport_id));
2853 2857 privp->next = sf->sf_els_list;
2854 2858 if (sf->sf_els_list != NULL) {
2855 2859 sf->sf_els_list->prev = privp;
2856 2860 }
2857 2861
2858 2862 sf->sf_els_list = privp;
2859 2863
2860 2864 mutex_exit(&sf->sf_mutex);
2861 2865 /* device busy? wait a bit ... */
2862 2866 if (fpkt->fcal_pkt_status ==
2863 2867 FCAL_STATUS_MAX_XCHG_EXCEEDED) {
2864 2868 privp->delayed_retry = 1;
2865 2869 return;
2866 2870 }
2867 2871 /* call the transport to send a pkt */
2868 2872 if (soc_transport(sf->sf_sochandle, fpkt,
2869 2873 FCAL_NOSLEEP, CQ_REQUEST_1) !=
2870 2874 FCAL_TRANSPORT_SUCCESS) {
2871 2875 mutex_enter(&sf->sf_mutex);
2872 2876 if (privp->prev != NULL) {
2873 2877 privp->prev->next =
2874 2878 privp->next;
2875 2879 }
2876 2880 if (privp->next != NULL) {
2877 2881 privp->next->prev =
2878 2882 privp->prev;
2879 2883 }
2880 2884 if (sf->sf_els_list == privp) {
2881 2885 sf->sf_els_list = privp->next;
2882 2886 }
2883 2887 mutex_exit(&sf->sf_mutex);
2884 2888 goto fail;
2885 2889 } else
2886 2890 return;
2887 2891 } else {
2888 2892 mutex_exit(&sf->sf_mutex);
2889 2893 goto fail;
2890 2894 }
2891 2895 } else {
2892 2896 #ifdef DEBUG
2893 2897 if (fpkt->fcal_pkt_status != 0x36 || sfdebug > 4) {
2894 2898 SF_DEBUG(2, (sf, CE_NOTE, "ELS %x to al_pa %x failed",
2895 2899 privp->els_code, privp->dest_nport_id));
2896 2900 if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
2897 2901 SF_DEBUG(2, (sf, CE_NOTE,
2898 2902 "els reply code = %x", ptr->ls_code));
2899 2903 if (ptr->ls_code == LA_ELS_RJT)
2900 2904 SF_DEBUG(1, (sf, CE_CONT,
2901 2905 "LS_RJT reason = %x\n",
2902 2906 *(((uint_t *)ptr) + 1)));
2903 2907 } else
2904 2908 SF_DEBUG(2, (sf, CE_NOTE,
2905 2909 "fc packet status = %x",
2906 2910 fpkt->fcal_pkt_status));
2907 2911 }
2908 2912 #endif
2909 2913 goto fail;
2910 2914 }
2911 2915 }
2912 2916 return; /* success */
2913 2917 fail:
2914 2918 mutex_enter(&sf->sf_mutex);
2915 2919 if (sf->sf_lip_cnt == privp->lip_cnt) {
2916 2920 sf->sf_device_count--;
2917 2921 ASSERT(sf->sf_device_count >= 0);
2918 2922 if (sf->sf_device_count == 0) {
2919 2923 sf_finish_init(sf, privp->lip_cnt);
2920 2924 }
2921 2925 }
2922 2926 mutex_exit(&sf->sf_mutex);
2923 2927 if (free_pkt) {
2924 2928 sf_els_free(fpkt);
2925 2929 }
2926 2930 }
2927 2931
2928 2932
2929 2933 /*
2930 2934 * send a PRLI (process login) ELS IU via the transport,
2931 2935 * returning TRUE upon success, else returning FALSE
2932 2936 */
2933 2937 static int
2934 2938 sf_do_prli(struct sf *sf, struct sf_els_hdr *privp, struct la_els_logi *ptr)
2935 2939 {
2936 2940 struct la_els_prli *prli = (struct la_els_prli *)privp->cmd;
2937 2941 struct fcp_prli *fprli;
2938 2942 struct fcal_packet *fpkt = privp->fpkt;
2939 2943
2940 2944
2941 2945 fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
2942 2946 sizeof (struct la_els_prli);
2943 2947 privp->els_code = LA_ELS_PRLI;
2944 2948 fprli = (struct fcp_prli *)prli->service_params;
2945 2949 prli->ls_code = LA_ELS_PRLI;
2946 2950 prli->page_length = 0x10;
2947 2951 prli->payload_length = sizeof (struct la_els_prli);
2948 2952 fprli->type = 0x08; /* no define here? */
2949 2953 fprli->resvd1 = 0;
2950 2954 fprli->orig_process_assoc_valid = 0;
2951 2955 fprli->resp_process_assoc_valid = 0;
2952 2956 fprli->establish_image_pair = 1;
2953 2957 fprli->resvd2 = 0;
2954 2958 fprli->resvd3 = 0;
2955 2959 fprli->data_overlay_allowed = 0;
2956 2960 fprli->initiator_fn = 1;
2957 2961 fprli->target_fn = 0;
2958 2962 fprli->cmd_data_mixed = 0;
2959 2963 fprli->data_resp_mixed = 0;
2960 2964 fprli->read_xfer_rdy_disabled = 1;
2961 2965 fprli->write_xfer_rdy_disabled = 0;
2962 2966
2963 2967 bcopy((caddr_t)&ptr->nport_ww_name, (caddr_t)&privp->port_wwn,
2964 2968 sizeof (privp->port_wwn));
2965 2969 bcopy((caddr_t)&ptr->node_ww_name, (caddr_t)&privp->node_wwn,
2966 2970 sizeof (privp->node_wwn));
2967 2971
2968 2972 privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2969 2973 return (sf_els_transport(sf, privp));
2970 2974 }
2971 2975
2972 2976
2973 2977 /*
2974 2978 * send an ADISC (address discovery) ELS IU via the transport,
2975 2979 * returning TRUE upon success, else returning FALSE
2976 2980 */
2977 2981 static int
2978 2982 sf_do_adisc(struct sf *sf, struct sf_els_hdr *privp)
2979 2983 {
2980 2984 struct la_els_adisc *adisc = (struct la_els_adisc *)privp->cmd;
2981 2985 struct fcal_packet *fpkt = privp->fpkt;
2982 2986
2983 2987 privp->els_code = LA_ELS_ADISC;
2984 2988 adisc->ls_code = LA_ELS_ADISC;
2985 2989 adisc->mbz[0] = 0;
2986 2990 adisc->mbz[1] = 0;
2987 2991 adisc->mbz[2] = 0;
2988 2992 adisc->hard_address = 0; /* ??? */
2989 2993 fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
2990 2994 sizeof (struct la_els_adisc);
2991 2995 bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
2992 2996 (caddr_t)&adisc->port_wwn, sizeof (adisc->port_wwn));
2993 2997 bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn,
2994 2998 (caddr_t)&adisc->node_wwn, sizeof (adisc->node_wwn));
2995 2999 adisc->nport_id = sf->sf_al_pa;
2996 3000
2997 3001 privp->timeout = sf_watchdog_time + SF_ELS_TIMEOUT;
2998 3002 return (sf_els_transport(sf, privp));
2999 3003 }
3000 3004
3001 3005
3002 3006 static struct fcal_packet *
3003 3007 sf_els_alloc(struct sf *sf, uchar_t dest_id, int priv_size, int cmd_size,
3004 3008 int rsp_size, caddr_t *rprivp, caddr_t *cmd_buf)
3005 3009 {
3006 3010 struct fcal_packet *fpkt;
3007 3011 ddi_dma_cookie_t pcookie;
3008 3012 ddi_dma_cookie_t rcookie;
3009 3013 struct sf_els_hdr *privp;
3010 3014 ddi_dma_handle_t cmd_dma_handle = NULL;
3011 3015 ddi_dma_handle_t rsp_dma_handle = NULL;
3012 3016 ddi_acc_handle_t cmd_acc_handle = NULL;
3013 3017 ddi_acc_handle_t rsp_acc_handle = NULL;
3014 3018 size_t real_size;
3015 3019 uint_t ccount;
3016 3020 fc_frame_header_t *hp;
3017 3021 int cmd_bound = FALSE, rsp_bound = FALSE;
3018 3022 caddr_t cmd = NULL;
3019 3023 caddr_t rsp = NULL;
3020 3024
3021 3025 if ((fpkt = (struct fcal_packet *)kmem_zalloc(
3022 3026 sizeof (struct fcal_packet), KM_NOSLEEP)) == NULL) {
3023 3027 SF_DEBUG(1, (sf, CE_WARN,
3024 3028 "Could not allocate fcal_packet for ELS\n"));
3025 3029 return (NULL);
3026 3030 }
3027 3031
3028 3032 if ((privp = (struct sf_els_hdr *)kmem_zalloc(priv_size,
3029 3033 KM_NOSLEEP)) == NULL) {
3030 3034 SF_DEBUG(1, (sf, CE_WARN,
3031 3035 "Could not allocate sf_els_hdr for ELS\n"));
3032 3036 goto fail;
3033 3037 }
3034 3038
3035 3039 privp->size = priv_size;
3036 3040 fpkt->fcal_pkt_private = (caddr_t)privp;
3037 3041
3038 3042 if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3039 3043 DDI_DMA_DONTWAIT, NULL, &cmd_dma_handle) != DDI_SUCCESS) {
3040 3044 SF_DEBUG(1, (sf, CE_WARN,
3041 3045 "Could not allocate DMA handle for ELS\n"));
3042 3046 goto fail;
3043 3047 }
3044 3048
3045 3049 if (ddi_dma_mem_alloc(cmd_dma_handle, cmd_size,
3046 3050 sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3047 3051 DDI_DMA_DONTWAIT, NULL, &cmd,
3048 3052 &real_size, &cmd_acc_handle) != DDI_SUCCESS) {
3049 3053 SF_DEBUG(1, (sf, CE_WARN,
3050 3054 "Could not allocate DMA memory for ELS\n"));
3051 3055 goto fail;
3052 3056 }
3053 3057
3054 3058 if (real_size < cmd_size) {
3055 3059 SF_DEBUG(1, (sf, CE_WARN,
3056 3060 "DMA memory too small for ELS\n"));
3057 3061 goto fail;
3058 3062 }
3059 3063
3060 3064 if (ddi_dma_addr_bind_handle(cmd_dma_handle, NULL,
3061 3065 cmd, real_size, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
3062 3066 DDI_DMA_DONTWAIT, NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) {
3063 3067 SF_DEBUG(1, (sf, CE_WARN,
3064 3068 "Could not bind DMA memory for ELS\n"));
3065 3069 goto fail;
3066 3070 }
3067 3071 cmd_bound = TRUE;
3068 3072
3069 3073 if (ccount != 1) {
3070 3074 SF_DEBUG(1, (sf, CE_WARN,
3071 3075 "Wrong cookie count for ELS\n"));
3072 3076 goto fail;
3073 3077 }
3074 3078
3075 3079 if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
3076 3080 DDI_DMA_DONTWAIT, NULL, &rsp_dma_handle) != DDI_SUCCESS) {
3077 3081 SF_DEBUG(1, (sf, CE_WARN,
3078 3082 "Could not allocate DMA handle for ELS rsp\n"));
3079 3083 goto fail;
3080 3084 }
3081 3085 if (ddi_dma_mem_alloc(rsp_dma_handle, rsp_size,
3082 3086 sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
3083 3087 DDI_DMA_DONTWAIT, NULL, &rsp,
3084 3088 &real_size, &rsp_acc_handle) != DDI_SUCCESS) {
3085 3089 SF_DEBUG(1, (sf, CE_WARN,
3086 3090 "Could not allocate DMA memory for ELS rsp\n"));
3087 3091 goto fail;
3088 3092 }
3089 3093
3090 3094 if (real_size < rsp_size) {
3091 3095 SF_DEBUG(1, (sf, CE_WARN,
3092 3096 "DMA memory too small for ELS rsp\n"));
3093 3097 goto fail;
3094 3098 }
3095 3099
3096 3100 if (ddi_dma_addr_bind_handle(rsp_dma_handle, NULL,
3097 3101 rsp, real_size, DDI_DMA_READ | DDI_DMA_CONSISTENT,
3098 3102 DDI_DMA_DONTWAIT, NULL, &rcookie, &ccount) != DDI_DMA_MAPPED) {
3099 3103 SF_DEBUG(1, (sf, CE_WARN,
3100 3104 "Could not bind DMA memory for ELS rsp\n"));
3101 3105 goto fail;
3102 3106 }
3103 3107 rsp_bound = TRUE;
3104 3108
3105 3109 if (ccount != 1) {
3106 3110 SF_DEBUG(1, (sf, CE_WARN,
3107 3111 "Wrong cookie count for ELS rsp\n"));
3108 3112 goto fail;
3109 3113 }
3110 3114
3111 3115 privp->cmd = cmd;
3112 3116 privp->sf = sf;
3113 3117 privp->cmd_dma_handle = cmd_dma_handle;
3114 3118 privp->cmd_acc_handle = cmd_acc_handle;
3115 3119 privp->rsp = rsp;
3116 3120 privp->rsp_dma_handle = rsp_dma_handle;
3117 3121 privp->rsp_acc_handle = rsp_acc_handle;
3118 3122 privp->dest_nport_id = dest_id;
3119 3123 privp->fpkt = fpkt;
3120 3124
3121 3125 fpkt->fcal_pkt_cookie = sf->sf_socp;
3122 3126 fpkt->fcal_pkt_comp = sf_els_callback;
3123 3127 fpkt->fcal_magic = FCALP_MAGIC;
3124 3128 fpkt->fcal_pkt_flags = 0;
3125 3129 fpkt->fcal_socal_request.sr_soc_hdr.sh_flags =
3126 3130 (ushort_t)(SOC_FC_HEADER | sf->sf_sochandle->fcal_portno);
3127 3131 fpkt->fcal_socal_request.sr_soc_hdr.sh_class = 3;
3128 3132 fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 2;
3129 3133 fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = cmd_size;
3130 3134 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_count = 1;
3131 3135 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_flags = 0;
3132 3136 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_seqno = 0;
3133 3137 fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_SIMPLE;
3134 3138 fpkt->fcal_socal_request.sr_dataseg[0].fc_base = (uint32_t)
3135 3139 pcookie.dmac_address;
3136 3140 fpkt->fcal_socal_request.sr_dataseg[0].fc_count = cmd_size;
3137 3141 fpkt->fcal_socal_request.sr_dataseg[1].fc_base = (uint32_t)
3138 3142 rcookie.dmac_address;
3139 3143 fpkt->fcal_socal_request.sr_dataseg[1].fc_count = rsp_size;
3140 3144
3141 3145 /* Fill in the Fabric Channel Header */
3142 3146 hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
3143 3147 hp->r_ctl = R_CTL_ELS_REQ;
3144 3148 hp->d_id = dest_id;
3145 3149 hp->s_id = sf->sf_al_pa;
3146 3150 hp->type = TYPE_EXTENDED_LS;
3147 3151 hp->reserved1 = 0;
3148 3152 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
3149 3153 hp->seq_id = 0;
3150 3154 hp->df_ctl = 0;
3151 3155 hp->seq_cnt = 0;
3152 3156 hp->ox_id = 0xffff;
3153 3157 hp->rx_id = 0xffff;
3154 3158 hp->ro = 0;
3155 3159
3156 3160 *rprivp = (caddr_t)privp;
3157 3161 *cmd_buf = cmd;
3158 3162 return (fpkt);
3159 3163
3160 3164 fail:
3161 3165 if (cmd_dma_handle != NULL) {
3162 3166 if (cmd_bound) {
3163 3167 (void) ddi_dma_unbind_handle(cmd_dma_handle);
3164 3168 }
3165 3169 ddi_dma_free_handle(&cmd_dma_handle);
3166 3170 privp->cmd_dma_handle = NULL;
3167 3171 }
3168 3172 if (rsp_dma_handle != NULL) {
3169 3173 if (rsp_bound) {
3170 3174 (void) ddi_dma_unbind_handle(rsp_dma_handle);
3171 3175 }
3172 3176 ddi_dma_free_handle(&rsp_dma_handle);
3173 3177 privp->rsp_dma_handle = NULL;
3174 3178 }
3175 3179 sf_els_free(fpkt);
3176 3180 return (NULL);
3177 3181 }
3178 3182
3179 3183
3180 3184 static void
3181 3185 sf_els_free(struct fcal_packet *fpkt)
3182 3186 {
3183 3187 struct sf_els_hdr *privp = fpkt->fcal_pkt_private;
3184 3188
3185 3189 if (privp != NULL) {
3186 3190 if (privp->cmd_dma_handle != NULL) {
3187 3191 (void) ddi_dma_unbind_handle(privp->cmd_dma_handle);
3188 3192 ddi_dma_free_handle(&privp->cmd_dma_handle);
3189 3193 }
3190 3194 if (privp->cmd != NULL) {
3191 3195 ddi_dma_mem_free(&privp->cmd_acc_handle);
3192 3196 }
3193 3197
3194 3198 if (privp->rsp_dma_handle != NULL) {
3195 3199 (void) ddi_dma_unbind_handle(privp->rsp_dma_handle);
3196 3200 ddi_dma_free_handle(&privp->rsp_dma_handle);
3197 3201 }
3198 3202
3199 3203 if (privp->rsp != NULL) {
3200 3204 ddi_dma_mem_free(&privp->rsp_acc_handle);
3201 3205 }
3202 3206 if (privp->data_dma_handle) {
3203 3207 (void) ddi_dma_unbind_handle(privp->data_dma_handle);
3204 3208 ddi_dma_free_handle(&privp->data_dma_handle);
3205 3209 }
3206 3210 if (privp->data_buf) {
3207 3211 ddi_dma_mem_free(&privp->data_acc_handle);
3208 3212 }
3209 3213 kmem_free(privp, privp->size);
3210 3214 }
3211 3215 kmem_free(fpkt, sizeof (struct fcal_packet));
3212 3216 }
3213 3217
3214 3218
/*
 * create (or revive) the sf_target for switch slot tnum / LUN lun on
 * this sf instance, based on the ELS exchange recorded in privp
 *
 * returns the target (new or existing) or NULL on failure; a fresh
 * struct is speculatively allocated up front (before taking sf_mutex)
 * and freed on every path that does not end up using it
 */
static struct sf_target *
sf_create_target(struct sf *sf, struct sf_els_hdr *privp, int tnum, int64_t lun)
{
	struct sf_target *target, *ntarget, *otarget, *ptarget;
	int hash;
#ifdef RAID_LUNS
	int64_t orig_lun = lun;

	/* XXXX Work around SCSA limitations. */
	lun = *((short *)&lun);
#endif
	/* allocate before locking; may be NULL (handled on each path) */
	ntarget = kmem_zalloc(sizeof (struct sf_target), KM_NOSLEEP);
	mutex_enter(&sf->sf_mutex);
	/* bail out if another LIP has happened since this ELS started */
	if (sf->sf_lip_cnt != privp->lip_cnt) {
		mutex_exit(&sf->sf_mutex);
		if (ntarget != NULL)
			kmem_free(ntarget, sizeof (struct sf_target));
		return (NULL);
	}

	target = sf_lookup_target(sf, privp->port_wwn, lun);
	if (lun != 0) {
		/*
		 * Since LUNs != 0 are queued up after LUN == 0, find LUN == 0
		 * and enqueue the new LUN.
		 */
		if ((ptarget = sf_lookup_target(sf, privp->port_wwn,
		    (int64_t)0)) == NULL) {
			/*
			 * Yeep -- no LUN 0?
			 */
			mutex_exit(&sf->sf_mutex);
			sf_log(sf, CE_WARN, "target 0x%x "
			    "lun %" PRIx64 ": No LUN 0\n", tnum, lun);
			if (ntarget != NULL)
				kmem_free(ntarget, sizeof (struct sf_target));
			return (NULL);
		}
		mutex_enter(&ptarget->sft_mutex);
		if (target != NULL && ptarget->sft_lip_cnt == sf->sf_lip_cnt &&
		    ptarget->sft_state&SF_TARGET_OFFLINE) {
			/* LUN 0 already finished, duplicate its state */
			mutex_exit(&ptarget->sft_mutex);
			sf_offline_target(sf, target);
			mutex_exit(&sf->sf_mutex);
			if (ntarget != NULL)
				kmem_free(ntarget, sizeof (struct sf_target));
			return (target);
		} else if (target != NULL) {
			/*
			 * LUN 0 online or not examined yet.
			 * Try to bring the LUN back online
			 */
			mutex_exit(&ptarget->sft_mutex);
			mutex_enter(&target->sft_mutex);
			target->sft_lip_cnt = privp->lip_cnt;
			target->sft_state |= SF_TARGET_BUSY;
			target->sft_state &= ~(SF_TARGET_OFFLINE|
			    SF_TARGET_MARK);
			target->sft_al_pa = (uchar_t)privp->dest_nport_id;
			target->sft_hard_address = sf_switch_to_alpa[tnum];
			mutex_exit(&target->sft_mutex);
			mutex_exit(&sf->sf_mutex);
			if (ntarget != NULL)
				kmem_free(ntarget, sizeof (struct sf_target));
			return (target);
		}
		mutex_exit(&ptarget->sft_mutex);
		/* no existing target for this LUN: need the fresh struct */
		if (ntarget == NULL) {
			mutex_exit(&sf->sf_mutex);
			return (NULL);
		}
		/* Initialize new target structure */
		bcopy((caddr_t)&privp->node_wwn,
		    (caddr_t)&ntarget->sft_node_wwn, sizeof (privp->node_wwn));
		bcopy((caddr_t)&privp->port_wwn,
		    (caddr_t)&ntarget->sft_port_wwn, sizeof (privp->port_wwn));
		ntarget->sft_lun.l = lun;
#ifdef RAID_LUNS
		/* store the original LUN; keep the mapped one separately */
		ntarget->sft_lun.l = orig_lun;
		ntarget->sft_raid_lun = (uint_t)lun;
#endif
		mutex_init(&ntarget->sft_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&ntarget->sft_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
		/* Don't let anyone use this till we finishup init. */
		mutex_enter(&ntarget->sft_mutex);
		mutex_enter(&ntarget->sft_pkt_mutex);

		/* insert into the per-(WWN,LUN) hash chain */
		hash = SF_HASH(privp->port_wwn, lun);
		ntarget->sft_next = sf->sf_wwn_lists[hash];
		sf->sf_wwn_lists[hash] = ntarget;

		ntarget->sft_lip_cnt = privp->lip_cnt;
		ntarget->sft_al_pa = (uchar_t)privp->dest_nport_id;
		ntarget->sft_hard_address = sf_switch_to_alpa[tnum];
		ntarget->sft_device_type = DTYPE_UNKNOWN;
		ntarget->sft_state = SF_TARGET_BUSY;
		/* empty circular packet queue, headed by sft_pkt_head */
		ntarget->sft_pkt_head = (struct sf_pkt *)&ntarget->
		    sft_pkt_head;
		ntarget->sft_pkt_tail = (struct sf_pkt *)&ntarget->
		    sft_pkt_head;

		mutex_enter(&ptarget->sft_mutex);
		/* Traverse the list looking for this target */
		/*
		 * NOTE(review): otarget is assigned here but never used
		 * afterwards -- looks like a dead store; confirm before
		 * removing
		 */
		for (target = ptarget; target->sft_next_lun;
		    target = target->sft_next_lun) {
			otarget = target->sft_next_lun;
		}
		/* append the new LUN at the end of LUN 0's chain */
		ntarget->sft_next_lun = target->sft_next_lun;
		target->sft_next_lun = ntarget;
		mutex_exit(&ptarget->sft_mutex);
		mutex_exit(&ntarget->sft_pkt_mutex);
		mutex_exit(&ntarget->sft_mutex);
		mutex_exit(&sf->sf_mutex);
		return (ntarget);

	}
	/* LUN == 0 from here on */
	if (target != NULL && target->sft_lip_cnt == sf->sf_lip_cnt) {
		/* It's been touched this LIP -- duplicate WWNs */
		sf_offline_target(sf, target); /* And all the baby targets */
		mutex_exit(&sf->sf_mutex);
		sf_log(sf, CE_WARN, "target 0x%x, duplicate port wwns\n",
		    tnum);
		if (ntarget != NULL) {
			kmem_free(ntarget, sizeof (struct sf_target));
		}
		return (NULL);
	}

	if ((otarget = sf->sf_targets[tnum]) != NULL) {
		/* Someone else is in our slot */
		mutex_enter(&otarget->sft_mutex);
		if (otarget->sft_lip_cnt == sf->sf_lip_cnt) {
			/* both targets active this LIP: offline both */
			mutex_exit(&otarget->sft_mutex);
			sf_offline_target(sf, otarget);
			if (target != NULL)
				sf_offline_target(sf, target);
			mutex_exit(&sf->sf_mutex);
			sf_log(sf, CE_WARN,
			    "target 0x%x, duplicate switch settings\n", tnum);
			if (ntarget != NULL)
				kmem_free(ntarget, sizeof (struct sf_target));
			return (NULL);
		}
		mutex_exit(&otarget->sft_mutex);
		/* stale occupant with a different WWN: offline it */
		if (bcmp((caddr_t)&privp->port_wwn, (caddr_t)&otarget->
		    sft_port_wwn, sizeof (privp->port_wwn))) {
			sf_offline_target(sf, otarget);
			mutex_exit(&sf->sf_mutex);
			sf_log(sf, CE_WARN, "wwn changed on target 0x%x\n",
			    tnum);
			/* reset the per-target statistics for this slot */
			bzero((caddr_t)&sf->sf_stats.tstats[tnum],
			    sizeof (struct sf_target_stats));
			mutex_enter(&sf->sf_mutex);
		}
	}

	/* claim the slot (target may be NULL here, re-checked below) */
	sf->sf_targets[tnum] = target;
	if ((target = sf->sf_targets[tnum]) == NULL) {
		/* no usable existing target: initialize the fresh one */
		if (ntarget == NULL) {
			mutex_exit(&sf->sf_mutex);
			return (NULL);
		}
		bcopy((caddr_t)&privp->node_wwn,
		    (caddr_t)&ntarget->sft_node_wwn, sizeof (privp->node_wwn));
		bcopy((caddr_t)&privp->port_wwn,
		    (caddr_t)&ntarget->sft_port_wwn, sizeof (privp->port_wwn));
		ntarget->sft_lun.l = lun;
#ifdef RAID_LUNS
		ntarget->sft_lun.l = orig_lun;
		ntarget->sft_raid_lun = (uint_t)lun;
#endif
		mutex_init(&ntarget->sft_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&ntarget->sft_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
		/* hold both locks until initialization is complete */
		mutex_enter(&ntarget->sft_mutex);
		mutex_enter(&ntarget->sft_pkt_mutex);
		hash = SF_HASH(privp->port_wwn, lun); /* lun 0 */
		ntarget->sft_next = sf->sf_wwn_lists[hash];
		sf->sf_wwn_lists[hash] = ntarget;

		target = ntarget;
		target->sft_lip_cnt = privp->lip_cnt;
		target->sft_al_pa = (uchar_t)privp->dest_nport_id;
		target->sft_hard_address = sf_switch_to_alpa[tnum];
		target->sft_device_type = DTYPE_UNKNOWN;
		target->sft_state = SF_TARGET_BUSY;
		/* empty circular packet queue, headed by sft_pkt_head */
		target->sft_pkt_head = (struct sf_pkt *)&target->
		    sft_pkt_head;
		target->sft_pkt_tail = (struct sf_pkt *)&target->
		    sft_pkt_head;
		sf->sf_targets[tnum] = target;
		mutex_exit(&ntarget->sft_mutex);
		mutex_exit(&ntarget->sft_pkt_mutex);
		mutex_exit(&sf->sf_mutex);
	} else {
		/* reuse the existing target: mark it busy/online again */
		mutex_enter(&target->sft_mutex);
		target->sft_lip_cnt = privp->lip_cnt;
		target->sft_state |= SF_TARGET_BUSY;
		target->sft_state &= ~(SF_TARGET_OFFLINE|SF_TARGET_MARK);
		target->sft_al_pa = (uchar_t)privp->dest_nport_id;
		target->sft_hard_address = sf_switch_to_alpa[tnum];
		mutex_exit(&target->sft_mutex);
		mutex_exit(&sf->sf_mutex);
		if (ntarget != NULL)
			kmem_free(ntarget, sizeof (struct sf_target));
	}
	return (target);
}
3423 3427
3424 3428
3425 3429 /*
3426 3430 * find the target for a given sf instance
3427 3431 */
3428 3432 /* ARGSUSED */
3429 3433 static struct sf_target *
3430 3434 #ifdef RAID_LUNS
3431 3435 sf_lookup_target(struct sf *sf, uchar_t *wwn, int lun)
3432 3436 #else
3433 3437 sf_lookup_target(struct sf *sf, uchar_t *wwn, int64_t lun)
3434 3438 #endif
3435 3439 {
3436 3440 int hash;
3437 3441 struct sf_target *target;
3438 3442
3439 3443 ASSERT(mutex_owned(&sf->sf_mutex));
3440 3444 hash = SF_HASH(wwn, lun);
3441 3445
3442 3446 target = sf->sf_wwn_lists[hash];
3443 3447 while (target != NULL) {
3444 3448
3445 3449 #ifndef RAID_LUNS
3446 3450 if (bcmp((caddr_t)wwn, (caddr_t)&target->sft_port_wwn,
3447 3451 sizeof (target->sft_port_wwn)) == 0 &&
3448 3452 target->sft_lun.l == lun)
3449 3453 break;
3450 3454 #else
3451 3455 if (bcmp((caddr_t)wwn, (caddr_t)&target->sft_port_wwn,
3452 3456 sizeof (target->sft_port_wwn)) == 0 &&
3453 3457 target->sft_raid_lun == lun)
3454 3458 break;
3455 3459 #endif
3456 3460 target = target->sft_next;
3457 3461 }
3458 3462
3459 3463 return (target);
3460 3464 }
3461 3465
3462 3466
/*
 * Send out a REPORT_LUNS command.
 *
 * allocates and binds a DMA buffer for the LUN list, converts the
 * ELS packet built by sf_els_alloc() into an FCP command, and hands
 * it to the transport; returns 1 on success, 0 on failure (in which
 * case the packet and the LUN buffer are released here)
 */
static int
sf_do_reportlun(struct sf *sf, struct sf_els_hdr *privp,
    struct sf_target *target)
{
	struct fcal_packet *fpkt = privp->fpkt;
	ddi_dma_cookie_t pcookie;
	ddi_dma_handle_t lun_dma_handle = NULL;
	ddi_acc_handle_t lun_acc_handle;
	uint_t ccount;
	size_t real_size;
	caddr_t lun_buf = NULL;
	int handle_bound = 0;
	fc_frame_header_t *hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
	struct fcp_cmd *reportlun = (struct fcp_cmd *)privp->cmd;
	char *msg = "Transport";	/* which step failed, for the log */

	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
	    DDI_DMA_DONTWAIT, NULL, &lun_dma_handle) != DDI_SUCCESS) {
		msg = "ddi_dma_alloc_handle()";
		goto fail;
	}

	if (ddi_dma_mem_alloc(lun_dma_handle, REPORT_LUNS_SIZE,
	    sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &lun_buf,
	    &real_size, &lun_acc_handle) != DDI_SUCCESS) {
		msg = "ddi_dma_mem_alloc()";
		goto fail;
	}

	if (real_size < REPORT_LUNS_SIZE) {
		msg = "DMA mem < REPORT_LUNS_SIZE";
		goto fail;
	}

	if (ddi_dma_addr_bind_handle(lun_dma_handle, NULL,
	    lun_buf, real_size, DDI_DMA_READ |
	    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
	    NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) {
		msg = "ddi_dma_addr_bind_handle()";
		goto fail;
	}
	handle_bound = 1;

	/* a single cookie is required for the data segment */
	if (ccount != 1) {
		msg = "ccount != 1";
		goto fail;
	}
	/* this is an FCP command, not an ELS */
	privp->els_code = 0;
	privp->target = target;
	/* publish the LUN buffer so sf_els_free() can release it later */
	privp->data_dma_handle = lun_dma_handle;
	privp->data_acc_handle = lun_acc_handle;
	privp->data_buf = lun_buf;

	/* rework the packet for an FCP read with a third data segment */
	fpkt->fcal_pkt_comp = sf_reportlun_callback;
	fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3;
	fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_IO_READ;
	fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
	    sizeof (struct fcp_cmd);
	fpkt->fcal_socal_request.sr_dataseg[2].fc_base =
	    (uint32_t)pcookie.dmac_address;
	fpkt->fcal_socal_request.sr_dataseg[2].fc_count = pcookie.dmac_size;
	fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = pcookie.dmac_size;
	hp->r_ctl = R_CTL_COMMAND;
	hp->type = TYPE_SCSI_FCP;
	/* build the REPORT LUNS CDB */
	bzero((caddr_t)reportlun, sizeof (struct fcp_cmd));
	((union scsi_cdb *)reportlun->fcp_cdb)->scc_cmd = SCMD_REPORT_LUNS;
	/* Now set the buffer size.  If DDI gave us extra, that's O.K. */
	((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count0 =
	    (real_size&0x0ff);
	((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count1 =
	    (real_size>>8)&0x0ff;
	((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count2 =
	    (real_size>>16)&0x0ff;
	((union scsi_cdb *)reportlun->fcp_cdb)->scc5_count3 =
	    (real_size>>24)&0x0ff;
	reportlun->fcp_cntl.cntl_read_data = 1;
	reportlun->fcp_cntl.cntl_write_data = 0;
	reportlun->fcp_data_len = pcookie.dmac_size;
	reportlun->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;

	(void) ddi_dma_sync(lun_dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);
	/* We know he's there, so this should be fast */
	privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
	if (sf_els_transport(sf, privp) == 1)
		return (1);

fail:
	sf_log(sf, CE_WARN,
	    "%s failure for REPORTLUN to target 0x%x\n",
	    msg, sf_alpa_to_switch[privp->dest_nport_id]);
	/*
	 * NOTE(review): if sf_els_transport() fails after
	 * privp->data_dma_handle was published above, sf_els_free()
	 * appears to release the LUN buffer via privp, after which the
	 * unbind/free below would operate on it again -- confirm
	 * sf_els_transport()'s failure semantics before relying on
	 * this path
	 */
	sf_els_free(fpkt);
	if (lun_dma_handle != NULL) {
		if (handle_bound)
			(void) ddi_dma_unbind_handle(lun_dma_handle);
		ddi_dma_free_handle(&lun_dma_handle);
	}
	if (lun_buf != NULL) {
		ddi_dma_mem_free(&lun_acc_handle);
	}
	return (0);
}
3568 3572
3569 3573 /*
3570 3574 * Handle the results of a REPORT_LUNS command:
3571 3575 * Create additional targets if necessary
3572 3576 * Initiate INQUIRYs on all LUNs.
3573 3577 */
3574 3578 static void
3575 3579 sf_reportlun_callback(struct fcal_packet *fpkt)
3576 3580 {
3577 3581 struct sf_els_hdr *privp = (struct sf_els_hdr *)fpkt->
3578 3582 fcal_pkt_private;
3579 3583 struct scsi_report_luns *ptr =
3580 3584 (struct scsi_report_luns *)privp->data_buf;
3581 3585 struct sf *sf = privp->sf;
3582 3586 struct sf_target *target = privp->target;
3583 3587 struct fcp_rsp *rsp = NULL;
3584 3588 int delayed_retry = 0;
3585 3589 int tid = sf_alpa_to_switch[target->sft_hard_address];
3586 3590 int i, free_pkt = 1;
3587 3591 short ncmds;
3588 3592
3589 3593 mutex_enter(&sf->sf_mutex);
3590 3594 /* use as temporary state variable */
3591 3595 if (privp->timeout == SF_INVALID_TIMEOUT) {
3592 3596 mutex_exit(&sf->sf_mutex);
3593 3597 return;
3594 3598 }
3595 3599 if (privp->prev)
3596 3600 privp->prev->next = privp->next;
3597 3601 if (privp->next)
3598 3602 privp->next->prev = privp->prev;
3599 3603 if (sf->sf_els_list == privp)
3600 3604 sf->sf_els_list = privp->next;
3601 3605 privp->prev = privp->next = NULL;
3602 3606 mutex_exit(&sf->sf_mutex);
3603 3607 ncmds = fpkt->fcal_ncmds;
3604 3608 ASSERT(ncmds >= 0);
3605 3609 mutex_enter(&sf->sf_cmd_mutex);
3606 3610 sf->sf_ncmds = ncmds;
3607 3611 mutex_exit(&sf->sf_cmd_mutex);
3608 3612
3609 3613 if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
3610 3614 (void) ddi_dma_sync(privp->rsp_dma_handle, 0,
3611 3615 0, DDI_DMA_SYNC_FORKERNEL);
3612 3616
3613 3617 rsp = (struct fcp_rsp *)privp->rsp;
3614 3618 }
3615 3619 SF_DEBUG(1, (sf, CE_CONT,
3616 3620 "!REPORTLUN to al_pa %x pkt status %x scsi status %x\n",
3617 3621 privp->dest_nport_id,
3618 3622 fpkt->fcal_pkt_status,
3619 3623 rsp?rsp->fcp_u.fcp_status.scsi_status:0));
3620 3624
3621 3625 /* See if target simply does not support REPORT_LUNS. */
3622 3626 if (rsp && rsp->fcp_u.fcp_status.scsi_status == STATUS_CHECK &&
3623 3627 rsp->fcp_u.fcp_status.sense_len_set &&
3624 3628 rsp->fcp_sense_len >=
3625 3629 offsetof(struct scsi_extended_sense, es_qual_code)) {
3626 3630 struct scsi_extended_sense *sense;
3627 3631 sense = (struct scsi_extended_sense *)
3628 3632 ((caddr_t)rsp + sizeof (struct fcp_rsp)
3629 3633 + rsp->fcp_response_len);
3630 3634 if (sense->es_key == KEY_ILLEGAL_REQUEST) {
3631 3635 if (sense->es_add_code == 0x20) {
3632 3636 /* Fake LUN 0 */
3633 3637 SF_DEBUG(1, (sf, CE_CONT,
3634 3638 "!REPORTLUN Faking good "
3635 3639 "completion for alpa %x\n",
3636 3640 privp->dest_nport_id));
3637 3641 ptr->lun_list_len = FCP_LUN_SIZE;
3638 3642 ptr->lun[0] = 0;
3639 3643 rsp->fcp_u.fcp_status.scsi_status =
3640 3644 STATUS_GOOD;
3641 3645 } else if (sense->es_add_code == 0x25) {
3642 3646 SF_DEBUG(1, (sf, CE_CONT,
3643 3647 "!REPORTLUN device alpa %x "
3644 3648 "key %x code %x\n",
3645 3649 privp->dest_nport_id,
3646 3650 sense->es_key, sense->es_add_code));
3647 3651 goto fail;
3648 3652 }
3649 3653 } else if (sense->es_key ==
3650 3654 KEY_UNIT_ATTENTION &&
3651 3655 sense->es_add_code == 0x29) {
3652 3656 SF_DEBUG(1, (sf, CE_CONT,
3653 3657 "!REPORTLUN device alpa %x was reset\n",
3654 3658 privp->dest_nport_id));
3655 3659 } else {
3656 3660 SF_DEBUG(1, (sf, CE_CONT,
3657 3661 "!REPORTLUN device alpa %x "
3658 3662 "key %x code %x\n",
3659 3663 privp->dest_nport_id,
3660 3664 sense->es_key, sense->es_add_code));
3661 3665 /* XXXXXX The following is here to handle broken targets -- remove it later */
3662 3666 if (sf_reportlun_forever &&
3663 3667 sense->es_key == KEY_UNIT_ATTENTION)
3664 3668 goto retry;
3665 3669 /* XXXXXX */
3666 3670 if (sense->es_key == KEY_NOT_READY)
3667 3671 delayed_retry = 1;
3668 3672 }
3669 3673 }
3670 3674
3671 3675 if (rsp && rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD) {
3672 3676 struct fcp_rsp_info *bep;
3673 3677
3674 3678 bep = (struct fcp_rsp_info *)(&rsp->
3675 3679 fcp_response_len + 1);
3676 3680 if (!rsp->fcp_u.fcp_status.rsp_len_set ||
3677 3681 bep->rsp_code == FCP_NO_FAILURE) {
3678 3682 (void) ddi_dma_sync(privp->data_dma_handle,
3679 3683 0, 0, DDI_DMA_SYNC_FORKERNEL);
3680 3684
3681 3685 /* Convert from #bytes to #ints */
3682 3686 ptr->lun_list_len = ptr->lun_list_len >> 3;
3683 3687 SF_DEBUG(2, (sf, CE_CONT,
3684 3688 "!REPORTLUN to al_pa %x succeeded: %d LUNs\n",
3685 3689 privp->dest_nport_id, ptr->lun_list_len));
3686 3690 if (!ptr->lun_list_len) {
3687 3691 /* No LUNs? Ya gotta be kidding... */
3688 3692 sf_log(sf, CE_WARN,
3689 3693 "SCSI violation -- "
3690 3694 "target 0x%x reports no LUNs\n",
3691 3695 sf_alpa_to_switch[
3692 3696 privp->dest_nport_id]);
3693 3697 ptr->lun_list_len = 1;
3694 3698 ptr->lun[0] = 0;
3695 3699 }
3696 3700
3697 3701 mutex_enter(&sf->sf_mutex);
3698 3702 if (sf->sf_lip_cnt == privp->lip_cnt) {
3699 3703 sf->sf_device_count += ptr->lun_list_len - 1;
3700 3704 }
3701 3705
3702 3706 mutex_exit(&sf->sf_mutex);
3703 3707 for (i = 0; i < ptr->lun_list_len && privp->lip_cnt ==
3704 3708 sf->sf_lip_cnt; i++) {
3705 3709 struct sf_els_hdr *nprivp;
3706 3710 struct fcal_packet *nfpkt;
3707 3711
3708 3712 /* LUN 0 is already in `target' */
3709 3713 if (ptr->lun[i] != 0) {
3710 3714 target = sf_create_target(sf,
3711 3715 privp, tid, ptr->lun[i]);
3712 3716 }
3713 3717 nprivp = NULL;
3714 3718 nfpkt = NULL;
3715 3719 if (target) {
3716 3720 nfpkt = sf_els_alloc(sf,
3717 3721 target->sft_al_pa,
3718 3722 sizeof (struct sf_els_hdr),
3719 3723 sizeof (union sf_els_cmd),
3720 3724 sizeof (union sf_els_rsp),
3721 3725 (caddr_t *)&nprivp,
3722 3726 (caddr_t *)&rsp);
3723 3727 if (nprivp)
3724 3728 nprivp->lip_cnt =
3725 3729 privp->lip_cnt;
3726 3730 }
3727 3731 if (nfpkt && nprivp &&
3728 3732 (sf_do_inquiry(sf, nprivp, target) ==
3729 3733 0)) {
3730 3734 mutex_enter(&sf->sf_mutex);
3731 3735 if (sf->sf_lip_cnt == privp->
3732 3736 lip_cnt) {
3733 3737 sf->sf_device_count --;
3734 3738 }
3735 3739 sf_offline_target(sf, target);
3736 3740 mutex_exit(&sf->sf_mutex);
3737 3741 }
3738 3742 }
3739 3743 sf_els_free(fpkt);
3740 3744 return;
3741 3745 } else {
3742 3746 SF_DEBUG(1, (sf, CE_CONT,
3743 3747 "!REPORTLUN al_pa %x fcp failure, "
3744 3748 "fcp_rsp_code %x scsi status %x\n",
3745 3749 privp->dest_nport_id, bep->rsp_code,
3746 3750 rsp ? rsp->fcp_u.fcp_status.scsi_status:0));
3747 3751 goto fail;
3748 3752 }
3749 3753 }
3750 3754 if (rsp && ((rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) ||
3751 3755 (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL))) {
3752 3756 delayed_retry = 1;
3753 3757 }
3754 3758
3755 3759 if (++(privp->retries) < sf_els_retries ||
3756 3760 (delayed_retry && privp->retries < SF_BSY_RETRIES)) {
3757 3761 /* XXXXXX The following is here to handle broken targets -- remove it later */
3758 3762 retry:
3759 3763 /* XXXXXX */
3760 3764 if (delayed_retry) {
3761 3765 privp->retries--;
3762 3766 privp->timeout = sf_watchdog_time + SF_BSY_TIMEOUT;
3763 3767 privp->delayed_retry = 1;
3764 3768 } else {
3765 3769 privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
3766 3770 }
3767 3771
3768 3772 privp->prev = NULL;
3769 3773 mutex_enter(&sf->sf_mutex);
3770 3774 if (privp->lip_cnt == sf->sf_lip_cnt) {
3771 3775 if (!delayed_retry)
3772 3776 SF_DEBUG(1, (sf, CE_WARN,
3773 3777 "!REPORTLUN to al_pa %x failed, retrying\n",
3774 3778 privp->dest_nport_id));
3775 3779 privp->next = sf->sf_els_list;
3776 3780 if (sf->sf_els_list != NULL)
3777 3781 sf->sf_els_list->prev = privp;
3778 3782 sf->sf_els_list = privp;
3779 3783 mutex_exit(&sf->sf_mutex);
3780 3784 if (!delayed_retry && soc_transport(sf->sf_sochandle,
3781 3785 fpkt, FCAL_NOSLEEP, CQ_REQUEST_1) !=
3782 3786 FCAL_TRANSPORT_SUCCESS) {
3783 3787 mutex_enter(&sf->sf_mutex);
3784 3788 if (privp->prev)
3785 3789 privp->prev->next = privp->next;
3786 3790 if (privp->next)
3787 3791 privp->next->prev = privp->prev;
3788 3792 if (sf->sf_els_list == privp)
3789 3793 sf->sf_els_list = privp->next;
3790 3794 mutex_exit(&sf->sf_mutex);
3791 3795 goto fail;
3792 3796 } else
3793 3797 return;
3794 3798 } else {
3795 3799 mutex_exit(&sf->sf_mutex);
3796 3800 }
3797 3801 } else {
3798 3802 fail:
3799 3803
3800 3804 /* REPORT_LUN failed -- try inquiry */
3801 3805 if (sf_do_inquiry(sf, privp, target) != 0) {
3802 3806 return;
3803 3807 } else {
3804 3808 free_pkt = 0;
3805 3809 }
3806 3810 mutex_enter(&sf->sf_mutex);
3807 3811 if (sf->sf_lip_cnt == privp->lip_cnt) {
3808 3812 sf_log(sf, CE_WARN,
3809 3813 "!REPORTLUN to target 0x%x failed\n",
3810 3814 sf_alpa_to_switch[privp->dest_nport_id]);
3811 3815 sf_offline_target(sf, target);
3812 3816 sf->sf_device_count--;
3813 3817 ASSERT(sf->sf_device_count >= 0);
3814 3818 if (sf->sf_device_count == 0)
3815 3819 sf_finish_init(sf, privp->lip_cnt);
3816 3820 }
3817 3821 mutex_exit(&sf->sf_mutex);
3818 3822 }
3819 3823 if (free_pkt) {
3820 3824 sf_els_free(fpkt);
3821 3825 }
3822 3826 }
3823 3827
/*
 * Send a SCSI INQUIRY command to the given target over the FC-AL
 * transport.
 *
 * Allocates, maps, and binds a DMA buffer to receive the inquiry data,
 * fills in the FCP command and SOC+ request in the caller's ELS-header
 * packet (privp/fpkt), then hands the packet to sf_els_transport().
 * Completion is handled by sf_inq_callback().
 *
 * Returns the result of sf_els_transport() on success, or FALSE on
 * failure (in which case the fcal packet and any DMA resources
 * acquired here have been released).
 */
static int
sf_do_inquiry(struct sf *sf, struct sf_els_hdr *privp,
    struct sf_target *target)
{
	struct fcal_packet *fpkt = privp->fpkt;
	ddi_dma_cookie_t pcookie;
	ddi_dma_handle_t inq_dma_handle = NULL;
	ddi_acc_handle_t inq_acc_handle;
	uint_t ccount;
	size_t real_size;
	caddr_t inq_buf = NULL;
	int handle_bound = FALSE;	/* TRUE once DMA handle is bound */
	fc_frame_header_t *hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
	struct fcp_cmd *inq = (struct fcp_cmd *)privp->cmd;
	char *msg = "Transport";	/* failure-reason text for sf_log */


	/* get a DMA handle for the inquiry-data buffer */
	if (ddi_dma_alloc_handle(sf->sf_dip, sf->sf_sochandle->fcal_dmaattr,
	    DDI_DMA_DONTWAIT, NULL, &inq_dma_handle) != DDI_SUCCESS) {
		msg = "ddi_dma_alloc_handle()";
		goto fail;
	}

	/* allocate DMA-able memory to receive the inquiry data */
	if (ddi_dma_mem_alloc(inq_dma_handle, SUN_INQSIZE,
	    sf->sf_sochandle->fcal_accattr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &inq_buf,
	    &real_size, &inq_acc_handle) != DDI_SUCCESS) {
		msg = "ddi_dma_mem_alloc()";
		goto fail;
	}

	/* the allocation may round up; it must at least hold SUN_INQSIZE */
	if (real_size < SUN_INQSIZE) {
		msg = "DMA mem < inquiry size";
		goto fail;
	}

	/* bind the buffer for device reads; we require a single cookie */
	if (ddi_dma_addr_bind_handle(inq_dma_handle, NULL,
	    inq_buf, real_size, DDI_DMA_READ | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &pcookie, &ccount) != DDI_DMA_MAPPED) {
		msg = "ddi_dma_addr_bind_handle()";
		goto fail;
	}
	handle_bound = TRUE;

	if (ccount != 1) {
		msg = "ccount != 1";
		goto fail;
	}
	privp->els_code = 0;		/* not an ELS command */
	privp->target = target;
	/* record DMA resources so the callback/teardown paths can free them */
	privp->data_dma_handle = inq_dma_handle;
	privp->data_acc_handle = inq_acc_handle;
	privp->data_buf = inq_buf;
	/* fill in the SOC+ request: 3 segments, data inbound */
	fpkt->fcal_pkt_comp = sf_inq_callback;
	fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 3;
	fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type = CQ_TYPE_IO_READ;
	fpkt->fcal_socal_request.sr_dataseg[0].fc_count =
	    sizeof (struct fcp_cmd);
	fpkt->fcal_socal_request.sr_dataseg[2].fc_base =
	    (uint32_t)pcookie.dmac_address;
	fpkt->fcal_socal_request.sr_dataseg[2].fc_count = pcookie.dmac_size;
	fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt = pcookie.dmac_size;
	hp->r_ctl = R_CTL_COMMAND;
	hp->type = TYPE_SCSI_FCP;
	/* build the FCP INQUIRY command itself */
	bzero((caddr_t)inq, sizeof (struct fcp_cmd));
	((union scsi_cdb *)inq->fcp_cdb)->scc_cmd = SCMD_INQUIRY;
	((union scsi_cdb *)inq->fcp_cdb)->g0_count0 = SUN_INQSIZE;
	bcopy((caddr_t)&target->sft_lun.b, (caddr_t)&inq->fcp_ent_addr,
	    FCP_LUN_SIZE);
	inq->fcp_cntl.cntl_read_data = 1;	/* inquiry moves data in */
	inq->fcp_cntl.cntl_write_data = 0;
	inq->fcp_data_len = pcookie.dmac_size;
	inq->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;

	/* sync the data buffer for the device before handing off the pkt */
	(void) ddi_dma_sync(inq_dma_handle, (off_t)0, (size_t)0,
	    DDI_DMA_SYNC_FORDEV);
	privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
	SF_DEBUG(5, (sf, CE_WARN,
	    "!Sending INQUIRY to al_pa %x lun %" PRIx64 "\n",
	    privp->dest_nport_id,
	    SCSA_LUN(target)));
	return (sf_els_transport(sf, privp));

fail:
	/* undo whatever DMA setup succeeded above, free the pkt, give up */
	sf_log(sf, CE_WARN,
	    "%s failure for INQUIRY to target 0x%x\n",
	    msg, sf_alpa_to_switch[privp->dest_nport_id]);
	sf_els_free(fpkt);
	if (inq_dma_handle != NULL) {
		if (handle_bound) {
			(void) ddi_dma_unbind_handle(inq_dma_handle);
		}
		ddi_dma_free_handle(&inq_dma_handle);
	}
	/* inq_acc_handle is valid whenever inq_buf was successfully set */
	if (inq_buf != NULL) {
		ddi_dma_mem_free(&inq_acc_handle);
	}
	return (FALSE);
}
3923 3927
3924 3928
3925 3929 /*
3926 3930 * called as the pkt_comp routine for INQ packets
3927 3931 */
static void
sf_inq_callback(struct fcal_packet *fpkt)
{
	struct sf_els_hdr *privp = (struct sf_els_hdr *)fpkt->
	    fcal_pkt_private;
	struct scsi_inquiry *prt = (struct scsi_inquiry *)privp->data_buf;
	struct sf *sf = privp->sf;
	struct sf *tsf;			/* sibling port, for throttling */
	struct sf_target *target = privp->target;
	struct fcp_rsp *rsp;
	int delayed_retry = FALSE;
	short ncmds;


	mutex_enter(&sf->sf_mutex);
	/* use as temporary state variable */
	if (privp->timeout == SF_INVALID_TIMEOUT) {
		/* packet already being torn down elsewhere -- bail */
		mutex_exit(&sf->sf_mutex);
		return;
	}
	/* unlink this packet from the outstanding-ELS list */
	if (privp->prev != NULL) {
		privp->prev->next = privp->next;
	}
	if (privp->next != NULL) {
		privp->next->prev = privp->prev;
	}
	if (sf->sf_els_list == privp) {
		sf->sf_els_list = privp->next;
	}
	privp->prev = privp->next = NULL;
	mutex_exit(&sf->sf_mutex);
	/* refresh our outstanding-command count from the transport */
	ncmds = fpkt->fcal_ncmds;
	ASSERT(ncmds >= 0);
	mutex_enter(&sf->sf_cmd_mutex);
	sf->sf_ncmds = ncmds;
	mutex_exit(&sf->sf_cmd_mutex);

	if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {

		(void) ddi_dma_sync(privp->rsp_dma_handle, (off_t)0,
		    (size_t)0, DDI_DMA_SYNC_FORKERNEL);

		rsp = (struct fcp_rsp *)privp->rsp;
		SF_DEBUG(2, (sf, CE_CONT,
		    "!INQUIRY to al_pa %x scsi status %x",
		    privp->dest_nport_id, rsp->fcp_u.fcp_status.scsi_status));

		/*
		 * accept the inquiry data only if status is GOOD, there was
		 * no residual overrun, and any underrun still left at least
		 * SUN_MIN_INQLEN bytes of data
		 */
		if ((rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD) &&
		    !rsp->fcp_u.fcp_status.resid_over &&
		    (!rsp->fcp_u.fcp_status.resid_under ||
		    ((SUN_INQSIZE - rsp->fcp_resid) >= SUN_MIN_INQLEN))) {
			struct fcp_rsp_info *bep;

			/* FCP response info immediately follows the rsp */
			bep = (struct fcp_rsp_info *)(&rsp->
			    fcp_response_len + 1);

			if (!rsp->fcp_u.fcp_status.rsp_len_set ||
			    (bep->rsp_code == FCP_NO_FAILURE)) {

				SF_DEBUG(2, (sf, CE_CONT,
				    "!INQUIRY to al_pa %x lun %" PRIx64
				    " succeeded\n",
				    privp->dest_nport_id, SCSA_LUN(target)));

				(void) ddi_dma_sync(privp->data_dma_handle,
				    (off_t)0, (size_t)0,
				    DDI_DMA_SYNC_FORKERNEL);

				mutex_enter(&sf->sf_mutex);

				/* ignore stale completions from older LIPs */
				if (sf->sf_lip_cnt == privp->lip_cnt) {
					/* save inquiry data in the target */
					mutex_enter(&target->sft_mutex);
					target->sft_device_type =
					    prt->inq_dtype;
					bcopy(prt, &target->sft_inq,
					    sizeof (*prt));
					mutex_exit(&target->sft_mutex);
					sf->sf_device_count--;
					ASSERT(sf->sf_device_count >= 0);
					if (sf->sf_device_count == 0) {
						/* last device discovered */
						sf_finish_init(sf,
						    privp->lip_cnt);
					}
				}
				mutex_exit(&sf->sf_mutex);
				sf_els_free(fpkt);
				return;
			}
		} else if ((rsp->fcp_u.fcp_status.scsi_status ==
		    STATUS_BUSY) ||
		    (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL) ||
		    (rsp->fcp_u.fcp_status.scsi_status == STATUS_CHECK)) {
			/* target is busy/checking -- retry after a delay */
			delayed_retry = TRUE;
		}
	} else {
		SF_DEBUG(2, (sf, CE_CONT, "!INQUIRY to al_pa %x fc status %x",
		    privp->dest_nport_id, fpkt->fcal_pkt_status));
	}

	if (++(privp->retries) < sf_els_retries ||
	    (delayed_retry && privp->retries < SF_BSY_RETRIES)) {
		/* retry budget remains -- requeue the packet */
		if (fpkt->fcal_pkt_status == FCAL_STATUS_MAX_XCHG_EXCEEDED) {
			/* exchanges exhausted: throttle the sibling port */
			tsf = sf->sf_sibling;
			if (tsf != NULL) {
				mutex_enter(&tsf->sf_cmd_mutex);
				tsf->sf_flag = 1;
				tsf->sf_throttle = SF_DECR_DELTA;
				mutex_exit(&tsf->sf_cmd_mutex);
			}
			delayed_retry = 1;
		}
		if (delayed_retry) {
			/* delayed retries don't count against the budget */
			privp->retries--;
			privp->timeout = sf_watchdog_time + SF_BSY_TIMEOUT;
			privp->delayed_retry = TRUE;
		} else {
			privp->timeout = sf_watchdog_time + SF_FCP_TIMEOUT;
		}

		privp->prev = NULL;
		mutex_enter(&sf->sf_mutex);
		if (privp->lip_cnt == sf->sf_lip_cnt) {
			if (!delayed_retry) {
				SF_DEBUG(1, (sf, CE_WARN,
				    "INQUIRY to al_pa %x failed, retrying",
				    privp->dest_nport_id));
			}
			/* put the packet back at the head of the ELS list */
			privp->next = sf->sf_els_list;
			if (sf->sf_els_list != NULL) {
				sf->sf_els_list->prev = privp;
			}
			sf->sf_els_list = privp;
			mutex_exit(&sf->sf_mutex);
			/* if not delayed call transport to send a pkt */
			if (!delayed_retry &&
			    (soc_transport(sf->sf_sochandle, fpkt,
			    FCAL_NOSLEEP, CQ_REQUEST_1) !=
			    FCAL_TRANSPORT_SUCCESS)) {
				/* transport refused it: unlink and fail */
				mutex_enter(&sf->sf_mutex);
				if (privp->prev != NULL) {
					privp->prev->next = privp->next;
				}
				if (privp->next != NULL) {
					privp->next->prev = privp->prev;
				}
				if (sf->sf_els_list == privp) {
					sf->sf_els_list = privp->next;
				}
				mutex_exit(&sf->sf_mutex);
				goto fail;
			}
			return;
		}
		mutex_exit(&sf->sf_mutex);
	} else {
fail:
		/* out of retries: take the target offline */
		mutex_enter(&sf->sf_mutex);
		if (sf->sf_lip_cnt == privp->lip_cnt) {
			sf_offline_target(sf, target);
			sf_log(sf, CE_NOTE,
			    "INQUIRY to target 0x%x lun %" PRIx64 " failed. "
			    "Retry Count: %d\n",
			    sf_alpa_to_switch[privp->dest_nport_id],
			    SCSA_LUN(target),
			    privp->retries);
			sf->sf_device_count--;
			ASSERT(sf->sf_device_count >= 0);
			if (sf->sf_device_count == 0) {
				sf_finish_init(sf, privp->lip_cnt);
			}
		}
		mutex_exit(&sf->sf_mutex);
	}
	sf_els_free(fpkt);
}
4103 4107
4104 4108
/*
 * Finish per-LIP initialization once the device count reaches zero:
 * walk all target hash queues and, for each target that is neither
 * offline nor marked for offlining, create/online its devinfo node
 * (queueing the online to the hotplug daemon where possible).
 *
 * Called, and returns, with sf->sf_mutex held; the mutex is dropped
 * and re-acquired around the DDI calls inside the loop.  If a new LIP
 * occurs during the walk (sf_lip_cnt changes), the walk is abandoned.
 */
static void
sf_finish_init(struct sf *sf, int lip_cnt)
{
	int i;			/* loop index */
	int cflag;
	struct sf_target *target;	/* current target */
	dev_info_t *dip;
	struct sf_hp_elem *elem;	/* hotplug element created */

	SF_DEBUG(1, (sf, CE_WARN, "!sf_finish_init\n"));
	ASSERT(mutex_owned(&sf->sf_mutex));

	/* scan all hash queues */
	for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
		target = sf->sf_wwn_lists[i];
		while (target != NULL) {
			mutex_enter(&target->sft_mutex);

			/* see if target is not offline */
			if ((target->sft_state & SF_TARGET_OFFLINE)) {
				/*
				 * target already offline
				 */
				mutex_exit(&target->sft_mutex);
				goto next_entry;
			}

			/*
			 * target is not already offline -- see if it has
			 * already been marked as ready to go offline
			 */
			if (target->sft_state & SF_TARGET_MARK) {
				/*
				 * target already marked, so take it offline
				 */
				mutex_exit(&target->sft_mutex);
				sf_offline_target(sf, target);
				goto next_entry;
			}

			/* clear target busy flag */
			target->sft_state &= ~SF_TARGET_BUSY;

			/* is target init not yet done ?? */
			cflag = !(target->sft_state & SF_TARGET_INIT_DONE);

			/* get pointer to target dip */
			dip = target->sft_dip;

			/* drop both locks before calling into the DDI */
			mutex_exit(&target->sft_mutex);
			mutex_exit(&sf->sf_mutex);

			if (cflag && (dip == NULL)) {
				/*
				 * target init not yet done &&
				 * devinfo not yet created
				 */
				sf_create_devinfo(sf, target, lip_cnt);
				mutex_enter(&sf->sf_mutex);
				goto next_entry;
			}

			/*
			 * target init already done || devinfo already created
			 */
			ASSERT(dip != NULL);
			if (!sf_create_props(dip, target, lip_cnt)) {
				/* a problem creating properties */
				mutex_enter(&sf->sf_mutex);
				goto next_entry;
			}

			/* create a new element for the hotplug list */
			if ((elem = kmem_zalloc(sizeof (struct sf_hp_elem),
			    KM_NOSLEEP)) != NULL) {

				/* fill in the new element */
				elem->dip = dip;
				elem->target = target;
				elem->what = SF_ONLINE;

				/* add the new element into the hotplug list */
				mutex_enter(&sf->sf_hp_daemon_mutex);
				if (sf->sf_hp_elem_tail != NULL) {
					sf->sf_hp_elem_tail->next = elem;
					sf->sf_hp_elem_tail = elem;
				} else {
					/* this is the first element in list */
					sf->sf_hp_elem_head =
					    sf->sf_hp_elem_tail =
					    elem;
				}
				cv_signal(&sf->sf_hp_daemon_cv);
				mutex_exit(&sf->sf_hp_daemon_mutex);
			} else {
				/* could not allocate memory for element ?? */
				/* fall back to onlining the node directly */
				(void) ndi_devi_online_async(dip, 0);
			}

			mutex_enter(&sf->sf_mutex);

next_entry:
			/* ensure no new LIPs have occurred */
			if (sf->sf_lip_cnt != lip_cnt) {
				/* stale walk; return with sf_mutex held */
				return;
			}
			target = target->sft_next;
		}

		/* done scanning all targets in this queue */
	}

	/* done with all hash queues */

	sf->sf_state = SF_STATE_ONLINE;
	sf->sf_online_timer = 0;
}
4222 4226
4223 4227
4224 4228 /*
4225 4229 * create devinfo node
4226 4230 */
/*
 * Create and online a devinfo node for the given target, using the
 * target's cached inquiry data to pick a node name and compatible
 * list.  On any failure the node (if created) is stripped of its
 * properties and freed again.
 */
static void
sf_create_devinfo(struct sf *sf, struct sf_target *target, int lip_cnt)
{
	dev_info_t *cdip = NULL;
	char *nname = NULL;
	char **compatible = NULL;
	int ncompatible;
	struct scsi_inquiry *inq = &target->sft_inq;
	char *scsi_binding_set;

	/* get the 'scsi-binding-set' property */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, sf->sf_dip,
	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
	    &scsi_binding_set) != DDI_PROP_SUCCESS)
		scsi_binding_set = NULL;

	/* determine the node name and compatible */
	scsi_hba_nodename_compatible_get(inq, scsi_binding_set,
	    inq->inq_dtype, NULL, &nname, &compatible, &ncompatible);
	if (scsi_binding_set)
		ddi_prop_free(scsi_binding_set);

	/* if nodename can't be determined then print a message and skip it */
	if (nname == NULL) {
#ifndef RAID_LUNS
		sf_log(sf, CE_WARN, "%s%d: no driver for device "
		    "@w%02x%02x%02x%02x%02x%02x%02x%02x,%x\n"
		    "    compatible: %s",
		    ddi_driver_name(sf->sf_dip), ddi_get_instance(sf->sf_dip),
		    target->sft_port_wwn[0], target->sft_port_wwn[1],
		    target->sft_port_wwn[2], target->sft_port_wwn[3],
		    target->sft_port_wwn[4], target->sft_port_wwn[5],
		    target->sft_port_wwn[6], target->sft_port_wwn[7],
		    target->sft_lun.l, *compatible);
#else
		sf_log(sf, CE_WARN, "%s%d: no driver for device "
		    "@w%02x%02x%02x%02x%02x%02x%02x%02x,%x\n"
		    "    compatible: %s",
		    ddi_driver_name(sf->sf_dip), ddi_get_instance(sf->sf_dip),
		    target->sft_port_wwn[0], target->sft_port_wwn[1],
		    target->sft_port_wwn[2], target->sft_port_wwn[3],
		    target->sft_port_wwn[4], target->sft_port_wwn[5],
		    target->sft_port_wwn[6], target->sft_port_wwn[7],
		    target->sft_raid_lun, *compatible);
#endif
		goto fail;
	}

	/* allocate the node */
	if (ndi_devi_alloc(sf->sf_dip, nname,
	    DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
		goto fail;
	}

	/* decorate the node with compatible */
	if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
	    "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
		goto fail;
	}

	/* add addressing properties to the node */
	if (sf_create_props(cdip, target, lip_cnt) != 1) {
		goto fail;
	}

	/* attach the node to the target, unless someone beat us to it */
	mutex_enter(&target->sft_mutex);
	if (target->sft_dip != NULL) {
		mutex_exit(&target->sft_mutex);
		goto fail;
	}
	target->sft_dip = cdip;
	mutex_exit(&target->sft_mutex);

	if (ndi_devi_online_async(cdip, 0) != DDI_SUCCESS) {
		goto fail;
	}

	scsi_hba_nodename_compatible_free(nname, compatible);
	return;

fail:
	/* tear down: remove our properties, then free the node */
	scsi_hba_nodename_compatible_free(nname, compatible);
	if (cdip != NULL) {
		(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP);
		(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP);
		(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, LIP_CNT_PROP);
		(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, TARGET_PROP);
		(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, LUN_PROP);
		if (ndi_devi_free(cdip) != NDI_SUCCESS) {
			sf_log(sf, CE_WARN, "ndi_devi_free failed\n");
		} else {
			/* detach the now-freed node from the target */
			mutex_enter(&target->sft_mutex);
			if (cdip == target->sft_dip) {
				target->sft_dip = NULL;
			}
			mutex_exit(&target->sft_mutex);
		}
	}
}
4326 4330
4327 4331 /*
4328 4332 * create required properties, returning TRUE iff we succeed, else
4329 4333 * returning FALSE
4330 4334 */
4331 4335 static int
4332 4336 sf_create_props(dev_info_t *cdip, struct sf_target *target, int lip_cnt)
4333 4337 {
4334 4338 int tgt_id = sf_alpa_to_switch[target->sft_al_pa];
4335 4339
4336 4340
4337 4341 if (ndi_prop_update_byte_array(DDI_DEV_T_NONE,
4338 4342 cdip, NODE_WWN_PROP, target->sft_node_wwn, FC_WWN_SIZE) !=
4339 4343 DDI_PROP_SUCCESS) {
4340 4344 return (FALSE);
4341 4345 }
4342 4346
4343 4347 if (ndi_prop_update_byte_array(DDI_DEV_T_NONE,
4344 4348 cdip, PORT_WWN_PROP, target->sft_port_wwn, FC_WWN_SIZE) !=
4345 4349 DDI_PROP_SUCCESS) {
4346 4350 return (FALSE);
4347 4351 }
4348 4352
4349 4353 if (ndi_prop_update_int(DDI_DEV_T_NONE,
4350 4354 cdip, LIP_CNT_PROP, lip_cnt) != DDI_PROP_SUCCESS) {
4351 4355 return (FALSE);
4352 4356 }
4353 4357
4354 4358 if (ndi_prop_update_int(DDI_DEV_T_NONE,
4355 4359 cdip, TARGET_PROP, tgt_id) != DDI_PROP_SUCCESS) {
4356 4360 return (FALSE);
4357 4361 }
4358 4362
4359 4363 #ifndef RAID_LUNS
4360 4364 if (ndi_prop_update_int(DDI_DEV_T_NONE,
4361 4365 cdip, LUN_PROP, target->sft_lun.l) != DDI_PROP_SUCCESS) {
4362 4366 return (0);
4363 4367 }
4364 4368 #else
4365 4369 if (ndi_prop_update_int(DDI_DEV_T_NONE,
4366 4370 cdip, LUN_PROP, target->sft_raid_lun) != DDI_PROP_SUCCESS) {
4367 4371 return (0);
4368 4372 }
4369 4373 #endif
4370 4374
4371 4375 return (TRUE);
4372 4376 }
4373 4377
4374 4378
4375 4379 /*
4376 4380 * called by the transport to offline a target
4377 4381 */
4378 4382 /* ARGSUSED */
static void
sf_offline_target(struct sf *sf, struct sf_target *target)
{
	dev_info_t *dip;
	struct sf_target *next_target = NULL;
	struct sf_hp_elem *elem;

	/* caller holds sf_mutex; it is dropped/reacquired per target below */
	ASSERT(mutex_owned(&sf->sf_mutex));

	/* optional debug: force a core dump on the first offline */
	if (sf_core && (sf_core & SF_CORE_OFFLINE_TARGET)) {
		(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
		sf_core = 0;
	}

	/* walk this target and, for LUN 0, its sibling LUNs */
	while (target != NULL) {
		sf_log(sf, CE_NOTE,
		    "!target 0x%x al_pa 0x%x lun %" PRIx64 " offlined\n",
		    sf_alpa_to_switch[target->sft_al_pa],
		    target->sft_al_pa, SCSA_LUN(target));
		/* mark the target offline and clear busy/mark flags */
		mutex_enter(&target->sft_mutex);
		target->sft_state &= ~(SF_TARGET_BUSY|SF_TARGET_MARK);
		target->sft_state |= SF_TARGET_OFFLINE;
		mutex_exit(&target->sft_mutex);
		mutex_exit(&sf->sf_mutex);

		/* XXXX if this is LUN 0, offline all other LUNs */
		if (next_target || target->sft_lun.l == 0)
			next_target = target->sft_next_lun;

		/* abort all cmds for this target */
		sf_abort_all(sf, target, FALSE, sf->sf_lip_cnt, FALSE);

		mutex_enter(&sf->sf_mutex);
		mutex_enter(&target->sft_mutex);
		if (target->sft_state & SF_TARGET_INIT_DONE) {
			/* fully-initialized target: tear down its dip */
			dip = target->sft_dip;
			mutex_exit(&target->sft_mutex);
			mutex_exit(&sf->sf_mutex);
			(void) ndi_prop_remove(DDI_DEV_T_NONE, dip,
			    TARGET_PROP);
			/* notify interested parties of the removal */
			(void) ndi_event_retrieve_cookie(sf->sf_event_hdl,
			    dip, FCAL_REMOVE_EVENT, &sf_remove_eid,
			    NDI_EVENT_NOPASS);
			(void) ndi_event_run_callbacks(sf->sf_event_hdl,
			    target->sft_dip, sf_remove_eid, NULL);

			/* hand the offline work to the hotplug daemon */
			elem = kmem_zalloc(sizeof (struct sf_hp_elem),
			    KM_NOSLEEP);
			if (elem != NULL) {
				elem->dip = dip;
				elem->target = target;
				elem->what = SF_OFFLINE;
				mutex_enter(&sf->sf_hp_daemon_mutex);
				if (sf->sf_hp_elem_tail != NULL) {
					sf->sf_hp_elem_tail->next = elem;
					sf->sf_hp_elem_tail = elem;
				} else {
					/* first element in the list */
					sf->sf_hp_elem_head =
					    sf->sf_hp_elem_tail =
					    elem;
				}
				cv_signal(&sf->sf_hp_daemon_cv);
				mutex_exit(&sf->sf_hp_daemon_mutex);
			} else {
				/* no memory: offline the node directly */
				/* don't do NDI_DEVI_REMOVE for now */
				if (ndi_devi_offline(dip, 0) != NDI_SUCCESS) {
					SF_DEBUG(1, (sf, CE_WARN,
					    "target %x lun %" PRIx64 ", "
					    "device offline failed",
					    sf_alpa_to_switch[target->
					    sft_al_pa],
					    SCSA_LUN(target)));
				} else {
					SF_DEBUG(1, (sf, CE_NOTE,
					    "target %x, lun %" PRIx64 ", "
					    "device offline succeeded\n",
					    sf_alpa_to_switch[target->
					    sft_al_pa],
					    SCSA_LUN(target)));
				}
			}
			mutex_enter(&sf->sf_mutex);
		} else {
			mutex_exit(&target->sft_mutex);
		}
		target = next_target;
	}
}
4467 4471
4468 4472
4469 4473 /*
4470 4474 * routine to get/set a capability
4471 4475 *
4472 4476 * returning:
4473 4477 * 1 (TRUE) boolean capability is true (on get)
4474 4478 * 0 (FALSE) invalid capability, can't set capability (on set),
4475 4479 * or boolean capability is false (on get)
4476 4480 * -1 (UNDEFINED) can't find capability (SCSA) or unsupported capability
4477 4481 * 3 when getting SCSI version number
4478 4482 * AL_PA when getting port initiator ID
4479 4483 */
static int
sf_commoncap(struct scsi_address *ap, char *cap,
    int val, int tgtonly, int doset)
{
	struct sf *sf = ADDR2SF(ap);
	int cidx;
	int rval = FALSE;


	if (cap == NULL) {
		SF_DEBUG(3, (sf, CE_WARN, "sf_commoncap: invalid arg"));
		return (rval);
	}

	/* get index of capability string */
	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
		/* can't find capability */
		return (UNDEFINED);
	}

	if (doset) {
		/*
		 * Process setcap request.
		 */

		/*
		 * At present, we can only set binary (0/1) values
		 */
		switch (cidx) {
		case SCSI_CAP_ARQ:	/* can't set this capability */
			break;
		default:
			SF_DEBUG(3, (sf, CE_WARN,
			    "sf_setcap: unsupported %d", cidx));
			rval = UNDEFINED;
			break;
		}

		SF_DEBUG(4, (sf, CE_NOTE,
		    "set cap: cap=%s,val=0x%x,tgtonly=0x%x"
		    ",doset=0x%x,rval=%d\n",
		    cap, val, tgtonly, doset, rval));

	} else {
		/*
		 * Process getcap request.
		 */
		switch (cidx) {
		case SCSI_CAP_DMA_MAX:
			break;		/* don't have this capability */
		case SCSI_CAP_INITIATOR_ID:
			/* our initiator ID is our AL_PA */
			rval = sf->sf_al_pa;
			break;
		case SCSI_CAP_ARQ:
			rval = TRUE;	/* do have this capability */
			break;
		case SCSI_CAP_RESET_NOTIFICATION:
		case SCSI_CAP_TAGGED_QING:
			rval = TRUE;	/* do have this capability */
			break;
		case SCSI_CAP_SCSI_VERSION:
			rval = 3;
			break;
		case SCSI_CAP_INTERCONNECT_TYPE:
			rval = INTERCONNECT_FIBRE;
			break;
		default:
			SF_DEBUG(4, (sf, CE_WARN,
			    "sf_scsi_getcap: unsupported"));
			rval = UNDEFINED;
			break;
		}
		SF_DEBUG(4, (sf, CE_NOTE,
		    "get cap: cap=%s,val=0x%x,tgtonly=0x%x,"
		    "doset=0x%x,rval=%d\n",
		    cap, val, tgtonly, doset, rval));
	}

	return (rval);
}
4560 4564
4561 4565
4562 4566 /*
4563 4567 * called by the transport to get a capability
4564 4568 */
4565 4569 static int
4566 4570 sf_getcap(struct scsi_address *ap, char *cap, int whom)
4567 4571 {
4568 4572 return (sf_commoncap(ap, cap, 0, whom, FALSE));
4569 4573 }
4570 4574
4571 4575
4572 4576 /*
4573 4577 * called by the transport to set a capability
4574 4578 */
4575 4579 static int
4576 4580 sf_setcap(struct scsi_address *ap, char *cap, int value, int whom)
4577 4581 {
4578 4582 return (sf_commoncap(ap, cap, value, whom, TRUE));
4579 4583 }
4580 4584
4581 4585
4582 4586 /*
4583 4587 * called by the transport to abort a target
4584 4588 */
4585 4589 static int
4586 4590 sf_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
4587 4591 {
4588 4592 struct sf *sf = ADDR2SF(ap);
4589 4593 struct sf_target *target = ADDR2TARGET(ap);
4590 4594 struct sf_pkt *cmd, *ncmd, *pcmd;
4591 4595 struct fcal_packet *fpkt;
4592 4596 int rval = 0, t, my_rval = FALSE;
4593 4597 int old_target_state;
4594 4598 int lip_cnt;
4595 4599 int tgt_id;
4596 4600 fc_frame_header_t *hp;
4597 4601 int deferred_destroy;
4598 4602
4599 4603 deferred_destroy = 0;
4600 4604
4601 4605 if (pkt != NULL) {
4602 4606 cmd = PKT2CMD(pkt);
4603 4607 fpkt = cmd->cmd_fp_pkt;
4604 4608 SF_DEBUG(2, (sf, CE_NOTE, "sf_abort packet %p\n",
4605 4609 (void *)fpkt));
4606 4610 pcmd = NULL;
4607 4611 mutex_enter(&sf->sf_cmd_mutex);
4608 4612 ncmd = sf->sf_pkt_head;
4609 4613 while (ncmd != NULL) {
4610 4614 if (ncmd == cmd) {
4611 4615 if (pcmd != NULL) {
4612 4616 pcmd->cmd_next = cmd->cmd_next;
4613 4617 } else {
4614 4618 sf->sf_pkt_head = cmd->cmd_next;
4615 4619 }
4616 4620 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
4617 4621 cmd->cmd_state = SF_STATE_IDLE;
4618 4622 pkt->pkt_reason = CMD_ABORTED;
4619 4623 pkt->pkt_statistics |= STAT_ABORTED;
4620 4624 my_rval = TRUE;
4621 4625 break;
4622 4626 } else {
4623 4627 pcmd = ncmd;
4624 4628 ncmd = ncmd->cmd_next;
4625 4629 }
4626 4630 }
4627 4631 mutex_exit(&sf->sf_cmd_mutex);
4628 4632 if (ncmd == NULL) {
4629 4633 mutex_enter(&cmd->cmd_abort_mutex);
4630 4634 if (cmd->cmd_state == SF_STATE_ISSUED) {
4631 4635 cmd->cmd_state = SF_STATE_ABORTING;
4632 4636 cmd->cmd_timeout = sf_watchdog_time + 20;
4633 4637 mutex_exit(&cmd->cmd_abort_mutex);
4634 4638 /* call transport to abort command */
4635 4639 if (((rval = soc_abort(sf->sf_sochandle,
4636 4640 sf->sf_socp, sf->sf_sochandle->fcal_portno,
4637 4641 fpkt, 1)) == FCAL_ABORTED) ||
4638 4642 (rval == FCAL_ABORT_FAILED)) {
4639 4643 my_rval = TRUE;
4640 4644 pkt->pkt_reason = CMD_ABORTED;
4641 4645 pkt->pkt_statistics |= STAT_ABORTED;
4642 4646 cmd->cmd_state = SF_STATE_IDLE;
4643 4647 } else if (rval == FCAL_BAD_ABORT) {
4644 4648 cmd->cmd_timeout = sf_watchdog_time
4645 4649 + 20;
4646 4650 my_rval = FALSE;
4647 4651 } else {
4648 4652 SF_DEBUG(1, (sf, CE_NOTE,
4649 4653 "Command Abort failed\n"));
4650 4654 }
4651 4655 } else {
4652 4656 mutex_exit(&cmd->cmd_abort_mutex);
4653 4657 }
4654 4658 }
4655 4659 } else {
4656 4660 SF_DEBUG(2, (sf, CE_NOTE, "sf_abort target\n"));
4657 4661 mutex_enter(&sf->sf_mutex);
4658 4662 lip_cnt = sf->sf_lip_cnt;
4659 4663 mutex_enter(&target->sft_mutex);
4660 4664 if (target->sft_state & (SF_TARGET_BUSY |
4661 4665 SF_TARGET_OFFLINE)) {
4662 4666 mutex_exit(&target->sft_mutex);
4663 4667 return (rval);
4664 4668 }
4665 4669 old_target_state = target->sft_state;
4666 4670 target->sft_state |= SF_TARGET_BUSY;
4667 4671 mutex_exit(&target->sft_mutex);
4668 4672 mutex_exit(&sf->sf_mutex);
4669 4673
4670 4674 if ((pkt = sf_scsi_init_pkt(ap, NULL, NULL, 0,
4671 4675 0, 0, 0, NULL, 0)) != NULL) {
4672 4676
4673 4677 cmd = PKT2CMD(pkt);
4674 4678 cmd->cmd_block->fcp_cntl.cntl_abort_tsk = 1;
4675 4679 cmd->cmd_fp_pkt->fcal_pkt_comp = NULL;
4676 4680 cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
4677 4681
4678 4682 /* prepare the packet for transport */
4679 4683 if (sf_prepare_pkt(sf, cmd, target) == TRAN_ACCEPT) {
4680 4684
4681 4685 cmd->cmd_state = SF_STATE_ISSUED;
4682 4686 /*
4683 4687 * call transport to send a pkt polled
4684 4688 *
4685 4689 * if that fails call the transport to abort it
4686 4690 */
4687 4691 if (soc_transport_poll(sf->sf_sochandle,
4688 4692 cmd->cmd_fp_pkt, SF_ABORT_TIMEOUT,
4689 4693 CQ_REQUEST_1) == FCAL_TRANSPORT_SUCCESS) {
4690 4694 (void) ddi_dma_sync(
4691 4695 cmd->cmd_cr_pool->rsp_dma_handle,
4692 4696 (off_t)
4693 4697 ((caddr_t)cmd->cmd_rsp_block -
4694 4698 cmd->cmd_cr_pool->rsp_base),
4695 4699 FCP_MAX_RSP_IU_SIZE,
4696 4700 DDI_DMA_SYNC_FORKERNEL);
4697 4701 if (((struct fcp_rsp_info *)
4698 4702 (&cmd->cmd_rsp_block->
4699 4703 fcp_response_len + 1))->
4700 4704 rsp_code == FCP_NO_FAILURE) {
4701 4705 /* abort cmds for this targ */
4702 4706 sf_abort_all(sf, target, TRUE,
4703 4707 lip_cnt, TRUE);
4704 4708 } else {
4705 4709 hp = &cmd->cmd_fp_pkt->
4706 4710 fcal_socal_request.
4707 4711 sr_fc_frame_hdr;
4708 4712 tgt_id = sf_alpa_to_switch[
4709 4713 (uchar_t)hp->d_id];
4710 4714 sf->sf_stats.tstats[tgt_id].
4711 4715 task_mgmt_failures++;
4712 4716 SF_DEBUG(1, (sf, CE_NOTE,
4713 4717 "Target %d Abort Task "
4714 4718 "Set failed\n", hp->d_id));
4715 4719 }
4716 4720 } else {
4717 4721 mutex_enter(&cmd->cmd_abort_mutex);
4718 4722 if (cmd->cmd_state == SF_STATE_ISSUED) {
4719 4723 cmd->cmd_state = SF_STATE_ABORTING;
4720 4724 cmd->cmd_timeout = sf_watchdog_time
4721 4725 + 20;
4722 4726 mutex_exit(&cmd->cmd_abort_mutex);
4723 4727 if ((t = soc_abort(sf->sf_sochandle,
4724 4728 sf->sf_socp, sf->sf_sochandle->
4725 4729 fcal_portno, cmd->cmd_fp_pkt, 1)) !=
4726 4730 FCAL_ABORTED &&
4727 4731 (t != FCAL_ABORT_FAILED)) {
4728 4732 sf_log(sf, CE_NOTE,
4729 4733 "sf_abort failed, "
4730 4734 "initiating LIP\n");
4731 4735 sf_force_lip(sf);
4732 4736 deferred_destroy = 1;
4733 4737 }
4734 4738 } else {
4735 4739 mutex_exit(&cmd->cmd_abort_mutex);
4736 4740 }
4737 4741 }
4738 4742 }
4739 4743 if (!deferred_destroy) {
4740 4744 cmd->cmd_fp_pkt->fcal_pkt_comp =
4741 4745 sf_cmd_callback;
4742 4746 cmd->cmd_block->fcp_cntl.cntl_abort_tsk = 0;
4743 4747 sf_scsi_destroy_pkt(ap, pkt);
4744 4748 my_rval = TRUE;
4745 4749 }
4746 4750 }
4747 4751 mutex_enter(&sf->sf_mutex);
4748 4752 if (lip_cnt == sf->sf_lip_cnt) {
4749 4753 mutex_enter(&target->sft_mutex);
4750 4754 target->sft_state = old_target_state;
4751 4755 mutex_exit(&target->sft_mutex);
4752 4756 }
4753 4757 mutex_exit(&sf->sf_mutex);
4754 4758 }
4755 4759 return (my_rval);
4756 4760 }
4757 4761
4758 4762
/*
 * called by the transport and internally to reset a target
 * (tran_reset(9E))
 *
 * For level == RESET_TARGET: sends a polled FCP "target reset" command
 * to LUN 0 of the target, marking all of the target's LUNs BUSY for the
 * duration.  On success the target is placed on sf_reset_list so that
 * sf_check_reset_delay() clears the BUSY state later.  For any other
 * level: forces a LIP on the loop instead.
 *
 * Returns TRUE on success, FALSE otherwise.
 */
static int
sf_reset(struct scsi_address *ap, int level)
{
	struct scsi_pkt *pkt;
	struct fcal_packet *fpkt;
	struct sf *sf = ADDR2SF(ap);
	struct sf_target *target = ADDR2TARGET(ap), *ntarget;
	struct sf_pkt *cmd;
	int rval = FALSE, t;
	int lip_cnt;
	int tgt_id, ret;
	fc_frame_header_t *hp;
	int deferred_destroy;	/* nonzero: sf_watch must release the pkt */

	/* We don't support RESET_LUN yet. */
	if (level == RESET_TARGET) {
		struct sf_reset_list *p;

		/* allocate the reset-list entry up front so we can't fail */
		if ((p = kmem_alloc(sizeof (struct sf_reset_list), KM_NOSLEEP))
		    == NULL)
			return (rval);

		SF_DEBUG(2, (sf, CE_NOTE, "sf_reset target\n"));
		mutex_enter(&sf->sf_mutex);
		/* All target resets go to LUN 0 */
		if (target->sft_lun.l) {
			/*
			 * NOTE(review): sf_lookup_target() looks like it
			 * could return NULL if LUN 0 is not present, which
			 * would panic on the mutex_enter below -- confirm
			 * against sf_lookup_target()'s contract.
			 */
			target = sf_lookup_target(sf, target->sft_port_wwn, 0);
		}
		mutex_enter(&target->sft_mutex);
		if (target->sft_state & (SF_TARGET_BUSY |
		    SF_TARGET_OFFLINE)) {
			/* target busy/offline: bail, freeing our entry */
			mutex_exit(&target->sft_mutex);
			mutex_exit(&sf->sf_mutex);
			kmem_free(p, sizeof (struct sf_reset_list));
			return (rval);
		}
		lip_cnt = sf->sf_lip_cnt;
		/* mark every LUN of this target busy during the reset */
		target->sft_state |= SF_TARGET_BUSY;
		for (ntarget = target->sft_next_lun;
		    ntarget;
		    ntarget = ntarget->sft_next_lun) {
			mutex_enter(&ntarget->sft_mutex);
			/*
			 * XXXX If we supported RESET_LUN we should check here
			 * to see if any LUN were being reset and somehow fail
			 * that operation.
			 */
			ntarget->sft_state |= SF_TARGET_BUSY;
			mutex_exit(&ntarget->sft_mutex);
		}
		mutex_exit(&target->sft_mutex);
		mutex_exit(&sf->sf_mutex);

		deferred_destroy = 0;
		if ((pkt = sf_scsi_init_pkt(ap, NULL, NULL, 0,
		    0, 0, 0, NULL, 0)) != NULL) {
			/* build a "target reset" command, run it polled */
			cmd = PKT2CMD(pkt);
			cmd->cmd_block->fcp_cntl.cntl_reset = 1;
			cmd->cmd_fp_pkt->fcal_pkt_comp = NULL;
			cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;

			/* prepare the packet for transport */
			if (sf_prepare_pkt(sf, cmd, target) == TRAN_ACCEPT) {
				/* call transport to send a pkt polled */
				cmd->cmd_state = SF_STATE_ISSUED;
				if ((ret = soc_transport_poll(sf->sf_sochandle,
				    cmd->cmd_fp_pkt, SF_ABORT_TIMEOUT,
				    CQ_REQUEST_1)) == FCAL_TRANSPORT_SUCCESS) {
					/* pull the response in from DMA */
					(void) ddi_dma_sync(cmd->cmd_cr_pool->
					    rsp_dma_handle, (caddr_t)cmd->
					    cmd_rsp_block - cmd->cmd_cr_pool->
					    rsp_base, FCP_MAX_RSP_IU_SIZE,
					    DDI_DMA_SYNC_FORKERNEL);
					fpkt = cmd->cmd_fp_pkt;
					/* FCP rsp_info follows the rsp hdr */
					if ((fpkt->fcal_pkt_status ==
					    FCAL_STATUS_OK) &&
					    (((struct fcp_rsp_info *)
					    (&cmd->cmd_rsp_block->
					    fcp_response_len + 1))->
					    rsp_code == FCP_NO_FAILURE)) {
						sf_log(sf, CE_NOTE,
						    "!sf%d: Target 0x%x Reset "
						    "successful\n",
						    ddi_get_instance(\
						    sf->sf_dip),
						    sf_alpa_to_switch[
						    target->sft_al_pa]);
						rval = TRUE;
					} else {
						/* reset rejected by target */
						hp = &cmd->cmd_fp_pkt->
						    fcal_socal_request.
						    sr_fc_frame_hdr;
						tgt_id = sf_alpa_to_switch[
						    (uchar_t)hp->d_id];
						sf->sf_stats.tstats[tgt_id].
						    task_mgmt_failures++;
						sf_log(sf, CE_NOTE,
						    "!sf%d: Target 0x%x "
						    "Reset failed."
						    "Status code 0x%x "
						    "Resp code 0x%x\n",
						    ddi_get_instance(\
						    sf->sf_dip),
						    tgt_id,
						    fpkt->fcal_pkt_status,
						    ((struct fcp_rsp_info *)
						    (&cmd->cmd_rsp_block->
						    fcp_response_len + 1))->
						    rsp_code);
					}
				} else {
					/* polled send failed: try to abort */
					sf_log(sf, CE_NOTE, "!sf%d: Target "
					    "0x%x Reset Failed. Ret=%x\n",
					    ddi_get_instance(sf->sf_dip),
					    sf_alpa_to_switch[
					    target->sft_al_pa], ret);
					mutex_enter(&cmd->cmd_abort_mutex);
					if (cmd->cmd_state == SF_STATE_ISSUED) {
					/* call the transport to abort a cmd */
						cmd->cmd_timeout =
						    sf_watchdog_time
						    + 20;
						cmd->cmd_state =
						    SF_STATE_ABORTING;
						mutex_exit(
						    &cmd->cmd_abort_mutex);
						if (((t = soc_abort(
						    sf->sf_sochandle,
						    sf->sf_socp,
						    sf->sf_sochandle->
						    fcal_portno,
						    cmd->cmd_fp_pkt, 1)) !=
						    FCAL_ABORTED) &&
						    (t != FCAL_ABORT_FAILED)) {
							/* force a LIP */
							sf_log(sf, CE_NOTE,
							    "!sf%d: Target "
							    "0x%x Reset "
							    "failed. Abort "
							    "Failed, forcing "
							    "LIP\n",
							    ddi_get_instance(
							    sf->sf_dip),
							    sf_alpa_to_switch[
							    target->
							    sft_al_pa]);
							sf_force_lip(sf);
							rval = TRUE;
							deferred_destroy = 1;
						}
					} else {
						mutex_exit
						    (&cmd->cmd_abort_mutex);
					}
				}
			}
			/*
			 * Defer releasing the packet if we abort returned with
			 * a BAD_ABORT or timed out, because there is a
			 * possibility that the ucode might return it.
			 * We wait for at least 20s and let it be released
			 * by the sf_watch thread
			 */
			if (!deferred_destroy) {
				/* restore pkt for reuse, then free it */
				cmd->cmd_block->fcp_cntl.cntl_reset = 0;
				cmd->cmd_fp_pkt->fcal_pkt_comp =
				    sf_cmd_callback;
				cmd->cmd_state = SF_STATE_IDLE;
				/* for cache */
				sf_scsi_destroy_pkt(ap, pkt);
			}
		} else {
			/* couldn't allocate the reset packet */
			cmn_err(CE_WARN, "!sf%d: Target 0x%x Reset Failed. "
			    "Resource allocation error.\n",
			    ddi_get_instance(sf->sf_dip),
			    sf_alpa_to_switch[target->sft_al_pa]);
		}
		mutex_enter(&sf->sf_mutex);
		if ((rval == TRUE) && (lip_cnt == sf->sf_lip_cnt)) {
			/*
			 * success: queue the target on the reset list so
			 * sf_check_reset_delay() un-busies it later
			 */
			p->target = target;
			p->lip_cnt = lip_cnt;
			p->timeout = ddi_get_lbolt() +
			    drv_usectohz(SF_TARGET_RESET_DELAY);
			p->next = sf->sf_reset_list;
			sf->sf_reset_list = p;
			mutex_exit(&sf->sf_mutex);
			mutex_enter(&sf_global_mutex);
			if (sf_reset_timeout_id == 0) {
				sf_reset_timeout_id = timeout(
				    sf_check_reset_delay, NULL,
				    drv_usectohz(SF_TARGET_RESET_DELAY));
			}
			mutex_exit(&sf_global_mutex);
		} else {
			/* failure (or new LIP): un-busy the LUNs ourselves */
			if (lip_cnt == sf->sf_lip_cnt) {
				mutex_enter(&target->sft_mutex);
				target->sft_state &= ~SF_TARGET_BUSY;
				for (ntarget = target->sft_next_lun;
				    ntarget;
				    ntarget = ntarget->sft_next_lun) {
					mutex_enter(&ntarget->sft_mutex);
					ntarget->sft_state &= ~SF_TARGET_BUSY;
					mutex_exit(&ntarget->sft_mutex);
				}
				mutex_exit(&target->sft_mutex);
			}
			mutex_exit(&sf->sf_mutex);
			kmem_free(p, sizeof (struct sf_reset_list));
		}
	} else {
		/* level != RESET_TARGET: fall back to forcing a LIP */
		mutex_enter(&sf->sf_mutex);
		if ((sf->sf_state == SF_STATE_OFFLINE) &&
		    (sf_watchdog_time < sf->sf_timer)) {
			/*
			 * We are currently in a lip, so let this one
			 * finish before forcing another one.
			 */
			mutex_exit(&sf->sf_mutex);
			return (TRUE);
		}
		mutex_exit(&sf->sf_mutex);
		sf_log(sf, CE_NOTE, "!sf:Target driver initiated lip\n");
		sf_force_lip(sf);
		rval = TRUE;
	}
	return (rval);
}
4980 4984
4981 4985
/*
 * abort all commands for a target
 *
 * if try_abort is set then send an abort
 * if abort is set then this is abort, else this is a reset
 *
 * Three passes:
 *  1. unlink every command addressed to this target from the driver's
 *     overflow queue (sf_pkt_head) onto a private list;
 *  2. complete every command on that private list with CMD_ABORTED or
 *     CMD_RESET (non-polled commands only);
 *  3. walk the target's outstanding-command list and, for each issued,
 *     in-transport, non-polled command, optionally soc_abort() it and
 *     complete it.  The walk restarts from the list head after each
 *     completion because the locks are dropped in between.
 */
static void
sf_abort_all(struct sf *sf, struct sf_target *target, int abort, int
    lip_cnt, int try_abort)
{
	struct sf_target *ntarget;
	struct sf_pkt *cmd, *head = NULL, *tail = NULL, *pcmd = NULL, *tcmd;
	struct fcal_packet *fpkt;
	struct scsi_pkt *pkt;
	/*
	 * rval starts as FCAL_ABORTED so that when try_abort is clear
	 * (no soc_abort() call) the completion branch below still runs.
	 */
	int rval = FCAL_ABORTED;

	/*
	 * First pull all commands for all LUNs on this target out of the
	 * overflow list. We can tell it's the same target by comparing
	 * the node WWN.
	 */
	mutex_enter(&sf->sf_mutex);
	if (lip_cnt == sf->sf_lip_cnt) {
		mutex_enter(&sf->sf_cmd_mutex);
		cmd = sf->sf_pkt_head;
		while (cmd != NULL) {
			ntarget = ADDR2TARGET(&cmd->cmd_pkt->
			    pkt_address);
			if (ntarget == target) {
				/* unlink cmd from the overflow queue */
				if (pcmd != NULL)
					pcmd->cmd_next = cmd->cmd_next;
				else
					sf->sf_pkt_head = cmd->cmd_next;
				if (sf->sf_pkt_tail == cmd) {
					sf->sf_pkt_tail = pcmd;
					if (pcmd != NULL)
						pcmd->cmd_next = NULL;
				}
				tcmd = cmd->cmd_next;
				/* append cmd to our private list */
				if (head == NULL) {
					head = cmd;
					tail = cmd;
				} else {
					tail->cmd_next = cmd;
					tail = cmd;
				}
				cmd->cmd_next = NULL;
				cmd = tcmd;
			} else {
				pcmd = cmd;
				cmd = cmd->cmd_next;
			}
		}
		mutex_exit(&sf->sf_cmd_mutex);
	}
	mutex_exit(&sf->sf_mutex);

	/*
	 * Now complete all the commands on our list. In the process,
	 * the completion routine may take the commands off the target
	 * lists.
	 */
	cmd = head;
	while (cmd != NULL) {
		pkt = cmd->cmd_pkt;
		if (abort) {
			pkt->pkt_reason = CMD_ABORTED;
			pkt->pkt_statistics |= STAT_ABORTED;
		} else {
			pkt->pkt_reason = CMD_RESET;
			pkt->pkt_statistics |= STAT_DEV_RESET;
		}
		cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
		cmd->cmd_state = SF_STATE_IDLE;
		/* advance before the callback may recycle cmd */
		cmd = cmd->cmd_next;
		/*
		 * call the packet completion routine only for
		 * non-polled commands. Ignore the polled commands as
		 * they timeout and will be handled differently
		 */
		if ((pkt->pkt_comp) && !(pkt->pkt_flags & FLAG_NOINTR))
			(*pkt->pkt_comp)(pkt);

	}

	/*
	 * Finally get all outstanding commands for each LUN, and abort them if
	 * they've been issued, and call the completion routine.
	 * For the case where sf_offline_target is called from sf_watch
	 * due to a Offline Timeout, it is quite possible that the soc+
	 * ucode is hosed and therefore cannot return the commands.
	 * Clear up all the issued commands as well.
	 * Try_abort will be false only if sf_abort_all is coming from
	 * sf_target_offline.
	 */

	if (try_abort || sf->sf_state == SF_STATE_OFFLINE) {
		mutex_enter(&target->sft_pkt_mutex);
		cmd = tcmd = target->sft_pkt_head;
		/* the list is circular; head sentinel marks the end */
		while (cmd != (struct sf_pkt *)&target->sft_pkt_head) {
			fpkt = cmd->cmd_fp_pkt;
			pkt = cmd->cmd_pkt;
			mutex_enter(&cmd->cmd_abort_mutex);
			if ((cmd->cmd_state == SF_STATE_ISSUED) &&
			    (fpkt->fcal_cmd_state &
			    FCAL_CMD_IN_TRANSPORT) &&
			    ((fpkt->fcal_cmd_state & FCAL_CMD_COMPLETE) ==
			    0) && !(pkt->pkt_flags & FLAG_NOINTR)) {
				cmd->cmd_state = SF_STATE_ABORTING;
				cmd->cmd_timeout = sf_watchdog_time +
				    cmd->cmd_pkt->pkt_time + 20;
				mutex_exit(&cmd->cmd_abort_mutex);
				mutex_exit(&target->sft_pkt_mutex);
				if (try_abort) {
					/* call the transport to abort a pkt */
					rval = soc_abort(sf->sf_sochandle,
					    sf->sf_socp,
					    sf->sf_sochandle->fcal_portno,
					    fpkt, 1);
				}
				if ((rval == FCAL_ABORTED) ||
				    (rval == FCAL_ABORT_FAILED)) {
					/* complete the command ourselves */
					if (abort) {
						pkt->pkt_reason = CMD_ABORTED;
						pkt->pkt_statistics |=
						    STAT_ABORTED;
					} else {
						pkt->pkt_reason = CMD_RESET;
						pkt->pkt_statistics |=
						    STAT_DEV_RESET;
					}
					cmd->cmd_state = SF_STATE_IDLE;
					if (pkt->pkt_comp)
						(*pkt->pkt_comp)(pkt);
				}
				/* a new LIP invalidates this whole walk */
				mutex_enter(&sf->sf_mutex);
				if (lip_cnt != sf->sf_lip_cnt) {
					mutex_exit(&sf->sf_mutex);
					return;
				}
				mutex_exit(&sf->sf_mutex);
				/* locks were dropped: restart from head */
				mutex_enter(&target->sft_pkt_mutex);
				cmd = target->sft_pkt_head;
			} else {
				mutex_exit(&cmd->cmd_abort_mutex);
				cmd = cmd->cmd_forw;
			}
		}
		mutex_exit(&target->sft_pkt_mutex);
	}
}
5133 5137
5134 5138
/*
 * called by the transport to start a packet (tran_start(9E))
 *
 * Dispatches the command one of three ways: polled I/O for FLAG_NOINTR
 * packets, a direct lock-free issue when sf_use_lock is clear, or the
 * locked path that either issues immediately or enqueues on the
 * overflow queue when throttled.  Returns a TRAN_* code.
 */
static int
sf_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct sf *sf = ADDR2SF(ap);
	struct sf_target *target = ADDR2TARGET(ap);
	struct sf_pkt *cmd = PKT2CMD(pkt);
	int rval;


	SF_DEBUG(6, (sf, CE_NOTE, "sf_start\n"));

	if (cmd->cmd_state == SF_STATE_ISSUED) {
		/* caller is re-issuing an in-flight packet -- fatal bug */
		cmn_err(CE_PANIC, "sf: issuing packet twice 0x%p\n",
		    (void *)cmd);
	}

	/* prepare the packet for transport */
	if ((rval = sf_prepare_pkt(sf, cmd, target)) != TRAN_ACCEPT) {
		return (rval);
	}

	if (target->sft_state & (SF_TARGET_BUSY|SF_TARGET_OFFLINE)) {
		if (target->sft_state & SF_TARGET_OFFLINE) {
			/* target is gone */
			return (TRAN_FATAL_ERROR);
		}
		if (pkt->pkt_flags & FLAG_NOINTR) {
			/* can't queue a polled command */
			return (TRAN_BUSY);
		}
		/*
		 * target busy: force the locked path and jump into the
		 * enqueue branch below WITH sf_cmd_mutex held
		 */
		mutex_enter(&sf->sf_cmd_mutex);
		sf->sf_use_lock = TRUE;
		goto enque;
	}


	/* if no interrupts then do polled I/O */
	if (pkt->pkt_flags & FLAG_NOINTR) {
		return (sf_dopoll(sf, cmd));
	}

	/* regular interrupt-driven I/O */

	if (!sf->sf_use_lock) {

		/* locking not needed */

		cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
		    sf_watchdog_time + cmd->cmd_pkt->pkt_time : 0;
		cmd->cmd_state = SF_STATE_ISSUED;

		/* call the transport to send a pkt */
		if (soc_transport(sf->sf_sochandle, cmd->cmd_fp_pkt,
		    FCAL_NOSLEEP, CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) {
			cmd->cmd_state = SF_STATE_IDLE;
			return (TRAN_BADPKT);
		}
		return (TRAN_ACCEPT);
	}

	/* regular I/O using locking */

	mutex_enter(&sf->sf_cmd_mutex);
	if ((sf->sf_ncmds >= sf->sf_throttle) ||
	    (sf->sf_pkt_head != NULL)) {
enque:
		/*
		 * either we're throttling back or there are already commands
		 * on the queue, so enqueue this one for later
		 */
		cmd->cmd_flags |= CFLAG_IN_QUEUE;
		if (sf->sf_pkt_head != NULL) {
			/* add to the queue */
			sf->sf_pkt_tail->cmd_next = cmd;
			cmd->cmd_next = NULL;
			sf->sf_pkt_tail = cmd;
		} else {
			/* this is the first entry in the queue */
			sf->sf_pkt_head = sf->sf_pkt_tail = cmd;
			cmd->cmd_next = NULL;
		}
		mutex_exit(&sf->sf_cmd_mutex);
		return (TRAN_ACCEPT);
	}

	/*
	 * start this packet now
	 */

	/* still have cmd mutex */
	return (sf_start_internal(sf, cmd));
}
5228 5232
5229 5233
5230 5234 /*
5231 5235 * internal routine to start a packet from the queue now
5232 5236 *
5233 5237 * enter with cmd mutex held and leave with it released
5234 5238 */
5235 5239 static int
5236 5240 sf_start_internal(struct sf *sf, struct sf_pkt *cmd)
5237 5241 {
5238 5242 /* we have the cmd mutex */
5239 5243 sf->sf_ncmds++;
5240 5244 mutex_exit(&sf->sf_cmd_mutex);
5241 5245
5242 5246 ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
5243 5247 SF_DEBUG(6, (sf, CE_NOTE, "sf_start_internal\n"));
5244 5248
5245 5249 cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ? sf_watchdog_time +
5246 5250 cmd->cmd_pkt->pkt_time : 0;
5247 5251 cmd->cmd_state = SF_STATE_ISSUED;
5248 5252
5249 5253 /* call transport to send the pkt */
5250 5254 if (soc_transport(sf->sf_sochandle, cmd->cmd_fp_pkt, FCAL_NOSLEEP,
5251 5255 CQ_REQUEST_1) != FCAL_TRANSPORT_SUCCESS) {
5252 5256 cmd->cmd_state = SF_STATE_IDLE;
5253 5257 mutex_enter(&sf->sf_cmd_mutex);
5254 5258 sf->sf_ncmds--;
5255 5259 mutex_exit(&sf->sf_cmd_mutex);
5256 5260 return (TRAN_BADPKT);
5257 5261 }
5258 5262 return (TRAN_ACCEPT);
5259 5263 }
5260 5264
5261 5265
5262 5266 /*
5263 5267 * prepare a packet for transport
5264 5268 */
5265 5269 static int
5266 5270 sf_prepare_pkt(struct sf *sf, struct sf_pkt *cmd, struct sf_target *target)
5267 5271 {
5268 5272 struct fcp_cmd *fcmd = cmd->cmd_block;
5269 5273
5270 5274 /* XXXX Need to set the LUN ? */
5271 5275 bcopy((caddr_t)&target->sft_lun.b,
5272 5276 (caddr_t)&fcmd->fcp_ent_addr,
5273 5277 FCP_LUN_SIZE);
5274 5278 cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
5275 5279 cmd->cmd_pkt->pkt_state = 0;
5276 5280 cmd->cmd_pkt->pkt_statistics = 0;
5277 5281
5278 5282
5279 5283 if ((cmd->cmd_pkt->pkt_comp == NULL) &&
5280 5284 ((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0)) {
5281 5285 return (TRAN_BADPKT);
5282 5286 }
5283 5287
5284 5288 /* invalidate imp field(s) of rsp block */
5285 5289 cmd->cmd_rsp_block->fcp_u.i_fcp_status = SF_BAD_DMA_MAGIC;
5286 5290
5287 5291 /* set up amt of I/O to do */
5288 5292 if (cmd->cmd_flags & CFLAG_DMAVALID) {
5289 5293 cmd->cmd_pkt->pkt_resid = cmd->cmd_dmacount;
5290 5294 if (cmd->cmd_flags & CFLAG_CMDIOPB) {
5291 5295 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
5292 5296 DDI_DMA_SYNC_FORDEV);
5293 5297 }
5294 5298 } else {
5295 5299 cmd->cmd_pkt->pkt_resid = 0;
5296 5300 }
5297 5301
5298 5302 /* set up the Tagged Queuing type */
5299 5303 if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
5300 5304 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
5301 5305 } else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
5302 5306 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
5303 5307 }
5304 5308
5305 5309 /*
5306 5310 * Sync the cmd segment
5307 5311 */
5308 5312 (void) ddi_dma_sync(cmd->cmd_cr_pool->cmd_dma_handle,
5309 5313 (caddr_t)fcmd - cmd->cmd_cr_pool->cmd_base,
5310 5314 sizeof (struct fcp_cmd), DDI_DMA_SYNC_FORDEV);
5311 5315
5312 5316 sf_fill_ids(sf, cmd, target);
5313 5317 return (TRAN_ACCEPT);
5314 5318 }
5315 5319
5316 5320
5317 5321 /*
5318 5322 * fill in packet hdr source and destination IDs and hdr byte count
5319 5323 */
5320 5324 static void
5321 5325 sf_fill_ids(struct sf *sf, struct sf_pkt *cmd, struct sf_target *target)
5322 5326 {
5323 5327 struct fcal_packet *fpkt = cmd->cmd_fp_pkt;
5324 5328 fc_frame_header_t *hp;
5325 5329
5326 5330
5327 5331 hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
5328 5332 hp->d_id = target->sft_al_pa;
5329 5333 hp->s_id = sf->sf_al_pa;
5330 5334 fpkt->fcal_socal_request.sr_soc_hdr.sh_byte_cnt =
5331 5335 cmd->cmd_dmacookie.dmac_size;
5332 5336 }
5333 5337
5334 5338
/*
 * do polled I/O using transport
 *
 * Issues the command via soc_transport_poll() with the pkt's own
 * timeout (or SF_POLL_TIMEOUT seconds if none), then either completes
 * it through sf_cmd_callback() on success or unwinds the command count
 * and returns TRAN_BADPKT on failure.
 */
static int
sf_dopoll(struct sf *sf, struct sf_pkt *cmd)
{
	int timeout;	/* seconds to poll */
	int rval;


	/* account for this command */
	mutex_enter(&sf->sf_cmd_mutex);
	sf->sf_ncmds++;
	mutex_exit(&sf->sf_cmd_mutex);

	timeout = cmd->cmd_pkt->pkt_time ? cmd->cmd_pkt->pkt_time
	    : SF_POLL_TIMEOUT;
	/* no watchdog deadline -- we poll for completion ourselves */
	cmd->cmd_timeout = 0;
	cmd->cmd_fp_pkt->fcal_pkt_comp = NULL;
	cmd->cmd_state = SF_STATE_ISSUED;

	/* call transport to send a pkt polled (timeout in microseconds) */
	rval = soc_transport_poll(sf->sf_sochandle, cmd->cmd_fp_pkt,
	    timeout*1000000, CQ_REQUEST_1);
	mutex_enter(&cmd->cmd_abort_mutex);
	/* restore the normal completion routine */
	cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback;
	if (rval != FCAL_TRANSPORT_SUCCESS) {
		if (rval == FCAL_TRANSPORT_TIMEOUT) {
			cmd->cmd_state = SF_STATE_ABORTING;
			mutex_exit(&cmd->cmd_abort_mutex);
			(void) sf_target_timeout(sf, cmd);
		} else {
			mutex_exit(&cmd->cmd_abort_mutex);
		}
		cmd->cmd_state = SF_STATE_IDLE;
		/*
		 * NOTE(review): fcal_pkt_comp was already set to
		 * sf_cmd_callback above; these re-assignments (here and
		 * below) look redundant but are kept as-is.
		 */
		cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback;
		mutex_enter(&sf->sf_cmd_mutex);
		sf->sf_ncmds--;
		mutex_exit(&sf->sf_cmd_mutex);
		return (TRAN_BADPKT);
	}
	mutex_exit(&cmd->cmd_abort_mutex);
	cmd->cmd_fp_pkt->fcal_pkt_comp = sf_cmd_callback;
	/* complete the command inline */
	sf_cmd_callback(cmd->cmd_fp_pkt);
	return (TRAN_ACCEPT);
}
5380 5384
5381 5385
5382 5386 /* a shortcut for defining debug messages below */
5383 5387 #ifdef DEBUG
5384 5388 #define SF_DMSG1(s) msg1 = s
5385 5389 #else
5386 5390 #define SF_DMSG1(s) /* do nothing */
5387 5391 #endif
5388 5392
5389 5393
5390 5394 /*
5391 5395 * the pkt_comp callback for command packets
5392 5396 */
5393 5397 static void
5394 5398 sf_cmd_callback(struct fcal_packet *fpkt)
5395 5399 {
5396 5400 struct sf_pkt *cmd = (struct sf_pkt *)fpkt->fcal_pkt_private;
5397 5401 struct scsi_pkt *pkt = cmd->cmd_pkt;
5398 5402 struct sf *sf = ADDR2SF(&pkt->pkt_address);
5399 5403 struct sf_target *target = ADDR2TARGET(&pkt->pkt_address);
5400 5404 struct fcp_rsp *rsp;
5401 5405 char *msg1 = NULL;
5402 5406 char *msg2 = NULL;
5403 5407 short ncmds;
5404 5408 int tgt_id;
5405 5409 int good_scsi_status = TRUE;
5406 5410
5407 5411
5408 5412
5409 5413 if (cmd->cmd_state == SF_STATE_IDLE) {
5410 5414 cmn_err(CE_PANIC, "sf: completing idle packet 0x%p\n",
5411 5415 (void *)cmd);
5412 5416 }
5413 5417
5414 5418 mutex_enter(&cmd->cmd_abort_mutex);
5415 5419 if (cmd->cmd_state == SF_STATE_ABORTING) {
5416 5420 /* cmd already being aborted -- nothing to do */
5417 5421 mutex_exit(&cmd->cmd_abort_mutex);
5418 5422 return;
5419 5423 }
5420 5424
5421 5425 cmd->cmd_state = SF_STATE_IDLE;
5422 5426 mutex_exit(&cmd->cmd_abort_mutex);
5423 5427
5424 5428 if (fpkt->fcal_pkt_status == FCAL_STATUS_OK) {
5425 5429
5426 5430 (void) ddi_dma_sync(cmd->cmd_cr_pool->rsp_dma_handle,
5427 5431 (caddr_t)cmd->cmd_rsp_block - cmd->cmd_cr_pool->rsp_base,
5428 5432 FCP_MAX_RSP_IU_SIZE, DDI_DMA_SYNC_FORKERNEL);
5429 5433
5430 5434 rsp = (struct fcp_rsp *)cmd->cmd_rsp_block;
5431 5435
5432 5436 if (rsp->fcp_u.i_fcp_status == SF_BAD_DMA_MAGIC) {
5433 5437
5434 5438 if (sf_core && (sf_core & SF_CORE_BAD_DMA)) {
5435 5439 sf_token = (int *)(uintptr_t)
5436 5440 fpkt->fcal_socal_request.\
5437 5441 sr_soc_hdr.sh_request_token;
5438 5442 (void) soc_take_core(sf->sf_sochandle,
5439 5443 sf->sf_socp);
5440 5444 }
5441 5445
5442 5446 pkt->pkt_reason = CMD_INCOMPLETE;
5443 5447 pkt->pkt_state = STATE_GOT_BUS;
5444 5448 pkt->pkt_statistics |= STAT_ABORTED;
5445 5449
5446 5450 } else {
5447 5451
5448 5452 pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
5449 5453 STATE_SENT_CMD | STATE_GOT_STATUS;
5450 5454 pkt->pkt_resid = 0;
5451 5455 if (cmd->cmd_flags & CFLAG_DMAVALID) {
5452 5456 pkt->pkt_state |= STATE_XFERRED_DATA;
5453 5457 }
5454 5458
5455 5459 if ((pkt->pkt_scbp != NULL) &&
5456 5460 ((*(pkt->pkt_scbp) =
5457 5461 rsp->fcp_u.fcp_status.scsi_status)
5458 5462 != STATUS_GOOD)) {
5459 5463 good_scsi_status = FALSE;
5460 5464 /*
5461 5465 * The next two checks make sure that if there
5462 5466 * is no sense data or a valid response and
5463 5467 * the command came back with check condition,
5464 5468 * the command should be retried
5465 5469 */
5466 5470 if (!rsp->fcp_u.fcp_status.rsp_len_set &&
5467 5471 !rsp->fcp_u.fcp_status.sense_len_set) {
5468 5472 pkt->pkt_state &= ~STATE_XFERRED_DATA;
5469 5473 pkt->pkt_resid = cmd->cmd_dmacount;
5470 5474 }
5471 5475 }
5472 5476
5473 5477 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
5474 5478 (pkt->pkt_state & STATE_XFERRED_DATA)) {
5475 5479 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0,
5476 5480 (uint_t)0, DDI_DMA_SYNC_FORCPU);
5477 5481 }
5478 5482 /*
5479 5483 * Update the transfer resid, if appropriate
5480 5484 */
5481 5485 if (rsp->fcp_u.fcp_status.resid_over ||
5482 5486 rsp->fcp_u.fcp_status.resid_under)
5483 5487 pkt->pkt_resid = rsp->fcp_resid;
5484 5488
5485 5489 /*
5486 5490 * Check to see if the SCSI command failed.
5487 5491 *
5488 5492 */
5489 5493
5490 5494 /*
5491 5495 * First see if we got a FCP protocol error.
5492 5496 */
5493 5497 if (rsp->fcp_u.fcp_status.rsp_len_set) {
5494 5498 struct fcp_rsp_info *bep;
5495 5499
5496 5500 bep = (struct fcp_rsp_info *)
5497 5501 (&rsp->fcp_response_len + 1);
5498 5502 if (bep->rsp_code != FCP_NO_FAILURE) {
5499 5503 pkt->pkt_reason = CMD_TRAN_ERR;
5500 5504 tgt_id = pkt->pkt_address.a_target;
5501 5505 switch (bep->rsp_code) {
5502 5506 case FCP_CMND_INVALID:
5503 5507 SF_DMSG1("FCP_RSP FCP_CMND "
5504 5508 "fields invalid");
5505 5509 break;
5506 5510 case FCP_TASK_MGMT_NOT_SUPPTD:
5507 5511 SF_DMSG1("FCP_RSP Task"
5508 5512 "Management Function"
5509 5513 "Not Supported");
5510 5514 break;
5511 5515 case FCP_TASK_MGMT_FAILED:
5512 5516 SF_DMSG1("FCP_RSP Task "
5513 5517 "Management Function"
5514 5518 "Failed");
5515 5519 sf->sf_stats.tstats[tgt_id].
5516 5520 task_mgmt_failures++;
5517 5521 break;
5518 5522 case FCP_DATA_RO_MISMATCH:
5519 5523 SF_DMSG1("FCP_RSP FCP_DATA RO "
5520 5524 "mismatch with "
5521 5525 "FCP_XFER_RDY DATA_RO");
5522 5526 sf->sf_stats.tstats[tgt_id].
5523 5527 data_ro_mismatches++;
5524 5528 break;
5525 5529 case FCP_DL_LEN_MISMATCH:
5526 5530 SF_DMSG1("FCP_RSP FCP_DATA "
5527 5531 "length "
5528 5532 "different than BURST_LEN");
5529 5533 sf->sf_stats.tstats[tgt_id].
5530 5534 dl_len_mismatches++;
5531 5535 break;
5532 5536 default:
5533 5537 SF_DMSG1("FCP_RSP invalid "
5534 5538 "RSP_CODE");
5535 5539 break;
5536 5540 }
5537 5541 }
5538 5542 }
5539 5543
5540 5544 /*
5541 5545 * See if we got a SCSI error with sense data
5542 5546 */
5543 5547 if (rsp->fcp_u.fcp_status.sense_len_set) {
5544 5548 uchar_t rqlen = min(rsp->fcp_sense_len,
5545 5549 sizeof (struct scsi_extended_sense));
5546 5550 caddr_t sense = (caddr_t)rsp +
5547 5551 sizeof (struct fcp_rsp) +
5548 5552 rsp->fcp_response_len;
5549 5553 struct scsi_arq_status *arq;
5550 5554 struct scsi_extended_sense *sensep =
5551 5555 (struct scsi_extended_sense *)sense;
5552 5556
5553 5557 if (rsp->fcp_u.fcp_status.scsi_status !=
5554 5558 STATUS_GOOD) {
5555 5559 if (rsp->fcp_u.fcp_status.scsi_status
5556 5560 == STATUS_CHECK) {
5557 5561 if (sensep->es_key ==
5558 5562 KEY_RECOVERABLE_ERROR)
5559 5563 good_scsi_status = 1;
5560 5564 if (sensep->es_key ==
5561 5565 KEY_UNIT_ATTENTION &&
5562 5566 sensep->es_add_code == 0x3f &&
5563 5567 sensep->es_qual_code == 0x0e) {
5564 5568 /* REPORT_LUNS_HAS_CHANGED */
5565 5569 sf_log(sf, CE_NOTE,
5566 5570 "!REPORT_LUNS_HAS_CHANGED\n");
5567 5571 sf_force_lip(sf);
5568 5572 }
5569 5573 }
5570 5574 }
5571 5575
5572 5576 if ((pkt->pkt_scbp != NULL) &&
5573 5577 (cmd->cmd_scblen >=
5574 5578 sizeof (struct scsi_arq_status))) {
5575 5579
5576 5580 pkt->pkt_state |= STATE_ARQ_DONE;
5577 5581
5578 5582 arq = (struct scsi_arq_status *)pkt->pkt_scbp;
5579 5583 /*
5580 5584 * copy out sense information
5581 5585 */
5582 5586 bcopy(sense, (caddr_t)&arq->sts_sensedata,
5583 5587 rqlen);
5584 5588 arq->sts_rqpkt_resid =
5585 5589 sizeof (struct scsi_extended_sense) -
5586 5590 rqlen;
5587 5591 *((uchar_t *)&arq->sts_rqpkt_status) =
5588 5592 STATUS_GOOD;
5589 5593 arq->sts_rqpkt_reason = 0;
5590 5594 arq->sts_rqpkt_statistics = 0;
5591 5595 arq->sts_rqpkt_state = STATE_GOT_BUS |
5592 5596 STATE_GOT_TARGET | STATE_SENT_CMD |
5593 5597 STATE_GOT_STATUS | STATE_ARQ_DONE |
5594 5598 STATE_XFERRED_DATA;
5595 5599 }
5596 5600 target->sft_alive = TRUE;
5597 5601 }
5598 5602
5599 5603 /*
5600 5604 * The firmware returns the number of bytes actually
5601 5605 * xfered into/out of host. Compare this with what
5602 5606 * we asked and if it is different, we lost frames ?
5603 5607 */
5604 5608 if ((pkt->pkt_reason == 0) && (pkt->pkt_resid == 0) &&
5605 5609 (good_scsi_status) &&
5606 5610 (pkt->pkt_state & STATE_XFERRED_DATA) &&
5607 5611 (!(cmd->cmd_flags & CFLAG_CMDIOPB)) &&
5608 5612 (target->sft_device_type != DTYPE_ESI)) {
5609 5613 int byte_cnt =
5610 5614 fpkt->fcal_socal_request.
5611 5615 sr_soc_hdr.sh_byte_cnt;
5612 5616 if (cmd->cmd_flags & CFLAG_DMASEND) {
5613 5617 if (byte_cnt != 0) {
5614 5618 sf_log(sf, CE_NOTE,
5615 5619 "!sf_cmd_callback: Lost Frame: "
5616 5620 "(write) received 0x%x expected"
5617 5621 " 0x%x target 0x%x\n",
5618 5622 byte_cnt, cmd->cmd_dmacount,
5619 5623 sf_alpa_to_switch[
5620 5624 target->sft_al_pa]);
5621 5625 pkt->pkt_reason = CMD_INCOMPLETE;
5622 5626 pkt->pkt_statistics |= STAT_ABORTED;
5623 5627 }
5624 5628 } else if (byte_cnt < cmd->cmd_dmacount) {
5625 5629 sf_log(sf, CE_NOTE,
5626 5630 "!sf_cmd_callback: "
5627 5631 "Lost Frame: (read) "
5628 5632 "received 0x%x expected 0x%x "
5629 5633 "target 0x%x\n", byte_cnt,
5630 5634 cmd->cmd_dmacount,
5631 5635 sf_alpa_to_switch[
5632 5636 target->sft_al_pa]);
5633 5637 pkt->pkt_reason = CMD_INCOMPLETE;
5634 5638 pkt->pkt_statistics |= STAT_ABORTED;
5635 5639 }
5636 5640 }
5637 5641 }
5638 5642
5639 5643 } else {
5640 5644
5641 5645 /* pkt status was not ok */
5642 5646
5643 5647 switch (fpkt->fcal_pkt_status) {
5644 5648
5645 5649 case FCAL_STATUS_ERR_OFFLINE:
5646 5650 SF_DMSG1("Fibre Channel Offline");
5647 5651 mutex_enter(&target->sft_mutex);
5648 5652 if (!(target->sft_state & SF_TARGET_OFFLINE)) {
5649 5653 target->sft_state |= (SF_TARGET_BUSY
5650 5654 | SF_TARGET_MARK);
5651 5655 }
5652 5656 mutex_exit(&target->sft_mutex);
5653 5657 (void) ndi_event_retrieve_cookie(sf->sf_event_hdl,
5654 5658 target->sft_dip, FCAL_REMOVE_EVENT,
5655 5659 &sf_remove_eid, NDI_EVENT_NOPASS);
5656 5660 (void) ndi_event_run_callbacks(sf->sf_event_hdl,
5657 5661 target->sft_dip, sf_remove_eid, NULL);
5658 5662 pkt->pkt_reason = CMD_TRAN_ERR;
5659 5663 pkt->pkt_statistics |= STAT_BUS_RESET;
5660 5664 break;
5661 5665
5662 5666 case FCAL_STATUS_MAX_XCHG_EXCEEDED:
5663 5667 sf_throttle(sf);
5664 5668 sf->sf_use_lock = TRUE;
5665 5669 pkt->pkt_reason = CMD_TRAN_ERR;
5666 5670 pkt->pkt_state = STATE_GOT_BUS;
5667 5671 pkt->pkt_statistics |= STAT_ABORTED;
5668 5672 break;
5669 5673
5670 5674 case FCAL_STATUS_TIMEOUT:
5671 5675 SF_DMSG1("Fibre Channel Timeout");
5672 5676 pkt->pkt_reason = CMD_TIMEOUT;
5673 5677 break;
5674 5678
5675 5679 case FCAL_STATUS_ERR_OVERRUN:
5676 5680 SF_DMSG1("CMD_DATA_OVR");
5677 5681 pkt->pkt_reason = CMD_DATA_OVR;
5678 5682 break;
5679 5683
5680 5684 case FCAL_STATUS_UNKNOWN_CQ_TYPE:
5681 5685 SF_DMSG1("Unknown CQ type");
5682 5686 pkt->pkt_reason = CMD_TRAN_ERR;
5683 5687 break;
5684 5688
5685 5689 case FCAL_STATUS_BAD_SEG_CNT:
5686 5690 SF_DMSG1("Bad SEG CNT");
5687 5691 pkt->pkt_reason = CMD_TRAN_ERR;
5688 5692 break;
5689 5693
5690 5694 case FCAL_STATUS_BAD_XID:
5691 5695 SF_DMSG1("Fibre Channel Invalid X_ID");
5692 5696 pkt->pkt_reason = CMD_TRAN_ERR;
5693 5697 break;
5694 5698
5695 5699 case FCAL_STATUS_XCHG_BUSY:
5696 5700 SF_DMSG1("Fibre Channel Exchange Busy");
5697 5701 pkt->pkt_reason = CMD_TRAN_ERR;
5698 5702 break;
5699 5703
5700 5704 case FCAL_STATUS_INSUFFICIENT_CQES:
5701 5705 SF_DMSG1("Insufficient CQEs");
5702 5706 pkt->pkt_reason = CMD_TRAN_ERR;
5703 5707 break;
5704 5708
5705 5709 case FCAL_STATUS_ALLOC_FAIL:
5706 5710 SF_DMSG1("ALLOC FAIL");
5707 5711 pkt->pkt_reason = CMD_TRAN_ERR;
5708 5712 break;
5709 5713
5710 5714 case FCAL_STATUS_BAD_SID:
5711 5715 SF_DMSG1("Fibre Channel Invalid S_ID");
5712 5716 pkt->pkt_reason = CMD_TRAN_ERR;
5713 5717 break;
5714 5718
5715 5719 case FCAL_STATUS_INCOMPLETE_DMA_ERR:
5716 5720 if (sf_core && (sf_core & SF_CORE_INCOMPLETE_DMA)) {
5717 5721 sf_token = (int *)(uintptr_t)
5718 5722 fpkt->fcal_socal_request.\
5719 5723 sr_soc_hdr.sh_request_token;
5720 5724 (void) soc_take_core(sf->sf_sochandle,
5721 5725 sf->sf_socp);
5722 5726 sf_core = 0;
5723 5727 }
5724 5728 msg2 =
5725 5729 "INCOMPLETE DMA XFER due to bad SOC+ card, replace HBA";
5726 5730 pkt->pkt_reason = CMD_INCOMPLETE;
5727 5731 pkt->pkt_state = STATE_GOT_BUS;
5728 5732 pkt->pkt_statistics |= STAT_ABORTED;
5729 5733 break;
5730 5734
5731 5735 case FCAL_STATUS_CRC_ERR:
5732 5736 msg2 = "Fibre Channel CRC Error on frames";
5733 5737 pkt->pkt_reason = CMD_INCOMPLETE;
5734 5738 pkt->pkt_state = STATE_GOT_BUS;
5735 5739 pkt->pkt_statistics |= STAT_ABORTED;
5736 5740 break;
5737 5741
5738 5742 case FCAL_STATUS_NO_SEQ_INIT:
5739 5743 SF_DMSG1("Fibre Channel Seq Init Error");
5740 5744 pkt->pkt_reason = CMD_TRAN_ERR;
5741 5745 break;
5742 5746
5743 5747 case FCAL_STATUS_OPEN_FAIL:
5744 5748 pkt->pkt_reason = CMD_TRAN_ERR;
5745 5749 SF_DMSG1("Fibre Channel Open Failure");
5746 5750 if ((target->sft_state & (SF_TARGET_BUSY |
5747 5751 SF_TARGET_MARK | SF_TARGET_OFFLINE)) == 0) {
5748 5752 sf_log(sf, CE_NOTE,
5749 5753 "!Open failure to target 0x%x "
5750 5754 "forcing LIP\n",
5751 5755 sf_alpa_to_switch[target->sft_al_pa]);
5752 5756 sf_force_lip(sf);
5753 5757 }
5754 5758 break;
5755 5759
5756 5760
5757 5761 case FCAL_STATUS_ONLINE_TIMEOUT:
5758 5762 SF_DMSG1("Fibre Channel Online Timeout");
5759 5763 pkt->pkt_reason = CMD_TRAN_ERR;
5760 5764 break;
5761 5765
5762 5766 default:
5763 5767 SF_DMSG1("Unknown FC Status");
5764 5768 pkt->pkt_reason = CMD_TRAN_ERR;
5765 5769 break;
5766 5770 }
5767 5771 }
5768 5772
5769 5773 #ifdef DEBUG
5770 5774 /*
5771 5775 * msg1 will be non-NULL if we've detected some sort of error
5772 5776 */
5773 5777 if (msg1 != NULL && sfdebug >= 4) {
5774 5778 sf_log(sf, CE_WARN,
5775 5779 "!Transport error on cmd=0x%p target=0x%x: %s\n",
5776 5780 (void *)fpkt, pkt->pkt_address.a_target, msg1);
5777 5781 }
5778 5782 #endif
5779 5783
5780 5784 if (msg2 != NULL) {
5781 5785 sf_log(sf, CE_WARN, "!Transport error on target=0x%x: %s\n",
5782 5786 pkt->pkt_address.a_target, msg2);
5783 5787 }
5784 5788
5785 5789 ncmds = fpkt->fcal_ncmds;
5786 5790 ASSERT(ncmds >= 0);
5787 5791 if (ncmds >= (sf->sf_throttle - SF_HI_CMD_DELTA)) {
5788 5792 #ifdef DEBUG
5789 5793 if (!sf->sf_use_lock) {
5790 5794 SF_DEBUG(4, (sf, CE_NOTE, "use lock flag on\n"));
5791 5795 }
5792 5796 #endif
5793 5797 sf->sf_use_lock = TRUE;
5794 5798 }
5795 5799
5796 5800 mutex_enter(&sf->sf_cmd_mutex);
5797 5801 sf->sf_ncmds = ncmds;
5798 5802 sf_throttle_start(sf);
5799 5803 mutex_exit(&sf->sf_cmd_mutex);
5800 5804
5801 5805 if (!msg1 && !msg2)
5802 5806 SF_DEBUG(6, (sf, CE_NOTE, "Completing pkt 0x%p\n",
5803 5807 (void *)pkt));
5804 5808 if (pkt->pkt_comp != NULL) {
5805 5809 (*pkt->pkt_comp)(pkt);
5806 5810 }
5807 5811 }
5808 5812
5809 5813 #undef SF_DMSG1
5810 5814
5811 5815
5812 5816
5813 5817 /*
5814 5818 * start throttling for this instance
5815 5819 */
static void
sf_throttle_start(struct sf *sf)
{
	struct sf_pkt *cmd, *prev_cmd = NULL;
	struct scsi_pkt *pkt;
	struct sf_target *target;


	/* caller must already hold the per-instance command mutex */
	ASSERT(mutex_owned(&sf->sf_cmd_mutex));

	/*
	 * Walk the queued-packet list, issuing commands while the HBA is
	 * online and we are still under the throttle limit.
	 */
	cmd = sf->sf_pkt_head;
	while ((cmd != NULL) &&
	    (sf->sf_state == SF_STATE_ONLINE) &&
	    (sf->sf_ncmds < sf->sf_throttle)) {

		pkt = CMD2PKT(cmd);

		target = ADDR2TARGET(&pkt->pkt_address);
		if (target->sft_state & SF_TARGET_BUSY) {
			/* this command is busy -- go to next */
			ASSERT(cmd->cmd_state != SF_STATE_ISSUED);
			prev_cmd = cmd;
			cmd = cmd->cmd_next;
			continue;
		}

		ASSERT(cmd->cmd_state != SF_STATE_ISSUED);

		/* this cmd not busy and not issued */

		/* remove this packet from the queue */
		if (sf->sf_pkt_head == cmd) {
			/* this was the first packet */
			sf->sf_pkt_head = cmd->cmd_next;
		} else if (sf->sf_pkt_tail == cmd) {
			/* this was the last packet */
			sf->sf_pkt_tail = prev_cmd;
			if (prev_cmd != NULL) {
				prev_cmd->cmd_next = NULL;
			}
		} else {
			/* some packet in the middle of the queue */
			ASSERT(prev_cmd != NULL);
			prev_cmd->cmd_next = cmd->cmd_next;
		}
		cmd->cmd_flags &= ~CFLAG_IN_QUEUE;

		if (target->sft_state & SF_TARGET_OFFLINE) {
			/* target is gone: fail the packet to its completion */
			mutex_exit(&sf->sf_cmd_mutex);
			pkt->pkt_reason = CMD_TRAN_ERR;
			if (pkt->pkt_comp != NULL) {
				(*pkt->pkt_comp)(cmd->cmd_pkt);
			}
		} else {
			sf_fill_ids(sf, cmd, target);
			if (sf_start_internal(sf, cmd) != TRAN_ACCEPT) {
				pkt->pkt_reason = CMD_TRAN_ERR;
				if (pkt->pkt_comp != NULL) {
					(*pkt->pkt_comp)(cmd->cmd_pkt);
				}
			}
		}
		/*
		 * NOTE(review): both branches above end with sf_cmd_mutex
		 * released (the else path presumably drops it inside
		 * sf_start_internal() -- confirm), so we reacquire it here.
		 * The lock was dropped, so restart the walk from the head.
		 */
		mutex_enter(&sf->sf_cmd_mutex);
		cmd = sf->sf_pkt_head;
		prev_cmd = NULL;
	}
}
5883 5887
5884 5888
5885 5889 /*
5886 5890 * called when the max exchange value is exceeded to throttle back commands
5887 5891 */
5888 5892 static void
5889 5893 sf_throttle(struct sf *sf)
5890 5894 {
5891 5895 int cmdmax = sf->sf_sochandle->fcal_cmdmax;
5892 5896
5893 5897
5894 5898 mutex_enter(&sf->sf_cmd_mutex);
5895 5899
5896 5900 sf->sf_flag = TRUE;
5897 5901
5898 5902 if (sf->sf_ncmds > (cmdmax / 2)) {
5899 5903 sf->sf_throttle = cmdmax / 2;
5900 5904 } else {
5901 5905 if (sf->sf_ncmds > SF_DECR_DELTA) {
5902 5906 sf->sf_throttle = sf->sf_ncmds - SF_DECR_DELTA;
5903 5907 } else {
5904 5908 /*
5905 5909 * This case is just a safeguard, should not really
5906 5910 * happen(ncmds < SF_DECR_DELTA and MAX_EXCHG exceed
5907 5911 */
5908 5912 sf->sf_throttle = SF_DECR_DELTA;
5909 5913 }
5910 5914 }
5911 5915 mutex_exit(&sf->sf_cmd_mutex);
5912 5916
5913 5917 sf = sf->sf_sibling;
5914 5918 if (sf != NULL) {
5915 5919 mutex_enter(&sf->sf_cmd_mutex);
5916 5920 sf->sf_flag = TRUE;
5917 5921 if (sf->sf_ncmds >= (cmdmax / 2)) {
5918 5922 sf->sf_throttle = cmdmax / 2;
5919 5923 } else {
5920 5924 if (sf->sf_ncmds > SF_DECR_DELTA) {
5921 5925 sf->sf_throttle = sf->sf_ncmds - SF_DECR_DELTA;
5922 5926 } else {
5923 5927 sf->sf_throttle = SF_DECR_DELTA;
5924 5928 }
5925 5929 }
5926 5930
5927 5931 mutex_exit(&sf->sf_cmd_mutex);
5928 5932 }
5929 5933 }
5930 5934
5931 5935
5932 5936 /*
5933 5937 * sf watchdog routine, called for a timeout
5934 5938 */
5935 5939 /*ARGSUSED*/
static void
sf_watch(void *arg)
{
	struct sf *sf;
	struct sf_els_hdr *privp;
	static int count = 0, pscan_count = 0;	/* persist across ticks */
	int cmdmax, i, mescount = 0;
	struct sf_target *target;


	/* advance the driver-global soft clock by one watchdog period */
	sf_watchdog_time += sf_watchdog_timeout;
	count++;
	pscan_count++;

	/* walk every sf instance; the global mutex protects the list */
	mutex_enter(&sf_global_mutex);
	sf_watch_running = 1;
	for (sf = sf_head; sf != NULL; sf = sf->sf_next) {

		mutex_exit(&sf_global_mutex);

		/* disable throttling while we're suspended */
		mutex_enter(&sf->sf_mutex);
		if (sf->sf_state & SF_STATE_SUSPENDED) {
			mutex_exit(&sf->sf_mutex);
			SF_DEBUG(1, (sf, CE_CONT,
			    "sf_watch, sf%d:throttle disabled "
			    "due to DDI_SUSPEND\n",
			    ddi_get_instance(sf->sf_dip)));
			mutex_enter(&sf_global_mutex);
			continue;
		}
		mutex_exit(&sf->sf_mutex);

		cmdmax = sf->sf_sochandle->fcal_cmdmax;

		/* someone requested a SOC+ core dump for this instance */
		if (sf->sf_take_core) {
			(void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
		}

		mutex_enter(&sf->sf_cmd_mutex);

		/*
		 * If sf_throttle() did not fire since the last tick, grow
		 * the throttle back toward cmdmax; otherwise just clear
		 * the flag and leave the reduced throttle in place.
		 */
		if (!sf->sf_flag) {
			if (sf->sf_throttle < (cmdmax / 2)) {
				sf->sf_throttle = cmdmax / 2;
			} else if ((sf->sf_throttle += SF_INCR_DELTA) >
			    cmdmax) {
				sf->sf_throttle = cmdmax;
			}
		} else {
			sf->sf_flag = FALSE;
		}

		/* exponential moving average of outstanding commands */
		sf->sf_ncmds_exp_avg = (sf->sf_ncmds + sf->sf_ncmds_exp_avg)
		    >> 2;
		if ((sf->sf_ncmds <= (sf->sf_throttle - SF_LO_CMD_DELTA)) &&
		    (sf->sf_pkt_head == NULL)) {
#ifdef DEBUG
			if (sf->sf_use_lock) {
				SF_DEBUG(4, (sf, CE_NOTE,
				    "use lock flag off\n"));
			}
#endif
			sf->sf_use_lock = FALSE;
		}

		/* kick any queued packets if there is headroom again */
		if (sf->sf_state == SF_STATE_ONLINE && sf->sf_pkt_head &&
		    sf->sf_ncmds < sf->sf_throttle) {
			sf_throttle_start(sf);
		}

		mutex_exit(&sf->sf_cmd_mutex);

		/* periodically shrink the command/response pool */
		if (pscan_count >= sf_pool_scan_cnt) {
			if (sf->sf_ncmds_exp_avg < (sf->sf_cr_pool_cnt <<
			    SF_LOG2_ELEMS_IN_POOL) - SF_FREE_CR_EPSILON) {
				sf_crpool_free(sf);
			}
		}
		mutex_enter(&sf->sf_mutex);

		/*
		 * Scan outstanding ELS commands: time out expired ones and
		 * reap entries left over from a previous LIP generation.
		 */
		privp = sf->sf_els_list;
		while (privp != NULL) {
			if (privp->timeout < sf_watchdog_time) {
				/* timeout this command */
				privp = sf_els_timeout(sf, privp);
			} else if ((privp->timeout == SF_INVALID_TIMEOUT) &&
			    (privp->lip_cnt != sf->sf_lip_cnt)) {
				/* stale entry from an older LIP: unlink it */
				if (privp->prev != NULL) {
					privp->prev->next = privp->next;
				}
				if (sf->sf_els_list == privp) {
					sf->sf_els_list = privp->next;
				}
				if (privp->next != NULL) {
					privp->next->prev = privp->prev;
				}
				/* drop the lock to free, then restart scan */
				mutex_exit(&sf->sf_mutex);
				sf_els_free(privp->fpkt);
				mutex_enter(&sf->sf_mutex);
				privp = sf->sf_els_list;
			} else {
				privp = privp->next;
			}
		}

		/* the loop failed to come fully online in time */
		if (sf->sf_online_timer && sf->sf_online_timer <
		    sf_watchdog_time) {
			for (i = 0; i < sf_max_targets; i++) {
				target = sf->sf_targets[i];
				if (target != NULL) {
					if (!mescount && target->sft_state &
					    SF_TARGET_BUSY) {
						sf_log(sf, CE_WARN, "!Loop "
						    "Unstable: Failed to bring "
						    "Loop Online\n");
						mescount = 1;
					}
					target->sft_state |= SF_TARGET_MARK;
				}
			}
			sf_finish_init(sf, sf->sf_lip_cnt);
			sf->sf_state = SF_STATE_INIT;
			sf->sf_online_timer = 0;
		}

		if (sf->sf_state == SF_STATE_ONLINE) {
			mutex_exit(&sf->sf_mutex);
			if (count >= sf_pkt_scan_cnt) {
				sf_check_targets(sf);
			}
		} else if ((sf->sf_state == SF_STATE_OFFLINE) &&
		    (sf->sf_timer < sf_watchdog_time)) {
			/* offline too long: log, optionally dump core */
			for (i = 0; i < sf_max_targets; i++) {
				target = sf->sf_targets[i];
				if ((target != NULL) &&
				    (target->sft_state &
				    SF_TARGET_BUSY)) {
					sf_log(sf, CE_WARN,
					    "!Offline Timeout\n");
					if (sf_core && (sf_core &
					    SF_CORE_OFFLINE_TIMEOUT)) {
						(void) soc_take_core(
						    sf->sf_sochandle,
						    sf->sf_socp);
						sf_core = 0;
					}
					break;
				}
			}
			sf_finish_init(sf, sf->sf_lip_cnt);
			sf->sf_state = SF_STATE_INIT;
			mutex_exit(&sf->sf_mutex);
		} else {
			mutex_exit(&sf->sf_mutex);
		}
		mutex_enter(&sf_global_mutex);
	}
	mutex_exit(&sf_global_mutex);
	/* reset the periodic scan counters once their period elapses */
	if (count >= sf_pkt_scan_cnt) {
		count = 0;
	}
	if (pscan_count >= sf_pool_scan_cnt) {
		pscan_count = 0;
	}

	/* reset timeout */
	sf_watchdog_id = timeout(sf_watch, (caddr_t)0, sf_watchdog_tick);

	/* signal waiting thread */
	mutex_enter(&sf_global_mutex);
	sf_watch_running = 0;
	cv_broadcast(&sf_watch_cv);
	mutex_exit(&sf_global_mutex);
}
6110 6114
6111 6115
6112 6116 /*
6113 6117 * called during a timeout to check targets
6114 6118 */
static void
sf_check_targets(struct sf *sf)
{
	struct sf_target *target;
	int i;
	struct sf_pkt *cmd;
	struct scsi_pkt *pkt;
	int lip_cnt;

	/* snapshot the LIP generation so we can detect a LIP during scan */
	mutex_enter(&sf->sf_mutex);
	lip_cnt = sf->sf_lip_cnt;
	mutex_exit(&sf->sf_mutex);

	/* check scan all possible targets */
	for (i = 0; i < sf_max_targets; i++) {
		target = sf->sf_targets[i];
		while (target != NULL) {
			mutex_enter(&target->sft_pkt_mutex);
			/*
			 * A recently-alive target gets a grace period of
			 * sf_target_scan_cnt ticks before its packet list
			 * is inspected; bail out of the whole scan early.
			 */
			if (target->sft_alive && target->sft_scan_count !=
			    sf_target_scan_cnt) {
				target->sft_alive = 0;
				target->sft_scan_count++;
				mutex_exit(&target->sft_pkt_mutex);
				return;
			}
			target->sft_alive = 0;
			target->sft_scan_count = 0;
			/*
			 * Walk the target's circular packet list; the list
			 * head itself is the termination sentinel.
			 */
			cmd = target->sft_pkt_head;
			while (cmd != (struct sf_pkt *)&target->sft_pkt_head) {
				mutex_enter(&cmd->cmd_abort_mutex);
				/*
				 * DEBUG builds let sf_abort_flag force a
				 * timeout; the #ifdef splices alternate
				 * closings of the same condition.
				 */
				if (cmd->cmd_state == SF_STATE_ISSUED &&
				    ((cmd->cmd_timeout && sf_watchdog_time >
#ifdef DEBUG
				    cmd->cmd_timeout) || sf_abort_flag)) {
					sf_abort_flag = 0;
#else
				    cmd->cmd_timeout))) {
#endif
					cmd->cmd_timeout = 0;
	/* prevent reset from getting at this packet */
					cmd->cmd_state = SF_STATE_ABORTING;
					mutex_exit(&cmd->cmd_abort_mutex);
					mutex_exit(&target->sft_pkt_mutex);
					sf->sf_stats.tstats[i].timeouts++;
					if (sf_target_timeout(sf, cmd))
						return;
					else {
						if (lip_cnt != sf->sf_lip_cnt) {
							return;
						} else {
							/* restart list walk */
							mutex_enter(&target->
							    sft_pkt_mutex);
							cmd = target->
							    sft_pkt_head;
						}
					}
	/*
	 * if the abort and lip fail, a reset will be carried out.
	 * But the reset will ignore this packet. We have waited at least
	 * 20 seconds after the initial timeout. Now, complete it here.
	 * This also takes care of spurious bad aborts.
	 */
				} else if ((cmd->cmd_state ==
				    SF_STATE_ABORTING) && (cmd->cmd_timeout
				    <= sf_watchdog_time)) {
					cmd->cmd_state = SF_STATE_IDLE;
					mutex_exit(&cmd->cmd_abort_mutex);
					mutex_exit(&target->sft_pkt_mutex);
					SF_DEBUG(1, (sf, CE_NOTE,
					    "Command 0x%p to sft 0x%p"
					    " delayed release\n",
					    (void *)cmd, (void *)target));
					pkt = cmd->cmd_pkt;
					pkt->pkt_statistics |=
					    (STAT_TIMEOUT|STAT_ABORTED);
					pkt->pkt_reason = CMD_TIMEOUT;
					if (pkt->pkt_comp) {
						scsi_hba_pkt_comp(pkt);
					/* handle deferred_destroy case */
					} else {
						if ((cmd->cmd_block->fcp_cntl.
						    cntl_reset == 1) ||
						    (cmd->cmd_block->
						    fcp_cntl.cntl_abort_tsk ==
						    1)) {
							cmd->cmd_block->
							    fcp_cntl.
							    cntl_reset = 0;
							cmd->cmd_block->
							    fcp_cntl.
							    cntl_abort_tsk = 0;
							cmd->cmd_fp_pkt->
							    fcal_pkt_comp =
							    sf_cmd_callback;
							/* for cache */
							sf_scsi_destroy_pkt
							    (&pkt->pkt_address,
							    pkt);
						}
					}
					/* locks were dropped: restart walk */
					mutex_enter(&target->sft_pkt_mutex);
					cmd = target->sft_pkt_head;
				} else {
					mutex_exit(&cmd->cmd_abort_mutex);
					cmd = cmd->cmd_forw;
				}
			}
			mutex_exit(&target->sft_pkt_mutex);
			target = target->sft_next_lun;
		}
	}
}
6227 6231
6228 6232
6229 6233 /*
6230 6234 * a command to a target has timed out
6231 6235 * return TRUE iff cmd abort failed or timed out, else return FALSE
6232 6236 */
6233 6237 static int
6234 6238 sf_target_timeout(struct sf *sf, struct sf_pkt *cmd)
6235 6239 {
6236 6240 int rval;
6237 6241 struct scsi_pkt *pkt;
6238 6242 struct fcal_packet *fpkt;
6239 6243 int tgt_id;
6240 6244 int retval = FALSE;
6241 6245
6242 6246
6243 6247 SF_DEBUG(1, (sf, CE_NOTE, "Command 0x%p to target %x timed out\n",
6244 6248 (void *)cmd->cmd_fp_pkt, cmd->cmd_pkt->pkt_address.a_target));
6245 6249
6246 6250 fpkt = cmd->cmd_fp_pkt;
6247 6251
6248 6252 if (sf_core && (sf_core & SF_CORE_CMD_TIMEOUT)) {
6249 6253 sf_token = (int *)(uintptr_t)
6250 6254 fpkt->fcal_socal_request.sr_soc_hdr.\
6251 6255 sh_request_token;
6252 6256 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6253 6257 sf_core = 0;
6254 6258 }
6255 6259
6256 6260 /* call the transport to abort a command */
6257 6261 rval = soc_abort(sf->sf_sochandle, sf->sf_socp,
6258 6262 sf->sf_sochandle->fcal_portno, fpkt, 1);
6259 6263
6260 6264 switch (rval) {
6261 6265 case FCAL_ABORTED:
6262 6266 SF_DEBUG(1, (sf, CE_NOTE, "Command Abort succeeded\n"));
6263 6267 pkt = cmd->cmd_pkt;
6264 6268 cmd->cmd_state = SF_STATE_IDLE;
6265 6269 pkt->pkt_statistics |= (STAT_TIMEOUT|STAT_ABORTED);
6266 6270 pkt->pkt_reason = CMD_TIMEOUT;
6267 6271 if (pkt->pkt_comp != NULL) {
6268 6272 (*pkt->pkt_comp)(pkt);
6269 6273 }
6270 6274 break; /* success */
6271 6275
6272 6276 case FCAL_ABORT_FAILED:
6273 6277 SF_DEBUG(1, (sf, CE_NOTE, "Command Abort failed at target\n"));
6274 6278 pkt = cmd->cmd_pkt;
6275 6279 cmd->cmd_state = SF_STATE_IDLE;
6276 6280 pkt->pkt_reason = CMD_TIMEOUT;
6277 6281 pkt->pkt_statistics |= STAT_TIMEOUT;
6278 6282 tgt_id = pkt->pkt_address.a_target;
6279 6283 sf->sf_stats.tstats[tgt_id].abts_failures++;
6280 6284 if (pkt->pkt_comp != NULL) {
6281 6285 (*pkt->pkt_comp)(pkt);
6282 6286 }
6283 6287 break;
6284 6288
6285 6289 case FCAL_BAD_ABORT:
6286 6290 if (sf_core && (sf_core & SF_CORE_BAD_ABORT)) {
6287 6291 sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6288 6292 sr_soc_hdr.sh_request_token;
6289 6293 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6290 6294 sf_core = 0;
6291 6295 }
6292 6296 SF_DEBUG(1, (sf, CE_NOTE, "Command Abort bad abort\n"));
6293 6297 cmd->cmd_timeout = sf_watchdog_time + cmd->cmd_pkt->pkt_time
6294 6298 + 20;
6295 6299 break;
6296 6300
6297 6301 case FCAL_TIMEOUT:
6298 6302 retval = TRUE;
6299 6303 break;
6300 6304
6301 6305 default:
6302 6306 pkt = cmd->cmd_pkt;
6303 6307 tgt_id = pkt->pkt_address.a_target;
6304 6308 sf_log(sf, CE_WARN,
6305 6309 "Command Abort failed target 0x%x, forcing a LIP\n", tgt_id);
6306 6310 if (sf_core && (sf_core & SF_CORE_ABORT_TIMEOUT)) {
6307 6311 sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6308 6312 sr_soc_hdr.sh_request_token;
6309 6313 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6310 6314 sf_core = 0;
6311 6315 }
6312 6316 sf_force_lip(sf);
6313 6317 retval = TRUE;
6314 6318 break;
6315 6319 }
6316 6320
6317 6321 return (retval);
6318 6322 }
6319 6323
6320 6324
6321 6325 /*
6322 6326 * an ELS command has timed out
6323 6327 * return ???
6324 6328 */
6325 6329 static struct sf_els_hdr *
6326 6330 sf_els_timeout(struct sf *sf, struct sf_els_hdr *privp)
6327 6331 {
6328 6332 struct fcal_packet *fpkt;
6329 6333 int rval, dflag, timeout = SF_ELS_TIMEOUT;
6330 6334 uint_t lip_cnt = privp->lip_cnt;
6331 6335 uchar_t els_code = privp->els_code;
6332 6336 struct sf_target *target = privp->target;
6333 6337 char what[64];
6334 6338
6335 6339 fpkt = privp->fpkt;
6336 6340 dflag = privp->delayed_retry;
6337 6341 /* use as temporary state variable */
6338 6342 privp->timeout = SF_INVALID_TIMEOUT;
6339 6343 mutex_exit(&sf->sf_mutex);
6340 6344
6341 6345 if (privp->fpkt->fcal_pkt_comp == sf_els_callback) {
6342 6346 /*
6343 6347 * take socal core if required. Timeouts for IB and hosts
6344 6348 * are not very interesting, so we take socal core only
6345 6349 * if the timeout is *not* for a IB or host.
6346 6350 */
6347 6351 if (sf_core && (sf_core & SF_CORE_ELS_TIMEOUT) &&
6348 6352 ((sf_alpa_to_switch[privp->dest_nport_id] &
6349 6353 0x0d) != 0x0d) && ((privp->dest_nport_id != 1) ||
6350 6354 (privp->dest_nport_id != 2) ||
6351 6355 (privp->dest_nport_id != 4) ||
6352 6356 (privp->dest_nport_id != 8) ||
6353 6357 (privp->dest_nport_id != 0xf))) {
6354 6358 sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6355 6359 sr_soc_hdr.sh_request_token;
6356 6360 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6357 6361 sf_core = 0;
6358 6362 }
6359 6363 (void) sprintf(what, "ELS 0x%x", privp->els_code);
6360 6364 } else if (privp->fpkt->fcal_pkt_comp == sf_reportlun_callback) {
6361 6365 if (sf_core && (sf_core & SF_CORE_REPORTLUN_TIMEOUT)) {
6362 6366 sf_token = (int *)(uintptr_t)fpkt->fcal_socal_request.\
6363 6367 sr_soc_hdr.sh_request_token;
6364 6368 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6365 6369 sf_core = 0;
6366 6370 }
6367 6371 timeout = SF_FCP_TIMEOUT;
6368 6372 (void) sprintf(what, "REPORT_LUNS");
6369 6373 } else if (privp->fpkt->fcal_pkt_comp == sf_inq_callback) {
6370 6374 if (sf_core && (sf_core & SF_CORE_INQUIRY_TIMEOUT)) {
6371 6375 sf_token = (int *)(uintptr_t)
6372 6376 fpkt->fcal_socal_request.\
6373 6377 sr_soc_hdr.sh_request_token;
6374 6378 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6375 6379 sf_core = 0;
6376 6380 }
6377 6381 timeout = SF_FCP_TIMEOUT;
6378 6382 (void) sprintf(what, "INQUIRY to LUN 0x%lx",
6379 6383 (long)SCSA_LUN(target));
6380 6384 } else {
6381 6385 (void) sprintf(what, "UNKNOWN OPERATION");
6382 6386 }
6383 6387
6384 6388 if (dflag) {
6385 6389 /* delayed retry */
6386 6390 SF_DEBUG(2, (sf, CE_CONT,
6387 6391 "!sf%d: %s to target %x delayed retry\n",
6388 6392 ddi_get_instance(sf->sf_dip), what,
6389 6393 sf_alpa_to_switch[privp->dest_nport_id]));
6390 6394 privp->delayed_retry = FALSE;
6391 6395 goto try_again;
6392 6396 }
6393 6397
6394 6398 sf_log(sf, CE_NOTE, "!%s to target 0x%x alpa 0x%x timed out\n",
6395 6399 what, sf_alpa_to_switch[privp->dest_nport_id],
6396 6400 privp->dest_nport_id);
6397 6401
6398 6402 rval = soc_abort(sf->sf_sochandle, sf->sf_socp, sf->sf_sochandle
6399 6403 ->fcal_portno, fpkt, 1);
6400 6404 if (rval == FCAL_ABORTED || rval == FCAL_ABORT_FAILED) {
6401 6405 SF_DEBUG(1, (sf, CE_NOTE, "!%s abort to al_pa %x succeeded\n",
6402 6406 what, privp->dest_nport_id));
6403 6407 try_again:
6404 6408
6405 6409 mutex_enter(&sf->sf_mutex);
6406 6410 if (privp->prev != NULL) {
6407 6411 privp->prev->next = privp->next;
6408 6412 }
6409 6413 if (sf->sf_els_list == privp) {
6410 6414 sf->sf_els_list = privp->next;
6411 6415 }
6412 6416 if (privp->next != NULL) {
6413 6417 privp->next->prev = privp->prev;
6414 6418 }
6415 6419 privp->prev = privp->next = NULL;
6416 6420 if (lip_cnt == sf->sf_lip_cnt) {
6417 6421 privp->timeout = sf_watchdog_time + timeout;
6418 6422 if ((++(privp->retries) < sf_els_retries) ||
6419 6423 (dflag && (privp->retries < SF_BSY_RETRIES))) {
6420 6424 mutex_exit(&sf->sf_mutex);
6421 6425 sf_log(sf, CE_NOTE,
6422 6426 "!%s to target 0x%x retrying\n",
6423 6427 what,
6424 6428 sf_alpa_to_switch[privp->dest_nport_id]);
6425 6429 if (sf_els_transport(sf, privp) == 1) {
6426 6430 mutex_enter(&sf->sf_mutex);
6427 6431 return (sf->sf_els_list); /* success */
6428 6432 }
6429 6433 mutex_enter(&sf->sf_mutex);
6430 6434 fpkt = NULL;
6431 6435 }
6432 6436 if ((lip_cnt == sf->sf_lip_cnt) &&
6433 6437 (els_code != LA_ELS_LOGO)) {
6434 6438 if (target != NULL) {
6435 6439 sf_offline_target(sf, target);
6436 6440 }
6437 6441 if (sf->sf_lip_cnt == lip_cnt) {
6438 6442 sf->sf_device_count--;
6439 6443 ASSERT(sf->sf_device_count >= 0);
6440 6444 if (sf->sf_device_count == 0) {
6441 6445 sf_finish_init(sf,
6442 6446 sf->sf_lip_cnt);
6443 6447 }
6444 6448 }
6445 6449 }
6446 6450 privp = sf->sf_els_list;
6447 6451 mutex_exit(&sf->sf_mutex);
6448 6452 if (fpkt != NULL) {
6449 6453 sf_els_free(fpkt);
6450 6454 }
6451 6455 } else {
6452 6456 mutex_exit(&sf->sf_mutex);
6453 6457 sf_els_free(privp->fpkt);
6454 6458 privp = NULL;
6455 6459 }
6456 6460 } else {
6457 6461 if (sf_core && (sf_core & SF_CORE_ELS_FAILED)) {
6458 6462 sf_token = (int *)(uintptr_t)
6459 6463 fpkt->fcal_socal_request.\
6460 6464 sr_soc_hdr.sh_request_token;
6461 6465 (void) soc_take_core(sf->sf_sochandle, sf->sf_socp);
6462 6466 sf_core = 0;
6463 6467 }
6464 6468 sf_log(sf, CE_NOTE, "%s abort to target 0x%x failed. "
6465 6469 "status=0x%x, forcing LIP\n", what,
6466 6470 sf_alpa_to_switch[privp->dest_nport_id], rval);
6467 6471 privp = NULL;
6468 6472 if (sf->sf_lip_cnt == lip_cnt) {
6469 6473 sf_force_lip(sf);
6470 6474 }
6471 6475 }
6472 6476
6473 6477 mutex_enter(&sf->sf_mutex);
6474 6478 return (privp);
6475 6479 }
6476 6480
6477 6481
6478 6482 /*
6479 6483 * called by timeout when a reset times out
6480 6484 */
6481 6485 /*ARGSUSED*/
static void
sf_check_reset_delay(void *arg)
{
	struct sf *sf;
	struct sf_target *target;
	struct sf_reset_list *rp, *tp;
	uint_t lip_cnt, reset_timeout_flag = FALSE;
	clock_t lb;

	lb = ddi_get_lbolt();

	mutex_enter(&sf_global_mutex);

	/* mark the timeout as no longer pending */
	sf_reset_timeout_id = 0;

	for (sf = sf_head; sf != NULL; sf = sf->sf_next) {

		mutex_exit(&sf_global_mutex);
		mutex_enter(&sf->sf_mutex);

		/* is this type cast needed? */
		/* tp is the trailing pointer; starts at the sentinel head */
		tp = (struct sf_reset_list *)&sf->sf_reset_list;

		rp = sf->sf_reset_list;
		while (rp != NULL) {
			if (((rp->timeout - lb) < 0) &&
			    (rp->lip_cnt == sf->sf_lip_cnt)) {
				/* reset delay expired for current LIP gen */
				tp->next = rp->next;
				mutex_exit(&sf->sf_mutex);
				target = rp->target;
				lip_cnt = rp->lip_cnt;
				kmem_free(rp, sizeof (struct sf_reset_list));
				/* abort all cmds for this target */
				while (target) {
					sf_abort_all(sf, target, FALSE,
					    lip_cnt, TRUE);
					mutex_enter(&target->sft_mutex);
					if (lip_cnt == sf->sf_lip_cnt) {
						target->sft_state &=
						    ~SF_TARGET_BUSY;
					}
					mutex_exit(&target->sft_mutex);
					target = target->sft_next_lun;
				}
				/* lock was dropped: restart the list walk */
				mutex_enter(&sf->sf_mutex);
				tp = (struct sf_reset_list *)
				    &sf->sf_reset_list;
				rp = sf->sf_reset_list;
				lb = ddi_get_lbolt();
			} else if (rp->lip_cnt != sf->sf_lip_cnt) {
				/* entry belongs to an older LIP: discard */
				tp->next = rp->next;
				kmem_free(rp, sizeof (struct sf_reset_list));
				rp = tp->next;
			} else {
				/* still waiting: keep and advance */
				reset_timeout_flag = TRUE;
				tp = rp;
				rp = rp->next;
			}
		}
		mutex_exit(&sf->sf_mutex);
		mutex_enter(&sf_global_mutex);
	}

	/* anything still pending: re-arm ourselves */
	if (reset_timeout_flag && (sf_reset_timeout_id == 0)) {
		sf_reset_timeout_id = timeout(sf_check_reset_delay,
		    NULL, drv_usectohz(SF_TARGET_RESET_DELAY));
	}

	mutex_exit(&sf_global_mutex);
}
6552 6556
6553 6557
/*
 * called to "reset the bus", i.e. force loop initialization (and address
 * re-negotiation)
 *
 * Marks all online targets busy, bumps the LIP generation counter, puts
 * the instance into SF_STATE_OFFLINE, then asks the socal transport to
 * force a LIP.  If that fails (other than by timing out), the socal
 * hardware itself is reset as a last resort.
 */
static void
sf_force_lip(struct sf *sf)
{
	int i;
	struct sf_target *target;


	/* disable restart of lip if we're suspended */
	mutex_enter(&sf->sf_mutex);
	if (sf->sf_state & SF_STATE_SUSPENDED) {
		mutex_exit(&sf->sf_mutex);
		SF_DEBUG(1, (sf, CE_CONT,
		    "sf_force_lip, sf%d: lip restart disabled "
		    "due to DDI_SUSPEND\n",
		    ddi_get_instance(sf->sf_dip)));
		return;
	}

	sf_log(sf, CE_NOTE, "Forcing lip\n");

	/* mark every target (and each of its LUNs) busy until re-login */
	for (i = 0; i < sf_max_targets; i++) {
		target = sf->sf_targets[i];
		while (target != NULL) {
			mutex_enter(&target->sft_mutex);
			if (!(target->sft_state & SF_TARGET_OFFLINE))
				target->sft_state |= SF_TARGET_BUSY;
			mutex_exit(&target->sft_mutex);
			target = target->sft_next_lun;
		}
	}

	/* new LIP generation: in-flight work for the old one is stale */
	sf->sf_lip_cnt++;
	sf->sf_timer = sf_watchdog_time + SF_OFFLINE_TIMEOUT;
	sf->sf_state = SF_STATE_OFFLINE;
	mutex_exit(&sf->sf_mutex);
	sf->sf_stats.lip_count++;		/* no mutex for this? */

#ifdef DEBUG
	/* are we allowing LIPs ?? */
	if (sf_lip_flag != 0) {
#endif
		/* call the transport to force loop initialization */
		if (((i = soc_force_lip(sf->sf_sochandle, sf->sf_socp,
		    sf->sf_sochandle->fcal_portno, 1,
		    FCAL_FORCE_LIP)) != FCAL_SUCCESS) &&
		    (i != FCAL_TIMEOUT)) {
			/* force LIP failed */
			if (sf_core && (sf_core & SF_CORE_LIP_FAILED)) {
				/* take a core dump for post-mortem debug */
				(void) soc_take_core(sf->sf_sochandle,
				    sf->sf_socp);
				sf_core = 0;
			}
#ifdef DEBUG
			/* are we allowing reset after LIP failed ?? */
			if (sf_reset_flag != 0) {
#endif
				/* restart socal after resetting it */
				sf_log(sf, CE_NOTE,
				    "!Force lip failed Status code 0x%x."
				    " Reseting\n", i);
				/* call transport to force a reset */
				soc_force_reset(sf->sf_sochandle, sf->sf_socp,
				    sf->sf_sochandle->fcal_portno, 1);
#ifdef DEBUG
			}
#endif
		}
#ifdef DEBUG
	}
#endif
}
6629 6633
6630 6634
/*
 * called by the transport when an unsolicited ELS is received
 *
 * A LOGO is just logged (optionally forcing a LIP); any other ELS is
 * answered with an LS_RJT (reason: unsupported) built from the incoming
 * frame's exchange ids.
 */
static void
sf_unsol_els_callback(void *arg, soc_response_t *srp, caddr_t payload)
{
	struct sf *sf = (struct sf *)arg;
	els_payload_t *els = (els_payload_t *)payload;
	struct la_els_rjt *rsp;
	int i, tgt_id;
	uchar_t dest_id;
	struct fcal_packet *fpkt;
	fc_frame_header_t *hp;
	struct sf_els_hdr *privp;


	/* ignore empty or zero-length payloads */
	if ((els == NULL) || ((i = srp->sr_soc_hdr.sh_byte_cnt) == 0)) {
		return;
	}

	/* clamp the byte count to what fits in a CQ entry */
	if (i > SOC_CQE_PAYLOAD) {
		i = SOC_CQE_PAYLOAD;
	}

	/* sender's AL_PA, mapped to our internal target number */
	dest_id = (uchar_t)srp->sr_fc_frame_hdr.s_id;
	tgt_id = sf_alpa_to_switch[dest_id];

	switch (els->els_cmd.c.ls_command) {

	case LA_ELS_LOGO:
		/*
		 * logout received -- log the fact
		 */
		sf->sf_stats.tstats[tgt_id].logouts_recvd++;
		sf_log(sf, CE_NOTE, "!LOGO recvd from target %x, %s\n",
		    tgt_id,
		    sf_lip_on_plogo ? "Forcing LIP...." : "");
		if (sf_lip_on_plogo) {
			sf_force_lip(sf);
		}
		break;

	default:	/* includes LA_ELS_PLOGI */
		/*
		 * something besides a logout received -- we don't handle
		 * this so send back a reject saying its unsupported
		 */

		sf_log(sf, CE_NOTE, "!ELS 0x%x recvd from target 0x%x\n",
		    els->els_cmd.c.ls_command, tgt_id);


		/* allocate room for a response */
		if (sf_els_alloc(sf, dest_id, sizeof (struct sf_els_hdr),
		    sizeof (struct la_els_rjt), sizeof (union sf_els_rsp),
		    (caddr_t *)&privp, (caddr_t *)&rsp) == NULL) {
			break;
		}

		fpkt = privp->fpkt;

		/* fill in pkt header, echoing the sender's exchange ids */
		hp = &fpkt->fcal_socal_request.sr_fc_frame_hdr;
		hp->r_ctl = R_CTL_ELS_RSP;
		hp->f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
		hp->ox_id = srp->sr_fc_frame_hdr.ox_id;
		hp->rx_id = srp->sr_fc_frame_hdr.rx_id;
		fpkt->fcal_socal_request.sr_cqhdr.cq_hdr_type =
		    CQ_TYPE_OUTBOUND;

		fpkt->fcal_socal_request.sr_soc_hdr.sh_seg_cnt = 1;

		/* fill in response */
		rsp->ls_code = LA_ELS_RJT;	/* reject this ELS */
		rsp->mbz[0] = 0;
		rsp->mbz[1] = 0;
		rsp->mbz[2] = 0;
		/*
		 * NOTE(review): privp->rsp is stamped LA_ELS_ACC here even
		 * though we're sending an RJT -- presumably so the response
		 * completion path treats it as expected; confirm against
		 * sf_els_callback before changing.
		 */
		((struct la_els_logi *)privp->rsp)->ls_code = LA_ELS_ACC;
		*((int *)&rsp->reserved) = 0;
		rsp->reason_code = RJT_UNSUPPORTED;
		privp->retries = sf_els_retries;
		privp->els_code = LA_ELS_RJT;
		privp->timeout = (unsigned)0xffffffff;	/* never time out */
		(void) sf_els_transport(sf, privp);
		break;
	}
}
6718 6722
6719 6723
6720 6724 /*
6721 6725 * Error logging, printing, and debug print routines
6722 6726 */
6723 6727
6724 6728 /*PRINTFLIKE3*/
6725 6729 static void
6726 6730 sf_log(struct sf *sf, int level, const char *fmt, ...)
6727 6731 {
6728 6732 char buf[256];
6729 6733 dev_info_t *dip;
6730 6734 va_list ap;
6731 6735
6732 6736 if (sf != NULL) {
6733 6737 dip = sf->sf_dip;
6734 6738 } else {
6735 6739 dip = NULL;
6736 6740 }
6737 6741
6738 6742 va_start(ap, fmt);
6739 6743 (void) vsprintf(buf, fmt, ap);
6740 6744 va_end(ap);
6741 6745 scsi_log(dip, "sf", level, buf);
6742 6746 }
6743 6747
6744 6748
6745 6749 /*
6746 6750 * called to get some sf kstats -- return 0 on success else return errno
6747 6751 */
6748 6752 static int
6749 6753 sf_kstat_update(kstat_t *ksp, int rw)
6750 6754 {
6751 6755 struct sf *sf;
6752 6756
6753 6757 if (rw == KSTAT_WRITE) {
6754 6758 /* can't write */
6755 6759 return (EACCES);
6756 6760 }
6757 6761
6758 6762 sf = ksp->ks_private;
6759 6763 sf->sf_stats.ncmds = sf->sf_ncmds;
6760 6764 sf->sf_stats.throttle_limit = sf->sf_throttle;
6761 6765 sf->sf_stats.cr_pool_size = sf->sf_cr_pool_cnt;
6762 6766
6763 6767 return (0); /* success */
6764 6768 }
6765 6769
6766 6770
6767 6771 /*
6768 6772 * Unix Entry Points
6769 6773 */
6770 6774
6771 6775 /*
6772 6776 * driver entry point for opens on control device
6773 6777 */
6774 6778 /* ARGSUSED */
6775 6779 static int
6776 6780 sf_open(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
6777 6781 {
6778 6782 dev_t dev = *dev_p;
6779 6783 struct sf *sf;
6780 6784
6781 6785
6782 6786 /* just ensure soft state exists for this device */
6783 6787 sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev)));
6784 6788 if (sf == NULL) {
6785 6789 return (ENXIO);
6786 6790 }
6787 6791
6788 6792 ++(sf->sf_check_n_close);
6789 6793
6790 6794 return (0);
6791 6795 }
6792 6796
6793 6797
6794 6798 /*
6795 6799 * driver entry point for last close on control device
6796 6800 */
6797 6801 /* ARGSUSED */
6798 6802 static int
6799 6803 sf_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
6800 6804 {
6801 6805 struct sf *sf;
6802 6806
6803 6807 sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev)));
6804 6808 if (sf == NULL) {
6805 6809 return (ENXIO);
6806 6810 }
6807 6811
6808 6812 if (!sf->sf_check_n_close) { /* if this flag is zero */
6809 6813 cmn_err(CE_WARN, "sf%d: trying to close unopened instance",
6810 6814 SF_MINOR2INST(getminor(dev)));
6811 6815 return (ENODEV);
6812 6816 } else {
6813 6817 --(sf->sf_check_n_close);
6814 6818 }
6815 6819 return (0);
6816 6820 }
6817 6821
6818 6822
/*
 * driver entry point for sf ioctl commands
 *
 * Supports the generic devctl state ioctls (delegated to the NDI),
 * SFIOCGMAP (copy the current FC-AL loop map out to userland),
 * DEVCTL_DEVICE_RESET (reset one target), and BUS_RESET/RESETALL
 * (force a LIP).  Returns 0 on success or an errno.
 */
/* ARGSUSED */
static int
sf_ioctl(dev_t dev,
    int cmd, intptr_t arg, int mode, cred_t *cred_p, int *rval_p)
{
	struct sf *sf;
	struct sf_target *target;
	uchar_t al_pa;
	struct sf_al_map map;
	int cnt, i;
	int retval;		/* return value */
	struct devctl_iocdata *dcp;
	dev_info_t *cdip;
	struct scsi_address ap;
	scsi_hba_tran_t *tran;


	sf = ddi_get_soft_state(sf_state, SF_MINOR2INST(getminor(dev)));
	if (sf == NULL) {
		return (ENXIO);
	}

	/* handle all ioctls */
	switch (cmd) {

	/*
	 * We can use the generic implementation for these ioctls
	 */
	case DEVCTL_DEVICE_GETSTATE:
	case DEVCTL_DEVICE_ONLINE:
	case DEVCTL_DEVICE_OFFLINE:
	case DEVCTL_BUS_GETSTATE:
		return (ndi_devctl_ioctl(sf->sf_dip, cmd, arg, mode, 0));

	/*
	 * return FC map
	 */
	case SFIOCGMAP:
		/* need a valid (or fakeable) lilp map and an online port */
		if ((sf->sf_lilp_map->lilp_magic != FCAL_LILP_MAGIC &&
		    sf->sf_lilp_map->lilp_magic != FCAL_BADLILP_MAGIC) ||
		    sf->sf_state != SF_STATE_ONLINE) {
			retval = ENOENT;
			goto dun;
		}
		mutex_enter(&sf->sf_mutex);
		if (sf->sf_lilp_map->lilp_magic == FCAL_BADLILP_MAGIC) {
			int i, j = 0;

			/* Need to generate a fake lilp map */
			for (i = 0; i < sf_max_targets; i++) {
				if (sf->sf_targets[i])
					sf->sf_lilp_map->lilp_alpalist[j++] =
					    sf->sf_targets[i]->
					    sft_hard_address;
			}
			sf->sf_lilp_map->lilp_length = (uchar_t)j;
		}
		cnt = sf->sf_lilp_map->lilp_length;
		map.sf_count = (short)cnt;
		/* fill in the HBA's own entry first */
		bcopy((caddr_t)&sf->sf_sochandle->fcal_n_wwn,
		    (caddr_t)&map.sf_hba_addr.sf_node_wwn,
		    sizeof (la_wwn_t));
		bcopy((caddr_t)&sf->sf_sochandle->fcal_p_wwn,
		    (caddr_t)&map.sf_hba_addr.sf_port_wwn,
		    sizeof (la_wwn_t));
		map.sf_hba_addr.sf_al_pa = sf->sf_al_pa;
		map.sf_hba_addr.sf_hard_address = 0;
		map.sf_hba_addr.sf_inq_dtype = DTYPE_UNKNOWN;
		/* then one address pair per AL_PA on the loop */
		for (i = 0; i < cnt; i++) {
			al_pa = sf->sf_lilp_map->lilp_alpalist[i];
			map.sf_addr_pair[i].sf_al_pa = al_pa;
			if (al_pa == sf->sf_al_pa) {
				/* this entry is us (the initiator) */
				(void) bcopy((caddr_t)&sf->sf_sochandle
				    ->fcal_n_wwn, (caddr_t)&map.
				    sf_addr_pair[i].sf_node_wwn,
				    sizeof (la_wwn_t));
				(void) bcopy((caddr_t)&sf->sf_sochandle
				    ->fcal_p_wwn, (caddr_t)&map.
				    sf_addr_pair[i].sf_port_wwn,
				    sizeof (la_wwn_t));
				map.sf_addr_pair[i].sf_hard_address =
				    al_pa;
				map.sf_addr_pair[i].sf_inq_dtype =
				    DTYPE_PROCESSOR;
				continue;
			}
			target = sf->sf_targets[sf_alpa_to_switch[
			    al_pa]];
			if (target != NULL) {
				mutex_enter(&target->sft_mutex);
				if (!(target->sft_state &
				    (SF_TARGET_OFFLINE |
				    SF_TARGET_BUSY))) {
					/* usable target: report its WWNs */
					bcopy((caddr_t)&target->
					    sft_node_wwn,
					    (caddr_t)&map.sf_addr_pair
					    [i].sf_node_wwn,
					    sizeof (la_wwn_t));
					bcopy((caddr_t)&target->
					    sft_port_wwn,
					    (caddr_t)&map.sf_addr_pair
					    [i].sf_port_wwn,
					    sizeof (la_wwn_t));
					map.sf_addr_pair[i].
					    sf_hard_address
					    = target->sft_hard_address;
					map.sf_addr_pair[i].
					    sf_inq_dtype
					    = target->sft_device_type;
					mutex_exit(&target->sft_mutex);
					continue;
				}
				mutex_exit(&target->sft_mutex);
			}
			/* no usable target at this AL_PA: blank entry */
			bzero((caddr_t)&map.sf_addr_pair[i].
			    sf_node_wwn, sizeof (la_wwn_t));
			bzero((caddr_t)&map.sf_addr_pair[i].
			    sf_port_wwn, sizeof (la_wwn_t));
			map.sf_addr_pair[i].sf_inq_dtype =
			    DTYPE_UNKNOWN;
		}
		mutex_exit(&sf->sf_mutex);
		if (ddi_copyout((caddr_t)&map, (caddr_t)arg,
		    sizeof (struct sf_al_map), mode) != 0) {
			retval = EFAULT;
			goto dun;
		}
		break;

	/*
	 * handle device control ioctls
	 */
	case DEVCTL_DEVICE_RESET:
		/* resolve the devctl handle to a child dip */
		if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS) {
			retval = EFAULT;
			goto dun;
		}
		if ((ndi_dc_getname(dcp) == NULL) ||
		    (ndi_dc_getaddr(dcp) == NULL)) {
			ndi_dc_freehdl(dcp);
			retval = EINVAL;
			goto dun;
		}
		cdip = ndi_devi_find(sf->sf_dip,
		    ndi_dc_getname(dcp), ndi_dc_getaddr(dcp));
		ndi_dc_freehdl(dcp);

		if (cdip == NULL) {
			retval = ENXIO;
			goto dun;
		}

		if ((target = sf_get_target_from_dip(sf, cdip)) == NULL) {
			retval = ENXIO;
			goto dun;
		}
		mutex_enter(&target->sft_mutex);
		if (!(target->sft_state & SF_TARGET_INIT_DONE)) {
			mutex_exit(&target->sft_mutex);
			retval = ENXIO;
			goto dun;
		}

		/*
		 * This is ugly: fabricate a scsi_address from a private
		 * copy of the target's tran so we can call sf_reset()
		 */
		tran = kmem_zalloc(scsi_hba_tran_size(), KM_SLEEP);
		bcopy(target->sft_tran, tran, scsi_hba_tran_size());
		mutex_exit(&target->sft_mutex);
		ap.a_hba_tran = tran;
		ap.a_target = sf_alpa_to_switch[target->sft_al_pa];
		if (sf_reset(&ap, RESET_TARGET) == FALSE) {
			retval = EIO;
		} else {
			retval = 0;
		}
		kmem_free(tran, scsi_hba_tran_size());
		goto dun;

	case DEVCTL_BUS_QUIESCE:
	case DEVCTL_BUS_UNQUIESCE:
		retval = ENOTSUP;
		goto dun;

	case DEVCTL_BUS_RESET:
	case DEVCTL_BUS_RESETALL:
		/* "bus reset" on FC-AL means force a LIP */
		sf_force_lip(sf);
		break;

	default:
		retval = ENOTTY;
		goto dun;
	}

	retval = 0;			/* success */

dun:
	return (retval);
}
7019 7023
7020 7024
7021 7025 /*
7022 7026 * get the target given a DIP
7023 7027 */
7024 7028 static struct sf_target *
7025 7029 sf_get_target_from_dip(struct sf *sf, dev_info_t *dip)
7026 7030 {
7027 7031 int i;
7028 7032 struct sf_target *target;
7029 7033
7030 7034
7031 7035 /* scan each hash queue for the DIP in question */
7032 7036 for (i = 0; i < SF_NUM_HASH_QUEUES; i++) {
7033 7037 target = sf->sf_wwn_lists[i];
7034 7038 while (target != NULL) {
7035 7039 if (target->sft_dip == dip) {
7036 7040 return (target); /* success: target found */
7037 7041 }
7038 7042 target = target->sft_next;
7039 7043 }
7040 7044 }
7041 7045 return (NULL); /* failure: target not found */
7042 7046 }
7043 7047
7044 7048
7045 7049 /*
7046 7050 * called by the transport to get an event cookie
7047 7051 */
7048 7052 static int
7049 7053 sf_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
7050 7054 ddi_eventcookie_t *event_cookiep)
7051 7055 {
7052 7056 struct sf *sf;
7053 7057
7054 7058 sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip));
7055 7059 if (sf == NULL) {
7056 7060 /* can't find instance for this device */
7057 7061 return (DDI_FAILURE);
7058 7062 }
7059 7063
7060 7064 return (ndi_event_retrieve_cookie(sf->sf_event_hdl, rdip, name,
7061 7065 event_cookiep, NDI_EVENT_NOPASS));
7062 7066
7063 7067 }
7064 7068
7065 7069
7066 7070 /*
7067 7071 * called by the transport to add an event callback
7068 7072 */
7069 7073 static int
7070 7074 sf_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
7071 7075 ddi_eventcookie_t eventid, void (*callback)(dev_info_t *dip,
7072 7076 ddi_eventcookie_t event, void *arg, void *impl_data), void *arg,
7073 7077 ddi_callback_id_t *cb_id)
7074 7078 {
7075 7079 struct sf *sf;
7076 7080
7077 7081 sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip));
7078 7082 if (sf == NULL) {
7079 7083 /* can't find instance for this device */
7080 7084 return (DDI_FAILURE);
7081 7085 }
7082 7086
7083 7087 return (ndi_event_add_callback(sf->sf_event_hdl, rdip,
7084 7088 eventid, callback, arg, NDI_SLEEP, cb_id));
7085 7089
7086 7090 }
7087 7091
7088 7092
7089 7093 /*
7090 7094 * called by the transport to remove an event callback
7091 7095 */
7092 7096 static int
7093 7097 sf_bus_remove_eventcall(dev_info_t *devi, ddi_callback_id_t cb_id)
7094 7098 {
7095 7099 struct sf *sf;
7096 7100
7097 7101 sf = ddi_get_soft_state(sf_state, ddi_get_instance(devi));
7098 7102 if (sf == NULL) {
7099 7103 /* can't find instance for this device */
7100 7104 return (DDI_FAILURE);
7101 7105 }
7102 7106
7103 7107 return (ndi_event_remove_callback(sf->sf_event_hdl, cb_id));
7104 7108 }
7105 7109
7106 7110
7107 7111 /*
7108 7112 * called by the transport to post an event
7109 7113 */
7110 7114 static int
7111 7115 sf_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
7112 7116 ddi_eventcookie_t eventid, void *impldata)
7113 7117 {
7114 7118 ddi_eventcookie_t remove_cookie, cookie;
7115 7119
7116 7120 /* is this a remove event ?? */
7117 7121 struct sf *sf = ddi_get_soft_state(sf_state, ddi_get_instance(dip));
7118 7122 remove_cookie = ndi_event_tag_to_cookie(sf->sf_event_hdl,
7119 7123 SF_EVENT_TAG_REMOVE);
7120 7124
7121 7125 if (remove_cookie == eventid) {
7122 7126 struct sf_target *target;
7123 7127
7124 7128 /* handle remove event */
7125 7129
7126 7130 if (sf == NULL) {
7127 7131 /* no sf instance for this device */
7128 7132 return (NDI_FAILURE);
7129 7133 }
7130 7134
7131 7135 /* get the target for this event */
7132 7136 if ((target = sf_get_target_from_dip(sf, rdip)) != NULL) {
7133 7137 /*
7134 7138 * clear device info for this target and mark as
7135 7139 * not done
7136 7140 */
7137 7141 mutex_enter(&target->sft_mutex);
7138 7142 target->sft_dip = NULL;
7139 7143 target->sft_state &= ~SF_TARGET_INIT_DONE;
7140 7144 mutex_exit(&target->sft_mutex);
7141 7145 return (NDI_SUCCESS); /* event handled */
7142 7146 }
7143 7147
7144 7148 /* no target for this event */
7145 7149 return (NDI_FAILURE);
7146 7150 }
7147 7151
7148 7152 /* an insertion event */
7149 7153 if (ndi_busop_get_eventcookie(dip, rdip, FCAL_INSERT_EVENT, &cookie)
7150 7154 != NDI_SUCCESS) {
7151 7155 return (NDI_FAILURE);
7152 7156 }
7153 7157
7154 7158 return (ndi_post_event(dip, rdip, cookie, impldata));
7155 7159 }
7156 7160
7157 7161
/*
 * the sf hotplug daemon, one thread per sf instance
 *
 * Drains the instance's hotplug work list, onlining or offlining child
 * devices outside of the list mutex (NDI calls can block), then sleeps
 * on sf_hp_daemon_cv until more work or an exit request arrives.
 * Participates in CPR (suspend/resume) via the callb framework.
 */
static void
sf_hp_daemon(void *arg)
{
	struct sf *sf = (struct sf *)arg;
	struct sf_hp_elem *elem;
	struct sf_target *target;
	int tgt_id;
	callb_cpr_t cprinfo;

	/* register with CPR so suspend can safely stop this thread */
	CALLB_CPR_INIT(&cprinfo, &sf->sf_hp_daemon_mutex,
	    callb_generic_cpr, "sf_hp_daemon");

	mutex_enter(&sf->sf_hp_daemon_mutex);

	do {
		while (sf->sf_hp_elem_head != NULL) {

			/* save ptr to head of list */
			elem = sf->sf_hp_elem_head;

			/* take element off of list */
			if (sf->sf_hp_elem_head == sf->sf_hp_elem_tail) {
				/* element only one in list -- list now empty */
				sf->sf_hp_elem_head = NULL;
				sf->sf_hp_elem_tail = NULL;
			} else {
				/* remove element from head of list */
				sf->sf_hp_elem_head = sf->sf_hp_elem_head->next;
			}

			/* drop the mutex: NDI online/offline can block */
			mutex_exit(&sf->sf_hp_daemon_mutex);

			switch (elem->what) {
			case SF_ONLINE:
				/* online this target */
				target = elem->target;
				(void) ndi_devi_online(elem->dip, 0);
				/* then announce the insertion upstream */
				(void) ndi_event_retrieve_cookie(
				    sf->sf_event_hdl,
				    target->sft_dip, FCAL_INSERT_EVENT,
				    &sf_insert_eid, NDI_EVENT_NOPASS);
				(void) ndi_event_run_callbacks(sf->sf_event_hdl,
				    target->sft_dip, sf_insert_eid, NULL);
				break;
			case SF_OFFLINE:
				/* offline this target */
				target = elem->target;
				tgt_id = sf_alpa_to_switch[target->sft_al_pa];
				/* don't do NDI_DEVI_REMOVE for now */
				if (ndi_devi_offline(elem->dip, 0) !=
				    NDI_SUCCESS) {
					SF_DEBUG(1, (sf, CE_WARN, "target %x, "
					    "device offline failed", tgt_id));
				} else {
					SF_DEBUG(1, (sf, CE_NOTE, "target %x, "
					    "device offline succeeded\n",
					    tgt_id));
				}
				break;
			}
			/* the daemon owns (and frees) each dequeued element */
			kmem_free(elem, sizeof (struct sf_hp_elem));
			mutex_enter(&sf->sf_hp_daemon_mutex);
		}

		/* if exit is not already signaled */
		if (sf->sf_hp_exit == 0) {
			/* wait to be signaled by work or exit */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&sf->sf_hp_daemon_cv, &sf->sf_hp_daemon_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &sf->sf_hp_daemon_mutex);
		}
	} while (sf->sf_hp_exit == 0);

	/* sf_hp_daemon_mutex is dropped by CALLB_CPR_EXIT */
	CALLB_CPR_EXIT(&cprinfo);
	thread_exit();			/* no more hotplug thread */
	/* NOTREACHED */
}
↓ open down ↓ |
7168 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX