Print this page
3373 gcc >= 4.5 concerns about offsetof()
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/avs/ns/rdc/rdc_io.c
+++ new/usr/src/uts/common/avs/ns/rdc/rdc_io.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 #include <sys/types.h>
27 27 #include <sys/ksynch.h>
28 28 #include <sys/cmn_err.h>
29 29 #include <sys/kmem.h>
30 30 #include <sys/conf.h>
31 31 #include <sys/errno.h>
32 32
33 33 #ifdef _SunOS_5_6
34 34 /*
35 35 * on 2.6 both dki_lock.h and rpc/types.h define bool_t so we
36 36 * define enum_t here as it is all we need from rpc/types.h
37 37 * anyway and make it look like we included it. Yuck.
38 38 */
39 39 #define _RPC_TYPES_H
40 40 typedef int enum_t;
41 41 #else
42 42 #ifndef DS_DDICT
43 43 #include <rpc/types.h>
44 44 #endif
45 45 #endif /* _SunOS_5_6 */
46 46
47 47 #include <sys/ddi.h>
48 48
49 49 #include <sys/nsc_thread.h>
50 50 #include <sys/nsctl/nsctl.h>
51 51
52 52 #include <sys/sdt.h> /* dtrace is S10 or later */
53 53
54 54 #include "rdc_io.h"
55 55 #include "rdc_bitmap.h"
56 56 #include "rdc_update.h"
57 57 #include "rdc_ioctl.h"
58 58 #include "rdcsrv.h"
59 59 #include "rdc_diskq.h"
60 60
61 61 #include <sys/unistat/spcs_s.h>
62 62 #include <sys/unistat/spcs_s_k.h>
63 63 #include <sys/unistat/spcs_errors.h>
64 64
/* Module state: ATM_NONE after load, ATM_INIT once configured, ATM_EXIT on teardown. */
volatile int net_exit;
/* Maximum i/o size in FBAs per transfer; computed in _rdc_configure(). */
nsc_size_t MAX_RDC_FBAS;

#ifdef DEBUG
int RDC_MAX_SYNC_THREADS = 8;
int rdc_maxthreads_last = 8;
#endif

kmutex_t rdc_ping_lock;		/* Ping lock */
static kmutex_t net_blk_lock;

/*
 * rdc_conf_lock is used as a global device configuration lock.
 * It is also used by enable/resume and disable/suspend code to ensure that
 * the transition of an rdc set between configured and unconfigured is
 * atomic.
 *
 * krdc->group->lock is used to protect state changes of a configured rdc
 * set (e.g. changes to urdc->flags), such as enabled to disabled and vice
 * versa.
 *
 * rdc_many_lock is also used to protect changes in group membership. A group
 * linked list cannot change while this lock is held. The many list and the
 * multi-hop list are both protected by rdc_many_lock.
 */
kmutex_t rdc_conf_lock;
kmutex_t rdc_many_lock;			/* Many/multi-list lock */

static kmutex_t rdc_net_hnd_id_lock;	/* Network handle id lock */
int rdc_debug = 0;
int rdc_debug_sleep = 0;

static int rdc_net_hnd_id = 1;

extern kmutex_t rdc_clnt_lock;

static void rdc_ditemsfree(rdc_net_dataset_t *);
void rdc_clnt_destroy(void);

/* Per-set state arrays, sized by rdc_max_sets; allocated in _rdc_load(). */
rdc_k_info_t *rdc_k_info;
rdc_u_info_t *rdc_u_info;

unsigned long rdc_async_timeout;	/* set to 120 * HZ in _rdc_configure() */

nsc_size_t rdc_maxthres_queue = RDC_MAXTHRES_QUEUE;
int rdc_max_qitems = RDC_MAX_QITEMS;
int rdc_asyncthr = RDC_ASYNCTHR;
static nsc_svc_t *rdc_volume_update;
static int rdc_prealloc_handle = 1;

extern int _rdc_rsrv_diskq(rdc_group_t *group);
extern void _rdc_rlse_diskq(rdc_group_t *group);

/*
 * Forward declare all statics that are used before defined
 * to enforce parameter checking
 *
 * Some (if not all) of these could be removed if the code were reordered
 */

static void rdc_volume_update_svc(intptr_t);
static void halt_sync(rdc_k_info_t *krdc);
void rdc_kstat_create(int index);
void rdc_kstat_delete(int index);
static int rdc_checkforbitmap(int, nsc_off_t);
static int rdc_installbitmap(int, void *, int, nsc_off_t, int, int *, int);
static rdc_group_t *rdc_newgroup();

int rdc_enable_diskq(rdc_k_info_t *krdc);
void rdc_close_diskq(rdc_group_t *group);
int rdc_suspend_diskq(rdc_k_info_t *krdc);
int rdc_resume_diskq(rdc_k_info_t *krdc);
void rdc_init_diskq_header(rdc_group_t *grp, dqheader *header);
void rdc_fail_diskq(rdc_k_info_t *krdc, int wait, int dolog);
void rdc_unfail_diskq(rdc_k_info_t *krdc);
void rdc_unintercept_diskq(rdc_group_t *grp);
int rdc_stamp_diskq(rdc_k_info_t *krdc, int rsrvd, int flags);
void rdc_qfiller_thr(rdc_k_info_t *krdc);

nstset_t *_rdc_ioset;	/* general i/o threadset ("rdc_thr") */
nstset_t *_rdc_flset;	/* async flush threadset ("rdc_flushthr") */

/*
 * RDC threadset tunables
 */
int rdc_threads = 64;		/* default number of threads */
int rdc_threads_inc = 8;	/* increment for changing the size of the set */

/*
 * Private threadset manipulation variables
 */
static int rdc_threads_hysteresis = 2;
					/* hysteresis for threadset resizing */
static int rdc_sets_active;		/* number of sets currently enabled */

#ifdef DEBUG
kmutex_t rdc_cntlock;
#endif
163 163
164 164 /*
165 165 * rdc_thread_deconfigure - rdc is being deconfigured, stop any
166 166 * thread activity.
167 167 *
168 168 * Inherently single-threaded by the Solaris module unloading code.
169 169 */
170 170 static void
171 171 rdc_thread_deconfigure(void)
172 172 {
173 173 nst_destroy(_rdc_ioset);
174 174 _rdc_ioset = NULL;
175 175
176 176 nst_destroy(_rdc_flset);
177 177 _rdc_flset = NULL;
178 178
179 179 nst_destroy(sync_info.rdc_syncset);
180 180 sync_info.rdc_syncset = NULL;
181 181 }
182 182
183 183 /*
184 184 * rdc_thread_configure - rdc is being configured, initialize the
185 185 * threads we need for flushing aync volumes.
186 186 *
187 187 * Must be called with rdc_conf_lock held.
188 188 */
189 189 static int
190 190 rdc_thread_configure(void)
191 191 {
192 192 ASSERT(MUTEX_HELD(&rdc_conf_lock));
193 193
194 194 if ((_rdc_ioset = nst_init("rdc_thr", rdc_threads)) == NULL)
195 195 return (EINVAL);
196 196
197 197 if ((_rdc_flset = nst_init("rdc_flushthr", 2)) == NULL)
198 198 return (EINVAL);
199 199
200 200 if ((sync_info.rdc_syncset =
201 201 nst_init("rdc_syncthr", RDC_MAX_SYNC_THREADS)) == NULL)
202 202 return (EINVAL);
203 203
204 204 return (0);
205 205 }
206 206
207 207
208 208 /*
209 209 * rdc_thread_tune - called to tune the size of the rdc threadset.
210 210 *
211 211 * Called from the config code when an rdc_set has been enabled or disabled.
212 212 * 'sets' is the increment to the number of active rdc_sets.
213 213 *
214 214 * Must be called with rdc_conf_lock held.
215 215 */
static void
rdc_thread_tune(int sets)
{
	int incr = (sets > 0) ? 1 : -1;	/* direction of the change */
	int change = 0;			/* net thread count delta (debug) */
	int nthreads;

	ASSERT(MUTEX_HELD(&rdc_conf_lock));

	/* Work with the magnitude; 'incr' remembers the sign. */
	if (sets < 0)
		sets = -sets;

	while (sets--) {
		nthreads = nst_nthread(_rdc_ioset);
		rdc_sets_active += incr;

		/*
		 * Grow the set as soon as the active-set count reaches the
		 * thread count; shrink only once we have fallen more than
		 * rdc_threads_hysteresis below the next shrink step, and
		 * never shrink below the rdc_threads baseline.
		 */
		if (rdc_sets_active >= nthreads)
			change += nst_add_thread(_rdc_ioset, rdc_threads_inc);
		else if ((rdc_sets_active <
		    (nthreads - (rdc_threads_inc + rdc_threads_hysteresis))) &&
		    ((nthreads - rdc_threads_inc) >= rdc_threads))
			change -= nst_del_thread(_rdc_ioset, rdc_threads_inc);
	}

#ifdef DEBUG
	if (change) {
		cmn_err(CE_NOTE, "!rdc_thread_tune: "
		    "nsets %d, nthreads %d, nthreads change %d",
		    rdc_sets_active, nst_nthread(_rdc_ioset), change);
	}
#endif
}
248 248
249 249
250 250 /*
251 251 * _rdc_unload() - cache is being unloaded,
252 252 * deallocate any dual copy structures allocated during cache
253 253 * loading.
254 254 */
void
_rdc_unload(void)
{
	int i;
	rdc_k_info_t *krdc;

	/* Stop servicing volume-update callbacks before tearing down. */
	if (rdc_volume_update) {
		(void) nsc_unregister_svc(rdc_volume_update);
		rdc_volume_update = NULL;
	}

	rdc_thread_deconfigure();

	/* Destroy the per-set locks and condition variables. */
	if (rdc_k_info != NULL) {
		for (i = 0; i < rdc_max_sets; i++) {
			krdc = &rdc_k_info[i];
			mutex_destroy(&krdc->dc_sleep);
			mutex_destroy(&krdc->bmapmutex);
			mutex_destroy(&krdc->kstat_mutex);
			mutex_destroy(&krdc->bmp_kstat_mutex);
			mutex_destroy(&krdc->syncbitmutex);
			cv_destroy(&krdc->busycv);
			cv_destroy(&krdc->closingcv);
			cv_destroy(&krdc->haltcv);
			cv_destroy(&krdc->synccv);
		}
	}

	/* Destroy the global locks created in _rdc_load(). */
	mutex_destroy(&sync_info.lock);
	mutex_destroy(&rdc_ping_lock);
	mutex_destroy(&net_blk_lock);
	mutex_destroy(&rdc_conf_lock);
	mutex_destroy(&rdc_many_lock);
	mutex_destroy(&rdc_net_hnd_id_lock);
	mutex_destroy(&rdc_clnt_lock);
#ifdef DEBUG
	mutex_destroy(&rdc_cntlock);
#endif
	net_exit = ATM_EXIT;

	/* Release the set arrays and mark the module unconfigured. */
	if (rdc_k_info != NULL)
		kmem_free(rdc_k_info, sizeof (*rdc_k_info) * rdc_max_sets);
	if (rdc_u_info != NULL)
		kmem_free(rdc_u_info, sizeof (*rdc_u_info) * rdc_max_sets);
	rdc_k_info = NULL;
	rdc_u_info = NULL;
	rdc_max_sets = 0;
}
303 303
304 304
305 305 /*
306 306 * _rdc_load() - rdc is being loaded, Allocate anything
307 307 * that will be needed while the cache is loaded but doesn't really
308 308 * depend on configuration parameters.
309 309 *
310 310 */
311 311 int
312 312 _rdc_load(void)
313 313 {
314 314 int i;
315 315 rdc_k_info_t *krdc;
316 316
317 317 mutex_init(&rdc_ping_lock, NULL, MUTEX_DRIVER, NULL);
318 318 mutex_init(&net_blk_lock, NULL, MUTEX_DRIVER, NULL);
319 319 mutex_init(&rdc_conf_lock, NULL, MUTEX_DRIVER, NULL);
320 320 mutex_init(&rdc_many_lock, NULL, MUTEX_DRIVER, NULL);
321 321 mutex_init(&rdc_net_hnd_id_lock, NULL, MUTEX_DRIVER, NULL);
322 322 mutex_init(&rdc_clnt_lock, NULL, MUTEX_DRIVER, NULL);
323 323 mutex_init(&sync_info.lock, NULL, MUTEX_DRIVER, NULL);
324 324
325 325 #ifdef DEBUG
326 326 mutex_init(&rdc_cntlock, NULL, MUTEX_DRIVER, NULL);
327 327 #endif
328 328
329 329 if ((i = nsc_max_devices()) < rdc_max_sets)
330 330 rdc_max_sets = i;
331 331 /* following case for partial installs that may fail */
332 332 if (!rdc_max_sets)
333 333 rdc_max_sets = 1024;
334 334
335 335 rdc_k_info = kmem_zalloc(sizeof (*rdc_k_info) * rdc_max_sets, KM_SLEEP);
336 336 if (!rdc_k_info)
337 337 return (ENOMEM);
338 338
339 339 rdc_u_info = kmem_zalloc(sizeof (*rdc_u_info) * rdc_max_sets, KM_SLEEP);
340 340 if (!rdc_u_info) {
341 341 kmem_free(rdc_k_info, sizeof (*rdc_k_info) * rdc_max_sets);
342 342 return (ENOMEM);
343 343 }
344 344
345 345 net_exit = ATM_NONE;
346 346 for (i = 0; i < rdc_max_sets; i++) {
347 347 krdc = &rdc_k_info[i];
348 348 bzero(krdc, sizeof (*krdc));
349 349 krdc->index = i;
350 350 mutex_init(&krdc->dc_sleep, NULL, MUTEX_DRIVER, NULL);
351 351 mutex_init(&krdc->bmapmutex, NULL, MUTEX_DRIVER, NULL);
352 352 mutex_init(&krdc->kstat_mutex, NULL, MUTEX_DRIVER, NULL);
353 353 mutex_init(&krdc->bmp_kstat_mutex, NULL, MUTEX_DRIVER, NULL);
354 354 mutex_init(&krdc->syncbitmutex, NULL, MUTEX_DRIVER, NULL);
355 355 cv_init(&krdc->busycv, NULL, CV_DRIVER, NULL);
356 356 cv_init(&krdc->closingcv, NULL, CV_DRIVER, NULL);
357 357 cv_init(&krdc->haltcv, NULL, CV_DRIVER, NULL);
358 358 cv_init(&krdc->synccv, NULL, CV_DRIVER, NULL);
359 359 }
360 360
361 361 rdc_volume_update = nsc_register_svc("RDCVolumeUpdated",
362 362 rdc_volume_update_svc);
363 363
364 364 return (0);
365 365 }
366 366
367 367 static void
368 368 rdc_u_init(rdc_u_info_t *urdc)
369 369 {
370 370 const int index = (int)(urdc - &rdc_u_info[0]);
371 371
372 372 if (urdc->secondary.addr.maxlen)
373 373 free_rdc_netbuf(&urdc->secondary.addr);
374 374 if (urdc->primary.addr.maxlen)
375 375 free_rdc_netbuf(&urdc->primary.addr);
376 376
377 377 bzero(urdc, sizeof (rdc_u_info_t));
378 378
379 379 urdc->index = index;
380 380 urdc->maxqfbas = rdc_maxthres_queue;
381 381 urdc->maxqitems = rdc_max_qitems;
382 382 urdc->asyncthr = rdc_asyncthr;
383 383 }
384 384
385 385 /*
386 386 * _rdc_configure() - cache is being configured.
387 387 *
388 388 * Initialize dual copy structures
389 389 */
390 390 int
391 391 _rdc_configure(void)
392 392 {
393 393 int index;
394 394 rdc_k_info_t *krdc;
395 395
396 396 for (index = 0; index < rdc_max_sets; index++) {
397 397 krdc = &rdc_k_info[index];
398 398
399 399 krdc->remote_index = -1;
400 400 krdc->dcio_bitmap = NULL;
401 401 krdc->bitmap_ref = NULL;
402 402 krdc->bitmap_size = 0;
403 403 krdc->bitmap_write = 0;
404 404 krdc->disk_status = 0;
405 405 krdc->many_next = krdc;
406 406
407 407 rdc_u_init(&rdc_u_info[index]);
408 408 }
409 409
410 410 rdc_async_timeout = 120 * HZ; /* Seconds * HZ */
411 411 MAX_RDC_FBAS = FBA_LEN(RDC_MAXDATA);
412 412 if (net_exit != ATM_INIT) {
413 413 net_exit = ATM_INIT;
414 414 return (0);
415 415 }
416 416 return (0);
417 417 }
418 418
419 419 /*
420 420 * _rdc_deconfigure - rdc is being deconfigured, shut down any
421 421 * dual copy operations and return to an unconfigured state.
422 422 */
void
_rdc_deconfigure(void)
{
	rdc_k_info_t *krdc;
	rdc_u_info_t *urdc;
	int index;

	/* Return every set entry to its unconfigured state. */
	for (index = 0; index < rdc_max_sets; index++) {
		krdc = &rdc_k_info[index];
		urdc = &rdc_u_info[index];

		krdc->remote_index = -1;
		krdc->dcio_bitmap = NULL;
		krdc->bitmap_ref = NULL;
		krdc->bitmap_size = 0;
		krdc->bitmap_write = 0;
		krdc->disk_status = 0;
		krdc->many_next = krdc;

		/* Free any network address buffers still attached. */
		if (urdc->primary.addr.maxlen)
			free_rdc_netbuf(&(urdc->primary.addr));

		if (urdc->secondary.addr.maxlen)
			free_rdc_netbuf(&(urdc->secondary.addr));

		bzero(urdc, sizeof (rdc_u_info_t));
		urdc->index = index;
	}
	net_exit = ATM_EXIT;
	rdc_clnt_destroy();

}
455 455
456 456
457 457 /*
458 458 * Lock primitives, containing checks that lock ordering isn't broken
459 459 */
/*ARGSUSED*/
void
rdc_many_enter(rdc_k_info_t *krdc)
{
	/* Lock order: bmapmutex must never be held before rdc_many_lock. */
	ASSERT(!MUTEX_HELD(&krdc->bmapmutex));

	mutex_enter(&rdc_many_lock);
}
468 468
/* ARGSUSED */
void
rdc_many_exit(rdc_k_info_t *krdc)
{
	/* Release the many/multi-hop list lock taken by rdc_many_enter(). */
	mutex_exit(&rdc_many_lock);
}
475 475
void
rdc_group_enter(rdc_k_info_t *krdc)
{
	/* The group lock ranks below all of these; enforce the hierarchy. */
	ASSERT(!MUTEX_HELD(&rdc_many_lock));
	ASSERT(!MUTEX_HELD(&rdc_conf_lock));
	ASSERT(!MUTEX_HELD(&krdc->bmapmutex));

	mutex_enter(&krdc->group->lock);
}
485 485
void
rdc_group_exit(rdc_k_info_t *krdc)
{
	/* Release the per-group state lock taken by rdc_group_enter(). */
	mutex_exit(&krdc->group->lock);
}
491 491
492 492 /*
493 493 * Suspend and disable operations use this function to wait until it is safe
494 494 * to do continue, without trashing data structures used by other ioctls.
495 495 */
static void
wait_busy(rdc_k_info_t *krdc)
{
	ASSERT(MUTEX_HELD(&rdc_conf_lock));

	/* Sleep until every hold placed by set_busy() has been released. */
	while (krdc->busy_count > 0)
		cv_wait(&krdc->busycv, &rdc_conf_lock);
}
504 504
505 505
506 506 /*
507 507 * Other ioctls use this function to hold off disable and suspend.
508 508 */
void
set_busy(rdc_k_info_t *krdc)
{
	ASSERT(MUTEX_HELD(&rdc_conf_lock));

	/* Wait for earlier holders to drain, then place our own hold. */
	wait_busy(krdc);

	krdc->busy_count++;
}
518 518
519 519
520 520 /*
521 521 * Other ioctls use this function to allow disable and suspend to continue.
522 522 */
523 523 void
524 524 wakeup_busy(rdc_k_info_t *krdc)
525 525 {
526 526 ASSERT(MUTEX_HELD(&rdc_conf_lock));
527 527
528 528 if (krdc->busy_count <= 0)
529 529 return;
530 530
531 531 krdc->busy_count--;
532 532 cv_broadcast(&krdc->busycv);
533 533 }
534 534
535 535
536 536 /*
537 537 * Remove the rdc set from its group, and destroy the group if no longer in
538 538 * use.
539 539 */
static void
remove_from_group(rdc_k_info_t *krdc)
{
	rdc_k_info_t *p;
	rdc_group_t *group;

	ASSERT(MUTEX_HELD(&rdc_conf_lock));

	rdc_many_enter(krdc);
	group = krdc->group;

	group->count--;

	/*
	 * lock queue while looking at thrnum
	 */
	mutex_enter(&group->ra_queue.net_qlock);
	if ((group->rdc_thrnum == 0) && (group->count == 0)) {

		/*
		 * Assure the we've stopped and the flusher thread has not
		 * fallen back to sleep
		 */
		if (krdc->group->ra_queue.qfill_sleeping != RDC_QFILL_DEAD) {
			group->ra_queue.qfflags |= RDC_QFILLSTOP;
			while (krdc->group->ra_queue.qfflags & RDC_QFILLSTOP) {
				if (krdc->group->ra_queue.qfill_sleeping ==
				    RDC_QFILL_ASLEEP)
					cv_broadcast(&group->ra_queue.qfcv);
				/* Drop the lock so the filler can exit. */
				mutex_exit(&group->ra_queue.net_qlock);
				delay(2);
				mutex_enter(&group->ra_queue.net_qlock);
			}
		}
		mutex_exit(&group->ra_queue.net_qlock);

		/* Last member and no worker threads: destroy the group. */
		mutex_enter(&group->diskqmutex);
		rdc_close_diskq(group);
		mutex_exit(&group->diskqmutex);
		rdc_delgroup(group);
		rdc_many_exit(krdc);
		krdc->group = NULL;
		return;
	}
	mutex_exit(&group->ra_queue.net_qlock);
	/*
	 * Always clear the group field.
	 * no, you need it set in rdc_flush_memq().
	 * to call rdc_group_log()
	 * krdc->group = NULL;
	 */

	/* Take this rdc structure off the group list */

	for (p = krdc->group_next; p->group_next != krdc; p = p->group_next)
		;
	p->group_next = krdc->group_next;

	rdc_many_exit(krdc);
}
600 600
601 601
602 602 /*
603 603 * Add the rdc set to its group, setting up a new group if it's the first one.
604 604 */
static int
add_to_group(rdc_k_info_t *krdc, int options, int cmd)
{
	rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
	rdc_u_info_t *utmp;
	rdc_k_info_t *ktmp;
	int index;
	rdc_group_t *group;
	int rc = 0;
	nsthread_t *trc;

	ASSERT(MUTEX_HELD(&rdc_conf_lock));

	/*
	 * Look for matching group name, primary host name and secondary
	 * host name.
	 */

	rdc_many_enter(krdc);
	for (index = 0; index < rdc_max_sets; index++) {
		utmp = &rdc_u_info[index];
		ktmp = &rdc_k_info[index];

		/* An empty group name means this set always forms its own. */
		if (urdc->group_name[0] == 0)
			break;

		if (!IS_CONFIGURED(ktmp))
			continue;

		if (strncmp(utmp->group_name, urdc->group_name,
		    NSC_MAXPATH) != 0)
			continue;
		if (strncmp(utmp->primary.intf, urdc->primary.intf,
		    MAX_RDC_HOST_SIZE) != 0) {
			/* Same group name, different primary interface */
			rdc_many_exit(krdc);
			return (-1);
		}
		if (strncmp(utmp->secondary.intf, urdc->secondary.intf,
		    MAX_RDC_HOST_SIZE) != 0) {
			/* Same group name, different secondary interface */
			rdc_many_exit(krdc);
			return (-1);
		}

		/* Group already exists, so add this set to the group */

		if (((options & RDC_OPT_ASYNC) == 0) &&
		    ((ktmp->type_flag & RDC_ASYNCMODE) != 0)) {
			/* Must be same mode as existing group members */
			rdc_many_exit(krdc);
			return (-1);
		}
		if (((options & RDC_OPT_ASYNC) != 0) &&
		    ((ktmp->type_flag & RDC_ASYNCMODE) == 0)) {
			/* Must be same mode as existing group members */
			rdc_many_exit(krdc);
			return (-1);
		}

		/* cannot reconfigure existing group into new queue this way */
		if ((cmd != RDC_CMD_RESUME) &&
		    !RDC_IS_DISKQ(ktmp->group) && urdc->disk_queue[0] != '\0') {
			rdc_many_exit(krdc);
			return (RDC_EQNOADD);
		}

		/* Link this set into the existing group's circular list. */
		ktmp->group->count++;
		krdc->group = ktmp->group;
		krdc->group_next = ktmp->group_next;
		ktmp->group_next = krdc;

		urdc->autosync = utmp->autosync;	/* Same as rest */

		(void) strncpy(urdc->disk_queue, utmp->disk_queue, NSC_MAXPATH);

		rdc_many_exit(krdc);
		return (0);
	}

	/* This must be a new group */
	group = rdc_newgroup();
	krdc->group = group;
	krdc->group_next = krdc;
	urdc->autosync = -1;	/* Unknown */

	/*
	 * Tune the thread set by one for each thread created
	 */
	rdc_thread_tune(1);

	trc = nst_create(_rdc_ioset, rdc_qfiller_thr, (void *)krdc, NST_SLEEP);
	if (trc == NULL) {
		rc = -1;
		cmn_err(CE_NOTE, "!unable to create queue filler daemon");
		goto fail;
	}

	if (urdc->disk_queue[0] == '\0') {
		krdc->group->flags |= RDC_MEMQUE;
	} else {
		krdc->group->flags |= RDC_DISKQUE;

		/* XXX check here for resume or enable and act accordingly */

		if (cmd == RDC_CMD_RESUME) {
			rc = rdc_resume_diskq(krdc);

		} else if (cmd == RDC_CMD_ENABLE) {
			rc = rdc_enable_diskq(krdc);
			/*
			 * NOTE(review): the (cmd != RDC_CMD_ENABLE) test
			 * below can never be true inside this branch, so
			 * the fall-back to a memory queue is dead code.
			 * Presumably (cmd == RDC_CMD_ENABLE) was intended;
			 * confirm before changing behavior.
			 */
			if ((rc == RDC_EQNOADD) && (cmd != RDC_CMD_ENABLE)) {
				cmn_err(CE_WARN, "!disk queue %s enable failed,"
				    " enabling memory queue",
				    urdc->disk_queue);
				krdc->group->flags &= ~RDC_DISKQUE;
				krdc->group->flags |= RDC_MEMQUE;
				bzero(urdc->disk_queue, NSC_MAXPATH);
			}
		}
	}
fail:
	rdc_many_exit(krdc);
	return (rc);
}
729 729
730 730
731 731 /*
732 732 * Move the set to a new group if possible
733 733 */
static int
change_group(rdc_k_info_t *krdc, int options)
{
	rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
	rdc_u_info_t *utmp;
	rdc_k_info_t *ktmp;
	rdc_k_info_t *next;
	char tmpq[NSC_MAXPATH];
	int index;
	int rc = -1;
	rdc_group_t *group, *old_group;
	nsthread_t *trc;

	ASSERT(MUTEX_HELD(&rdc_conf_lock));

	/*
	 * Look for matching group name, primary host name and secondary
	 * host name.
	 */

	bzero(&tmpq, sizeof (tmpq));
	rdc_many_enter(krdc);

	old_group = krdc->group;
	next = krdc->group_next;

	if (RDC_IS_DISKQ(old_group)) { /* can't keep your own queue */
		/* Stash the queue name so it can be restored on failure. */
		(void) strncpy(tmpq, urdc->disk_queue, NSC_MAXPATH);
		bzero(urdc->disk_queue, sizeof (urdc->disk_queue));
	}
	for (index = 0; index < rdc_max_sets; index++) {
		utmp = &rdc_u_info[index];
		ktmp = &rdc_k_info[index];

		if (ktmp == krdc)
			continue;

		if (urdc->group_name[0] == 0)
			break;

		if (!IS_CONFIGURED(ktmp))
			continue;

		if (strncmp(utmp->group_name, urdc->group_name,
		    NSC_MAXPATH) != 0)
			continue;
		if (strncmp(utmp->primary.intf, urdc->primary.intf,
		    MAX_RDC_HOST_SIZE) != 0)
			goto bad;
		if (strncmp(utmp->secondary.intf, urdc->secondary.intf,
		    MAX_RDC_HOST_SIZE) != 0)
			goto bad;

		/* Group already exists, so add this set to the group */

		if (((options & RDC_OPT_ASYNC) == 0) &&
		    ((ktmp->type_flag & RDC_ASYNCMODE) != 0)) {
			/* Must be same mode as existing group members */
			goto bad;
		}
		if (((options & RDC_OPT_ASYNC) != 0) &&
		    ((ktmp->type_flag & RDC_ASYNCMODE) == 0)) {
			/* Must be same mode as existing group members */
			goto bad;
		}

		/* Link into the target group and inherit its queue name. */
		ktmp->group->count++;
		krdc->group = ktmp->group;
		krdc->group_next = ktmp->group_next;
		ktmp->group_next = krdc;
		bzero(urdc->disk_queue, sizeof (urdc->disk_queue));
		(void) strncpy(urdc->disk_queue, utmp->disk_queue, NSC_MAXPATH);

		goto good;
	}

	/* This must be a new group */
	group = rdc_newgroup();
	krdc->group = group;
	krdc->group_next = krdc;

	trc = nst_create(_rdc_ioset, rdc_qfiller_thr, (void *)krdc, NST_SLEEP);
	if (trc == NULL) {
		rc = -1;
		cmn_err(CE_NOTE, "!unable to create queue filler daemon");
		goto bad;
	}

	if (urdc->disk_queue[0] == 0) {
		krdc->group->flags |= RDC_MEMQUE;
	} else {
		krdc->group->flags |= RDC_DISKQUE;
		if ((rc = rdc_enable_diskq(krdc)) < 0)
			goto bad;
	}
good:
	/* Record the (possibly changed) replication mode on the set. */
	if (options & RDC_OPT_ASYNC) {
		krdc->type_flag |= RDC_ASYNCMODE;
		rdc_set_flags(urdc, RDC_ASYNC);
	} else {
		krdc->type_flag &= ~RDC_ASYNCMODE;
		rdc_clr_flags(urdc, RDC_ASYNC);
	}

	old_group->count--;
	if (!old_group->rdc_writer && old_group->count == 0) {
		/* Group now empty, so destroy */
		if (RDC_IS_DISKQ(old_group)) {
			rdc_unintercept_diskq(old_group);
			mutex_enter(&old_group->diskqmutex);
			rdc_close_diskq(old_group);
			mutex_exit(&old_group->diskqmutex);
		}

		mutex_enter(&old_group->ra_queue.net_qlock);

		/*
		 * Assure the we've stopped and the flusher thread has not
		 * fallen back to sleep
		 */
		if (old_group->ra_queue.qfill_sleeping != RDC_QFILL_DEAD) {
			old_group->ra_queue.qfflags |= RDC_QFILLSTOP;
			while (old_group->ra_queue.qfflags & RDC_QFILLSTOP) {
				if (old_group->ra_queue.qfill_sleeping ==
				    RDC_QFILL_ASLEEP)
					cv_broadcast(&old_group->ra_queue.qfcv);
				mutex_exit(&old_group->ra_queue.net_qlock);
				delay(2);
				mutex_enter(&old_group->ra_queue.net_qlock);
			}
		}
		mutex_exit(&old_group->ra_queue.net_qlock);

		rdc_delgroup(old_group);
		rdc_many_exit(krdc);
		return (0);
	}

	/* Take this rdc structure off the old group list */

	for (ktmp = next; ktmp->group_next != krdc; ktmp = ktmp->group_next)
		;
	ktmp->group_next = next;

	rdc_many_exit(krdc);
	return (0);

bad:
	/* Leave existing group status alone */
	(void) strncpy(urdc->disk_queue, tmpq, NSC_MAXPATH);
	rdc_many_exit(krdc);
	return (rc);
}
887 887
888 888
889 889 /*
890 890 * Set flags for an rdc set, setting the group flags as necessary.
891 891 */
void
rdc_set_flags(rdc_u_info_t *urdc, int flags)
{
	rdc_k_info_t *krdc = &rdc_k_info[urdc->index];
	int vflags, sflags, bflags, ssflags;

	DTRACE_PROBE2(rdc_set_flags, int, krdc->index, int, flags);
	/* Split the request into the independently-locked flag classes. */
	vflags = flags & RDC_VFLAGS;
	sflags = flags & RDC_SFLAGS;
	bflags = flags & RDC_BFLAGS;
	ssflags = flags & RDC_SYNC_STATE_FLAGS;

	if (vflags) {
		/* normal volume flags */
		ASSERT(MUTEX_HELD(&rdc_conf_lock) ||
		    MUTEX_HELD(&krdc->group->lock));
		/* Sync-state transitions additionally take the bitmap lock. */
		if (ssflags)
			mutex_enter(&krdc->bmapmutex);

		urdc->flags |= vflags;

		if (ssflags)
			mutex_exit(&krdc->bmapmutex);
	}

	if (sflags) {
		/* Sync state flags that are protected by a different lock */
		ASSERT(MUTEX_HELD(&rdc_many_lock));
		urdc->sync_flags |= sflags;
	}

	if (bflags) {
		/* Bmap state flags that are protected by a different lock */
		ASSERT(MUTEX_HELD(&krdc->bmapmutex));
		urdc->bmap_flags |= bflags;
	}

}
930 930
931 931
932 932 /*
933 933 * Clear flags for an rdc set, clearing the group flags as necessary.
934 934 */
void
rdc_clr_flags(rdc_u_info_t *urdc, int flags)
{
	rdc_k_info_t *krdc = &rdc_k_info[urdc->index];
	int vflags, sflags, bflags;

	DTRACE_PROBE2(rdc_clr_flags, int, krdc->index, int, flags);
	/*
	 * NOTE(review): unlike rdc_set_flags(), RDC_SYNC_STATE_FLAGS is
	 * not split out here, so clears never take bmapmutex around the
	 * urdc->flags update — presumably intentional; confirm against
	 * the locking protocol.
	 */
	vflags = flags & RDC_VFLAGS;
	sflags = flags & RDC_SFLAGS;
	bflags = flags & RDC_BFLAGS;

	if (vflags) {
		/* normal volume flags */
		ASSERT(MUTEX_HELD(&rdc_conf_lock) ||
		    MUTEX_HELD(&krdc->group->lock));
		urdc->flags &= ~vflags;

	}

	if (sflags) {
		/* Sync state flags that are protected by a different lock */
		ASSERT(MUTEX_HELD(&rdc_many_lock));
		urdc->sync_flags &= ~sflags;
	}

	if (bflags) {
		/* Bmap state flags that are protected by a different lock */
		ASSERT(MUTEX_HELD(&krdc->bmapmutex));
		urdc->bmap_flags &= ~bflags;
	}
}
966 966
967 967
968 968 /*
969 969 * Get the flags for an rdc set.
970 970 */
971 971 int
972 972 rdc_get_vflags(rdc_u_info_t *urdc)
973 973 {
974 974 return (urdc->flags | urdc->sync_flags | urdc->bmap_flags);
975 975 }
976 976
977 977
978 978 /*
979 979 * Initialise flags for an rdc set.
980 980 */
981 981 static void
982 982 rdc_init_flags(rdc_u_info_t *urdc)
983 983 {
984 984 urdc->flags = 0;
985 985 urdc->mflags = 0;
986 986 urdc->sync_flags = 0;
987 987 urdc->bmap_flags = 0;
988 988 }
989 989
990 990
991 991 /*
992 992 * Set flags for a many group.
993 993 */
void
rdc_set_mflags(rdc_u_info_t *urdc, int flags)
{
	rdc_k_info_t *krdc = &rdc_k_info[urdc->index];
	rdc_k_info_t *this = krdc;

	ASSERT(!(flags & ~RDC_MFLAGS));

	if (flags == 0)
		return;

	ASSERT(MUTEX_HELD(&rdc_many_lock));

	rdc_set_flags(urdc, flags);	/* set flags on local urdc */

	/* Propagate the bits to every enabled set on the many chain. */
	urdc->mflags |= flags;
	for (krdc = krdc->many_next; krdc != this; krdc = krdc->many_next) {
		urdc = &rdc_u_info[krdc->index];
		if (!IS_ENABLED(urdc))
			continue;
		urdc->mflags |= flags;
	}
}
1017 1017
1018 1018
1019 1019 /*
1020 1020 * Clear flags for a many group.
1021 1021 */
void
rdc_clr_mflags(rdc_u_info_t *urdc, int flags)
{
	rdc_k_info_t *krdc = &rdc_k_info[urdc->index];
	rdc_k_info_t *this = krdc;
	rdc_u_info_t *utmp;

	ASSERT(!(flags & ~RDC_MFLAGS));

	if (flags == 0)
		return;

	ASSERT(MUTEX_HELD(&rdc_many_lock));

	rdc_clr_flags(urdc, flags);	/* clear flags on local urdc */

	/*
	 * We must maintain the mflags based on the set of flags for
	 * all the urdc's that are chained up.
	 */

	/*
	 * First look through all the urdc's and remove bits from
	 * the 'flags' variable that are in use elsewhere.
	 */

	for (krdc = krdc->many_next; krdc != this; krdc = krdc->many_next) {
		utmp = &rdc_u_info[krdc->index];
		if (!IS_ENABLED(utmp))
			continue;
		flags &= ~(rdc_get_vflags(utmp) & RDC_MFLAGS);
		/* Every requested bit is still set somewhere: nothing to do. */
		if (flags == 0)
			break;
	}

	/*
	 * Now clear flags as necessary.
	 */

	if (flags != 0) {
		urdc->mflags &= ~flags;
		for (krdc = krdc->many_next; krdc != this;
		    krdc = krdc->many_next) {
			utmp = &rdc_u_info[krdc->index];
			if (!IS_ENABLED(utmp))
				continue;
			utmp->mflags &= ~flags;
		}
	}
}
1072 1072
1073 1073
/*
 * Return the current many-group flags (mflags) for this set.
 */
int
rdc_get_mflags(rdc_u_info_t *urdc)
{
	return (urdc->mflags);
}
1079 1079
1080 1080
/*
 * Set flags on a set and log a console message explaining why.
 *
 * Wrapper around rdc_set_flags() that also emits a CE_NOTE message for
 * each of the interesting transitions present in 'flags' (entering
 * logging mode, data volume failure, bitmap failure).  Passing a NULL
 * 'why' suppresses all console output.
 */
void
rdc_set_flags_log(rdc_u_info_t *urdc, int flags, char *why)
{
	DTRACE_PROBE2(rdc_set_flags_log, int, urdc->index, int, flags);

	rdc_set_flags(urdc, flags);

	if (why == NULL)
		return;

	if (flags & RDC_LOGGING)
		cmn_err(CE_NOTE, "!sndr: %s:%s entered logging mode: %s",
		    urdc->secondary.intf, urdc->secondary.file, why);
	if (flags & RDC_VOL_FAILED)
		cmn_err(CE_NOTE, "!sndr: %s:%s volume failed: %s",
		    urdc->secondary.intf, urdc->secondary.file, why);
	if (flags & RDC_BMP_FAILED)
		cmn_err(CE_NOTE, "!sndr: %s:%s bitmap failed: %s",
		    urdc->secondary.intf, urdc->secondary.file, why);
}
1101 1101 /*
1102 1102 * rdc_lor(source, dest, len)
1103 1103 * logically OR memory pointed to by source and dest, copying result into dest.
1104 1104 */
1105 1105 void
1106 1106 rdc_lor(const uchar_t *source, uchar_t *dest, int len)
1107 1107 {
1108 1108 int i;
1109 1109
1110 1110 if (source == NULL)
1111 1111 return;
1112 1112
1113 1113 for (i = 0; i < len; i++)
1114 1114 *dest++ |= *source++;
1115 1115 }
1116 1116
1117 1117
/*
 * Verify that the remote volume is at least as large as the local one.
 *
 * Fetches the remote volume size over the network and compares it
 * against urdc->volume_size.  If the size cannot be obtained, or the
 * remote volume is smaller, an error is added to 'kstatus', the set is
 * dropped into logging mode (CCIO_ENABLELOG) and the error code is
 * returned.  Returns 0 when the sizes are compatible.
 */
static int
check_filesize(int index, spcs_s_info_t kstatus)
{
	uint64_t remote_size;
	char tmp1[16], tmp2[16];
	rdc_u_info_t *urdc = &rdc_u_info[index];
	int status;

	status = rdc_net_getsize(index, &remote_size);
	if (status) {
		(void) spcs_s_inttostring(status, tmp1, sizeof (tmp1), 0);
		spcs_s_add(kstatus, RDC_EGETSIZE, urdc->secondary.intf,
		    urdc->secondary.file, tmp1);
		(void) rdc_net_state(index, CCIO_ENABLELOG);
		return (RDC_EGETSIZE);
	}
	if (remote_size < (unsigned long long)urdc->volume_size) {
		(void) spcs_s_inttostring(
		    urdc->volume_size, tmp1, sizeof (tmp1), 0);
		/*
		 * Cheat, and convert to int, until we have
		 * spcs_s_unsignedlonginttostring().
		 */
		status = (int)remote_size;
		(void) spcs_s_inttostring(status, tmp2, sizeof (tmp2), 0);
		spcs_s_add(kstatus, RDC_ESIZE, urdc->primary.intf,
		    urdc->primary.file, tmp1, urdc->secondary.intf,
		    urdc->secondary.file, tmp2);
		(void) rdc_net_state(index, CCIO_ENABLELOG);
		return (RDC_ESIZE);
	}
	return (0);
}
1151 1151
1152 1152
/*
 * Service an Instant Image (II) volume update request.
 *
 * 'arg' is really an rdc_update_t describing the volume, the caller's
 * bitmap, and the requested protocol.  Unless every enabled set in the
 * 1-to-many group is in logging mode the request is denied; otherwise
 * the caller's bitmap is ORed into the set's bitmap so the blocks that
 * II changed will be resynchronized later, and any pending
 * SYNC/RSYNC-needed state is cleared.
 */
static void
rdc_volume_update_svc(intptr_t arg)
{
	rdc_update_t *update = (rdc_update_t *)arg;
	rdc_k_info_t *krdc;
	rdc_k_info_t *this;
	rdc_u_info_t *urdc;
	struct net_bdata6 bd;
	int index;
	int rc;

#ifdef DEBUG_IIUPDATE
	cmn_err(CE_NOTE, "!SNDR received update request for %s",
	    update->volume);
#endif

	if ((update->protocol != RDC_SVC_ONRETURN) &&
	    (update->protocol != RDC_SVC_VOL_ENABLED)) {
		/* don't understand what the client intends to do */
		update->denied = 1;
		spcs_s_add(update->status, RDC_EVERSION);
		return;
	}

	/* silently ignore volumes SNDR does not manage */
	index = rdc_lookup_enabled(update->volume, 0);
	if (index < 0)
		return;

	/*
	 * warn II that this volume is in use by sndr so
	 * II can validate the sizes of the master vs shadow
	 * and avoid trouble later down the line with
	 * size mis-matches between urdc->volume_size and
	 * what is returned from nsc_partsize() which may
	 * be the size of the master when replicating the shadow
	 */
	if (update->protocol == RDC_SVC_VOL_ENABLED) {
		/*
		 * NOTE(review): index >= 0 always holds here (the < 0
		 * case returned above), so this always denies.
		 */
		if (index >= 0)
			update->denied = 1;
		return;
	}

	krdc = &rdc_k_info[index];
	urdc = &rdc_u_info[index];
	this = krdc;	/* remember where the chain walk started */

	/* every enabled leg of a 1-to-many group must be logging */
	do {
		if (!(rdc_get_vflags(urdc) & RDC_LOGGING)) {
#ifdef DEBUG_IIUPDATE
			cmn_err(CE_NOTE, "!SNDR refused update request for %s",
			    update->volume);
#endif
		update->denied = 1;
			spcs_s_add(update->status, RDC_EMIRRORUP);
			return;
		}
		/* 1->many - all must be logging */
		if (IS_MANY(krdc) && IS_STATE(urdc, RDC_PRIMARY)) {
			rdc_many_enter(krdc);
			/* advance to the next enabled leg, if any */
			for (krdc = krdc->many_next; krdc != this;
			    krdc = krdc->many_next) {
				urdc = &rdc_u_info[krdc->index];
				if (!IS_ENABLED(urdc))
					continue;
				break;
			}
			rdc_many_exit(krdc);
		}
	} while (krdc != this);

#ifdef DEBUG_IIUPDATE
	cmn_err(CE_NOTE, "!SNDR allowed update request for %s", update->volume);
#endif
	urdc = &rdc_u_info[krdc->index];
	/* OR the caller's bitmap into each enabled leg's bitmap */
	do {

		bd.size = min(krdc->bitmap_size, (nsc_size_t)update->size);
		bd.data.data_val = (char *)update->bitmap;
		bd.offset = 0;
		bd.cd = index;

		if ((rc = RDC_OR_BITMAP(&bd)) != 0) {
			update->denied = 1;
			spcs_s_add(update->status, rc);
			return;
		}
		urdc = &rdc_u_info[index];
		urdc->bits_set = RDC_COUNT_BITMAP(krdc);
		if (IS_MANY(krdc) && IS_STATE(urdc, RDC_PRIMARY)) {
			rdc_many_enter(krdc);
			for (krdc = krdc->many_next; krdc != this;
			    krdc = krdc->many_next) {
				index = krdc->index;
				/*
				 * NOTE(review): this tests urdc, which is
				 * not updated inside this loop -- the
				 * per-leg info would be
				 * rdc_u_info[krdc->index]; confirm intent.
				 */
				if (!IS_ENABLED(urdc))
					continue;
				break;
			}
			rdc_many_exit(krdc);
		}
	} while (krdc != this);


	/* II (or something else) has updated us, so no need for a sync */
	if (rdc_get_vflags(urdc) & (RDC_SYNC_NEEDED | RDC_RSYNC_NEEDED)) {
		rdc_many_enter(krdc);
		rdc_clr_flags(urdc, RDC_SYNC_NEEDED | RDC_RSYNC_NEEDED);
		rdc_many_exit(krdc);
	}

	/* persist the merged bitmap if this set writes its bitmap */
	if (krdc->bitmap_write > 0)
		(void) rdc_write_bitmap(krdc);
}
1265 1265
1266 1266
1267 1267 /*
1268 1268 * rdc_check()
1269 1269 *
1270 1270 * Return 0 if the set is configured, enabled and the supplied
1271 1271 * addressing information matches the in-kernel config, otherwise
1272 1272 * return 1.
1273 1273 */
1274 1274 static int
1275 1275 rdc_check(rdc_k_info_t *krdc, rdc_set_t *rdc_set)
1276 1276 {
1277 1277 rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
1278 1278
1279 1279 ASSERT(MUTEX_HELD(&krdc->group->lock));
1280 1280
1281 1281 if (!IS_ENABLED(urdc))
1282 1282 return (1);
1283 1283
1284 1284 if (strncmp(urdc->primary.file, rdc_set->primary.file,
1285 1285 NSC_MAXPATH) != 0) {
1286 1286 #ifdef DEBUG
1287 1287 cmn_err(CE_WARN, "!rdc_check: primary file mismatch %s vs %s",
1288 1288 urdc->primary.file, rdc_set->primary.file);
1289 1289 #endif
1290 1290 return (1);
1291 1291 }
1292 1292
1293 1293 if (rdc_set->primary.addr.len != 0 &&
1294 1294 bcmp(urdc->primary.addr.buf, rdc_set->primary.addr.buf,
1295 1295 urdc->primary.addr.len) != 0) {
1296 1296 #ifdef DEBUG
1297 1297 cmn_err(CE_WARN, "!rdc_check: primary address mismatch for %s",
1298 1298 urdc->primary.file);
1299 1299 #endif
1300 1300 return (1);
1301 1301 }
1302 1302
1303 1303 if (strncmp(urdc->secondary.file, rdc_set->secondary.file,
1304 1304 NSC_MAXPATH) != 0) {
1305 1305 #ifdef DEBUG
1306 1306 cmn_err(CE_WARN, "!rdc_check: secondary file mismatch %s vs %s",
1307 1307 urdc->secondary.file, rdc_set->secondary.file);
1308 1308 #endif
1309 1309 return (1);
1310 1310 }
1311 1311
1312 1312 if (rdc_set->secondary.addr.len != 0 &&
1313 1313 bcmp(urdc->secondary.addr.buf, rdc_set->secondary.addr.buf,
1314 1314 urdc->secondary.addr.len) != 0) {
1315 1315 #ifdef DEBUG
1316 1316 cmn_err(CE_WARN, "!rdc_check: secondary addr mismatch for %s",
1317 1317 urdc->secondary.file);
1318 1318 #endif
1319 1319 return (1);
1320 1320 }
1321 1321
1322 1322 return (0);
1323 1323 }
1324 1324
1325 1325
1326 1326 /*
1327 1327 * Lookup enabled sets for a bitmap match
1328 1328 */
1329 1329
1330 1330 int
1331 1331 rdc_lookup_bitmap(char *pathname)
1332 1332 {
1333 1333 rdc_u_info_t *urdc;
1334 1334 #ifdef DEBUG
1335 1335 rdc_k_info_t *krdc;
1336 1336 #endif
1337 1337 int index;
1338 1338
1339 1339 for (index = 0; index < rdc_max_sets; index++) {
1340 1340 urdc = &rdc_u_info[index];
1341 1341 #ifdef DEBUG
1342 1342 krdc = &rdc_k_info[index];
1343 1343 #endif
1344 1344 ASSERT(krdc->index == index);
1345 1345 ASSERT(urdc->index == index);
1346 1346
1347 1347 if (!IS_ENABLED(urdc))
1348 1348 continue;
1349 1349
1350 1350 if (rdc_get_vflags(urdc) & RDC_PRIMARY) {
1351 1351 if (strncmp(pathname, urdc->primary.bitmap,
1352 1352 NSC_MAXPATH) == 0)
1353 1353 return (index);
1354 1354 } else {
1355 1355 if (strncmp(pathname, urdc->secondary.bitmap,
1356 1356 NSC_MAXPATH) == 0)
1357 1357 return (index);
1358 1358 }
1359 1359 }
1360 1360
1361 1361 return (-1);
1362 1362 }
1363 1363
1364 1364
1365 1365 /*
1366 1366 * Translate a pathname to index into rdc_k_info[].
1367 1367 * Returns first match that is enabled.
1368 1368 */
1369 1369
1370 1370 int
1371 1371 rdc_lookup_enabled(char *pathname, int allow_disabling)
1372 1372 {
1373 1373 rdc_u_info_t *urdc;
1374 1374 rdc_k_info_t *krdc;
1375 1375 int index;
1376 1376
1377 1377 restart:
1378 1378 for (index = 0; index < rdc_max_sets; index++) {
1379 1379 urdc = &rdc_u_info[index];
1380 1380 krdc = &rdc_k_info[index];
1381 1381
1382 1382 ASSERT(krdc->index == index);
1383 1383 ASSERT(urdc->index == index);
1384 1384
1385 1385 if (!IS_ENABLED(urdc))
1386 1386 continue;
1387 1387
1388 1388 if (allow_disabling == 0 && krdc->type_flag & RDC_UNREGISTER)
1389 1389 continue;
1390 1390
1391 1391 if (rdc_get_vflags(urdc) & RDC_PRIMARY) {
1392 1392 if (strncmp(pathname, urdc->primary.file,
1393 1393 NSC_MAXPATH) == 0)
1394 1394 return (index);
1395 1395 } else {
1396 1396 if (strncmp(pathname, urdc->secondary.file,
1397 1397 NSC_MAXPATH) == 0)
1398 1398 return (index);
1399 1399 }
1400 1400 }
1401 1401
1402 1402 if (allow_disabling == 0) {
1403 1403 /* None found, or only a disabling one found, so try again */
1404 1404 allow_disabling = 1;
1405 1405 goto restart;
1406 1406 }
1407 1407
1408 1408 return (-1);
1409 1409 }
1410 1410
1411 1411
1412 1412 /*
1413 1413 * Translate a pathname to index into rdc_k_info[].
1414 1414 * Returns first match that is configured.
1415 1415 *
1416 1416 * Used by enable & resume code.
1417 1417 * Must be called with rdc_conf_lock held.
1418 1418 */
1419 1419
1420 1420 int
1421 1421 rdc_lookup_configured(char *pathname)
1422 1422 {
1423 1423 rdc_u_info_t *urdc;
1424 1424 rdc_k_info_t *krdc;
1425 1425 int index;
1426 1426
1427 1427 ASSERT(MUTEX_HELD(&rdc_conf_lock));
1428 1428
1429 1429 for (index = 0; index < rdc_max_sets; index++) {
1430 1430 urdc = &rdc_u_info[index];
1431 1431 krdc = &rdc_k_info[index];
1432 1432
1433 1433 ASSERT(krdc->index == index);
1434 1434 ASSERT(urdc->index == index);
1435 1435
1436 1436 if (!IS_CONFIGURED(krdc))
1437 1437 continue;
1438 1438
1439 1439 if (rdc_get_vflags(urdc) & RDC_PRIMARY) {
1440 1440 if (strncmp(pathname, urdc->primary.file,
1441 1441 NSC_MAXPATH) == 0)
1442 1442 return (index);
1443 1443 } else {
1444 1444 if (strncmp(pathname, urdc->secondary.file,
1445 1445 NSC_MAXPATH) == 0)
1446 1446 return (index);
1447 1447 }
1448 1448 }
1449 1449
1450 1450 return (-1);
1451 1451 }
1452 1452
1453 1453
1454 1454 /*
1455 1455 * Looks up a configured set with matching secondary interface:volume
1456 1456 * to check for illegal many-to-one volume configs. To be used during
1457 1457 * enable and resume processing.
1458 1458 *
1459 1459 * Must be called with rdc_conf_lock held.
1460 1460 */
1461 1461
1462 1462 static int
1463 1463 rdc_lookup_many2one(rdc_set_t *rdc_set)
1464 1464 {
1465 1465 rdc_u_info_t *urdc;
1466 1466 rdc_k_info_t *krdc;
1467 1467 int index;
1468 1468
1469 1469 ASSERT(MUTEX_HELD(&rdc_conf_lock));
1470 1470
1471 1471 for (index = 0; index < rdc_max_sets; index++) {
1472 1472 urdc = &rdc_u_info[index];
1473 1473 krdc = &rdc_k_info[index];
1474 1474
1475 1475 if (!IS_CONFIGURED(krdc))
1476 1476 continue;
1477 1477
1478 1478 if (strncmp(urdc->secondary.file,
1479 1479 rdc_set->secondary.file, NSC_MAXPATH) != 0)
1480 1480 continue;
1481 1481 if (strncmp(urdc->secondary.intf,
1482 1482 rdc_set->secondary.intf, MAX_RDC_HOST_SIZE) != 0)
1483 1483 continue;
1484 1484
1485 1485 break;
1486 1486 }
1487 1487
1488 1488 if (index < rdc_max_sets)
1489 1489 return (index);
1490 1490 else
1491 1491 return (-1);
1492 1492 }
1493 1493
1494 1494
1495 1495 /*
1496 1496 * Looks up an rdc set to check if it is already configured, to be used from
1497 1497 * functions called from the config ioctl where the interface names can be
1498 1498 * used for comparison.
1499 1499 *
1500 1500 * Must be called with rdc_conf_lock held.
1501 1501 */
1502 1502
1503 1503 int
1504 1504 rdc_lookup_byname(rdc_set_t *rdc_set)
1505 1505 {
1506 1506 rdc_u_info_t *urdc;
1507 1507 rdc_k_info_t *krdc;
1508 1508 int index;
1509 1509
1510 1510 ASSERT(MUTEX_HELD(&rdc_conf_lock));
1511 1511
1512 1512 for (index = 0; index < rdc_max_sets; index++) {
1513 1513 urdc = &rdc_u_info[index];
1514 1514 krdc = &rdc_k_info[index];
1515 1515
1516 1516 ASSERT(krdc->index == index);
1517 1517 ASSERT(urdc->index == index);
1518 1518
1519 1519 if (!IS_CONFIGURED(krdc))
1520 1520 continue;
1521 1521
1522 1522 if (strncmp(urdc->primary.file, rdc_set->primary.file,
1523 1523 NSC_MAXPATH) != 0)
1524 1524 continue;
1525 1525 if (strncmp(urdc->primary.intf, rdc_set->primary.intf,
1526 1526 MAX_RDC_HOST_SIZE) != 0)
1527 1527 continue;
1528 1528 if (strncmp(urdc->secondary.file, rdc_set->secondary.file,
1529 1529 NSC_MAXPATH) != 0)
1530 1530 continue;
1531 1531 if (strncmp(urdc->secondary.intf, rdc_set->secondary.intf,
1532 1532 MAX_RDC_HOST_SIZE) != 0)
1533 1533 continue;
1534 1534
1535 1535 break;
1536 1536 }
1537 1537
1538 1538 if (index < rdc_max_sets)
1539 1539 return (index);
1540 1540 else
1541 1541 return (-1);
1542 1542 }
1543 1543
1544 1544 /*
1545 1545 * Looks up a secondary hostname and device, to be used from
1546 1546 * functions called from the config ioctl where the interface names can be
1547 1547 * used for comparison.
1548 1548 *
1549 1549 * Must be called with rdc_conf_lock held.
1550 1550 */
1551 1551
1552 1552 int
1553 1553 rdc_lookup_byhostdev(char *intf, char *file)
1554 1554 {
1555 1555 rdc_u_info_t *urdc;
1556 1556 rdc_k_info_t *krdc;
1557 1557 int index;
1558 1558
1559 1559 ASSERT(MUTEX_HELD(&rdc_conf_lock));
1560 1560
1561 1561 for (index = 0; index < rdc_max_sets; index++) {
1562 1562 urdc = &rdc_u_info[index];
1563 1563 krdc = &rdc_k_info[index];
1564 1564
1565 1565 ASSERT(krdc->index == index);
1566 1566 ASSERT(urdc->index == index);
1567 1567
1568 1568 if (!IS_CONFIGURED(krdc))
1569 1569 continue;
1570 1570
1571 1571 if (strncmp(urdc->secondary.file, file,
1572 1572 NSC_MAXPATH) != 0)
1573 1573 continue;
1574 1574 if (strncmp(urdc->secondary.intf, intf,
1575 1575 MAX_RDC_HOST_SIZE) != 0)
1576 1576 continue;
1577 1577 break;
1578 1578 }
1579 1579
1580 1580 if (index < rdc_max_sets)
1581 1581 return (index);
1582 1582 else
1583 1583 return (-1);
1584 1584 }
1585 1585
1586 1586
1587 1587 /*
1588 1588 * Looks up an rdc set to see if it is currently enabled, to be used on the
1589 1589 * server so that the interface addresses must be used for comparison, as
1590 1590 * the interface names may differ from those used on the client.
1591 1591 *
1592 1592 */
1593 1593
1594 1594 int
1595 1595 rdc_lookup_byaddr(rdc_set_t *rdc_set)
1596 1596 {
1597 1597 rdc_u_info_t *urdc;
1598 1598 #ifdef DEBUG
1599 1599 rdc_k_info_t *krdc;
1600 1600 #endif
1601 1601 int index;
1602 1602
1603 1603 for (index = 0; index < rdc_max_sets; index++) {
1604 1604 urdc = &rdc_u_info[index];
1605 1605 #ifdef DEBUG
1606 1606 krdc = &rdc_k_info[index];
1607 1607 #endif
1608 1608 ASSERT(krdc->index == index);
1609 1609 ASSERT(urdc->index == index);
1610 1610
1611 1611 if (!IS_ENABLED(urdc))
1612 1612 continue;
1613 1613
1614 1614 if (strcmp(urdc->primary.file, rdc_set->primary.file) != 0)
1615 1615 continue;
1616 1616
1617 1617 if (strcmp(urdc->secondary.file, rdc_set->secondary.file) != 0)
1618 1618 continue;
1619 1619
1620 1620 if (bcmp(urdc->primary.addr.buf, rdc_set->primary.addr.buf,
1621 1621 urdc->primary.addr.len) != 0) {
1622 1622 continue;
1623 1623 }
1624 1624
1625 1625 if (bcmp(urdc->secondary.addr.buf, rdc_set->secondary.addr.buf,
1626 1626 urdc->secondary.addr.len) != 0) {
1627 1627 continue;
1628 1628 }
1629 1629
1630 1630 break;
1631 1631 }
1632 1632
1633 1633 if (index < rdc_max_sets)
1634 1634 return (index);
1635 1635 else
1636 1636 return (-1);
1637 1637 }
1638 1638
1639 1639
/*
 * Return index of first multihop or 1-to-many
 * Behavior controlled by setting ismany.
 * ismany TRUE (one-to-many)
 * ismany FALSE (multihops)
 *
 * Returns the index of the first configured set (other than krdc)
 * that shares krdc's local data volume in the required role, or -1
 * if there is none.  Caller must hold rdc_conf_lock and
 * rdc_many_lock.
 */
static int
rdc_lookup_multimany(rdc_k_info_t *krdc, const int ismany)
{
	rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
	rdc_u_info_t *utmp;
	rdc_k_info_t *ktmp;
	char *pathname;	/* the data volume on this host */
	int index;
	int role;	/* role (RDC_PRIMARY or 0) the match must have */

	ASSERT(MUTEX_HELD(&rdc_conf_lock));
	ASSERT(MUTEX_HELD(&rdc_many_lock));

	if (rdc_get_vflags(urdc) & RDC_PRIMARY) {
		/* this host is the primary of the krdc set */
		pathname = urdc->primary.file;
		if (ismany) {
			/*
			 * 1-many sets are linked by primary :
			 * look for matching primary on this host
			 */
			role = RDC_PRIMARY;
		} else {
			/*
			 * multihop sets link primary to secondary :
			 * look for matching secondary on this host
			 */
			role = 0;
		}
	} else {
		/* this host is the secondary of the krdc set */
		pathname = urdc->secondary.file;
		if (ismany) {
			/*
			 * 1-many sets are linked by primary, so if
			 * this host is the secondary of the set this
			 * cannot require 1-many linkage.
			 */
			return (-1);
		} else {
			/*
			 * multihop sets link primary to secondary :
			 * look for matching primary on this host
			 */
			role = RDC_PRIMARY;
		}
	}

	for (index = 0; index < rdc_max_sets; index++) {
		utmp = &rdc_u_info[index];
		ktmp = &rdc_k_info[index];

		if (!IS_CONFIGURED(ktmp)) {
			continue;
		}

		if (role == RDC_PRIMARY) {
			/*
			 * Find a primary that is this host and is not
			 * krdc but shares the same data volume as krdc.
			 */
			if ((rdc_get_vflags(utmp) & RDC_PRIMARY) &&
			    strncmp(utmp->primary.file, pathname,
			    NSC_MAXPATH) == 0 && (krdc != ktmp)) {
				break;
			}
		} else {
			/*
			 * Find a secondary that is this host and is not
			 * krdc but shares the same data volume as krdc.
			 */
			if (!(rdc_get_vflags(utmp) & RDC_PRIMARY) &&
			    strncmp(utmp->secondary.file, pathname,
			    NSC_MAXPATH) == 0 && (krdc != ktmp)) {
				break;
			}
		}
	}

	if (index < rdc_max_sets)
		return (index);
	else
		return (-1);
}
1731 1731
1732 1732 /*
1733 1733 * Returns secondary match that is configured.
1734 1734 *
1735 1735 * Used by enable & resume code.
1736 1736 * Must be called with rdc_conf_lock held.
1737 1737 */
1738 1738
1739 1739 static int
1740 1740 rdc_lookup_secondary(char *pathname)
1741 1741 {
1742 1742 rdc_u_info_t *urdc;
1743 1743 rdc_k_info_t *krdc;
1744 1744 int index;
1745 1745
1746 1746 ASSERT(MUTEX_HELD(&rdc_conf_lock));
1747 1747
1748 1748 for (index = 0; index < rdc_max_sets; index++) {
1749 1749 urdc = &rdc_u_info[index];
1750 1750 krdc = &rdc_k_info[index];
1751 1751
1752 1752 ASSERT(krdc->index == index);
1753 1753 ASSERT(urdc->index == index);
1754 1754
1755 1755 if (!IS_CONFIGURED(krdc))
1756 1756 continue;
1757 1757
1758 1758 if (!IS_STATE(urdc, RDC_PRIMARY)) {
1759 1759 if (strncmp(pathname, urdc->secondary.file,
1760 1760 NSC_MAXPATH) == 0)
1761 1761 return (index);
1762 1762 }
1763 1763 }
1764 1764
1765 1765 return (-1);
1766 1766 }
1767 1767
1768 1768
1769 1769 static nsc_fd_t *
1770 1770 rdc_open_direct(rdc_k_info_t *krdc)
1771 1771 {
1772 1772 rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
1773 1773 int rc;
1774 1774
1775 1775 if (krdc->remote_fd == NULL)
1776 1776 krdc->remote_fd = nsc_open(urdc->direct_file,
1777 1777 NSC_RDCHR_ID|NSC_DEVICE|NSC_RDWR, 0, 0, &rc);
1778 1778 return (krdc->remote_fd);
1779 1779 }
1780 1780
1781 1781 static void
1782 1782 rdc_close_direct(rdc_k_info_t *krdc)
1783 1783 {
1784 1784 rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
1785 1785
1786 1786 urdc->direct_file[0] = 0;
1787 1787 if (krdc->remote_fd) {
1788 1788 if (nsc_close(krdc->remote_fd) == 0) {
1789 1789 krdc->remote_fd = NULL;
1790 1790 }
1791 1791 }
1792 1792 }
1793 1793
1794 1794
#ifdef DEBUG_MANY
/*
 * Debug helper: walk the circular 1-to-many chain starting at 'start'
 * and dump each set's volumes and many/multi linkage pointers.
 */
static void
print_many(rdc_k_info_t *start)
{
	rdc_k_info_t *p = start;
	rdc_u_info_t *q = &rdc_u_info[p->index];

	do {
		cmn_err(CE_CONT, "!krdc %p, %s %s (many_nxt %p multi_nxt %p)\n",
		    p, q->primary.file, q->secondary.file, p->many_next,
		    p->multi_next);
		delay(10);	/* pace the console output */
		p = p->many_next;
		q = &rdc_u_info[p->index];
	} while (p && p != start);
}
#endif /* DEBUG_MANY */
1812 1812
1813 1813
/*
 * Link krdc into a multihop pair, if a companion set exists.
 *
 * Searches for a set on this host that shares krdc's data volume in
 * the opposite role, and cross-links the two via their multi_next
 * pointers.  Returns -1 if the link is not permitted because either
 * leg uses direct file I/O; returns 0 otherwise (including when no
 * companion exists).  Caller must hold rdc_conf_lock and
 * rdc_many_lock.
 */
static int
add_to_multi(rdc_k_info_t *krdc)
{
	rdc_u_info_t *urdc;
	rdc_k_info_t *ktmp;
	rdc_u_info_t *utmp;
	int mindex;	/* index of the companion set, or -1 */
	int domulti;
	
	urdc = &rdc_u_info[krdc->index];

	ASSERT(MUTEX_HELD(&rdc_conf_lock));
	ASSERT(MUTEX_HELD(&rdc_many_lock));

	/* Now find companion krdc */
	mindex = rdc_lookup_multimany(krdc, FALSE);

#ifdef DEBUG_MANY
	cmn_err(CE_NOTE,
	    "!add_to_multi: lookup_multimany: mindex %d prim %s sec %s",
	    mindex, urdc->primary.file, urdc->secondary.file);
#endif

	if (mindex >= 0) {
		ktmp = &rdc_k_info[mindex];
		utmp = &rdc_u_info[mindex];

		domulti = 1;

		if ((rdc_get_vflags(urdc) & RDC_PRIMARY) &&
		    ktmp->multi_next != NULL) {
			/*
			 * We are adding a new primary to a many
			 * group that is the target of a multihop, just
			 * ignore it since we are linked in elsewhere.
			 */
			domulti = 0;
		}

		if (domulti) {
			if (rdc_get_vflags(urdc) & RDC_PRIMARY) {
				/* Is previous leg using direct file I/O? */
				if (utmp->direct_file[0] != 0) {
					/* It is, so cannot proceed */
					return (-1);
				}
			} else {
				/* Is this leg using direct file I/O? */
				if (urdc->direct_file[0] != 0) {
					/* It is, so cannot proceed */
					return (-1);
				}
			}
			/* cross-link the two legs of the multihop */
			krdc->multi_next = ktmp;
			ktmp->multi_next = krdc;
		}
	} else {
		krdc->multi_next = NULL;
#ifdef DEBUG_MANY
		cmn_err(CE_NOTE, "!add_to_multi: NULL multi_next index %d",
		    krdc->index);
#endif
	}

	return (0);
}
1880 1880
1881 1881
/*
 * Add a new set to the circular list of 1-to-many primaries and chain
 * up any multihop as well.
 *
 * Returns -1 if the multihop linkage is rejected (direct file I/O in
 * use on one leg), otherwise 0.  When no other primary shares this
 * set's data volume the set simply remains a singleton chain.
 * Caller must hold rdc_conf_lock; rdc_many_lock is taken here.
 */
static int
add_to_many(rdc_k_info_t *krdc)
{
	rdc_k_info_t *okrdc;	/* existing member of the many chain */
	int oindex;

	ASSERT(MUTEX_HELD(&rdc_conf_lock));

	rdc_many_enter(krdc);

	if (add_to_multi(krdc) < 0) {
		rdc_many_exit(krdc);
		return (-1);
	}

	oindex = rdc_lookup_multimany(krdc, TRUE);
	if (oindex < 0) {
#ifdef DEBUG_MANY
		print_many(krdc);
#endif
		rdc_many_exit(krdc);
		return (0);
	}

	okrdc = &rdc_k_info[oindex];

#ifdef DEBUG_MANY
	print_many(okrdc);
#endif
	/* splice krdc into the circular chain after okrdc */
	krdc->many_next = okrdc->many_next;
	okrdc->many_next = krdc;

#ifdef DEBUG_MANY
	print_many(okrdc);
#endif
	rdc_many_exit(krdc);
	return (0);
}
1924 1924
1925 1925
/*
 * Remove a set from the circular list of 1-to-many primaries.
 *
 * Unlinks 'old' from its many chain and repairs any multihop linkage:
 * if 'old' was the multihop partner, the partner is re-pointed at a
 * surviving member of the chain.  Finally the group mflags are
 * recomputed so bits held only by 'old' are dropped.
 * Caller must hold rdc_conf_lock; rdc_many_lock is taken here.
 */
static void
remove_from_many(rdc_k_info_t *old)
{
	rdc_u_info_t *uold = &rdc_u_info[old->index];
	rdc_k_info_t *p, *q;

	ASSERT(MUTEX_HELD(&rdc_conf_lock));

	rdc_many_enter(old);

#ifdef DEBUG_MANY
	cmn_err(CE_NOTE, "!rdc: before remove_from_many");
	print_many(old);
#endif

	if (old->many_next == old) {
		/* last member of the chain: only the multihop may remain */
		/* remove from multihop */
		if ((q = old->multi_next) != NULL) {
			ASSERT(q->multi_next == old);
			q->multi_next = NULL;
			old->multi_next = NULL;
		}

		rdc_many_exit(old);
		return;
	}

	/* search for the predecessor of 'old' on the circular chain */
	for (p = old->many_next; p->many_next != old; p = p->many_next)
	;

	/* unlink 'old'; leave it as a self-referencing singleton */
	p->many_next = old->many_next;
	old->many_next = old;

	if ((q = old->multi_next) != NULL) {
		/*
		 * old was part of a multihop, so switch multi pointers
		 * to someone remaining on the many chain
		 */
		ASSERT(p->multi_next == NULL);

		q->multi_next = p;
		p->multi_next = q;
		old->multi_next = NULL;
	}

#ifdef DEBUG_MANY
	if (p == old) {
		cmn_err(CE_NOTE, "!rdc: after remove_from_many empty");
	} else {
		cmn_err(CE_NOTE, "!rdc: after remove_from_many");
		print_many(p);
	}
#endif

	/* drop group flag bits that only 'old' was holding */
	rdc_clr_mflags(&rdc_u_info[p->index],
	    (rdc_get_vflags(uold) & RDC_MFLAGS));

	rdc_many_exit(old);
}
1989 1989
1990 1990
1991 1991 static int
1992 1992 _rdc_enable(rdc_set_t *rdc_set, int options, spcs_s_info_t kstatus)
1993 1993 {
1994 1994 int index;
1995 1995 char *rhost;
1996 1996 struct netbuf *addrp;
1997 1997 rdc_k_info_t *krdc;
1998 1998 rdc_u_info_t *urdc;
1999 1999 rdc_srv_t *svp = NULL;
2000 2000 char *local_file;
2001 2001 char *local_bitmap;
2002 2002 char *diskq;
2003 2003 int rc;
2004 2004 nsc_size_t maxfbas;
2005 2005 rdc_group_t *grp;
2006 2006
2007 2007 if ((rdc_set->primary.intf[0] == 0) ||
2008 2008 (rdc_set->primary.addr.len == 0) ||
2009 2009 (rdc_set->primary.file[0] == 0) ||
2010 2010 (rdc_set->primary.bitmap[0] == 0) ||
2011 2011 (rdc_set->secondary.intf[0] == 0) ||
2012 2012 (rdc_set->secondary.addr.len == 0) ||
2013 2013 (rdc_set->secondary.file[0] == 0) ||
2014 2014 (rdc_set->secondary.bitmap[0] == 0)) {
2015 2015 spcs_s_add(kstatus, RDC_EEMPTY);
2016 2016 return (RDC_EEMPTY);
2017 2017 }
2018 2018
2019 2019 /* Next check there aren't any enabled rdc sets which match. */
2020 2020
2021 2021 mutex_enter(&rdc_conf_lock);
2022 2022
2023 2023 if (rdc_lookup_byname(rdc_set) >= 0) {
2024 2024 mutex_exit(&rdc_conf_lock);
2025 2025 spcs_s_add(kstatus, RDC_EENABLED, rdc_set->primary.intf,
2026 2026 rdc_set->primary.file, rdc_set->secondary.intf,
2027 2027 rdc_set->secondary.file);
2028 2028 return (RDC_EENABLED);
2029 2029 }
2030 2030
2031 2031 if (rdc_lookup_many2one(rdc_set) >= 0) {
2032 2032 mutex_exit(&rdc_conf_lock);
2033 2033 spcs_s_add(kstatus, RDC_EMANY2ONE, rdc_set->primary.intf,
2034 2034 rdc_set->primary.file, rdc_set->secondary.intf,
2035 2035 rdc_set->secondary.file);
2036 2036 return (RDC_EMANY2ONE);
2037 2037 }
2038 2038
2039 2039 if (rdc_set->netconfig->knc_proto == NULL) {
2040 2040 mutex_exit(&rdc_conf_lock);
2041 2041 spcs_s_add(kstatus, RDC_ENETCONFIG);
2042 2042 return (RDC_ENETCONFIG);
2043 2043 }
2044 2044
2045 2045 if (rdc_set->primary.addr.len == 0) {
2046 2046 mutex_exit(&rdc_conf_lock);
2047 2047 spcs_s_add(kstatus, RDC_ENETBUF, rdc_set->primary.file);
2048 2048 return (RDC_ENETBUF);
2049 2049 }
2050 2050
2051 2051 if (rdc_set->secondary.addr.len == 0) {
2052 2052 mutex_exit(&rdc_conf_lock);
2053 2053 spcs_s_add(kstatus, RDC_ENETBUF, rdc_set->secondary.file);
2054 2054 return (RDC_ENETBUF);
2055 2055 }
2056 2056
2057 2057 /* Check that the local data volume isn't in use as a bitmap */
2058 2058 if (options & RDC_OPT_PRIMARY)
2059 2059 local_file = rdc_set->primary.file;
2060 2060 else
2061 2061 local_file = rdc_set->secondary.file;
2062 2062 if (rdc_lookup_bitmap(local_file) >= 0) {
2063 2063 mutex_exit(&rdc_conf_lock);
2064 2064 spcs_s_add(kstatus, RDC_EVOLINUSE, local_file);
2065 2065 return (RDC_EVOLINUSE);
2066 2066 }
2067 2067
2068 2068 /* check that the secondary data volume isn't in use */
2069 2069 if (!(options & RDC_OPT_PRIMARY)) {
2070 2070 local_file = rdc_set->secondary.file;
2071 2071 if (rdc_lookup_secondary(local_file) >= 0) {
2072 2072 mutex_exit(&rdc_conf_lock);
2073 2073 spcs_s_add(kstatus, RDC_EVOLINUSE, local_file);
2074 2074 return (RDC_EVOLINUSE);
2075 2075 }
2076 2076 }
2077 2077
2078 2078 /* check that the local data vol is not in use as a diskqueue */
2079 2079 if (options & RDC_OPT_PRIMARY) {
2080 2080 if (rdc_lookup_diskq(rdc_set->primary.file) >= 0) {
2081 2081 mutex_exit(&rdc_conf_lock);
2082 2082 spcs_s_add(kstatus,
2083 2083 RDC_EVOLINUSE, rdc_set->primary.file);
2084 2084 return (RDC_EVOLINUSE);
2085 2085 }
2086 2086 }
2087 2087
2088 2088 /* Check that the bitmap isn't in use as a data volume */
2089 2089 if (options & RDC_OPT_PRIMARY)
2090 2090 local_bitmap = rdc_set->primary.bitmap;
2091 2091 else
2092 2092 local_bitmap = rdc_set->secondary.bitmap;
2093 2093 if (rdc_lookup_configured(local_bitmap) >= 0) {
2094 2094 mutex_exit(&rdc_conf_lock);
2095 2095 spcs_s_add(kstatus, RDC_EBMPINUSE, local_bitmap);
2096 2096 return (RDC_EBMPINUSE);
2097 2097 }
2098 2098
2099 2099 /* Check that the bitmap isn't already in use as a bitmap */
2100 2100 if (rdc_lookup_bitmap(local_bitmap) >= 0) {
2101 2101 mutex_exit(&rdc_conf_lock);
2102 2102 spcs_s_add(kstatus, RDC_EBMPINUSE, local_bitmap);
2103 2103 return (RDC_EBMPINUSE);
2104 2104 }
2105 2105
2106 2106 /* check that the diskq (if here) is not in use */
2107 2107 diskq = rdc_set->disk_queue;
2108 2108 if (diskq[0] && rdc_diskq_inuse(rdc_set, diskq)) {
2109 2109 mutex_exit(&rdc_conf_lock);
2110 2110 spcs_s_add(kstatus, RDC_EDISKQINUSE, diskq);
2111 2111 return (RDC_EDISKQINUSE);
2112 2112 }
2113 2113
2114 2114
2115 2115 /* Set urdc->volume_size */
2116 2116 index = rdc_dev_open(rdc_set, options);
2117 2117 if (index < 0) {
2118 2118 mutex_exit(&rdc_conf_lock);
2119 2119 if (options & RDC_OPT_PRIMARY)
2120 2120 spcs_s_add(kstatus, RDC_EOPEN, rdc_set->primary.intf,
2121 2121 rdc_set->primary.file);
2122 2122 else
2123 2123 spcs_s_add(kstatus, RDC_EOPEN, rdc_set->secondary.intf,
2124 2124 rdc_set->secondary.file);
2125 2125 return (RDC_EOPEN);
2126 2126 }
2127 2127
2128 2128 urdc = &rdc_u_info[index];
2129 2129 krdc = &rdc_k_info[index];
2130 2130
2131 2131 /* copy relevant parts of rdc_set to urdc field by field */
2132 2132
2133 2133 (void) strncpy(urdc->primary.intf, rdc_set->primary.intf,
2134 2134 MAX_RDC_HOST_SIZE);
2135 2135 (void) strncpy(urdc->secondary.intf, rdc_set->secondary.intf,
2136 2136 MAX_RDC_HOST_SIZE);
2137 2137
2138 2138 (void) strncpy(urdc->group_name, rdc_set->group_name, NSC_MAXPATH);
2139 2139 (void) strncpy(urdc->disk_queue, rdc_set->disk_queue, NSC_MAXPATH);
2140 2140
2141 2141 dup_rdc_netbuf(&rdc_set->primary.addr, &urdc->primary.addr);
2142 2142 (void) strncpy(urdc->primary.file, rdc_set->primary.file, NSC_MAXPATH);
2143 2143 (void) strncpy(urdc->primary.bitmap, rdc_set->primary.bitmap,
2144 2144 NSC_MAXPATH);
2145 2145
2146 2146 dup_rdc_netbuf(&rdc_set->secondary.addr, &urdc->secondary.addr);
2147 2147 (void) strncpy(urdc->secondary.file, rdc_set->secondary.file,
2148 2148 NSC_MAXPATH);
2149 2149 (void) strncpy(urdc->secondary.bitmap, rdc_set->secondary.bitmap,
2150 2150 NSC_MAXPATH);
2151 2151
2152 2152 urdc->setid = rdc_set->setid;
2153 2153
2154 2154 /*
2155 2155 * before we try to add to group, or create one, check out
2156 2156 * if we are doing the wrong thing with the diskq
2157 2157 */
2158 2158
2159 2159 if (urdc->disk_queue[0] && (options & RDC_OPT_SYNC)) {
2160 2160 mutex_exit(&rdc_conf_lock);
2161 2161 rdc_dev_close(krdc);
2162 2162 spcs_s_add(kstatus, RDC_EQWRONGMODE);
2163 2163 return (RDC_EQWRONGMODE);
2164 2164 }
2165 2165
2166 2166 if ((rc = add_to_group(krdc, options, RDC_CMD_ENABLE)) != 0) {
2167 2167 mutex_exit(&rdc_conf_lock);
2168 2168 rdc_dev_close(krdc);
2169 2169 if (rc == RDC_EQNOADD) {
2170 2170 spcs_s_add(kstatus, RDC_EQNOADD, rdc_set->disk_queue);
2171 2171 return (RDC_EQNOADD);
2172 2172 } else {
2173 2173 spcs_s_add(kstatus, RDC_EGROUP,
2174 2174 rdc_set->primary.intf, rdc_set->primary.file,
2175 2175 rdc_set->secondary.intf, rdc_set->secondary.file,
2176 2176 rdc_set->group_name);
2177 2177 return (RDC_EGROUP);
2178 2178 }
2179 2179 }
2180 2180
2181 2181 /*
2182 2182 * maxfbas was set in rdc_dev_open as primary's maxfbas.
2183 2183 * If diskq's maxfbas is smaller, then use diskq's.
2184 2184 */
2185 2185 grp = krdc->group;
2186 2186 if (grp && RDC_IS_DISKQ(grp) && (grp->diskqfd != 0)) {
2187 2187 rc = _rdc_rsrv_diskq(grp);
2188 2188 if (RDC_SUCCESS(rc)) {
2189 2189 rc = nsc_maxfbas(grp->diskqfd, 0, &maxfbas);
2190 2190 if (rc == 0) {
2191 2191 #ifdef DEBUG
2192 2192 if (krdc->maxfbas != maxfbas)
2193 2193 cmn_err(CE_NOTE,
2194 2194 "!_rdc_enable: diskq maxfbas = %"
2195 2195 NSC_SZFMT ", primary maxfbas = %"
2196 2196 NSC_SZFMT, maxfbas, krdc->maxfbas);
2197 2197 #endif
2198 2198 krdc->maxfbas = min(krdc->maxfbas, maxfbas);
2199 2199 } else {
2200 2200 cmn_err(CE_WARN,
2201 2201 "!_rdc_enable: diskq maxfbas failed (%d)",
2202 2202 rc);
2203 2203 }
2204 2204 _rdc_rlse_diskq(grp);
2205 2205 } else {
2206 2206 cmn_err(CE_WARN,
2207 2207 "!_rdc_enable: diskq reserve failed (%d)", rc);
2208 2208 }
2209 2209 }
2210 2210
2211 2211 rdc_init_flags(urdc);
2212 2212 (void) strncpy(urdc->direct_file, rdc_set->direct_file, NSC_MAXPATH);
2213 2213 if ((options & RDC_OPT_PRIMARY) && rdc_set->direct_file[0]) {
2214 2214 if (rdc_open_direct(krdc) == NULL)
2215 2215 rdc_set_flags(urdc, RDC_FCAL_FAILED);
2216 2216 }
2217 2217
2218 2218 krdc->many_next = krdc;
2219 2219
2220 2220 ASSERT(krdc->type_flag == 0);
2221 2221 krdc->type_flag = RDC_CONFIGURED;
2222 2222
2223 2223 if (options & RDC_OPT_PRIMARY)
2224 2224 rdc_set_flags(urdc, RDC_PRIMARY);
2225 2225
2226 2226 if (options & RDC_OPT_ASYNC)
2227 2227 krdc->type_flag |= RDC_ASYNCMODE;
2228 2228
2229 2229 set_busy(krdc);
2230 2230 urdc->syshostid = rdc_set->syshostid;
2231 2231
2232 2232 if (add_to_many(krdc) < 0) {
2233 2233 mutex_exit(&rdc_conf_lock);
2234 2234
2235 2235 rdc_group_enter(krdc);
2236 2236
2237 2237 spcs_s_add(kstatus, RDC_EMULTI);
2238 2238 rc = RDC_EMULTI;
2239 2239 goto fail;
2240 2240 }
2241 2241
2242 2242 /* Configured but not enabled */
2243 2243 ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));
2244 2244
2245 2245 mutex_exit(&rdc_conf_lock);
2246 2246
2247 2247 rdc_group_enter(krdc);
2248 2248
2249 2249 /* Configured but not enabled */
2250 2250 ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));
2251 2251
2252 2252 /*
2253 2253 * The rdc set is configured but not yet enabled. Other operations must
2254 2254 * ignore this set until it is enabled.
2255 2255 */
2256 2256
2257 2257 urdc->sync_pos = 0;
2258 2258
2259 2259 if (rdc_set->maxqfbas > 0)
2260 2260 urdc->maxqfbas = rdc_set->maxqfbas;
2261 2261 else
2262 2262 urdc->maxqfbas = rdc_maxthres_queue;
2263 2263
2264 2264 if (rdc_set->maxqitems > 0)
2265 2265 urdc->maxqitems = rdc_set->maxqitems;
2266 2266 else
2267 2267 urdc->maxqitems = rdc_max_qitems;
2268 2268
2269 2269 if (rdc_set->asyncthr > 0)
2270 2270 urdc->asyncthr = rdc_set->asyncthr;
2271 2271 else
2272 2272 urdc->asyncthr = rdc_asyncthr;
2273 2273
2274 2274 if (urdc->autosync == -1) {
2275 2275 /* Still unknown */
2276 2276 if (rdc_set->autosync > 0)
2277 2277 urdc->autosync = 1;
2278 2278 else
2279 2279 urdc->autosync = 0;
2280 2280 }
2281 2281
2282 2282 urdc->netconfig = rdc_set->netconfig;
2283 2283
2284 2284 if (options & RDC_OPT_PRIMARY) {
2285 2285 rhost = rdc_set->secondary.intf;
2286 2286 addrp = &rdc_set->secondary.addr;
2287 2287 } else {
2288 2288 rhost = rdc_set->primary.intf;
2289 2289 addrp = &rdc_set->primary.addr;
2290 2290 }
2291 2291
2292 2292 if (options & RDC_OPT_ASYNC)
2293 2293 rdc_set_flags(urdc, RDC_ASYNC);
2294 2294
2295 2295 svp = rdc_create_svinfo(rhost, addrp, urdc->netconfig);
2296 2296 if (svp == NULL) {
2297 2297 spcs_s_add(kstatus, ENOMEM);
2298 2298 rc = ENOMEM;
2299 2299 goto fail;
2300 2300 }
2301 2301 urdc->netconfig = NULL; /* This will be no good soon */
2302 2302
2303 2303 rdc_kstat_create(index);
2304 2304
2305 2305 /* Don't set krdc->intf here */
2306 2306
2307 2307 if (rdc_enable_bitmap(krdc, options & RDC_OPT_SETBMP) < 0)
2308 2308 goto bmpfail;
2309 2309
2310 2310 RDC_ZERO_BITREF(krdc);
2311 2311 if (krdc->lsrv == NULL)
2312 2312 krdc->lsrv = svp;
2313 2313 else {
2314 2314 #ifdef DEBUG
2315 2315 cmn_err(CE_WARN, "!_rdc_enable: krdc->lsrv already set: %p",
2316 2316 (void *) krdc->lsrv);
2317 2317 #endif
2318 2318 rdc_destroy_svinfo(svp);
2319 2319 }
2320 2320 svp = NULL;
2321 2321
2322 2322 /* Configured but not enabled */
2323 2323 ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));
2324 2324
2325 2325 /* And finally */
2326 2326
2327 2327 krdc->remote_index = -1;
2328 2328 /* Should we set the whole group logging? */
2329 2329 rdc_set_flags(urdc, RDC_ENABLED | RDC_LOGGING);
2330 2330
2331 2331 rdc_group_exit(krdc);
2332 2332
2333 2333 if (rdc_intercept(krdc) != 0) {
2334 2334 rdc_group_enter(krdc);
2335 2335 rdc_clr_flags(urdc, RDC_ENABLED);
2336 2336 if (options & RDC_OPT_PRIMARY)
2337 2337 spcs_s_add(kstatus, RDC_EREGISTER, urdc->primary.file);
2338 2338 else
2339 2339 spcs_s_add(kstatus, RDC_EREGISTER,
2340 2340 urdc->secondary.file);
2341 2341 #ifdef DEBUG
2342 2342 cmn_err(CE_NOTE, "!nsc_register_path failed %s",
2343 2343 urdc->primary.file);
2344 2344 #endif
2345 2345 rc = RDC_EREGISTER;
2346 2346 goto bmpfail;
2347 2347 }
2348 2348 #ifdef DEBUG
2349 2349 cmn_err(CE_NOTE, "!SNDR: enabled %s %s", urdc->primary.file,
2350 2350 urdc->secondary.file);
2351 2351 #endif
2352 2352
2353 2353 rdc_write_state(urdc);
2354 2354
2355 2355 mutex_enter(&rdc_conf_lock);
2356 2356 wakeup_busy(krdc);
2357 2357 mutex_exit(&rdc_conf_lock);
2358 2358
2359 2359 return (0);
2360 2360
2361 2361 bmpfail:
2362 2362 if (options & RDC_OPT_PRIMARY)
2363 2363 spcs_s_add(kstatus, RDC_EBITMAP, rdc_set->primary.bitmap);
2364 2364 else
2365 2365 spcs_s_add(kstatus, RDC_EBITMAP, rdc_set->secondary.bitmap);
2366 2366 rc = RDC_EBITMAP;
2367 2367 if (rdc_get_vflags(urdc) & RDC_ENABLED) {
2368 2368 rdc_group_exit(krdc);
2369 2369 (void) rdc_unintercept(krdc);
2370 2370 rdc_group_enter(krdc);
2371 2371 }
2372 2372
2373 2373 fail:
2374 2374 rdc_kstat_delete(index);
2375 2375 rdc_group_exit(krdc);
2376 2376 if (krdc->intf) {
2377 2377 rdc_if_t *ip = krdc->intf;
2378 2378 mutex_enter(&rdc_conf_lock);
2379 2379 krdc->intf = NULL;
2380 2380 rdc_remove_from_if(ip);
2381 2381 mutex_exit(&rdc_conf_lock);
2382 2382 }
2383 2383 rdc_group_enter(krdc);
2384 2384 /* Configured but not enabled */
2385 2385 ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));
2386 2386
2387 2387 rdc_dev_close(krdc);
2388 2388 rdc_close_direct(krdc);
2389 2389 rdc_destroy_svinfo(svp);
2390 2390
2391 2391 /* Configured but not enabled */
2392 2392 ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));
2393 2393
2394 2394 rdc_group_exit(krdc);
2395 2395
2396 2396 mutex_enter(&rdc_conf_lock);
2397 2397
2398 2398 /* Configured but not enabled */
2399 2399 ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));
2400 2400
2401 2401 remove_from_group(krdc);
2402 2402
2403 2403 if (IS_MANY(krdc) || IS_MULTI(krdc))
2404 2404 remove_from_many(krdc);
2405 2405
2406 2406 rdc_u_init(urdc);
2407 2407
2408 2408 ASSERT(krdc->type_flag & RDC_CONFIGURED);
2409 2409 krdc->type_flag = 0;
2410 2410 wakeup_busy(krdc);
2411 2411
2412 2412 mutex_exit(&rdc_conf_lock);
2413 2413
2414 2414 return (rc);
2415 2415 }
2416 2416
2417 2417 static int
2418 2418 rdc_enable(rdc_config_t *uparms, spcs_s_info_t kstatus)
2419 2419 {
2420 2420 int rc;
2421 2421 char itmp[10];
2422 2422
2423 2423 if (!(uparms->options & RDC_OPT_SYNC) &&
2424 2424 !(uparms->options & RDC_OPT_ASYNC)) {
2425 2425 rc = RDC_EEINVAL;
2426 2426 (void) spcs_s_inttostring(
2427 2427 uparms->options, itmp, sizeof (itmp), 1);
2428 2428 spcs_s_add(kstatus, RDC_EEINVAL, itmp);
2429 2429 goto done;
2430 2430 }
2431 2431
2432 2432 if (!(uparms->options & RDC_OPT_PRIMARY) &&
2433 2433 !(uparms->options & RDC_OPT_SECONDARY)) {
2434 2434 rc = RDC_EEINVAL;
2435 2435 (void) spcs_s_inttostring(
2436 2436 uparms->options, itmp, sizeof (itmp), 1);
2437 2437 spcs_s_add(kstatus, RDC_EEINVAL, itmp);
2438 2438 goto done;
2439 2439 }
2440 2440
2441 2441 if (!(uparms->options & RDC_OPT_SETBMP) &&
2442 2442 !(uparms->options & RDC_OPT_CLRBMP)) {
2443 2443 rc = RDC_EEINVAL;
2444 2444 (void) spcs_s_inttostring(
2445 2445 uparms->options, itmp, sizeof (itmp), 1);
2446 2446 spcs_s_add(kstatus, RDC_EEINVAL, itmp);
2447 2447 goto done;
2448 2448 }
2449 2449
2450 2450 rc = _rdc_enable(uparms->rdc_set, uparms->options, kstatus);
2451 2451 done:
2452 2452 return (rc);
2453 2453 }
2454 2454
/*
 * _rdc_disable: tear down one configured rdc set.
 *
 * On entry the caller (rdc_disable) has already marked the set with
 * RDC_DISABLEPEND and waited for it to go idle.  This routine verifies
 * the user-supplied set description, drains any outstanding async I/O,
 * detaches the set from its interface, group and 1-many/multi chains,
 * releases the bitmap and devices, and finally re-initializes the
 * user-visible state.  Returns 0 on success or an RDC_E* code with the
 * detail added to kstatus.
 *
 * Locking: alternates between the per-group lock and rdc_conf_lock;
 * several steps (rdc_dump_alloc_bufs_cd, rdc_unintercept_diskq) must be
 * performed with no conflicting locks held, hence the repeated
 * enter/exit pairs below.
 */
/* ARGSUSED */
static int
_rdc_disable(rdc_k_info_t *krdc, rdc_config_t *uap, spcs_s_info_t kstatus)
{
	rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
	rdc_if_t *ip;
	int index = krdc->index;
	disk_queue *q;
	rdc_set_t *rdc_set = uap->rdc_set;

	ASSERT(krdc->group != NULL);
	rdc_group_enter(krdc);
#ifdef DEBUG
	ASSERT(rdc_check(krdc, rdc_set) == 0);
#else
	/*
	 * Unless a force-disable was requested, make sure the set the
	 * user described really matches this krdc before destroying it.
	 */
	if (((uap->options & RDC_OPT_FORCE_DISABLE) == 0) &&
	    rdc_check(krdc, rdc_set)) {
		rdc_group_exit(krdc);
		spcs_s_add(kstatus, RDC_EALREADY, rdc_set->primary.file,
		    rdc_set->secondary.file);
		return (RDC_EALREADY);
	}
#endif

	if (rdc_get_vflags(urdc) & RDC_PRIMARY) {
		/* stop any sync that is currently in progress */
		halt_sync(krdc);
		ASSERT(IS_ENABLED(urdc));
	}
	q = &krdc->group->diskq;

	/*
	 * An async set with a non-empty disk queue that is still
	 * replicating cannot be disabled without losing queued data;
	 * back out the pending-disable mark and fail.
	 */
	if (IS_ASYNC(urdc) && RDC_IS_DISKQ(krdc->group) &&
	    ((!IS_STATE(urdc, RDC_LOGGING)) && (!QEMPTY(q)))) {
		krdc->type_flag &= ~RDC_DISABLEPEND;
		rdc_group_exit(krdc);
		spcs_s_add(kstatus, RDC_EQNOTEMPTY, urdc->disk_queue);
		return (RDC_EQNOTEMPTY);
	}
	rdc_group_exit(krdc);
	(void) rdc_unintercept(krdc);

#ifdef DEBUG
	cmn_err(CE_NOTE, "!SNDR: disabled %s %s", urdc->primary.file,
	    urdc->secondary.file);
#endif

	/* Configured but not enabled */
	ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));

	/*
	 * No new io can come in through the io provider.
	 * Wait for the async flusher to finish.
	 */

	if (IS_ASYNC(urdc) && !RDC_IS_DISKQ(krdc->group)) {
		int tries = 2; /* in case of hopelessly stuck flusher threads */
#ifdef DEBUG
		net_queue *qp = &krdc->group->ra_queue;
#endif
		/*
		 * Kick a writer if none is running, then try to drain
		 * the memory queue; give up after a bounded number of
		 * attempts so a wedged flusher cannot hang the disable.
		 */
		do {
			if (!krdc->group->rdc_writer)
				(void) rdc_writer(krdc->index);

			(void) rdc_drain_queue(krdc->index);

		} while (krdc->group->rdc_writer && tries--);

		/* ok, force it to happen... */
		if (rdc_drain_queue(krdc->index) != 0) {
			/*
			 * Set asyncdis and wake all queue waiters until
			 * every flusher thread has exited; queued data
			 * for this set is abandoned (warned below).
			 */
			do {
				mutex_enter(&krdc->group->ra_queue.net_qlock);
				krdc->group->asyncdis = 1;
				cv_broadcast(&krdc->group->asyncqcv);
				mutex_exit(&krdc->group->ra_queue.net_qlock);
				cmn_err(CE_WARN,
				    "!SNDR: async I/O pending and not flushed "
				    "for %s during disable",
				    urdc->primary.file);
#ifdef DEBUG
				cmn_err(CE_WARN,
				    "!nitems: %" NSC_SZFMT " nblocks: %"
				    NSC_SZFMT " head: 0x%p tail: 0x%p",
				    qp->nitems, qp->blocks,
				    (void *)qp->net_qhead,
				    (void *)qp->net_qtail);
#endif
			} while (krdc->group->rdc_thrnum > 0);
		}
	}

	/* detach this set from its network interface */
	mutex_enter(&rdc_conf_lock);
	ip = krdc->intf;
	krdc->intf = 0;

	if (ip) {
		rdc_remove_from_if(ip);
	}

	mutex_exit(&rdc_conf_lock);

	rdc_group_enter(krdc);

	/* Configured but not enabled */
	ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));

	/* Must not hold group lock during this function */
	rdc_group_exit(krdc);
	while (rdc_dump_alloc_bufs_cd(krdc->index) == EAGAIN)
		delay(2);
	rdc_group_enter(krdc);

	(void) rdc_clear_state(krdc);

	rdc_free_bitmap(krdc, RDC_CMD_DISABLE);
	rdc_close_bitmap(krdc);

	rdc_dev_close(krdc);
	rdc_close_direct(krdc);

	/* Configured but not enabled */
	ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));

	rdc_group_exit(krdc);

	/*
	 * we should now unregister the queue, with no conflicting
	 * locks held. This is the last(only) member of the group
	 */
	if (krdc->group && RDC_IS_DISKQ(krdc->group) &&
	    krdc->group->count == 1) { /* stop protecting queue */
		rdc_unintercept_diskq(krdc->group);
	}

	mutex_enter(&rdc_conf_lock);

	/* Configured but not enabled */
	ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));

	wait_busy(krdc);

	if (IS_MANY(krdc) || IS_MULTI(krdc))
		remove_from_many(krdc);

	remove_from_group(krdc);

	/* scrub the kernel-side state back to "unconfigured" */
	krdc->remote_index = -1;
	ASSERT(krdc->type_flag & RDC_CONFIGURED);
	ASSERT(krdc->type_flag & RDC_DISABLEPEND);
	krdc->type_flag = 0;
#ifdef DEBUG
	if (krdc->dcio_bitmap)
		cmn_err(CE_WARN, "!_rdc_disable: possible mem leak, "
		    "dcio_bitmap");
#endif
	krdc->dcio_bitmap = NULL;
	krdc->bitmap_ref = NULL;
	krdc->bitmap_size = 0;
	krdc->maxfbas = 0;
	krdc->bitmap_write = 0;
	krdc->disk_status = 0;
	rdc_destroy_svinfo(krdc->lsrv);
	krdc->lsrv = NULL;
	krdc->multi_next = NULL;

	rdc_u_init(urdc);

	mutex_exit(&rdc_conf_lock);
	rdc_kstat_delete(index);

	return (0);
}
2625 2625
2626 2626 static int
2627 2627 rdc_disable(rdc_config_t *uparms, spcs_s_info_t kstatus)
2628 2628 {
2629 2629 rdc_k_info_t *krdc;
2630 2630 int index;
2631 2631 int rc;
2632 2632
2633 2633 mutex_enter(&rdc_conf_lock);
2634 2634
2635 2635 index = rdc_lookup_byname(uparms->rdc_set);
2636 2636 if (index >= 0)
2637 2637 krdc = &rdc_k_info[index];
2638 2638 if (index < 0 || (krdc->type_flag & RDC_DISABLEPEND)) {
2639 2639 mutex_exit(&rdc_conf_lock);
2640 2640 spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
2641 2641 uparms->rdc_set->secondary.file);
2642 2642 return (RDC_EALREADY);
2643 2643 }
2644 2644
2645 2645 krdc->type_flag |= RDC_DISABLEPEND;
2646 2646 wait_busy(krdc);
2647 2647 if (krdc->type_flag == 0) {
2648 2648 /* A resume or enable failed */
2649 2649 mutex_exit(&rdc_conf_lock);
2650 2650 spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
2651 2651 uparms->rdc_set->secondary.file);
2652 2652 return (RDC_EALREADY);
2653 2653 }
2654 2654 mutex_exit(&rdc_conf_lock);
2655 2655
2656 2656 rc = _rdc_disable(krdc, uparms, kstatus);
2657 2657 return (rc);
2658 2658 }
2659 2659
2660 2660
/*
 * Checks whether the state of one of the other sets in the 1-many or
 * multi-hop config should prevent a sync from starting on this one.
 * Return NULL if no just cause or impediment is found, otherwise return
 * a pointer to the offending set.
 *
 * options carries RDC_OPT_FORWARD or RDC_OPT_REVERSE to indicate the
 * direction of the proposed sync.  The whole scan runs under the
 * rdc_many lock so the 1-many / multi-hop chains cannot change
 * underneath us.  As a side effect, when the sync is allowed this
 * routine also clears now-superseded SYNC_NEEDED / RSYNC_NEEDED flags
 * on related sets (see comments below).
 */
static rdc_u_info_t *
rdc_allow_pri_sync(rdc_u_info_t *urdc, int options)
{
	rdc_k_info_t *krdc = &rdc_k_info[urdc->index];
	rdc_k_info_t *ktmp;
	rdc_u_info_t *utmp;
	rdc_k_info_t *kmulti = NULL;

	ASSERT(rdc_get_vflags(urdc) & RDC_PRIMARY);

	rdc_many_enter(krdc);

	/*
	 * In the reverse sync case we need to check the previous leg of
	 * the multi-hop config. The link to that set can be from any of
	 * the 1-many list, so as we go through we keep an eye open for it.
	 */
	if ((options & RDC_OPT_REVERSE) && (IS_MULTI(krdc))) {
		/* This set links to the first leg */
		ktmp = krdc->multi_next;
		utmp = &rdc_u_info[ktmp->index];
		if (IS_ENABLED(utmp))
			kmulti = ktmp;
	}

	/* walk the 1-many ring looking for a set that vetoes the sync */
	if (IS_MANY(krdc)) {
		for (ktmp = krdc->many_next; ktmp != krdc;
		    ktmp = ktmp->many_next) {
			utmp = &rdc_u_info[ktmp->index];

			if (!IS_ENABLED(utmp))
				continue;

			if (options & RDC_OPT_FORWARD) {
				/*
				 * Reverse sync needed is bad, as it means a
				 * reverse sync in progress or started and
				 * didn't complete, so this primary volume
				 * is not consistent. So we shouldn't copy
				 * it to its secondary.
				 */
				if (rdc_get_mflags(utmp) & RDC_RSYNC_NEEDED) {
					rdc_many_exit(krdc);
					return (utmp);
				}
			} else {
				/* Reverse, so see if we need to spot kmulti */
				if ((kmulti == NULL) && (IS_MULTI(ktmp))) {
					/* This set links to the first leg */
					kmulti = ktmp->multi_next;
					if (!IS_ENABLED(
					    &rdc_u_info[kmulti->index]))
						kmulti = NULL;
				}

				/*
				 * Non-logging is bad, as the bitmap will
				 * be updated with the bits for this sync.
				 */
				if (!(rdc_get_vflags(utmp) & RDC_LOGGING)) {
					rdc_many_exit(krdc);
					return (utmp);
				}
			}
		}
	}

	/* now vet the previous multi-hop leg, if one was found above */
	if (kmulti) {
		utmp = &rdc_u_info[kmulti->index];
		ktmp = kmulti; /* In case we decide we do need to use ktmp */

		ASSERT(options & RDC_OPT_REVERSE);

		if (IS_REPLICATING(utmp)) {
			/*
			 * Replicating is bad as data is already flowing to
			 * the target of the requested sync operation.
			 */
			rdc_many_exit(krdc);
			return (utmp);
		}

		if (rdc_get_vflags(utmp) & RDC_SYNCING) {
			/*
			 * Forward sync in progress is bad, as data is
			 * already flowing to the target of the requested
			 * sync operation.
			 * Reverse sync in progress is bad, as the primary
			 * has already decided which data to copy.
			 */
			rdc_many_exit(krdc);
			return (utmp);
		}

		/*
		 * Clear the "sync needed" flags, as the multi-hop secondary
		 * will be updated via this requested sync operation, so does
		 * not need to complete its aborted forward sync.
		 */
		if (rdc_get_vflags(utmp) & RDC_SYNC_NEEDED)
			rdc_clr_flags(utmp, RDC_SYNC_NEEDED);
	}

	if (IS_MANY(krdc) && (options & RDC_OPT_REVERSE)) {
		for (ktmp = krdc->many_next; ktmp != krdc;
		    ktmp = ktmp->many_next) {
			utmp = &rdc_u_info[ktmp->index];
			if (!IS_ENABLED(utmp))
				continue;

			/*
			 * Clear any "reverse sync needed" flags, as the
			 * volume will be updated via this requested
			 * sync operation, so does not need to complete
			 * its aborted reverse sync.
			 */
			if (rdc_get_mflags(utmp) & RDC_RSYNC_NEEDED)
				rdc_clr_mflags(utmp, RDC_RSYNC_NEEDED);
		}
	}

	rdc_many_exit(krdc);

	/* no impediment found; the sync may proceed */
	return (NULL);
}
2792 2792
/*
 * _rdc_sync_wrthr: forward-sync worker.
 *
 * Reads the [offset, offset+len) range described by thrinfo from the
 * local volume and pushes it to the remote side with rdc_net_write().
 * On any failure the starting offset is recorded in
 * syncinfo->status->offset so that _rdc_sync_thread() (and ultimately
 * _rdc_sync()) can see that this segment did not complete; on success
 * that field is left untouched.
 *
 * Caller (_rdc_sync_thread) has already reserved the devices.
 */
static void
_rdc_sync_wrthr(void *thrinfo)
{
	rdc_syncthr_t *syncinfo = (rdc_syncthr_t *)thrinfo;
	nsc_buf_t *handle = NULL;
	rdc_k_info_t *krdc = syncinfo->krdc;
	int rc;
	int tries = 0;		/* EPROTO retry count, capped at 3 below */

	DTRACE_PROBE2(rdc_sync_loop_netwrite_start, int, krdc->index,
	    nsc_buf_t *, handle);

retry:
	/* read the source data for this segment into a fresh buffer */
	rc = nsc_alloc_buf(RDC_U_FD(krdc), syncinfo->offset, syncinfo->len,
	    NSC_READ | NSC_NOCACHE, &handle);

	if (!RDC_SUCCESS(rc) || krdc->remote_index < 0) {
		DTRACE_PROBE(rdc_sync_wrthr_alloc_buf_err);
		goto failed;
	}

	/* bail out if the set is being torn down underneath us */
	rdc_group_enter(krdc);
	if ((krdc->disk_status == 1) || (krdc->dcio_bitmap == NULL)) {
		rdc_group_exit(krdc);
		goto failed;
	}
	rdc_group_exit(krdc);

	if ((rc = rdc_net_write(krdc->index, krdc->remote_index, handle,
	    handle->sb_pos, handle->sb_len, RDC_NOSEQ, RDC_NOQUE, NULL)) > 0) {
		rdc_u_info_t *urdc = &rdc_u_info[krdc->index];

		/*
		 * The following is to handle
		 * the case where the secondary side
		 * has thrown our buffer handle token away in a
		 * attempt to preserve its health on restart
		 */
		if ((rc == EPROTO) && (tries < 3)) {
			(void) nsc_free_buf(handle);
			handle = NULL;
			tries++;
			delay(HZ >> 2);	/* back off briefly before retrying */
			goto retry;
		}

		DTRACE_PROBE(rdc_sync_wrthr_remote_write_err);
		cmn_err(CE_WARN, "!rdc_sync_wrthr: remote write failed (%d) "
		    "0x%x", rc, rdc_get_vflags(urdc));

		goto failed;
	}
	(void) nsc_free_buf(handle);
	handle = NULL;

	return;
failed:
	/*
	 * NOTE(review): handle may still be NULL here when nsc_alloc_buf
	 * failed; this relies on nsc_free_buf tolerating that — confirm.
	 */
	(void) nsc_free_buf(handle);
	/* flag this segment as incomplete for _rdc_sync_status_ok() */
	syncinfo->status->offset = syncinfo->offset;
}
2853 2853
/*
 * _rdc_sync_rdthr: reverse-sync worker (see _rdc_sync_wrthr above for
 * the forward direction and the failure-reporting convention).
 *
 * Pulls the [offset, offset+len) range from the remote side with
 * rdc_net_read() and writes it through to the local volume.  For a
 * non-full (update) sync the transferred blocks are also propagated
 * into the bitmaps of related 1-many sets via rdc_set_bitmap_many().
 * Failures are recorded by storing the segment offset into
 * syncinfo->status->offset.
 */
static void
_rdc_sync_rdthr(void *thrinfo)
{
	rdc_syncthr_t *syncinfo = (rdc_syncthr_t *)thrinfo;
	nsc_buf_t *handle = NULL;
	rdc_k_info_t *krdc = syncinfo->krdc;
	rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
	int rc;

	/* allocate a write-through buffer over the local target range */
	rc = nsc_alloc_buf(RDC_U_FD(krdc), syncinfo->offset, syncinfo->len,
	    NSC_WRITE | NSC_WRTHRU | NSC_NOCACHE, &handle);

	if (!RDC_SUCCESS(rc) || krdc->remote_index < 0) {
		goto failed;
	}
	/* bail out if the set is being torn down underneath us */
	rdc_group_enter(krdc);
	if ((krdc->disk_status == 1) || (krdc->dcio_bitmap == NULL)) {
		rdc_group_exit(krdc);
		goto failed;
	}
	rdc_group_exit(krdc);

	/* fetch the remote copy of this segment into the buffer */
	rc = rdc_net_read(krdc->index, krdc->remote_index, handle,
	    handle->sb_pos, handle->sb_len);

	if (!RDC_SUCCESS(rc)) {
		cmn_err(CE_WARN, "!rdc_sync_rdthr: remote read failed(%d)", rc);
		goto failed;
	}
	if (!IS_STATE(urdc, RDC_FULL))
		rdc_set_bitmap_many(krdc, handle->sb_pos, handle->sb_len);

	/* commit the fetched data to the local volume */
	rc = nsc_write(handle, handle->sb_pos, handle->sb_len, 0);

	if (!RDC_SUCCESS(rc)) {
		rdc_many_enter(krdc);
		rdc_set_flags_log(urdc, RDC_VOL_FAILED, "nsc_write failed");
		rdc_many_exit(krdc);
		rdc_write_state(urdc);
		goto failed;
	}

	(void) nsc_free_buf(handle);
	handle = NULL;

	return;
failed:
	(void) nsc_free_buf(handle);
	/* flag this segment as incomplete for _rdc_sync_status_ok() */
	syncinfo->status->offset = syncinfo->offset;
}
2907 2907
/*
 * _rdc_sync_thread: sync-loop worker thread entry point.
 *
 * Dispatched by the sync loop when spare threads are available so that
 * the write/read of a segment can proceed in parallel with the main
 * loop.  Reserves the devices, runs either the read-side
 * (_rdc_sync_rdthr, when the set is in SLAVE state, i.e. reverse sync)
 * or write-side (_rdc_sync_wrthr) worker, clears the segment's bitmap
 * bits on success, and then performs the global and per-set thread
 * accounting before exiting.  thrinfo is freed here; the status chain
 * it points into remains owned by _rdc_sync().
 */
void
_rdc_sync_thread(void *thrinfo)
{
	rdc_syncthr_t *syncinfo = (rdc_syncthr_t *)thrinfo;
	rdc_k_info_t *krdc = syncinfo->krdc;
	rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
	rdc_thrsync_t *sync = &krdc->syncs;
	uint_t bitmask;
	int rc;

	rc = _rdc_rsrv_devs(krdc, RDC_RAW, RDC_INTERNAL);
	if (!RDC_SUCCESS(rc))
		goto failed;

	/* SLAVE state means this side is the target of a reverse sync */
	if (IS_STATE(urdc, RDC_SLAVE))
		_rdc_sync_rdthr(thrinfo);
	else
		_rdc_sync_wrthr(thrinfo);

	_rdc_rlse_devs(krdc, RDC_RAW);

	if (krdc->dcio_bitmap == NULL) {
#ifdef DEBUG
		/* message text predates this wrapper; it is in this func */
		cmn_err(CE_NOTE, "!_rdc_sync_wrthr: NULL bitmap");
#else
		/*EMPTY*/
#endif
	} else if (syncinfo->status->offset < 0) {
		/*
		 * offset still < 0 means the worker did not record a
		 * failure, so the segment transferred: clear its bits.
		 */
		RDC_SET_BITMASK(syncinfo->offset, syncinfo->len, &bitmask);
		RDC_CLR_BITMAP(krdc, syncinfo->offset, syncinfo->len, \
		    bitmask, RDC_BIT_FORCE);
	}

failed:
	/*
	 * done with this, get rid of it.
	 * the status is not freed, it should still be a status chain
	 * that _rdc_sync() has the head of
	 */
	kmem_free(syncinfo, sizeof (*syncinfo));

	/*
	 * decrement the global sync thread num
	 */
	mutex_enter(&sync_info.lock);
	sync_info.active_thr--;
	/* LINTED */
	RDC_AVAIL_THR_TUNE(sync_info);
	mutex_exit(&sync_info.lock);

	/*
	 * krdc specific stuff: mark this segment complete and wake the
	 * sync loop, which waits on sync->cv for its workers.
	 */
	mutex_enter(&sync->lock);
	sync->complete++;
	cv_broadcast(&sync->cv);
	mutex_exit(&sync->lock);
}
2974 2974
2975 2975 int
2976 2976 _rdc_setup_syncthr(rdc_syncthr_t **synthr, nsc_off_t offset,
2977 2977 nsc_size_t len, rdc_k_info_t *krdc, sync_status_t *stats)
2978 2978 {
2979 2979 rdc_syncthr_t *tmp;
2980 2980 /* alloc here, free in the sync thread */
2981 2981 tmp =
2982 2982 (rdc_syncthr_t *)kmem_zalloc(sizeof (rdc_syncthr_t), KM_NOSLEEP);
2983 2983
2984 2984 if (tmp == NULL)
2985 2985 return (-1);
2986 2986 tmp->offset = offset;
2987 2987 tmp->len = len;
2988 2988 tmp->status = stats;
2989 2989 tmp->krdc = krdc;
2990 2990
2991 2991 *synthr = tmp;
2992 2992 return (0);
2993 2993 }
2994 2994
2995 2995 sync_status_t *
2996 2996 _rdc_new_sync_status()
2997 2997 {
2998 2998 sync_status_t *s;
2999 2999
3000 3000 s = (sync_status_t *)kmem_zalloc(sizeof (*s), KM_NOSLEEP);
3001 3001 s->offset = -1;
3002 3002 return (s);
3003 3003 }
3004 3004
3005 3005 void
3006 3006 _rdc_free_sync_status(sync_status_t *status)
3007 3007 {
3008 3008 sync_status_t *s;
3009 3009
3010 3010 while (status) {
3011 3011 s = status->next;
3012 3012 kmem_free(status, sizeof (*status));
3013 3013 status = s;
3014 3014 }
3015 3015 }
3016 3016 int
3017 3017 _rdc_sync_status_ok(sync_status_t *status, int *offset)
3018 3018 {
3019 3019 #ifdef DEBUG_SYNCSTATUS
3020 3020 int i = 0;
3021 3021 #endif
3022 3022 while (status) {
3023 3023 if (status->offset >= 0) {
3024 3024 *offset = status->offset;
3025 3025 return (-1);
3026 3026 }
3027 3027 status = status->next;
3028 3028 #ifdef DEBUG_SYNCSTATUS
3029 3029 i++;
3030 3030 #endif
3031 3031 }
3032 3032 #ifdef DEBUGSYNCSTATUS
3033 3033 cmn_err(CE_NOTE, "!rdc_sync_status_ok: checked %d statuses", i);
3034 3034 #endif
3035 3035 return (0);
3036 3036 }
3037 3037
/*
 * Tunable flag, nonzero by default.  NOTE(review): its consumer is not
 * visible in this part of the file — presumably _rdc_sync() uses it to
 * decide whether to multithread the sync loop; confirm before relying
 * on this description.
 */
int mtsync = 1;
3039 3039 /*
3040 3040 * _rdc_sync() : rdc sync loop
3041 3041 *
3042 3042 */
3043 3043 static void
3044 3044 _rdc_sync(rdc_k_info_t *krdc)
3045 3045 {
3046 3046 nsc_size_t size = 0;
3047 3047 rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
3048 3048 int rtype;
3049 3049 int sts;
3050 3050 int reserved = 0;
3051 3051 nsc_buf_t *alloc_h = NULL;
3052 3052 nsc_buf_t *handle = NULL;
3053 3053 nsc_off_t mask;
3054 3054 nsc_size_t maxbit;
3055 3055 nsc_size_t len;
3056 3056 nsc_off_t offset = 0;
3057 3057 int sync_completed = 0;
3058 3058 int tries = 0;
3059 3059 int rc;
3060 3060 int queuing = 0;
3061 3061 uint_t bitmask;
3062 3062 sync_status_t *ss, *sync_status = NULL;
3063 3063 rdc_thrsync_t *sync = &krdc->syncs;
3064 3064 rdc_syncthr_t *syncinfo;
3065 3065 nsthread_t *trc = NULL;
3066 3066
3067 3067 if (IS_STATE(urdc, RDC_QUEUING) && !IS_STATE(urdc, RDC_FULL)) {
3068 3068 /* flusher is handling the sync in the update case */
3069 3069 queuing = 1;
3070 3070 goto sync_done;
3071 3071 }
3072 3072
3073 3073 /*
3074 3074 * Main sync/resync loop
3075 3075 */
3076 3076 DTRACE_PROBE(rdc_sync_loop_start);
3077 3077
3078 3078 rtype = RDC_RAW;
3079 3079 sts = _rdc_rsrv_devs(krdc, rtype, RDC_INTERNAL);
3080 3080
3081 3081 DTRACE_PROBE(rdc_sync_loop_rsrv);
3082 3082
3083 3083 if (sts != 0)
3084 3084 goto failed_noincr;
3085 3085
3086 3086 reserved = 1;
3087 3087
3088 3088 /*
3089 3089 * pre-allocate a handle if we can - speeds up the sync.
3090 3090 */
3091 3091
3092 3092 if (rdc_prealloc_handle) {
3093 3093 alloc_h = nsc_alloc_handle(RDC_U_FD(krdc), NULL, NULL, NULL);
3094 3094 #ifdef DEBUG
3095 3095 if (!alloc_h) {
3096 3096 cmn_err(CE_WARN,
3097 3097 "!rdc sync: failed to pre-alloc handle");
3098 3098 }
3099 3099 #endif
3100 3100 } else {
3101 3101 alloc_h = NULL;
3102 3102 }
3103 3103
3104 3104 ASSERT(urdc->volume_size != 0);
3105 3105 size = urdc->volume_size;
3106 3106 mask = ~(LOG_TO_FBA_NUM(1) - 1);
3107 3107 maxbit = FBA_TO_LOG_NUM(size - 1);
3108 3108
3109 3109 /*
3110 3110 * as this while loop can also move data, it is counted as a
3111 3111 * sync loop thread
3112 3112 */
3113 3113 rdc_group_enter(krdc);
3114 3114 rdc_clr_flags(urdc, RDC_LOGGING);
3115 3115 rdc_set_flags(urdc, RDC_SYNCING);
3116 3116 krdc->group->synccount++;
3117 3117 rdc_group_exit(krdc);
3118 3118 mutex_enter(&sync_info.lock);
3119 3119 sync_info.active_thr++;
3120 3120 /* LINTED */
3121 3121 RDC_AVAIL_THR_TUNE(sync_info);
3122 3122 mutex_exit(&sync_info.lock);
3123 3123
3124 3124 while (offset < size) {
3125 3125 rdc_group_enter(krdc);
3126 3126 ASSERT(krdc->aux_state & RDC_AUXSYNCIP);
3127 3127 if (krdc->disk_status == 1 || krdc->dcio_bitmap == NULL) {
3128 3128 rdc_group_exit(krdc);
3129 3129 if (krdc->disk_status == 1) {
3130 3130 DTRACE_PROBE(rdc_sync_loop_disk_status_err);
3131 3131 } else {
3132 3132 DTRACE_PROBE(rdc_sync_loop_dcio_bitmap_err);
3133 3133 }
3134 3134 goto failed; /* halt sync */
3135 3135 }
3136 3136 rdc_group_exit(krdc);
3137 3137
3138 3138 if (!(rdc_get_vflags(urdc) & RDC_FULL)) {
3139 3139 mutex_enter(&krdc->syncbitmutex);
3140 3140 krdc->syncbitpos = FBA_TO_LOG_NUM(offset);
3141 3141 len = 0;
3142 3142
3143 3143 /* skip unnecessary chunks */
3144 3144
3145 3145 while (krdc->syncbitpos <= maxbit &&
3146 3146 !RDC_BIT_ISSET(krdc, krdc->syncbitpos)) {
3147 3147 offset += LOG_TO_FBA_NUM(1);
3148 3148 krdc->syncbitpos++;
3149 3149 }
3150 3150
3151 3151 /* check for boundary */
3152 3152
3153 3153 if (offset >= size) {
3154 3154 mutex_exit(&krdc->syncbitmutex);
3155 3155 goto sync_done;
3156 3156 }
3157 3157
3158 3158 /* find maximal length we can transfer */
3159 3159
3160 3160 while (krdc->syncbitpos <= maxbit &&
3161 3161 RDC_BIT_ISSET(krdc, krdc->syncbitpos)) {
3162 3162 len += LOG_TO_FBA_NUM(1);
3163 3163 krdc->syncbitpos++;
3164 3164 /* we can only read maxfbas anyways */
3165 3165 if (len >= krdc->maxfbas)
3166 3166 break;
3167 3167 }
3168 3168
3169 3169 len = min(len, (size - offset));
3170 3170
3171 3171 } else {
3172 3172 len = size - offset;
3173 3173 }
3174 3174
3175 3175 /* truncate to the io provider limit */
3176 3176 ASSERT(krdc->maxfbas != 0);
3177 3177 len = min(len, krdc->maxfbas);
3178 3178
3179 3179 if (len > LOG_TO_FBA_NUM(1)) {
3180 3180 /*
3181 3181 * If the update is larger than a bitmap chunk,
3182 3182 * then truncate to a whole number of bitmap
3183 3183 * chunks.
3184 3184 *
3185 3185 * If the update is smaller than a bitmap
3186 3186 * chunk, this must be the last write.
3187 3187 */
3188 3188 len &= mask;
3189 3189 }
3190 3190
3191 3191 if (!(rdc_get_vflags(urdc) & RDC_FULL)) {
3192 3192 krdc->syncbitpos = FBA_TO_LOG_NUM(offset + len);
3193 3193 mutex_exit(&krdc->syncbitmutex);
3194 3194 }
3195 3195
3196 3196 /*
3197 3197 * Find out if we can reserve a thread here ...
3198 3198 * note: skip the mutex for the first check, if the number
3199 3199 * is up there, why bother even grabbing the mutex to
3200 3200 * only realize that we can't have a thread anyways
3201 3201 */
3202 3202
3203 3203 if (mtsync && sync_info.active_thr < RDC_MAX_SYNC_THREADS) {
3204 3204
3205 3205 mutex_enter(&sync_info.lock);
3206 3206 if (sync_info.avail_thr >= 1) {
3207 3207 if (sync_status == NULL) {
3208 3208 ss = sync_status =
3209 3209 _rdc_new_sync_status();
3210 3210 } else {
3211 3211 ss = ss->next = _rdc_new_sync_status();
3212 3212 }
3213 3213 if (ss == NULL) {
3214 3214 mutex_exit(&sync_info.lock);
3215 3215 #ifdef DEBUG
3216 3216 cmn_err(CE_WARN, "!rdc_sync: can't "
3217 3217 "allocate status for mt sync");
3218 3218 #endif
3219 3219 goto retry;
3220 3220 }
3221 3221 /*
3222 3222 * syncinfo protected by sync_info lock but
3223 3223 * not part of the sync_info structure
3224 3224 * be careful if moving
3225 3225 */
3226 3226 if (_rdc_setup_syncthr(&syncinfo,
3227 3227 offset, len, krdc, ss) < 0) {
3228 3228 _rdc_free_sync_status(ss);
3229 3229 }
3230 3230
3231 3231 trc = nst_create(sync_info.rdc_syncset,
3232 3232 _rdc_sync_thread, syncinfo, NST_SLEEP);
3233 3233
3234 3234 if (trc == NULL) {
3235 3235 mutex_exit(&sync_info.lock);
3236 3236 #ifdef DEBUG
3237 3237 cmn_err(CE_NOTE, "!rdc_sync: unable to "
3238 3238 "mt sync");
3239 3239 #endif
3240 3240 _rdc_free_sync_status(ss);
3241 3241 kmem_free(syncinfo, sizeof (*syncinfo));
3242 3242 syncinfo = NULL;
3243 3243 goto retry;
3244 3244 } else {
3245 3245 mutex_enter(&sync->lock);
3246 3246 sync->threads++;
3247 3247 mutex_exit(&sync->lock);
3248 3248 }
3249 3249
3250 3250 sync_info.active_thr++;
3251 3251 /* LINTED */
3252 3252 RDC_AVAIL_THR_TUNE(sync_info);
3253 3253
3254 3254 mutex_exit(&sync_info.lock);
3255 3255 goto threaded;
3256 3256 }
3257 3257 mutex_exit(&sync_info.lock);
3258 3258 }
3259 3259 retry:
3260 3260 handle = alloc_h;
3261 3261 DTRACE_PROBE(rdc_sync_loop_allocbuf_start);
3262 3262 if (rdc_get_vflags(urdc) & RDC_SLAVE)
3263 3263 sts = nsc_alloc_buf(RDC_U_FD(krdc), offset, len,
3264 3264 NSC_WRITE | NSC_WRTHRU | NSC_NOCACHE, &handle);
3265 3265 else
3266 3266 sts = nsc_alloc_buf(RDC_U_FD(krdc), offset, len,
3267 3267 NSC_READ | NSC_NOCACHE, &handle);
3268 3268
3269 3269 DTRACE_PROBE(rdc_sync_loop_allocbuf_end);
3270 3270 if (sts > 0) {
3271 3271 if (handle && handle != alloc_h) {
3272 3272 (void) nsc_free_buf(handle);
3273 3273 }
3274 3274
3275 3275 handle = NULL;
3276 3276 DTRACE_PROBE(rdc_sync_loop_allocbuf_err);
3277 3277 goto failed;
3278 3278 }
3279 3279
3280 3280 if (rdc_get_vflags(urdc) & RDC_SLAVE) {
3281 3281 /* overwrite buffer with remote data */
3282 3282 sts = rdc_net_read(krdc->index, krdc->remote_index,
3283 3283 handle, handle->sb_pos, handle->sb_len);
3284 3284
3285 3285 if (!RDC_SUCCESS(sts)) {
3286 3286 #ifdef DEBUG
3287 3287 cmn_err(CE_WARN,
3288 3288 "!rdc sync: remote read failed (%d)", sts);
3289 3289 #endif
3290 3290 DTRACE_PROBE(rdc_sync_loop_remote_read_err);
3291 3291 goto failed;
3292 3292 }
3293 3293 if (!(rdc_get_vflags(urdc) & RDC_FULL))
3294 3294 rdc_set_bitmap_many(krdc, handle->sb_pos,
3295 3295 handle->sb_len);
3296 3296
3297 3297 /* commit locally */
3298 3298
3299 3299 sts = nsc_write(handle, handle->sb_pos,
3300 3300 handle->sb_len, 0);
3301 3301
3302 3302 if (!RDC_SUCCESS(sts)) {
3303 3303 /* reverse sync needed already set */
3304 3304 rdc_many_enter(krdc);
3305 3305 rdc_set_flags_log(urdc, RDC_VOL_FAILED,
3306 3306 "write failed during sync");
3307 3307 rdc_many_exit(krdc);
3308 3308 rdc_write_state(urdc);
3309 3309 DTRACE_PROBE(rdc_sync_loop_nsc_write_err);
3310 3310 goto failed;
3311 3311 }
3312 3312 } else {
3313 3313 /* send local data to remote */
3314 3314 DTRACE_PROBE2(rdc_sync_loop_netwrite_start,
3315 3315 int, krdc->index, nsc_buf_t *, handle);
3316 3316
3317 3317 if ((sts = rdc_net_write(krdc->index,
3318 3318 krdc->remote_index, handle, handle->sb_pos,
3319 3319 handle->sb_len, RDC_NOSEQ, RDC_NOQUE, NULL)) > 0) {
3320 3320
3321 3321 /*
3322 3322 * The following is to handle
3323 3323 * the case where the secondary side
3324 3324 * has thrown our buffer handle token away in a
3325 3325 * attempt to preserve its health on restart
3326 3326 */
3327 3327 if ((sts == EPROTO) && (tries < 3)) {
3328 3328 (void) nsc_free_buf(handle);
3329 3329 handle = NULL;
3330 3330 tries++;
3331 3331 delay(HZ >> 2);
3332 3332 goto retry;
3333 3333 }
3334 3334 #ifdef DEBUG
3335 3335 cmn_err(CE_WARN,
3336 3336 "!rdc sync: remote write failed (%d) 0x%x",
3337 3337 sts, rdc_get_vflags(urdc));
3338 3338 #endif
3339 3339 DTRACE_PROBE(rdc_sync_loop_netwrite_err);
3340 3340 goto failed;
3341 3341 }
3342 3342 DTRACE_PROBE(rdc_sync_loop_netwrite_end);
3343 3343 }
3344 3344
3345 3345 (void) nsc_free_buf(handle);
3346 3346 handle = NULL;
3347 3347
3348 3348 if (krdc->dcio_bitmap == NULL) {
3349 3349 #ifdef DEBUG
3350 3350 cmn_err(CE_NOTE, "!_rdc_sync: NULL bitmap");
3351 3351 #else
3352 3352 ;
3353 3353 /*EMPTY*/
3354 3354 #endif
3355 3355 } else {
3356 3356
3357 3357 RDC_SET_BITMASK(offset, len, &bitmask);
3358 3358 RDC_CLR_BITMAP(krdc, offset, len, bitmask, \
3359 3359 RDC_BIT_FORCE);
3360 3360 ASSERT(!IS_ASYNC(urdc));
3361 3361 }
3362 3362
3363 3363 /*
3364 3364 * Only release/reserve if someone is waiting
3365 3365 */
3366 3366 if (krdc->devices->id_release || nsc_waiting(RDC_U_FD(krdc))) {
3367 3367 DTRACE_PROBE(rdc_sync_loop_rlse_start);
3368 3368 if (alloc_h) {
3369 3369 (void) nsc_free_handle(alloc_h);
3370 3370 alloc_h = NULL;
3371 3371 }
3372 3372
3373 3373 _rdc_rlse_devs(krdc, rtype);
3374 3374 reserved = 0;
3375 3375 delay(2);
3376 3376
3377 3377 rtype = RDC_RAW;
3378 3378 sts = _rdc_rsrv_devs(krdc, rtype, RDC_INTERNAL);
3379 3379 if (sts != 0) {
3380 3380 handle = NULL;
3381 3381 DTRACE_PROBE(rdc_sync_loop_rdc_rsrv_err);
3382 3382 goto failed;
3383 3383 }
3384 3384
3385 3385 reserved = 1;
3386 3386
3387 3387 if (rdc_prealloc_handle) {
3388 3388 alloc_h = nsc_alloc_handle(RDC_U_FD(krdc),
3389 3389 NULL, NULL, NULL);
3390 3390 #ifdef DEBUG
3391 3391 if (!alloc_h) {
3392 3392 cmn_err(CE_WARN, "!rdc_sync: "
3393 3393 "failed to pre-alloc handle");
3394 3394 }
3395 3395 #endif
3396 3396 }
3397 3397 DTRACE_PROBE(rdc_sync_loop_rlse_end);
3398 3398 }
3399 3399 threaded:
3400 3400 offset += len;
3401 3401 urdc->sync_pos = offset;
3402 3402 }
3403 3403
3404 3404 sync_done:
3405 3405 sync_completed = 1;
3406 3406
3407 3407 failed:
3408 3408 krdc->group->synccount--;
3409 3409 failed_noincr:
3410 3410 mutex_enter(&sync->lock);
3411 3411 while (sync->complete != sync->threads) {
3412 3412 cv_wait(&sync->cv, &sync->lock);
3413 3413 }
3414 3414 sync->complete = 0;
3415 3415 sync->threads = 0;
3416 3416 mutex_exit(&sync->lock);
3417 3417
3418 3418 /*
3419 3419 * if sync_completed is 0 here,
3420 3420 * we know that the main sync thread failed anyway
3421 3421 * so just free the statuses and fail
3422 3422 */
3423 3423 if (sync_completed && (_rdc_sync_status_ok(sync_status, &rc) < 0)) {
3424 3424 urdc->sync_pos = rc;
3425 3425 sync_completed = 0; /* at least 1 thread failed */
3426 3426 }
3427 3427
3428 3428 _rdc_free_sync_status(sync_status);
3429 3429
3430 3430 /*
3431 3431 * we didn't increment, we didn't even sync,
3432 3432 * so don't dec sync_info.active_thr
3433 3433 */
3434 3434 if (!queuing) {
3435 3435 mutex_enter(&sync_info.lock);
3436 3436 sync_info.active_thr--;
3437 3437 /* LINTED */
3438 3438 RDC_AVAIL_THR_TUNE(sync_info);
3439 3439 mutex_exit(&sync_info.lock);
3440 3440 }
3441 3441
3442 3442 if (handle) {
3443 3443 (void) nsc_free_buf(handle);
3444 3444 }
3445 3445
3446 3446 if (alloc_h) {
3447 3447 (void) nsc_free_handle(alloc_h);
3448 3448 }
3449 3449
3450 3450 if (reserved) {
3451 3451 _rdc_rlse_devs(krdc, rtype);
3452 3452 }
3453 3453
3454 3454 notstarted:
3455 3455 rdc_group_enter(krdc);
3456 3456 ASSERT(krdc->aux_state & RDC_AUXSYNCIP);
3457 3457 if (IS_STATE(urdc, RDC_QUEUING))
3458 3458 rdc_clr_flags(urdc, RDC_QUEUING);
3459 3459
3460 3460 if (sync_completed) {
3461 3461 (void) rdc_net_state(krdc->index, CCIO_DONE);
3462 3462 } else {
3463 3463 (void) rdc_net_state(krdc->index, CCIO_ENABLELOG);
3464 3464 }
3465 3465
3466 3466 rdc_clr_flags(urdc, RDC_SYNCING);
3467 3467 if (rdc_get_vflags(urdc) & RDC_SLAVE) {
3468 3468 rdc_many_enter(krdc);
3469 3469 rdc_clr_mflags(urdc, RDC_SLAVE);
3470 3470 rdc_many_exit(krdc);
3471 3471 }
3472 3472 if (krdc->type_flag & RDC_ASYNCMODE)
3473 3473 rdc_set_flags(urdc, RDC_ASYNC);
3474 3474 if (sync_completed) {
3475 3475 rdc_many_enter(krdc);
3476 3476 rdc_clr_mflags(urdc, RDC_RSYNC_NEEDED);
3477 3477 rdc_many_exit(krdc);
3478 3478 } else {
3479 3479 krdc->remote_index = -1;
3480 3480 rdc_set_flags_log(urdc, RDC_LOGGING, "sync failed to complete");
3481 3481 }
3482 3482 rdc_group_exit(krdc);
3483 3483 rdc_write_state(urdc);
3484 3484
3485 3485 mutex_enter(&net_blk_lock);
3486 3486 if (sync_completed)
3487 3487 krdc->sync_done = RDC_COMPLETED;
3488 3488 else
3489 3489 krdc->sync_done = RDC_FAILED;
3490 3490 cv_broadcast(&krdc->synccv);
3491 3491 mutex_exit(&net_blk_lock);
3492 3492
3493 3493 }
3494 3494
3495 3495
3496 3496 static int
3497 3497 rdc_sync(rdc_config_t *uparms, spcs_s_info_t kstatus)
3498 3498 {
3499 3499 rdc_set_t *rdc_set = uparms->rdc_set;
3500 3500 int options = uparms->options;
3501 3501 int rc = 0;
3502 3502 int busy = 0;
3503 3503 int index;
3504 3504 rdc_k_info_t *krdc;
3505 3505 rdc_u_info_t *urdc;
3506 3506 rdc_k_info_t *kmulti;
3507 3507 rdc_u_info_t *umulti;
3508 3508 rdc_group_t *group;
3509 3509 rdc_srv_t *svp;
3510 3510 int sm, um, md;
3511 3511 int sync_completed = 0;
3512 3512 int thrcount;
3513 3513
3514 3514 mutex_enter(&rdc_conf_lock);
3515 3515 index = rdc_lookup_byname(rdc_set);
3516 3516 if (index >= 0)
3517 3517 krdc = &rdc_k_info[index];
3518 3518 if (index < 0 || (krdc->type_flag & RDC_DISABLEPEND)) {
3519 3519 mutex_exit(&rdc_conf_lock);
3520 3520 spcs_s_add(kstatus, RDC_EALREADY, rdc_set->primary.file,
3521 3521 rdc_set->secondary.file);
3522 3522 rc = RDC_EALREADY;
3523 3523 goto notstarted;
3524 3524 }
3525 3525
3526 3526 urdc = &rdc_u_info[index];
3527 3527 group = krdc->group;
3528 3528 set_busy(krdc);
3529 3529 busy = 1;
3530 3530 if ((krdc->type_flag == 0) || (krdc->type_flag & RDC_DISABLEPEND)) {
3531 3531 /* A resume or enable failed or we raced with a teardown */
3532 3532 mutex_exit(&rdc_conf_lock);
3533 3533 spcs_s_add(kstatus, RDC_EALREADY, rdc_set->primary.file,
3534 3534 rdc_set->secondary.file);
3535 3535 rc = RDC_EALREADY;
3536 3536 goto notstarted;
3537 3537 }
3538 3538 mutex_exit(&rdc_conf_lock);
3539 3539 rdc_group_enter(krdc);
3540 3540
3541 3541 if (!IS_STATE(urdc, RDC_LOGGING)) {
3542 3542 spcs_s_add(kstatus, RDC_ESETNOTLOGGING, urdc->secondary.intf,
3543 3543 urdc->secondary.file);
3544 3544 rc = RDC_ENOTLOGGING;
3545 3545 goto notstarted_unlock;
3546 3546 }
3547 3547
3548 3548 if (rdc_check(krdc, rdc_set)) {
3549 3549 spcs_s_add(kstatus, RDC_EALREADY, rdc_set->primary.file,
3550 3550 rdc_set->secondary.file);
3551 3551 rc = RDC_EALREADY;
3552 3552 goto notstarted_unlock;
3553 3553 }
3554 3554
3555 3555 if (!(rdc_get_vflags(urdc) & RDC_PRIMARY)) {
3556 3556 spcs_s_add(kstatus, RDC_ENOTPRIMARY, rdc_set->primary.intf,
3557 3557 rdc_set->primary.file, rdc_set->secondary.intf,
3558 3558 rdc_set->secondary.file);
3559 3559 rc = RDC_ENOTPRIMARY;
3560 3560 goto notstarted_unlock;
3561 3561 }
3562 3562
3563 3563 if ((options & RDC_OPT_REVERSE) && (IS_STATE(urdc, RDC_QUEUING))) {
3564 3564 /*
3565 3565 * cannot reverse sync when queuing, need to go logging first
3566 3566 */
3567 3567 spcs_s_add(kstatus, RDC_EQNORSYNC, rdc_set->primary.intf,
3568 3568 rdc_set->primary.file, rdc_set->secondary.intf,
3569 3569 rdc_set->secondary.file);
3570 3570 rc = RDC_EQNORSYNC;
3571 3571 goto notstarted_unlock;
3572 3572 }
3573 3573
3574 3574 svp = krdc->lsrv;
3575 3575 krdc->intf = rdc_add_to_if(svp, &(urdc->primary.addr),
3576 3576 &(urdc->secondary.addr), 1);
3577 3577
3578 3578 if (!krdc->intf) {
3579 3579 spcs_s_add(kstatus, RDC_EADDTOIF, urdc->primary.intf,
3580 3580 urdc->secondary.intf);
3581 3581 rc = RDC_EADDTOIF;
3582 3582 goto notstarted_unlock;
3583 3583 }
3584 3584
3585 3585 if (urdc->volume_size == 0) {
3586 3586 /* Implies reserve failed when previous resume was done */
3587 3587 rdc_get_details(krdc);
3588 3588 }
3589 3589 if (urdc->volume_size == 0) {
3590 3590 spcs_s_add(kstatus, RDC_ENOBMAP);
3591 3591 rc = RDC_ENOBMAP;
3592 3592 goto notstarted_unlock;
3593 3593 }
3594 3594
3595 3595 if (krdc->dcio_bitmap == NULL) {
3596 3596 if (rdc_resume_bitmap(krdc) < 0) {
3597 3597 spcs_s_add(kstatus, RDC_ENOBMAP);
3598 3598 rc = RDC_ENOBMAP;
3599 3599 goto notstarted_unlock;
3600 3600 }
3601 3601 }
3602 3602
3603 3603 if ((rdc_get_vflags(urdc) & RDC_BMP_FAILED) && (krdc->bitmapfd)) {
3604 3604 if (rdc_reset_bitmap(krdc)) {
3605 3605 spcs_s_add(kstatus, RDC_EBITMAP);
3606 3606 rc = RDC_EBITMAP;
3607 3607 goto notstarted_unlock;
3608 3608 }
3609 3609 }
3610 3610
3611 3611 if (IS_MANY(krdc) || IS_MULTI(krdc)) {
3612 3612 rdc_u_info_t *ubad;
3613 3613
3614 3614 if ((ubad = rdc_allow_pri_sync(urdc, options)) != NULL) {
3615 3615 spcs_s_add(kstatus, RDC_ESTATE,
3616 3616 ubad->primary.intf, ubad->primary.file,
3617 3617 ubad->secondary.intf, ubad->secondary.file);
3618 3618 rc = RDC_ESTATE;
3619 3619 goto notstarted_unlock;
3620 3620 }
3621 3621 }
3622 3622
3623 3623 /*
3624 3624 * there is a small window where _rdc_sync is still
3625 3625 * running, but has cleared the RDC_SYNCING flag.
3626 3626 * Use aux_state which is only cleared
3627 3627 * after _rdc_sync had done its 'death' broadcast.
3628 3628 */
3629 3629 if (krdc->aux_state & RDC_AUXSYNCIP) {
3630 3630 #ifdef DEBUG
3631 3631 if (!rdc_get_vflags(urdc) & RDC_SYNCING) {
3632 3632 cmn_err(CE_WARN, "!rdc_sync: "
3633 3633 "RDC_AUXSYNCIP set, SYNCING off");
3634 3634 }
3635 3635 #endif
3636 3636 spcs_s_add(kstatus, RDC_ESYNCING, rdc_set->primary.file);
3637 3637 rc = RDC_ESYNCING;
3638 3638 goto notstarted_unlock;
3639 3639 }
3640 3640 if (krdc->disk_status == 1) {
3641 3641 spcs_s_add(kstatus, RDC_ESYNCING, rdc_set->primary.file);
3642 3642 rc = RDC_ESYNCING;
3643 3643 goto notstarted_unlock;
3644 3644 }
3645 3645
3646 3646 if ((options & RDC_OPT_FORWARD) &&
3647 3647 (rdc_get_mflags(urdc) & RDC_RSYNC_NEEDED)) {
3648 3648 /* cannot forward sync if a reverse sync is needed */
3649 3649 spcs_s_add(kstatus, RDC_ERSYNCNEEDED, rdc_set->primary.intf,
3650 3650 rdc_set->primary.file, rdc_set->secondary.intf,
3651 3651 rdc_set->secondary.file);
3652 3652 rc = RDC_ERSYNCNEEDED;
3653 3653 goto notstarted_unlock;
3654 3654 }
3655 3655
3656 3656 urdc->sync_pos = 0;
3657 3657
3658 3658 /* Check if the rdc set is accessible on the remote node */
3659 3659 if (rdc_net_getstate(krdc, &sm, &um, &md, FALSE) < 0) {
3660 3660 /*
3661 3661 * Remote end may be inaccessible, or the rdc set is not
3662 3662 * enabled at the remote end.
3663 3663 */
3664 3664 spcs_s_add(kstatus, RDC_ECONNOPEN, urdc->secondary.intf,
3665 3665 urdc->secondary.file);
3666 3666 rc = RDC_ECONNOPEN;
3667 3667 goto notstarted_unlock;
3668 3668 }
3669 3669 if (options & RDC_OPT_REVERSE)
3670 3670 krdc->remote_index = rdc_net_state(index, CCIO_RSYNC);
3671 3671 else
3672 3672 krdc->remote_index = rdc_net_state(index, CCIO_SLAVE);
3673 3673 if (krdc->remote_index < 0) {
3674 3674 /*
3675 3675 * Remote note probably not in a valid state to be synced,
3676 3676 * as the state was fetched OK above.
3677 3677 */
3678 3678 spcs_s_add(kstatus, RDC_ERSTATE, urdc->secondary.intf,
3679 3679 urdc->secondary.file, urdc->primary.intf,
3680 3680 urdc->primary.file);
3681 3681 rc = RDC_ERSTATE;
3682 3682 goto notstarted_unlock;
3683 3683 }
3684 3684
3685 3685 rc = check_filesize(index, kstatus);
3686 3686 if (rc != 0) {
3687 3687 (void) rdc_net_state(krdc->index, CCIO_ENABLELOG);
3688 3688 goto notstarted_unlock;
3689 3689 }
3690 3690
3691 3691 krdc->sync_done = 0;
3692 3692
3693 3693 mutex_enter(&krdc->bmapmutex);
3694 3694 krdc->aux_state |= RDC_AUXSYNCIP;
3695 3695 mutex_exit(&krdc->bmapmutex);
3696 3696
3697 3697 if (options & RDC_OPT_REVERSE) {
3698 3698 rdc_many_enter(krdc);
3699 3699 rdc_set_mflags(urdc, RDC_SLAVE | RDC_RSYNC_NEEDED);
3700 3700 mutex_enter(&krdc->bmapmutex);
3701 3701 rdc_clr_flags(urdc, RDC_VOL_FAILED);
3702 3702 mutex_exit(&krdc->bmapmutex);
3703 3703 rdc_write_state(urdc);
3704 3704 /* LINTED */
3705 3705 if (kmulti = krdc->multi_next) {
3706 3706 umulti = &rdc_u_info[kmulti->index];
3707 3707 if (IS_ENABLED(umulti) && (rdc_get_vflags(umulti) &
3708 3708 (RDC_VOL_FAILED | RDC_SYNC_NEEDED))) {
3709 3709 rdc_clr_flags(umulti, RDC_SYNC_NEEDED);
3710 3710 rdc_clr_flags(umulti, RDC_VOL_FAILED);
3711 3711 rdc_write_state(umulti);
3712 3712 }
3713 3713 }
3714 3714 rdc_many_exit(krdc);
3715 3715 } else {
3716 3716 rdc_clr_flags(urdc, RDC_FCAL_FAILED);
3717 3717 rdc_write_state(urdc);
3718 3718 }
3719 3719
3720 3720 if (options & RDC_OPT_UPDATE) {
3721 3721 ASSERT(urdc->volume_size != 0);
3722 3722 if (rdc_net_getbmap(index,
3723 3723 BMAP_LOG_BYTES(urdc->volume_size)) > 0) {
3724 3724 spcs_s_add(kstatus, RDC_ENOBMAP);
3725 3725 rc = RDC_ENOBMAP;
3726 3726
3727 3727 (void) rdc_net_state(index, CCIO_ENABLELOG);
3728 3728
3729 3729 rdc_clr_flags(urdc, RDC_SYNCING);
3730 3730 if (options & RDC_OPT_REVERSE) {
3731 3731 rdc_many_enter(krdc);
3732 3732 rdc_clr_mflags(urdc, RDC_SLAVE);
3733 3733 rdc_many_exit(krdc);
3734 3734 }
3735 3735 if (krdc->type_flag & RDC_ASYNCMODE)
3736 3736 rdc_set_flags(urdc, RDC_ASYNC);
3737 3737 krdc->remote_index = -1;
3738 3738 rdc_set_flags_log(urdc, RDC_LOGGING,
3739 3739 "failed to read remote bitmap");
3740 3740 rdc_write_state(urdc);
3741 3741 goto failed;
3742 3742 }
3743 3743 rdc_clr_flags(urdc, RDC_FULL);
3744 3744 } else {
3745 3745 /*
3746 3746 * This is a full sync (not an update sync), mark the
3747 3747 * entire bitmap dirty
3748 3748 */
3749 3749 (void) RDC_FILL_BITMAP(krdc, FALSE);
3750 3750
3751 3751 rdc_set_flags(urdc, RDC_FULL);
3752 3752 }
3753 3753
3754 3754 rdc_group_exit(krdc);
3755 3755
3756 3756 /*
3757 3757 * allow diskq->memq flusher to wake up
3758 3758 */
3759 3759 mutex_enter(&krdc->group->ra_queue.net_qlock);
3760 3760 krdc->group->ra_queue.qfflags &= ~RDC_QFILLSLEEP;
3761 3761 mutex_exit(&krdc->group->ra_queue.net_qlock);
3762 3762
3763 3763 /*
3764 3764 * if this is a full sync on a non-diskq set or
3765 3765 * a diskq set that has failed, clear the async flag
3766 3766 */
3767 3767 if (krdc->type_flag & RDC_ASYNCMODE) {
3768 3768 if ((!(options & RDC_OPT_UPDATE)) ||
3769 3769 (!RDC_IS_DISKQ(krdc->group)) ||
3770 3770 (!(IS_STATE(urdc, RDC_QUEUING)))) {
3771 3771 /* full syncs, or core queue are synchronous */
3772 3772 rdc_group_enter(krdc);
3773 3773 rdc_clr_flags(urdc, RDC_ASYNC);
3774 3774 rdc_group_exit(krdc);
3775 3775 }
3776 3776
3777 3777 /*
3778 3778 * if the queue failed because it was full, lets see
3779 3779 * if we can restart it. After _rdc_sync() is done
3780 3780 * the modes will switch and we will begin disk
3781 3781 * queuing again. NOTE: this should only be called
3782 3782 * once per group, as it clears state for all group
3783 3783 * members, also clears the async flag for all members
3784 3784 */
3785 3785 if (IS_STATE(urdc, RDC_DISKQ_FAILED)) {
3786 3786 rdc_unfail_diskq(krdc);
3787 3787 } else {
3788 3788 /* don't add insult to injury by flushing a dead queue */
3789 3789
3790 3790 /*
3791 3791 * if we are updating, and a diskq and
3792 3792 * the async thread isn't active, start
3793 3793 * it up.
3794 3794 */
3795 3795 if ((options & RDC_OPT_UPDATE) &&
3796 3796 (IS_STATE(urdc, RDC_QUEUING))) {
3797 3797 rdc_group_enter(krdc);
3798 3798 rdc_clr_flags(urdc, RDC_SYNCING);
3799 3799 rdc_group_exit(krdc);
3800 3800 mutex_enter(&krdc->group->ra_queue.net_qlock);
3801 3801 if (krdc->group->ra_queue.qfill_sleeping ==
3802 3802 RDC_QFILL_ASLEEP)
3803 3803 cv_broadcast(&group->ra_queue.qfcv);
3804 3804 mutex_exit(&krdc->group->ra_queue.net_qlock);
3805 3805 thrcount = urdc->asyncthr;
3806 3806 while ((thrcount-- > 0) &&
3807 3807 !krdc->group->rdc_writer) {
3808 3808 (void) rdc_writer(krdc->index);
3809 3809 }
3810 3810 }
3811 3811 }
3812 3812 }
3813 3813
3814 3814 /*
3815 3815 * For a reverse sync, merge the current bitmap with all other sets
3816 3816 * that share this volume.
3817 3817 */
3818 3818 if (options & RDC_OPT_REVERSE) {
3819 3819 retry_many:
3820 3820 rdc_many_enter(krdc);
3821 3821 if (IS_MANY(krdc)) {
3822 3822 rdc_k_info_t *kmany;
3823 3823 rdc_u_info_t *umany;
3824 3824
3825 3825 for (kmany = krdc->many_next; kmany != krdc;
3826 3826 kmany = kmany->many_next) {
3827 3827 umany = &rdc_u_info[kmany->index];
3828 3828 if (!IS_ENABLED(umany))
3829 3829 continue;
3830 3830 ASSERT(umany->flags & RDC_PRIMARY);
3831 3831
3832 3832 if (!mutex_tryenter(&kmany->group->lock)) {
3833 3833 rdc_many_exit(krdc);
3834 3834 /* May merge more than once */
3835 3835 goto retry_many;
3836 3836 }
3837 3837 rdc_merge_bitmaps(krdc, kmany);
3838 3838 mutex_exit(&kmany->group->lock);
3839 3839 }
3840 3840 }
3841 3841 rdc_many_exit(krdc);
3842 3842
3843 3843 retry_multi:
3844 3844 rdc_many_enter(krdc);
3845 3845 if (IS_MULTI(krdc)) {
3846 3846 rdc_k_info_t *kmulti = krdc->multi_next;
3847 3847 rdc_u_info_t *umulti = &rdc_u_info[kmulti->index];
3848 3848
3849 3849 if (IS_ENABLED(umulti)) {
3850 3850 ASSERT(!(umulti->flags & RDC_PRIMARY));
3851 3851
3852 3852 if (!mutex_tryenter(&kmulti->group->lock)) {
3853 3853 rdc_many_exit(krdc);
3854 3854 goto retry_multi;
3855 3855 }
3856 3856 rdc_merge_bitmaps(krdc, kmulti);
3857 3857 mutex_exit(&kmulti->group->lock);
3858 3858 }
3859 3859 }
3860 3860 rdc_many_exit(krdc);
3861 3861 }
3862 3862
3863 3863 rdc_group_enter(krdc);
3864 3864
3865 3865 if (krdc->bitmap_write == 0) {
3866 3866 if (rdc_write_bitmap_fill(krdc) >= 0)
3867 3867 krdc->bitmap_write = -1;
3868 3868 }
3869 3869
3870 3870 if (krdc->bitmap_write > 0)
3871 3871 (void) rdc_write_bitmap(krdc);
3872 3872
3873 3873 urdc->bits_set = RDC_COUNT_BITMAP(krdc);
3874 3874
3875 3875 rdc_group_exit(krdc);
3876 3876
3877 3877 if (options & RDC_OPT_REVERSE) {
3878 3878 (void) _rdc_sync_event_notify(RDC_SYNC_START,
3879 3879 urdc->primary.file, urdc->group_name);
3880 3880 }
3881 3881
3882 3882 /* Now set off the sync itself */
3883 3883
3884 3884 mutex_enter(&net_blk_lock);
3885 3885 if (nsc_create_process(
3886 3886 (void (*)(void *))_rdc_sync, (void *)krdc, FALSE)) {
3887 3887 mutex_exit(&net_blk_lock);
3888 3888 spcs_s_add(kstatus, RDC_ENOPROC);
3889 3889 /*
3890 3890 * We used to just return here,
3891 3891 * but we need to clear the AUXSYNCIP bit
3892 3892 * and there is a very small chance that
3893 3893 * someone may be waiting on the disk_status flag.
3894 3894 */
3895 3895 rc = RDC_ENOPROC;
3896 3896 /*
3897 3897 * need the group lock held at failed.
3898 3898 */
3899 3899 rdc_group_enter(krdc);
3900 3900 goto failed;
3901 3901 }
3902 3902
3903 3903 mutex_enter(&rdc_conf_lock);
3904 3904 wakeup_busy(krdc);
3905 3905 busy = 0;
3906 3906 mutex_exit(&rdc_conf_lock);
3907 3907
3908 3908 while (krdc->sync_done == 0)
3909 3909 cv_wait(&krdc->synccv, &net_blk_lock);
3910 3910 mutex_exit(&net_blk_lock);
3911 3911
3912 3912 rdc_group_enter(krdc);
3913 3913
3914 3914 if (krdc->sync_done == RDC_FAILED) {
3915 3915 char siztmp1[16];
3916 3916 (void) spcs_s_inttostring(
3917 3917 urdc->sync_pos, siztmp1, sizeof (siztmp1),
3918 3918 0);
3919 3919 spcs_s_add(kstatus, RDC_EFAIL, siztmp1);
3920 3920 rc = RDC_EFAIL;
3921 3921 } else
3922 3922 sync_completed = 1;
3923 3923
3924 3924 failed:
3925 3925 /*
3926 3926 * We use this flag now to make halt_sync() wait for
3927 3927 * us to terminate and let us take the group lock.
3928 3928 */
3929 3929 krdc->aux_state &= ~RDC_AUXSYNCIP;
3930 3930 if (krdc->disk_status == 1) {
3931 3931 krdc->disk_status = 0;
3932 3932 cv_broadcast(&krdc->haltcv);
3933 3933 }
3934 3934
3935 3935 notstarted_unlock:
3936 3936 rdc_group_exit(krdc);
3937 3937
3938 3938 if (sync_completed && (options & RDC_OPT_REVERSE)) {
3939 3939 (void) _rdc_sync_event_notify(RDC_SYNC_DONE,
3940 3940 urdc->primary.file, urdc->group_name);
3941 3941 }
3942 3942
3943 3943 notstarted:
3944 3944 if (busy) {
3945 3945 mutex_enter(&rdc_conf_lock);
3946 3946 wakeup_busy(krdc);
3947 3947 mutex_exit(&rdc_conf_lock);
3948 3948 }
3949 3949
3950 3950 return (rc);
3951 3951 }
3952 3952
/*
 * _rdc_suspend: tear an rdc set down to the "configured but not enabled"
 * state, preserving its bitmap on disk so a later resume can continue.
 *
 * krdc must already be marked RDC_DISABLEPEND by the caller (see
 * rdc_suspend).  On the primary, any in-flight sync is halted first.
 * Async (memory-queue) sets have their queues drained or forcibly
 * discarded before the interface, bitmap and devices are released.
 *
 * Returns 0 on success, or RDC_EALREADY (non-DEBUG builds) if the set
 * no longer matches rdc_set; detailed status is added to kstatus.
 *
 * Lock ordering here (group lock, rdc_conf_lock, net_qlock) is strict;
 * do not reorder operations.
 */
/* ARGSUSED */
static int
_rdc_suspend(rdc_k_info_t *krdc, rdc_set_t *rdc_set, spcs_s_info_t kstatus)
{
	rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
	rdc_if_t *ip;
	int index = krdc->index;

	ASSERT(krdc->group != NULL);
	rdc_group_enter(krdc);
#ifdef DEBUG
	ASSERT(rdc_check(krdc, rdc_set) == 0);
#else
	/* production builds fail gracefully instead of asserting */
	if (rdc_check(krdc, rdc_set)) {
		rdc_group_exit(krdc);
		spcs_s_add(kstatus, RDC_EALREADY, rdc_set->primary.file,
		    rdc_set->secondary.file);
		return (RDC_EALREADY);
	}
#endif

	/* stop any sync in flight before dismantling the set */
	if (rdc_get_vflags(urdc) & RDC_PRIMARY) {
		halt_sync(krdc);
		ASSERT(IS_ENABLED(urdc));
	}

	rdc_group_exit(krdc);
	/* stop intercepting I/O to the primary volume */
	(void) rdc_unintercept(krdc);

#ifdef DEBUG
	cmn_err(CE_NOTE, "!SNDR: suspended %s %s", urdc->primary.file,
	    urdc->secondary.file);
#endif

	/* Configured but not enabled */
	ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));


	/* memory-queue async set: flush the queue before teardown */
	if (IS_ASYNC(urdc) && !RDC_IS_DISKQ(krdc->group)) {
		int tries = 2; /* in case of possibly stuck flusher threads */
#ifdef DEBUG
		net_queue *qp = &krdc->group->ra_queue;
#endif
		do {
			if (!krdc->group->rdc_writer)
				(void) rdc_writer(krdc->index);

			(void) rdc_drain_queue(krdc->index);

		} while (krdc->group->rdc_writer && tries--);

		/* ok, force it to happen... */
		if (rdc_drain_queue(krdc->index) != 0) {
			/* discard the queue and kick waiters until idle */
			do {
				mutex_enter(&krdc->group->ra_queue.net_qlock);
				krdc->group->asyncdis = 1;
				cv_broadcast(&krdc->group->asyncqcv);
				mutex_exit(&krdc->group->ra_queue.net_qlock);
				cmn_err(CE_WARN,
				    "!SNDR: async I/O pending and not flushed "
				    "for %s during suspend",
				    urdc->primary.file);
#ifdef DEBUG
				cmn_err(CE_WARN,
				    "!nitems: %" NSC_SZFMT " nblocks: %"
				    NSC_SZFMT " head: 0x%p tail: 0x%p",
				    qp->nitems, qp->blocks,
				    (void *)qp->net_qhead,
				    (void *)qp->net_qtail);
#endif
			} while (krdc->group->rdc_thrnum > 0);
		}
	}

	/* detach from the network interface */
	mutex_enter(&rdc_conf_lock);
	ip = krdc->intf;
	krdc->intf = 0;

	if (ip) {
		rdc_remove_from_if(ip);
	}

	mutex_exit(&rdc_conf_lock);

	rdc_group_enter(krdc);

	/* Configured but not enabled */
	ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));

	rdc_group_exit(krdc);
	/* Must not hold group lock during this function */
	while (rdc_dump_alloc_bufs_cd(krdc->index) == EAGAIN)
		delay(2);
	rdc_group_enter(krdc);

	/* Don't rdc_clear_state, unlike _rdc_disable */

	/* RDC_CMD_SUSPEND keeps the on-disk bitmap for a later resume */
	rdc_free_bitmap(krdc, RDC_CMD_SUSPEND);
	rdc_close_bitmap(krdc);

	rdc_dev_close(krdc);
	rdc_close_direct(krdc);

	/* Configured but not enabled */
	ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));

	rdc_group_exit(krdc);

	/*
	 * we should now unregister the queue, with no conflicting
	 * locks held. This is the last(only) member of the group
	 */
	if (krdc->group && RDC_IS_DISKQ(krdc->group) &&
	    krdc->group->count == 1) { /* stop protecting queue */
		rdc_unintercept_diskq(krdc->group);
	}

	mutex_enter(&rdc_conf_lock);

	/* Configured but not enabled */
	ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));

	wait_busy(krdc);

	if (IS_MANY(krdc) || IS_MULTI(krdc))
		remove_from_many(krdc);

	remove_from_group(krdc);

	/* reset per-set kernel state for a future resume */
	krdc->remote_index = -1;
	ASSERT(krdc->type_flag & RDC_CONFIGURED);
	ASSERT(krdc->type_flag & RDC_DISABLEPEND);
	krdc->type_flag = 0;
#ifdef DEBUG
	if (krdc->dcio_bitmap)
		cmn_err(CE_WARN, "!_rdc_suspend: possible mem leak, "
		    "dcio_bitmap");
#endif
	krdc->dcio_bitmap = NULL;
	krdc->bitmap_ref = NULL;
	krdc->bitmap_size = 0;
	krdc->maxfbas = 0;
	krdc->bitmap_write = 0;
	krdc->disk_status = 0;
	rdc_destroy_svinfo(krdc->lsrv);
	krdc->lsrv = NULL;
	krdc->multi_next = NULL;

	rdc_u_init(urdc);

	mutex_exit(&rdc_conf_lock);
	rdc_kstat_delete(index);
	return (0);
}
4107 4107
/*
 * rdc_suspend: ioctl entry point to suspend the rdc set named by
 * uparms->rdc_set (deconfigure it without clearing its on-disk state).
 *
 * Under rdc_conf_lock, looks the set up, marks it RDC_DISABLEPEND so
 * concurrent operations back off, waits for in-progress users to drain,
 * then drops the lock and hands the real teardown to _rdc_suspend().
 *
 * Returns 0 on success, or RDC_EALREADY (with detail added to kstatus)
 * if the set is not configured or is already being torn down.
 */
static int
rdc_suspend(rdc_config_t *uparms, spcs_s_info_t kstatus)
{
	rdc_k_info_t *krdc;
	int index;
	int rc;

	mutex_enter(&rdc_conf_lock);

	index = rdc_lookup_byname(uparms->rdc_set);
	if (index >= 0)
		krdc = &rdc_k_info[index];
	/* || short-circuits, so krdc is only read when index >= 0 */
	if (index < 0 || (krdc->type_flag & RDC_DISABLEPEND)) {
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
		    uparms->rdc_set->secondary.file);
		return (RDC_EALREADY);
	}

	/* block new references to this set, then wait out current users */
	krdc->type_flag |= RDC_DISABLEPEND;
	wait_busy(krdc);
	if (krdc->type_flag == 0) {
		/* A resume or enable failed */
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
		    uparms->rdc_set->secondary.file);
		return (RDC_EALREADY);
	}
	mutex_exit(&rdc_conf_lock);

	rc = _rdc_suspend(krdc, uparms->rdc_set, kstatus);
	return (rc);
}
4141 4141
/*
 * _rdc_resume: bring a previously suspended rdc set back into service.
 *
 * Validates the user-supplied set description, checks for conflicts with
 * already-enabled sets (duplicate set, many-to-one, volume/bitmap reuse),
 * opens the local devices, copies the set parameters into the kernel's
 * urdc slot, re-joins the set to its group (and disk queue, if any),
 * resumes the bitmap, and finally re-inserts the I/O intercept.
 *
 * Locking: most configuration checks and slot setup happen under
 * rdc_conf_lock; per-set state transitions happen under the group lock
 * (rdc_group_enter) and many-lock.  The fail/bmpfail paths unwind in
 * the reverse order of construction.
 *
 * Returns 0 on success or an RDC_E* code (detail added to kstatus).
 */
static int
_rdc_resume(rdc_set_t *rdc_set, int options, spcs_s_info_t kstatus)
{
	int index;
	char *rhost;
	struct netbuf *addrp;
	rdc_k_info_t *krdc;
	rdc_u_info_t *urdc;
	rdc_srv_t *svp = NULL;
	char *local_file;
	char *local_bitmap;
	int rc, rc1;
	nsc_size_t maxfbas;
	rdc_group_t *grp;

	/* reject a set description with any required field missing */
	if ((rdc_set->primary.intf[0] == 0) ||
	    (rdc_set->primary.addr.len == 0) ||
	    (rdc_set->primary.file[0] == 0) ||
	    (rdc_set->primary.bitmap[0] == 0) ||
	    (rdc_set->secondary.intf[0] == 0) ||
	    (rdc_set->secondary.addr.len == 0) ||
	    (rdc_set->secondary.file[0] == 0) ||
	    (rdc_set->secondary.bitmap[0] == 0)) {
		spcs_s_add(kstatus, RDC_EEMPTY);
		return (RDC_EEMPTY);
	}

	/* Next check there aren't any enabled rdc sets which match. */

	mutex_enter(&rdc_conf_lock);

	if (rdc_lookup_byname(rdc_set) >= 0) {
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EENABLED, rdc_set->primary.intf,
		    rdc_set->primary.file, rdc_set->secondary.intf,
		    rdc_set->secondary.file);
		return (RDC_EENABLED);
	}

	if (rdc_lookup_many2one(rdc_set) >= 0) {
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EMANY2ONE, rdc_set->primary.intf,
		    rdc_set->primary.file, rdc_set->secondary.intf,
		    rdc_set->secondary.file);
		return (RDC_EMANY2ONE);
	}

	if (rdc_set->netconfig->knc_proto == NULL) {
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_ENETCONFIG);
		return (RDC_ENETCONFIG);
	}

	/* NOTE(review): addr.len was already checked above; kept as-is */
	if (rdc_set->primary.addr.len == 0) {
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_ENETBUF, rdc_set->primary.file);
		return (RDC_ENETBUF);
	}

	if (rdc_set->secondary.addr.len == 0) {
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_ENETBUF, rdc_set->secondary.file);
		return (RDC_ENETBUF);
	}

	/* Check that the local data volume isn't in use as a bitmap */
	if (options & RDC_OPT_PRIMARY)
		local_file = rdc_set->primary.file;
	else
		local_file = rdc_set->secondary.file;
	if (rdc_lookup_bitmap(local_file) >= 0) {
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EVOLINUSE, local_file);
		return (RDC_EVOLINUSE);
	}

	/* check that the secondary data volume isn't in use */
	if (!(options & RDC_OPT_PRIMARY)) {
		local_file = rdc_set->secondary.file;
		if (rdc_lookup_secondary(local_file) >= 0) {
			mutex_exit(&rdc_conf_lock);
			spcs_s_add(kstatus, RDC_EVOLINUSE, local_file);
			return (RDC_EVOLINUSE);
		}
	}

	/* Check that the bitmap isn't in use as a data volume */
	if (options & RDC_OPT_PRIMARY)
		local_bitmap = rdc_set->primary.bitmap;
	else
		local_bitmap = rdc_set->secondary.bitmap;
	if (rdc_lookup_configured(local_bitmap) >= 0) {
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EBMPINUSE, local_bitmap);
		return (RDC_EBMPINUSE);
	}

	/* Check that the bitmap isn't already in use as a bitmap */
	if (rdc_lookup_bitmap(local_bitmap) >= 0) {
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EBMPINUSE, local_bitmap);
		return (RDC_EBMPINUSE);
	}

	/* Set urdc->volume_size */
	index = rdc_dev_open(rdc_set, options);
	if (index < 0) {
		mutex_exit(&rdc_conf_lock);
		if (options & RDC_OPT_PRIMARY)
			spcs_s_add(kstatus, RDC_EOPEN, rdc_set->primary.intf,
			    rdc_set->primary.file);
		else
			spcs_s_add(kstatus, RDC_EOPEN, rdc_set->secondary.intf,
			    rdc_set->secondary.file);
		return (RDC_EOPEN);
	}

	urdc = &rdc_u_info[index];
	krdc = &rdc_k_info[index];

	/* copy relevant parts of rdc_set to urdc field by field */

	(void) strncpy(urdc->primary.intf, rdc_set->primary.intf,
	    MAX_RDC_HOST_SIZE);
	(void) strncpy(urdc->secondary.intf, rdc_set->secondary.intf,
	    MAX_RDC_HOST_SIZE);

	(void) strncpy(urdc->group_name, rdc_set->group_name, NSC_MAXPATH);

	dup_rdc_netbuf(&rdc_set->primary.addr, &urdc->primary.addr);
	(void) strncpy(urdc->primary.file, rdc_set->primary.file, NSC_MAXPATH);
	(void) strncpy(urdc->primary.bitmap, rdc_set->primary.bitmap,
	    NSC_MAXPATH);

	dup_rdc_netbuf(&rdc_set->secondary.addr, &urdc->secondary.addr);
	(void) strncpy(urdc->secondary.file, rdc_set->secondary.file,
	    NSC_MAXPATH);
	(void) strncpy(urdc->secondary.bitmap, rdc_set->secondary.bitmap,
	    NSC_MAXPATH);
	(void) strncpy(urdc->disk_queue, rdc_set->disk_queue, NSC_MAXPATH);
	urdc->setid = rdc_set->setid;

	/* a disk queue only makes sense for async replication */
	if ((options & RDC_OPT_SYNC) && urdc->disk_queue[0]) {
		mutex_exit(&rdc_conf_lock);
		rdc_dev_close(krdc);
		spcs_s_add(kstatus, RDC_EQWRONGMODE);
		return (RDC_EQWRONGMODE);
	}

	/*
	 * init flags now so that state left by failures in add_to_group()
	 * are preserved.
	 */
	rdc_init_flags(urdc);

	if ((rc1 = add_to_group(krdc, options, RDC_CMD_RESUME)) != 0) {
		if (rc1 == RDC_EQNOADD) { /* something went wrong with queue */
			rdc_fail_diskq(krdc, RDC_WAIT, RDC_NOLOG);
			/* don't return a failure here, continue with resume */

		} else { /* some other group add failure */
			mutex_exit(&rdc_conf_lock);
			rdc_dev_close(krdc);
			spcs_s_add(kstatus, RDC_EGROUP,
			    rdc_set->primary.intf, rdc_set->primary.file,
			    rdc_set->secondary.intf, rdc_set->secondary.file,
			    rdc_set->group_name);
			return (RDC_EGROUP);
		}
	}

	/*
	 * maxfbas was set in rdc_dev_open as primary's maxfbas.
	 * If diskq's maxfbas is smaller, then use diskq's.
	 */
	grp = krdc->group;
	if (grp && RDC_IS_DISKQ(grp) && (grp->diskqfd != 0)) {
		rc = _rdc_rsrv_diskq(grp);
		if (RDC_SUCCESS(rc)) {
			rc = nsc_maxfbas(grp->diskqfd, 0, &maxfbas);
			if (rc == 0) {
#ifdef DEBUG
				if (krdc->maxfbas != maxfbas)
					cmn_err(CE_NOTE,
					    "!_rdc_resume: diskq maxfbas = %"
					    NSC_SZFMT ", primary maxfbas = %"
					    NSC_SZFMT, maxfbas, krdc->maxfbas);
#endif
				krdc->maxfbas = min(krdc->maxfbas,
				    maxfbas);
			} else {
				cmn_err(CE_WARN,
				    "!_rdc_resume: diskq maxfbas failed (%d)",
				    rc);
			}
			_rdc_rlse_diskq(grp);
		} else {
			cmn_err(CE_WARN,
			    "!_rdc_resume: diskq reserve failed (%d)", rc);
		}
	}

	(void) strncpy(urdc->direct_file, rdc_set->direct_file, NSC_MAXPATH);
	if ((options & RDC_OPT_PRIMARY) && rdc_set->direct_file[0]) {
		if (rdc_open_direct(krdc) == NULL)
			rdc_set_flags(urdc, RDC_FCAL_FAILED);
	}

	krdc->many_next = krdc;

	ASSERT(krdc->type_flag == 0);
	krdc->type_flag = RDC_CONFIGURED;

	if (options & RDC_OPT_PRIMARY)
		rdc_set_flags(urdc, RDC_PRIMARY);

	if (options & RDC_OPT_ASYNC)
		krdc->type_flag |= RDC_ASYNCMODE;

	set_busy(krdc);

	urdc->syshostid = rdc_set->syshostid;

	if (add_to_many(krdc) < 0) {
		mutex_exit(&rdc_conf_lock);

		rdc_group_enter(krdc);

		spcs_s_add(kstatus, RDC_EMULTI);
		rc = RDC_EMULTI;
		goto fail;
	}

	/* Configured but not enabled */
	ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));

	mutex_exit(&rdc_conf_lock);

	/* a zero-size volume means the open found a broken device */
	if (urdc->volume_size == 0) {
		rdc_many_enter(krdc);
		if (options & RDC_OPT_PRIMARY)
			rdc_set_mflags(urdc, RDC_RSYNC_NEEDED);
		else
			rdc_set_flags(urdc, RDC_SYNC_NEEDED);
		rdc_set_flags(urdc, RDC_VOL_FAILED);
		rdc_many_exit(krdc);
	}

	rdc_group_enter(krdc);

	/* Configured but not enabled */
	ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));

	/*
	 * The rdc set is configured but not yet enabled. Other operations must
	 * ignore this set until it is enabled.
	 */

	urdc->sync_pos = 0;

	/* Set tunable defaults, we'll pick up tunables from the header later */

	urdc->maxqfbas = rdc_maxthres_queue;
	urdc->maxqitems = rdc_max_qitems;
	urdc->autosync = 0;
	urdc->asyncthr = rdc_asyncthr;

	urdc->netconfig = rdc_set->netconfig;

	/* the "remote" host is whichever side we are not */
	if (options & RDC_OPT_PRIMARY) {
		rhost = rdc_set->secondary.intf;
		addrp = &rdc_set->secondary.addr;
	} else {
		rhost = rdc_set->primary.intf;
		addrp = &rdc_set->primary.addr;
	}

	if (options & RDC_OPT_ASYNC)
		rdc_set_flags(urdc, RDC_ASYNC);

	svp = rdc_create_svinfo(rhost, addrp, urdc->netconfig);
	if (svp == NULL) {
		spcs_s_add(kstatus, ENOMEM);
		rc = ENOMEM;
		goto fail;
	}

	urdc->netconfig = NULL;		/* This will be no good soon */

	/* Don't set krdc->intf here */
	rdc_kstat_create(index);

	/* if the bitmap resume isn't clean, it will clear queuing flag */

	(void) rdc_resume_bitmap(krdc);

	if (RDC_IS_DISKQ(krdc->group)) {
		disk_queue *q = &krdc->group->diskq;
		if ((rc1 == RDC_EQNOADD) ||
		    IS_QSTATE(q, RDC_QBADRESUME)) {
			rdc_clr_flags(urdc, RDC_QUEUING);
			RDC_ZERO_BITREF(krdc);
		}
	}

	if (krdc->lsrv == NULL)
		krdc->lsrv = svp;
	else {
#ifdef DEBUG
		cmn_err(CE_WARN, "!_rdc_resume: krdc->lsrv already set: %p",
		    (void *) krdc->lsrv);
#endif
		rdc_destroy_svinfo(svp);
	}
	/* svp ownership transferred (or freed); NULL it for the fail path */
	svp = NULL;

	/* Configured but not enabled */
	ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));

	/* And finally */

	krdc->remote_index = -1;

	/* Should we set the whole group logging? */
	rdc_set_flags(urdc, RDC_ENABLED | RDC_LOGGING);

	rdc_group_exit(krdc);

	if (rdc_intercept(krdc) != 0) {
		rdc_group_enter(krdc);
		rdc_clr_flags(urdc, RDC_ENABLED);
		if (options & RDC_OPT_PRIMARY)
			spcs_s_add(kstatus, RDC_EREGISTER, urdc->primary.file);
		else
			spcs_s_add(kstatus, RDC_EREGISTER,
			    urdc->secondary.file);
#ifdef DEBUG
		cmn_err(CE_NOTE, "!nsc_register_path failed %s",
		    urdc->primary.file);
#endif
		rc = RDC_EREGISTER;
		goto bmpfail;
	}
#ifdef DEBUG
	cmn_err(CE_NOTE, "!SNDR: resumed %s %s", urdc->primary.file,
	    urdc->secondary.file);
#endif

	rdc_write_state(urdc);

	mutex_enter(&rdc_conf_lock);
	wakeup_busy(krdc);
	mutex_exit(&rdc_conf_lock);

	return (0);

	/* error unwind paths: undo setup in reverse order of construction */
bmpfail:
	if (options & RDC_OPT_PRIMARY)
		spcs_s_add(kstatus, RDC_EBITMAP, urdc->primary.bitmap);
	else
		spcs_s_add(kstatus, RDC_EBITMAP, urdc->secondary.bitmap);
	rc = RDC_EBITMAP;
	if (rdc_get_vflags(urdc) & RDC_ENABLED) {
		rdc_group_exit(krdc);
		(void) rdc_unintercept(krdc);
		rdc_group_enter(krdc);
	}

fail:
	rdc_kstat_delete(index);
	/* Don't unset krdc->intf here, unlike _rdc_enable */

	/* Configured but not enabled */
	ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));

	rdc_dev_close(krdc);
	rdc_close_direct(krdc);
	rdc_destroy_svinfo(svp);

	/* Configured but not enabled */
	ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));

	rdc_group_exit(krdc);

	mutex_enter(&rdc_conf_lock);

	/* Configured but not enabled */
	ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));

	remove_from_group(krdc);

	if (IS_MANY(krdc) || IS_MULTI(krdc))
		remove_from_many(krdc);

	rdc_u_init(urdc);

	ASSERT(krdc->type_flag & RDC_CONFIGURED);
	krdc->type_flag = 0;
	wakeup_busy(krdc);

	mutex_exit(&rdc_conf_lock);

	return (rc);
}
4546 4546
4547 4547 static int
4548 4548 rdc_resume(rdc_config_t *uparms, spcs_s_info_t kstatus)
4549 4549 {
4550 4550 char itmp[10];
4551 4551 int rc;
4552 4552
4553 4553 if (!(uparms->options & RDC_OPT_SYNC) &&
4554 4554 !(uparms->options & RDC_OPT_ASYNC)) {
4555 4555 (void) spcs_s_inttostring(
4556 4556 uparms->options, itmp, sizeof (itmp), 1);
4557 4557 spcs_s_add(kstatus, RDC_EEINVAL, itmp);
4558 4558 rc = RDC_EEINVAL;
4559 4559 goto done;
4560 4560 }
4561 4561
4562 4562 if (!(uparms->options & RDC_OPT_PRIMARY) &&
4563 4563 !(uparms->options & RDC_OPT_SECONDARY)) {
4564 4564 (void) spcs_s_inttostring(
4565 4565 uparms->options, itmp, sizeof (itmp), 1);
4566 4566 spcs_s_add(kstatus, RDC_EEINVAL, itmp);
4567 4567 rc = RDC_EEINVAL;
4568 4568 goto done;
4569 4569 }
4570 4570
4571 4571 rc = _rdc_resume(uparms->rdc_set, uparms->options, kstatus);
4572 4572 done:
4573 4573 return (rc);
4574 4574 }
4575 4575
4576 4576 /*
4577 4577 * if rdc_group_log is called because a volume has failed,
4578 4578 * we must disgard the queue to preserve write ordering.
4579 4579 * later perhaps, we can keep queuing, but we would have to
4580 4580 * rewrite the i/o path to acommodate that. currently, if there
4581 4581 * is a volume failure, the buffers are satisfied remotely and
4582 4582 * there is no way to satisfy them from the current diskq config
4583 4583 * phew, if we do that.. it will be difficult
4584 4584 */
4585 4585 int
4586 4586 rdc_can_queue(rdc_k_info_t *krdc)
4587 4587 {
4588 4588 rdc_k_info_t *p;
4589 4589 rdc_u_info_t *q;
4590 4590
4591 4591 for (p = krdc->group_next; ; p = p->group_next) {
4592 4592 q = &rdc_u_info[p->index];
4593 4593 if (IS_STATE(q, RDC_VOL_FAILED))
4594 4594 return (0);
4595 4595 if (p == krdc)
4596 4596 break;
4597 4597 }
4598 4598 return (1);
4599 4599 }
4600 4600
4601 4601 /*
4602 4602 * wait here, until all in flight async i/o's have either
4603 4603 * finished or failed. Avoid the race with r_net_state()
4604 4604 * which tells remote end to log.
4605 4605 */
4606 4606 void
4607 4607 rdc_inflwait(rdc_group_t *grp)
4608 4608 {
4609 4609 int bail = RDC_CLNT_TMOUT * 2; /* to include retries */
4610 4610 volatile int *inflitems;
4611 4611
4612 4612 if (RDC_IS_DISKQ(grp))
4613 4613 inflitems = (&(grp->diskq.inflitems));
4614 4614 else
4615 4615 inflitems = (&(grp->ra_queue.inflitems));
4616 4616
4617 4617 while (*inflitems && (--bail > 0))
4618 4618 delay(HZ);
4619 4619 }
4620 4620
/*
 * rdc_group_log: put an rdc set - and, when a consistent group-wide
 * point-in-time is possible, its whole group - into logging mode.
 *
 * 'flag' selects behavior: RDC_QUEUING (try to keep queuing on link
 * error), RDC_FLUSH, RDC_FORCE_GROUP, RDC_ALLREMOTE / RDC_OTHERREMOTE
 * (tell the other node(s) to start logging too).  'why' is a log
 * message string passed through to rdc_set_flags_log().
 *
 * Caller must hold the group lock; this routine drops and re-acquires
 * it while draining async buffers (which can recursively trigger
 * rdc_group_log from failing async threads).
 */
void
rdc_group_log(rdc_k_info_t *krdc, int flag, char *why)
{
	rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
	rdc_k_info_t *p;
	rdc_u_info_t *q;
	int do_group;
	int sm, um, md;
	disk_queue *dq;

	void (*flag_op)(rdc_u_info_t *urdc, int flag);

	ASSERT(MUTEX_HELD(&krdc->group->lock));

	if (!IS_ENABLED(urdc))
		return;

	rdc_many_enter(krdc);

	/*
	 * Decide whether RDC_QUEUING should be set (keep queuing through a
	 * link error) or cleared (volume failure or user request).
	 */
	if ((flag & RDC_QUEUING) && (!IS_STATE(urdc, RDC_SYNCING)) &&
	    (rdc_can_queue(krdc))) {
		flag_op = rdc_set_flags; /* keep queuing, link error */
		flag &= ~RDC_FLUSH;
	} else {
		flag_op = rdc_clr_flags; /* stop queuing, user request */
	}

	/*
	 * A group-wide point-in-time is only attempted when this set is a
	 * primary group member and no member is already logging or syncing.
	 */
	do_group = 1;
	if (!(rdc_get_vflags(urdc) & RDC_PRIMARY))
		do_group = 0;
	else if ((urdc->group_name[0] == 0) ||
	    (rdc_get_vflags(urdc) & RDC_LOGGING) ||
	    (rdc_get_vflags(urdc) & RDC_SYNCING))
		do_group = 0;
	if (do_group) {
		for (p = krdc->group_next; p != krdc; p = p->group_next) {
			q = &rdc_u_info[p->index];
			if (!IS_ENABLED(q))
				continue;
			if ((rdc_get_vflags(q) & RDC_LOGGING) ||
			    (rdc_get_vflags(q) & RDC_SYNCING)) {
				do_group = 0;
				break;
			}
		}
	}
	if (!do_group && (flag & RDC_FORCE_GROUP))
		do_group = 1;

	rdc_many_exit(krdc);
	dq = &krdc->group->diskq;
	if (do_group) {
#ifdef DEBUG
		cmn_err(CE_NOTE, "!SNDR:Group point-in-time for grp: %s %s:%s",
		    urdc->group_name, urdc->primary.intf, urdc->secondary.intf);
#endif
		DTRACE_PROBE(rdc_diskq_group_PIT);

		/* Set group logging at the same PIT under rdc_many_lock */
		rdc_many_enter(krdc);
		rdc_set_flags_log(urdc, RDC_LOGGING, why);
		if (RDC_IS_DISKQ(krdc->group))
			flag_op(urdc, RDC_QUEUING);
		for (p = krdc->group_next; p != krdc; p = p->group_next) {
			q = &rdc_u_info[p->index];
			if (!IS_ENABLED(q))
				continue;
			rdc_set_flags_log(q, RDC_LOGGING,
			    "consistency group member following leader");
			if (RDC_IS_DISKQ(p->group))
				flag_op(q, RDC_QUEUING);
		}

		rdc_many_exit(krdc);

		/*
		 * This can cause the async threads to fail,
		 * which in turn will call rdc_group_log()
		 * again. Release the lock and re-aquire.
		 */
		rdc_group_exit(krdc);

		while (rdc_dump_alloc_bufs_cd(krdc->index) == EAGAIN)
			delay(2);
		if (!RDC_IS_DISKQ(krdc->group))
			RDC_ZERO_BITREF(krdc);

		rdc_inflwait(krdc->group);

		/*
		 * a little lazy, but neat. recall dump_alloc_bufs to
		 * ensure that the queue pointers & seq are reset properly
		 * after we have waited for inflight stuff
		 */
		while (rdc_dump_alloc_bufs_cd(krdc->index) == EAGAIN)
			delay(2);

		rdc_group_enter(krdc);
		if (RDC_IS_DISKQ(krdc->group) && (!(flag & RDC_QUEUING))) {
			/* fail or user request */
			RDC_ZERO_BITREF(krdc);
			mutex_enter(&krdc->group->diskq.disk_qlock);
			rdc_init_diskq_header(krdc->group,
			    &krdc->group->diskq.disk_hdr);
			SET_QNXTIO(dq, QHEAD(dq));
			mutex_exit(&krdc->group->diskq.disk_qlock);
		}

		if (flag & RDC_ALLREMOTE) {
			/* Tell other node to start logging */
			if (krdc->lsrv && krdc->intf && !krdc->intf->if_down)
				(void) rdc_net_state(krdc->index,
				    CCIO_ENABLELOG);
		}

		if (flag & (RDC_ALLREMOTE | RDC_OTHERREMOTE)) {
			rdc_many_enter(krdc);
			for (p = krdc->group_next; p != krdc;
			    p = p->group_next) {
				/*
				 * NOTE(review): tests krdc->intf rather than
				 * p->intf inside the per-member loop; looks
				 * intentional only if all members share the
				 * interface - confirm before changing.
				 */
				if (p->lsrv && krdc->intf &&
				    !krdc->intf->if_down) {
					(void) rdc_net_state(p->index,
					    CCIO_ENABLELOG);
				}
			}
			rdc_many_exit(krdc);
		}

		/* persist the new state of every member */
		rdc_write_state(urdc);
		for (p = krdc->group_next; p != krdc; p = p->group_next) {
			q = &rdc_u_info[p->index];
			if (!IS_ENABLED(q))
				continue;
			rdc_write_state(q);
		}
	} else {
		/* No point in time is possible, just deal with single set */

		if (rdc_get_vflags(urdc) & RDC_PRIMARY) {
			halt_sync(krdc);
		} else {
			if (rdc_net_getstate(krdc, &sm, &um, &md, TRUE) < 0) {
				rdc_clr_flags(urdc, RDC_SYNCING);
				rdc_set_flags_log(urdc, RDC_LOGGING,
				    "failed to read remote state");

				rdc_write_state(urdc);
				while (rdc_dump_alloc_bufs_cd(krdc->index)
				    == EAGAIN)
					delay(2);
				if ((RDC_IS_DISKQ(krdc->group)) &&
				    (!(flag & RDC_QUEUING))) { /* fail! */
					mutex_enter(QLOCK(dq));
					rdc_init_diskq_header(krdc->group,
					    &krdc->group->diskq.disk_hdr);
					SET_QNXTIO(dq, QHEAD(dq));
					mutex_exit(QLOCK(dq));
				}

				return;
			}
		}

		if (rdc_get_vflags(urdc) & RDC_SYNCING)
			return;

		if (RDC_IS_DISKQ(krdc->group))
			flag_op(urdc, RDC_QUEUING);

		if ((RDC_IS_DISKQ(krdc->group)) &&
		    (!(flag & RDC_QUEUING))) { /* fail! */
			RDC_ZERO_BITREF(krdc);
			mutex_enter(QLOCK(dq));
			rdc_init_diskq_header(krdc->group,
			    &krdc->group->diskq.disk_hdr);
			SET_QNXTIO(dq, QHEAD(dq));
			mutex_exit(QLOCK(dq));
		}

		if (!(rdc_get_vflags(urdc) & RDC_LOGGING)) {
			rdc_set_flags_log(urdc, RDC_LOGGING, why);

			rdc_write_state(urdc);

			while (rdc_dump_alloc_bufs_cd(krdc->index) == EAGAIN)
				delay(2);
			if (!RDC_IS_DISKQ(krdc->group))
				RDC_ZERO_BITREF(krdc);

			rdc_inflwait(krdc->group);
			/*
			 * a little lazy, but neat. recall dump_alloc_bufs to
			 * ensure that the queue pointers & seq are reset
			 * properly after we have waited for inflight stuff
			 */
			while (rdc_dump_alloc_bufs_cd(krdc->index) == EAGAIN)
				delay(2);

			if (flag & RDC_ALLREMOTE) {
				/* Tell other node to start logging */
				if (krdc->lsrv && krdc->intf &&
				    !krdc->intf->if_down) {
					(void) rdc_net_state(krdc->index,
					    CCIO_ENABLELOG);
				}
			}
		}
	}
	/*
	 * just in case any threads were in flight during log cleanup
	 */
	if (RDC_IS_DISKQ(krdc->group)) {
		mutex_enter(QLOCK(dq));
		cv_broadcast(&dq->qfullcv);
		mutex_exit(QLOCK(dq));
	}
}
4838 4838
/*
 * _rdc_log: put a single rdc set into logging mode on behalf of rdc_log().
 *
 * Verifies the caller's description still matches the set, (re)attaches
 * the set to its network interface record, then asks rdc_group_log() to
 * flush and log, also telling the remote node to log (RDC_ALLREMOTE).
 *
 * Returns 0 on success; RDC_EALREADY, RDC_EADDTOIF or RDC_ESYNCING
 * (with kstatus detail) on failure.  Called with the group lock NOT
 * held; takes and releases it internally.
 */
static int
_rdc_log(rdc_k_info_t *krdc, rdc_set_t *rdc_set, spcs_s_info_t kstatus)
{
	rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
	rdc_srv_t *svp;

	rdc_group_enter(krdc);
	if (rdc_check(krdc, rdc_set)) {
		rdc_group_exit(krdc);
		spcs_s_add(kstatus, RDC_EALREADY, rdc_set->primary.file,
		    rdc_set->secondary.file);
		return (RDC_EALREADY);
	}

	/* attach to the interface; argument order depends on our role */
	svp = krdc->lsrv;
	if (rdc_get_vflags(urdc) & RDC_PRIMARY)
		krdc->intf = rdc_add_to_if(svp, &(urdc->primary.addr),
		    &(urdc->secondary.addr), 1);
	else
		krdc->intf = rdc_add_to_if(svp, &(urdc->secondary.addr),
		    &(urdc->primary.addr), 0);

	if (!krdc->intf) {
		rdc_group_exit(krdc);
		spcs_s_add(kstatus, RDC_EADDTOIF, urdc->primary.intf,
		    urdc->secondary.intf);
		return (RDC_EADDTOIF);
	}

	rdc_group_log(krdc, RDC_FLUSH | RDC_ALLREMOTE, NULL);

	/* a sync in progress prevents the transition to logging */
	if (rdc_get_vflags(urdc) & RDC_SYNCING) {
		rdc_group_exit(krdc);
		spcs_s_add(kstatus, RDC_ESYNCING, urdc->primary.file);
		return (RDC_ESYNCING);
	}

	rdc_group_exit(krdc);

	return (0);
}
4880 4880
/*
 * rdc_log: ioctl entry point to place a set into logging mode.
 *
 * Looks up the set under rdc_conf_lock, takes a busy reference so the
 * set cannot be torn down underneath us, calls _rdc_log() for the real
 * work, then drops the busy reference.
 *
 * Returns 0 or an RDC_E* code (detail added to kstatus).
 */
static int
rdc_log(rdc_config_t *uparms, spcs_s_info_t kstatus)
{
	rdc_k_info_t *krdc;
	int rc = 0;
	int index;

	mutex_enter(&rdc_conf_lock);
	index = rdc_lookup_byname(uparms->rdc_set);
	if (index >= 0)
		krdc = &rdc_k_info[index];
	/* || short-circuits, so krdc is only read when index >= 0 */
	if (index < 0 || (krdc->type_flag & RDC_DISABLEPEND)) {
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
		    uparms->rdc_set->secondary.file);
		return (RDC_EALREADY);
	}

	set_busy(krdc);
	if (krdc->type_flag == 0) {
		/* A resume or enable failed */
		wakeup_busy(krdc);
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
		    uparms->rdc_set->secondary.file);
		return (RDC_EALREADY);
	}
	mutex_exit(&rdc_conf_lock);

	rc = _rdc_log(krdc, uparms->rdc_set, kstatus);

	mutex_enter(&rdc_conf_lock);
	wakeup_busy(krdc);
	mutex_exit(&rdc_conf_lock);

	return (rc);
}
4918 4918
4919 4919
/*
 * rdc_wait: ioctl entry point - block until a sync in progress on the
 * named (primary) set completes or fails.
 *
 * Returns 0 if no sync is in progress (or the set is a secondary),
 * 0 when the sync completes, EIO when it fails, or RDC_EALREADY when
 * the set is not configured / being torn down.  Sleeps interruptibly
 * on krdc->synccv under net_blk_lock; the busy reference is dropped
 * before sleeping so teardown is not held off by the waiter.
 */
static int
rdc_wait(rdc_config_t *uparms, spcs_s_info_t kstatus)
{
	rdc_k_info_t *krdc;
	rdc_u_info_t *urdc;
	int index;
	int need_check = 0;

	mutex_enter(&rdc_conf_lock);
	index = rdc_lookup_byname(uparms->rdc_set);
	if (index >= 0)
		krdc = &rdc_k_info[index];
	/* || short-circuits, so krdc is only read when index >= 0 */
	if (index < 0 || (krdc->type_flag & RDC_DISABLEPEND)) {
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
		    uparms->rdc_set->secondary.file);
		return (RDC_EALREADY);
	}

	/* only a primary can be waited on; secondaries return success */
	urdc = &rdc_u_info[index];
	if (!(rdc_get_vflags(urdc) & RDC_PRIMARY)) {
		mutex_exit(&rdc_conf_lock);
		return (0);
	}

	set_busy(krdc);
	if (krdc->type_flag == 0) {
		/* A resume or enable failed */
		wakeup_busy(krdc);
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
		    uparms->rdc_set->secondary.file);
		return (RDC_EALREADY);
	}
	mutex_exit(&rdc_conf_lock);

	rdc_group_enter(krdc);
	if (rdc_check(krdc, uparms->rdc_set)) {
		rdc_group_exit(krdc);
		mutex_enter(&rdc_conf_lock);
		wakeup_busy(krdc);
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
		    uparms->rdc_set->secondary.file);
		return (RDC_EALREADY);
	}

	/* nothing to wait for unless a sync is running on this primary */
	if ((rdc_get_vflags(urdc) & (RDC_SYNCING | RDC_PRIMARY)) !=
	    (RDC_SYNCING | RDC_PRIMARY)) {
		rdc_group_exit(krdc);
		mutex_enter(&rdc_conf_lock);
		wakeup_busy(krdc);
		mutex_exit(&rdc_conf_lock);
		return (0);
	}
	/*
	 * NOTE(review): RDC_SYNCING is necessarily set here (checked just
	 * above), so need_check is always 1 on this path; kept as-is.
	 */
	if (rdc_get_vflags(urdc) & RDC_SYNCING) {
		need_check = 1;
	}
	rdc_group_exit(krdc);

	mutex_enter(&net_blk_lock);

	/* drop the busy reference before sleeping */
	mutex_enter(&rdc_conf_lock);
	wakeup_busy(krdc);
	mutex_exit(&rdc_conf_lock);

	(void) cv_wait_sig(&krdc->synccv, &net_blk_lock);

	mutex_exit(&net_blk_lock);
	if (need_check) {
		if (krdc->sync_done == RDC_COMPLETED) {
			return (0);
		} else if (krdc->sync_done == RDC_FAILED) {
			return (EIO);
		}
	}
	return (0);
}
4998 4998
4999 4999
/*
 * rdc_health: ioctl entry point - report whether the network interface
 * used by the named set is currently active.
 *
 * On success returns 0 and stores RDC_ACTIVE or RDC_INACTIVE in *rvp;
 * returns RDC_EALREADY (with kstatus detail) if the set is not
 * configured, is being torn down, or no longer matches the caller's
 * description.  A busy reference brackets the check.
 */
static int
rdc_health(rdc_config_t *uparms, spcs_s_info_t kstatus, int *rvp)
{
	rdc_k_info_t *krdc;
	rdc_u_info_t *urdc;
	int rc = 0;
	int index;

	mutex_enter(&rdc_conf_lock);
	index = rdc_lookup_byname(uparms->rdc_set);
	if (index >= 0)
		krdc = &rdc_k_info[index];
	/* || short-circuits, so krdc is only read when index >= 0 */
	if (index < 0 || (krdc->type_flag & RDC_DISABLEPEND)) {
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
		    uparms->rdc_set->secondary.file);
		return (RDC_EALREADY);
	}

	set_busy(krdc);
	if (krdc->type_flag == 0) {
		/* A resume or enable failed */
		wakeup_busy(krdc);
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
		    uparms->rdc_set->secondary.file);
		return (RDC_EALREADY);
	}

	mutex_exit(&rdc_conf_lock);

	rdc_group_enter(krdc);
	if (rdc_check(krdc, uparms->rdc_set)) {
		rdc_group_exit(krdc);
		spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
		    uparms->rdc_set->secondary.file);
		rc = RDC_EALREADY;
		goto done;
	}

	urdc = &rdc_u_info[index];
	if (rdc_isactive_if(&(urdc->primary.addr), &(urdc->secondary.addr)))
		*rvp = RDC_ACTIVE;
	else
		*rvp = RDC_INACTIVE;

	rdc_group_exit(krdc);

done:
	mutex_enter(&rdc_conf_lock);
	wakeup_busy(krdc);
	mutex_exit(&rdc_conf_lock);

	return (rc);
}
5055 5055
5056 5056
5057 5057 static int
5058 5058 rdc_reconfig(rdc_config_t *uparms, spcs_s_info_t kstatus)
5059 5059 {
5060 5060 rdc_k_info_t *krdc;
5061 5061 rdc_u_info_t *urdc;
5062 5062 int rc = -2;
5063 5063 int index;
5064 5064
5065 5065 mutex_enter(&rdc_conf_lock);
5066 5066 index = rdc_lookup_byname(uparms->rdc_set);
5067 5067 if (index >= 0)
5068 5068 krdc = &rdc_k_info[index];
5069 5069 if (index < 0 || (krdc->type_flag & RDC_DISABLEPEND)) {
5070 5070 mutex_exit(&rdc_conf_lock);
5071 5071 spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
5072 5072 uparms->rdc_set->secondary.file);
5073 5073 return (RDC_EALREADY);
5074 5074 }
5075 5075
5076 5076 urdc = &rdc_u_info[index];
5077 5077 set_busy(krdc);
5078 5078 if (krdc->type_flag == 0) {
5079 5079 /* A resume or enable failed */
5080 5080 wakeup_busy(krdc);
5081 5081 mutex_exit(&rdc_conf_lock);
5082 5082 spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
5083 5083 uparms->rdc_set->secondary.file);
5084 5084 return (RDC_EALREADY);
5085 5085 }
5086 5086
5087 5087 mutex_exit(&rdc_conf_lock);
5088 5088
5089 5089 rdc_group_enter(krdc);
5090 5090 if (rdc_check(krdc, uparms->rdc_set)) {
5091 5091 rdc_group_exit(krdc);
5092 5092 spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
5093 5093 uparms->rdc_set->secondary.file);
5094 5094 rc = RDC_EALREADY;
5095 5095 goto done;
5096 5096 }
5097 5097 if ((rdc_get_vflags(urdc) & RDC_BMP_FAILED) && (krdc->bitmapfd))
5098 5098 (void) rdc_reset_bitmap(krdc);
5099 5099
5100 5100 /* Move to a new bitmap if necessary */
5101 5101 if (strncmp(urdc->primary.bitmap, uparms->rdc_set->primary.bitmap,
5102 5102 NSC_MAXPATH) != 0) {
5103 5103 if (rdc_get_vflags(urdc) & RDC_PRIMARY) {
5104 5104 rc = rdc_move_bitmap(krdc,
5105 5105 uparms->rdc_set->primary.bitmap);
5106 5106 } else {
5107 5107 (void) strncpy(urdc->primary.bitmap,
5108 5108 uparms->rdc_set->primary.bitmap, NSC_MAXPATH);
5109 5109 /* simulate a succesful rdc_move_bitmap */
5110 5110 rc = 0;
5111 5111 }
5112 5112 }
5113 5113 if (strncmp(urdc->secondary.bitmap, uparms->rdc_set->secondary.bitmap,
5114 5114 NSC_MAXPATH) != 0) {
5115 5115 if (rdc_get_vflags(urdc) & RDC_PRIMARY) {
5116 5116 (void) strncpy(urdc->secondary.bitmap,
5117 5117 uparms->rdc_set->secondary.bitmap, NSC_MAXPATH);
5118 5118 /* simulate a succesful rdc_move_bitmap */
5119 5119 rc = 0;
5120 5120 } else {
5121 5121 rc = rdc_move_bitmap(krdc,
5122 5122 uparms->rdc_set->secondary.bitmap);
5123 5123 }
5124 5124 }
5125 5125 if (rc == -1) {
5126 5126 rdc_group_exit(krdc);
5127 5127 spcs_s_add(kstatus, RDC_EBMPRECONFIG,
5128 5128 uparms->rdc_set->secondary.intf,
5129 5129 uparms->rdc_set->secondary.file);
5130 5130 rc = RDC_EBMPRECONFIG;
5131 5131 goto done;
5132 5132 }
5133 5133
5134 5134 /*
5135 5135 * At this point we fail any other type of reconfig
5136 5136 * if not in logging mode and we did not do a bitmap reconfig
5137 5137 */
5138 5138
5139 5139 if (!(rdc_get_vflags(urdc) & RDC_LOGGING) && rc == -2) {
5140 5140 /* no other changes possible unless logging */
5141 5141 rdc_group_exit(krdc);
5142 5142 spcs_s_add(kstatus, RDC_ENOTLOGGING,
5143 5143 uparms->rdc_set->primary.intf,
5144 5144 uparms->rdc_set->primary.file,
5145 5145 uparms->rdc_set->secondary.intf,
5146 5146 uparms->rdc_set->secondary.file);
5147 5147 rc = RDC_ENOTLOGGING;
5148 5148 goto done;
5149 5149 }
5150 5150 rc = 0;
5151 5151 /* Change direct file if necessary */
5152 5152 if ((rdc_get_vflags(urdc) & RDC_PRIMARY) &&
5153 5153 strncmp(urdc->direct_file, uparms->rdc_set->direct_file,
5154 5154 NSC_MAXPATH)) {
5155 5155 if (!(rdc_get_vflags(urdc) & RDC_LOGGING)) {
5156 5156 rdc_group_exit(krdc);
5157 5157 goto notlogging;
5158 5158 }
5159 5159 rdc_close_direct(krdc);
5160 5160 (void) strncpy(urdc->direct_file, uparms->rdc_set->direct_file,
5161 5161 NSC_MAXPATH);
5162 5162
5163 5163 if (urdc->direct_file[0]) {
5164 5164 if (rdc_open_direct(krdc) == NULL)
5165 5165 rdc_set_flags(urdc, RDC_FCAL_FAILED);
5166 5166 else
5167 5167 rdc_clr_flags(urdc, RDC_FCAL_FAILED);
5168 5168 }
5169 5169 }
5170 5170
5171 5171 rdc_group_exit(krdc);
5172 5172
5173 5173 /* Change group if necessary */
5174 5174 if (strncmp(urdc->group_name, uparms->rdc_set->group_name,
5175 5175 NSC_MAXPATH) != 0) {
5176 5176 char orig_group[NSC_MAXPATH];
5177 5177 if (!(rdc_get_vflags(urdc) & RDC_LOGGING))
5178 5178 goto notlogging;
5179 5179 mutex_enter(&rdc_conf_lock);
5180 5180
5181 5181 (void) strncpy(orig_group, urdc->group_name, NSC_MAXPATH);
5182 5182 (void) strncpy(urdc->group_name, uparms->rdc_set->group_name,
5183 5183 NSC_MAXPATH);
5184 5184
5185 5185 rc = change_group(krdc, uparms->options);
5186 5186 if (rc == RDC_EQNOADD) {
5187 5187 mutex_exit(&rdc_conf_lock);
5188 5188 spcs_s_add(kstatus, RDC_EQNOADD,
5189 5189 uparms->rdc_set->disk_queue);
5190 5190 goto done;
5191 5191 } else if (rc < 0) {
5192 5192 (void) strncpy(urdc->group_name, orig_group,
5193 5193 NSC_MAXPATH);
5194 5194 mutex_exit(&rdc_conf_lock);
5195 5195 spcs_s_add(kstatus, RDC_EGROUP,
5196 5196 urdc->primary.intf, urdc->primary.file,
5197 5197 urdc->secondary.intf, urdc->secondary.file,
5198 5198 uparms->rdc_set->group_name);
5199 5199 rc = RDC_EGROUP;
5200 5200 goto done;
5201 5201 }
5202 5202
5203 5203 mutex_exit(&rdc_conf_lock);
5204 5204
5205 5205 if (rc >= 0) {
5206 5206 if (!(rdc_get_vflags(urdc) & RDC_LOGGING))
5207 5207 goto notlogging;
5208 5208 if (uparms->options & RDC_OPT_ASYNC) {
5209 5209 mutex_enter(&rdc_conf_lock);
5210 5210 krdc->type_flag |= RDC_ASYNCMODE;
5211 5211 mutex_exit(&rdc_conf_lock);
5212 5212 if (uparms->options & RDC_OPT_PRIMARY)
5213 5213 krdc->bitmap_ref =
5214 5214 (uchar_t *)kmem_zalloc(
5215 5215 (krdc->bitmap_size * BITS_IN_BYTE *
5216 5216 BMAP_REF_PREF_SIZE), KM_SLEEP);
5217 5217 rdc_group_enter(krdc);
5218 5218 rdc_set_flags(urdc, RDC_ASYNC);
5219 5219 rdc_group_exit(krdc);
5220 5220 } else {
5221 5221 mutex_enter(&rdc_conf_lock);
5222 5222 krdc->type_flag &= ~RDC_ASYNCMODE;
5223 5223 mutex_exit(&rdc_conf_lock);
5224 5224 rdc_group_enter(krdc);
5225 5225 rdc_clr_flags(urdc, RDC_ASYNC);
5226 5226 rdc_group_exit(krdc);
5227 5227 if (krdc->bitmap_ref) {
5228 5228 kmem_free(krdc->bitmap_ref,
5229 5229 (krdc->bitmap_size * BITS_IN_BYTE *
5230 5230 BMAP_REF_PREF_SIZE));
5231 5231 krdc->bitmap_ref = NULL;
5232 5232 }
5233 5233 }
5234 5234 }
5235 5235 } else {
5236 5236 if ((((uparms->options & RDC_OPT_ASYNC) == 0) &&
5237 5237 ((krdc->type_flag & RDC_ASYNCMODE) != 0)) ||
5238 5238 (((uparms->options & RDC_OPT_ASYNC) != 0) &&
5239 5239 ((krdc->type_flag & RDC_ASYNCMODE) == 0))) {
5240 5240 if (!(rdc_get_vflags(urdc) & RDC_LOGGING))
5241 5241 goto notlogging;
5242 5242
5243 5243 if (krdc->group->count > 1) {
5244 5244 spcs_s_add(kstatus, RDC_EGROUPMODE);
5245 5245 rc = RDC_EGROUPMODE;
5246 5246 goto done;
5247 5247 }
5248 5248 }
5249 5249
5250 5250 /* Switch sync/async if necessary */
5251 5251 if (krdc->group->count == 1) {
5252 5252 /* Only member of group. Can change sync/async */
5253 5253 if (((uparms->options & RDC_OPT_ASYNC) == 0) &&
5254 5254 ((krdc->type_flag & RDC_ASYNCMODE) != 0)) {
5255 5255 if (!(rdc_get_vflags(urdc) & RDC_LOGGING))
5256 5256 goto notlogging;
5257 5257 /* switch to sync */
5258 5258 mutex_enter(&rdc_conf_lock);
5259 5259 krdc->type_flag &= ~RDC_ASYNCMODE;
5260 5260 if (RDC_IS_DISKQ(krdc->group)) {
5261 5261 krdc->group->flags &= ~RDC_DISKQUE;
5262 5262 krdc->group->flags |= RDC_MEMQUE;
5263 5263 rdc_unintercept_diskq(krdc->group);
5264 5264 mutex_enter(&krdc->group->diskqmutex);
5265 5265 rdc_close_diskq(krdc->group);
5266 5266 mutex_exit(&krdc->group->diskqmutex);
5267 5267 bzero(&urdc->disk_queue,
5268 5268 sizeof (urdc->disk_queue));
5269 5269 }
5270 5270 mutex_exit(&rdc_conf_lock);
5271 5271 rdc_group_enter(krdc);
5272 5272 rdc_clr_flags(urdc, RDC_ASYNC);
5273 5273 rdc_group_exit(krdc);
5274 5274 if (krdc->bitmap_ref) {
5275 5275 kmem_free(krdc->bitmap_ref,
5276 5276 (krdc->bitmap_size * BITS_IN_BYTE *
5277 5277 BMAP_REF_PREF_SIZE));
5278 5278 krdc->bitmap_ref = NULL;
5279 5279 }
5280 5280 } else if (((uparms->options & RDC_OPT_ASYNC) != 0) &&
5281 5281 ((krdc->type_flag & RDC_ASYNCMODE) == 0)) {
5282 5282 if (!(rdc_get_vflags(urdc) & RDC_LOGGING))
5283 5283 goto notlogging;
5284 5284 /* switch to async */
5285 5285 mutex_enter(&rdc_conf_lock);
5286 5286 krdc->type_flag |= RDC_ASYNCMODE;
5287 5287 mutex_exit(&rdc_conf_lock);
5288 5288 if (uparms->options & RDC_OPT_PRIMARY)
5289 5289 krdc->bitmap_ref =
5290 5290 (uchar_t *)kmem_zalloc(
5291 5291 (krdc->bitmap_size * BITS_IN_BYTE *
5292 5292 BMAP_REF_PREF_SIZE), KM_SLEEP);
5293 5293 rdc_group_enter(krdc);
5294 5294 rdc_set_flags(urdc, RDC_ASYNC);
5295 5295 rdc_group_exit(krdc);
5296 5296 }
5297 5297 }
5298 5298 }
5299 5299 /* Reverse concept of primary and secondary */
5300 5300 if ((uparms->options & RDC_OPT_REVERSE_ROLE) != 0) {
5301 5301 rdc_set_t rdc_set;
5302 5302 struct netbuf paddr, saddr;
5303 5303
5304 5304 mutex_enter(&rdc_conf_lock);
5305 5305
5306 5306 /*
5307 5307 * Disallow role reversal for advanced configurations
5308 5308 */
5309 5309
5310 5310 if (IS_MANY(krdc) || IS_MULTI(krdc)) {
5311 5311 mutex_exit(&rdc_conf_lock);
5312 5312 spcs_s_add(kstatus, RDC_EMASTER, urdc->primary.intf,
5313 5313 urdc->primary.file, urdc->secondary.intf,
5314 5314 urdc->secondary.file);
5315 5315 return (RDC_EMASTER);
5316 5316 }
5317 5317 bzero((void *) &rdc_set, sizeof (rdc_set_t));
5318 5318 dup_rdc_netbuf(&urdc->primary.addr, &saddr);
5319 5319 dup_rdc_netbuf(&urdc->secondary.addr, &paddr);
5320 5320 free_rdc_netbuf(&urdc->primary.addr);
5321 5321 free_rdc_netbuf(&urdc->secondary.addr);
5322 5322 dup_rdc_netbuf(&saddr, &urdc->secondary.addr);
5323 5323 dup_rdc_netbuf(&paddr, &urdc->primary.addr);
5324 5324 free_rdc_netbuf(&paddr);
5325 5325 free_rdc_netbuf(&saddr);
5326 5326 /* copy primary parts of urdc to rdc_set field by field */
5327 5327 (void) strncpy(rdc_set.primary.intf, urdc->primary.intf,
5328 5328 MAX_RDC_HOST_SIZE);
5329 5329 (void) strncpy(rdc_set.primary.file, urdc->primary.file,
5330 5330 NSC_MAXPATH);
5331 5331 (void) strncpy(rdc_set.primary.bitmap, urdc->primary.bitmap,
5332 5332 NSC_MAXPATH);
5333 5333
5334 5334 /* Now overwrite urdc primary */
5335 5335 (void) strncpy(urdc->primary.intf, urdc->secondary.intf,
5336 5336 MAX_RDC_HOST_SIZE);
5337 5337 (void) strncpy(urdc->primary.file, urdc->secondary.file,
5338 5338 NSC_MAXPATH);
5339 5339 (void) strncpy(urdc->primary.bitmap, urdc->secondary.bitmap,
5340 5340 NSC_MAXPATH);
5341 5341
5342 5342 /* Now ovwewrite urdc secondary */
5343 5343 (void) strncpy(urdc->secondary.intf, rdc_set.primary.intf,
5344 5344 MAX_RDC_HOST_SIZE);
5345 5345 (void) strncpy(urdc->secondary.file, rdc_set.primary.file,
5346 5346 NSC_MAXPATH);
5347 5347 (void) strncpy(urdc->secondary.bitmap, rdc_set.primary.bitmap,
5348 5348 NSC_MAXPATH);
5349 5349
5350 5350 if (rdc_get_vflags(urdc) & RDC_PRIMARY) {
5351 5351 rdc_clr_flags(urdc, RDC_PRIMARY);
5352 5352 if (krdc->intf) {
5353 5353 krdc->intf->issecondary = 1;
5354 5354 krdc->intf->isprimary = 0;
5355 5355 krdc->intf->if_down = 1;
5356 5356 }
5357 5357 } else {
5358 5358 rdc_set_flags(urdc, RDC_PRIMARY);
5359 5359 if (krdc->intf) {
5360 5360 krdc->intf->issecondary = 0;
5361 5361 krdc->intf->isprimary = 1;
5362 5362 krdc->intf->if_down = 1;
5363 5363 }
5364 5364 }
5365 5365
5366 5366 if ((rdc_get_vflags(urdc) & RDC_PRIMARY) &&
5367 5367 ((krdc->type_flag & RDC_ASYNCMODE) != 0)) {
5368 5368 if (!krdc->bitmap_ref)
5369 5369 krdc->bitmap_ref =
5370 5370 (uchar_t *)kmem_zalloc((krdc->bitmap_size *
5371 5371 BITS_IN_BYTE * BMAP_REF_PREF_SIZE),
5372 5372 KM_SLEEP);
5373 5373 if (krdc->bitmap_ref == NULL) {
5374 5374 cmn_err(CE_WARN,
5375 5375 "!rdc_reconfig: bitmap_ref alloc %"
5376 5376 NSC_SZFMT " failed",
5377 5377 krdc->bitmap_size * BITS_IN_BYTE *
5378 5378 BMAP_REF_PREF_SIZE);
5379 5379 mutex_exit(&rdc_conf_lock);
5380 5380 return (-1);
5381 5381 }
5382 5382 }
5383 5383
5384 5384 if ((rdc_get_vflags(urdc) & RDC_PRIMARY) &&
5385 5385 (rdc_get_vflags(urdc) & RDC_SYNC_NEEDED)) {
5386 5386 /* Primary, so reverse sync needed */
5387 5387 rdc_many_enter(krdc);
5388 5388 rdc_clr_flags(urdc, RDC_SYNC_NEEDED);
5389 5389 rdc_set_mflags(urdc, RDC_RSYNC_NEEDED);
5390 5390 rdc_many_exit(krdc);
5391 5391 } else if (rdc_get_vflags(urdc) & RDC_RSYNC_NEEDED) {
5392 5392 /* Secondary, so forward sync needed */
5393 5393 rdc_many_enter(krdc);
5394 5394 rdc_clr_flags(urdc, RDC_RSYNC_NEEDED);
5395 5395 rdc_set_flags(urdc, RDC_SYNC_NEEDED);
5396 5396 rdc_many_exit(krdc);
5397 5397 }
5398 5398
5399 5399 /*
5400 5400 * rewrite bitmap header
5401 5401 */
5402 5402 rdc_write_state(urdc);
5403 5403 mutex_exit(&rdc_conf_lock);
5404 5404 }
5405 5405
5406 5406 done:
5407 5407 mutex_enter(&rdc_conf_lock);
5408 5408 wakeup_busy(krdc);
5409 5409 mutex_exit(&rdc_conf_lock);
5410 5410
5411 5411 return (rc);
5412 5412
5413 5413 notlogging:
5414 5414 /* no other changes possible unless logging */
5415 5415 mutex_enter(&rdc_conf_lock);
5416 5416 wakeup_busy(krdc);
5417 5417 mutex_exit(&rdc_conf_lock);
5418 5418 spcs_s_add(kstatus, RDC_ENOTLOGGING, urdc->primary.intf,
5419 5419 urdc->primary.file, urdc->secondary.intf,
5420 5420 urdc->secondary.file);
5421 5421 return (RDC_ENOTLOGGING);
5422 5422 }
5423 5423
5424 5424 static int
5425 5425 rdc_reset(rdc_config_t *uparms, spcs_s_info_t kstatus)
5426 5426 {
5427 5427 rdc_k_info_t *krdc;
5428 5428 rdc_u_info_t *urdc;
5429 5429 int rc = 0;
5430 5430 int index;
5431 5431 int cleared_error = 0;
5432 5432
5433 5433 mutex_enter(&rdc_conf_lock);
5434 5434 index = rdc_lookup_byname(uparms->rdc_set);
5435 5435 if (index >= 0)
5436 5436 krdc = &rdc_k_info[index];
5437 5437 if (index < 0 || (krdc->type_flag & RDC_DISABLEPEND)) {
5438 5438 mutex_exit(&rdc_conf_lock);
5439 5439 spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
5440 5440 uparms->rdc_set->secondary.file);
5441 5441 return (RDC_EALREADY);
5442 5442 }
5443 5443
5444 5444 urdc = &rdc_u_info[index];
5445 5445 set_busy(krdc);
5446 5446 if (krdc->type_flag == 0) {
5447 5447 /* A resume or enable failed */
5448 5448 wakeup_busy(krdc);
5449 5449 mutex_exit(&rdc_conf_lock);
5450 5450 spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
5451 5451 uparms->rdc_set->secondary.file);
5452 5452 return (RDC_EALREADY);
5453 5453 }
5454 5454
5455 5455 mutex_exit(&rdc_conf_lock);
5456 5456
5457 5457 rdc_group_enter(krdc);
5458 5458 if (rdc_check(krdc, uparms->rdc_set)) {
5459 5459 spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
5460 5460 uparms->rdc_set->secondary.file);
5461 5461 rc = RDC_EALREADY;
5462 5462 goto done;
5463 5463 }
5464 5464
5465 5465 if ((rdc_get_vflags(urdc) & RDC_BMP_FAILED) && (krdc->bitmapfd)) {
5466 5466 if (rdc_reset_bitmap(krdc) == 0)
5467 5467 cleared_error++;
5468 5468 }
5469 5469
5470 5470 /* Fix direct file if necessary */
5471 5471 if ((rdc_get_vflags(urdc) & RDC_PRIMARY) && urdc->direct_file[0]) {
5472 5472 if (rdc_open_direct(krdc) == NULL)
5473 5473 rdc_set_flags(urdc, RDC_FCAL_FAILED);
5474 5474 else {
5475 5475 rdc_clr_flags(urdc, RDC_FCAL_FAILED);
5476 5476 cleared_error++;
5477 5477 }
5478 5478 }
5479 5479
5480 5480 if ((rdc_get_vflags(urdc) & RDC_VOL_FAILED)) {
5481 5481 rdc_many_enter(krdc);
5482 5482 rdc_clr_flags(urdc, RDC_VOL_FAILED);
5483 5483 cleared_error++;
5484 5484 rdc_many_exit(krdc);
5485 5485 }
5486 5486
5487 5487 if (cleared_error) {
5488 5488 /* cleared an error so we should be in logging mode */
5489 5489 rdc_set_flags_log(urdc, RDC_LOGGING, "set reset");
5490 5490 }
5491 5491 rdc_group_exit(krdc);
5492 5492
5493 5493 if ((rdc_get_vflags(urdc) & RDC_DISKQ_FAILED))
5494 5494 rdc_unfail_diskq(krdc);
5495 5495
5496 5496 done:
5497 5497 mutex_enter(&rdc_conf_lock);
5498 5498 wakeup_busy(krdc);
5499 5499 mutex_exit(&rdc_conf_lock);
5500 5500
5501 5501 return (rc);
5502 5502 }
5503 5503
5504 5504
/*
 * rdc_tunable: adjust tunable parameters on an existing set -
 * maxqfbas, maxqitems, asyncthr, autosync, and the disk queue
 * QNOBLOCK state.  Every changed value is propagated to all other
 * sets in the same group and written to persistent state.
 *
 * uparms  - user parameters carrying the new tunable values; a value
 *           is applied only when > 0 (autosync: >= 0).
 * kstatus - status handle used to return error detail to the caller.
 *
 * Returns 0 on success or an RDC_E* error code.
 */
static int
rdc_tunable(rdc_config_t *uparms, spcs_s_info_t kstatus)
{
	rdc_k_info_t *krdc;
	rdc_u_info_t *urdc;
	rdc_k_info_t *p;	/* group walk cursor (kernel info) */
	rdc_u_info_t *q;	/* group walk cursor (user info) */
	int rc = 0;
	int index;

	/* Locate the set; fail if it is unknown or mid-disable. */
	mutex_enter(&rdc_conf_lock);
	index = rdc_lookup_byname(uparms->rdc_set);
	if (index >= 0)
		krdc = &rdc_k_info[index];
	if (index < 0 || (krdc->type_flag & RDC_DISABLEPEND)) {
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
		    uparms->rdc_set->secondary.file);
		return (RDC_EALREADY);
	}

	urdc = &rdc_u_info[index];
	/* Hold the set busy so it cannot be disabled under us. */
	set_busy(krdc);
	if (krdc->type_flag == 0) {
		/* A resume or enable failed */
		wakeup_busy(krdc);
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
		    uparms->rdc_set->secondary.file);
		return (RDC_EALREADY);
	}

	mutex_exit(&rdc_conf_lock);

	/* The group lock is held until the done label at the bottom. */
	rdc_group_enter(krdc);
	if (rdc_check(krdc, uparms->rdc_set)) {
		spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
		    uparms->rdc_set->secondary.file);
		rc = RDC_EALREADY;
		goto done;
	}

	/* Propagate a new maxqfbas to every member of the group. */
	if (uparms->rdc_set->maxqfbas > 0) {
		urdc->maxqfbas = uparms->rdc_set->maxqfbas;
		rdc_write_state(urdc);
		for (p = krdc->group_next; p != krdc; p = p->group_next) {
			q = &rdc_u_info[p->index];
			q->maxqfbas = urdc->maxqfbas;
			rdc_write_state(q);
		}
	}

	/* Propagate a new maxqitems to every member of the group. */
	if (uparms->rdc_set->maxqitems > 0) {
		urdc->maxqitems = uparms->rdc_set->maxqitems;
		rdc_write_state(urdc);
		for (p = krdc->group_next; p != krdc; p = p->group_next) {
			q = &rdc_u_info[p->index];
			q->maxqitems = urdc->maxqitems;
			rdc_write_state(q);
		}
	}

	/* Set QNOBLOCK; only meaningful for a disk-queue group. */
	if (uparms->options & RDC_OPT_SET_QNOBLOCK) {
		disk_queue *que;

		if (!RDC_IS_DISKQ(krdc->group)) {
			spcs_s_add(kstatus, RDC_EQNOQUEUE, urdc->primary.intf,
			    urdc->primary.file, urdc->secondary.intf,
			    urdc->secondary.file);
			rc = RDC_EQNOQUEUE;
			goto done;
		}

		que = &krdc->group->diskq;
		mutex_enter(QLOCK(que));
		SET_QSTATE(que, RDC_QNOBLOCK);
		/* queue will fail if this fails */
		(void) rdc_stamp_diskq(krdc, 0, RDC_GROUP_LOCKED);
		mutex_exit(QLOCK(que));

	}

	/* Clear QNOBLOCK; only meaningful for a disk-queue group. */
	if (uparms->options & RDC_OPT_CLR_QNOBLOCK) {
		disk_queue *que;

		if (!RDC_IS_DISKQ(krdc->group)) {
			spcs_s_add(kstatus, RDC_EQNOQUEUE, urdc->primary.intf,
			    urdc->primary.file, urdc->secondary.intf,
			    urdc->secondary.file);
			rc = RDC_EQNOQUEUE;
			goto done;
		}
		que = &krdc->group->diskq;
		mutex_enter(QLOCK(que));
		CLR_QSTATE(que, RDC_QNOBLOCK);
		/* queue will fail if this fails */
		(void) rdc_stamp_diskq(krdc, 0, RDC_GROUP_LOCKED);
		mutex_exit(QLOCK(que));

	}
	/* Propagate a new async-thread count to the whole group. */
	if (uparms->rdc_set->asyncthr > 0) {
		urdc->asyncthr = uparms->rdc_set->asyncthr;
		rdc_write_state(urdc);
		for (p = krdc->group_next; p != krdc; p = p->group_next) {
			q = &rdc_u_info[p->index];
			q->asyncthr = urdc->asyncthr;
			rdc_write_state(q);
		}
	}

	/* autosync is normalized to 0/1 before being propagated. */
	if (uparms->rdc_set->autosync >= 0) {
		if (uparms->rdc_set->autosync == 0)
			urdc->autosync = 0;
		else
			urdc->autosync = 1;

		rdc_write_state(urdc);

		/* Changed autosync, so update rest of the group */

		for (p = krdc->group_next; p != krdc; p = p->group_next) {
			q = &rdc_u_info[p->index];
			q->autosync = urdc->autosync;
			rdc_write_state(q);
		}
	}

done:
	rdc_group_exit(krdc);

	/* Release the busy count taken by set_busy() above. */
	mutex_enter(&rdc_conf_lock);
	wakeup_busy(krdc);
	mutex_exit(&rdc_conf_lock);

	return (rc);
}
5641 5641
/*
 * Yet another standard thing that is not standard ...
 *
 * Prefer the compiler builtin where available: the traditional
 * null-pointer-dereference form of offsetof() is rejected/warned
 * about by gcc >= 4.5 and is technically undefined behavior.
 */
#ifndef offsetof
#if defined(__GNUC__)
#define	offsetof(s, m)	__builtin_offsetof(s, m)
#else
#define	offsetof(s, m)	((size_t)(&(((s *)0)->m)))
#endif
#endif
5648 5652
/*
 * rdc_status: copy the user-visible state (rdc_u_info) of a set back
 * into the rdc_set embedded in the caller's rdc_config ioctl buffer.
 * Handles 32-bit callers via the ILP32 data-model conversion.  The
 * disk queue's QNOBLOCK state is temporarily folded into urdc->flags
 * for the duration of the copyout only.
 *
 * arg     - user address of the whole rdc_config structure.
 * mode    - ioctl mode bits (data model + copy direction).
 * uparms  - kernel copy of the user's rdc_config.
 * kstatus - status handle used to return error detail to the caller.
 *
 * Returns 0, RDC_EALREADY, or EFAULT on copyout failure.
 */
static int
rdc_status(void *arg, int mode, rdc_config_t *uparms, spcs_s_info_t kstatus)
{
	rdc_k_info_t *krdc;
	rdc_u_info_t *urdc;
	disk_queue *dqp;
	int rc = 0;
	int index;
	char *ptr;		/* user address of the embedded rdc_set */
	extern int rdc_status_copy32(const void *, void *, size_t, int);

	/* Locate the set; fail if it is unknown or mid-disable. */
	mutex_enter(&rdc_conf_lock);
	index = rdc_lookup_byname(uparms->rdc_set);
	if (index >= 0)
		krdc = &rdc_k_info[index];
	if (index < 0 || (krdc->type_flag & RDC_DISABLEPEND)) {
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
		    uparms->rdc_set->secondary.file);
		return (RDC_EALREADY);
	}

	/* Hold the set busy so it cannot be disabled under us. */
	set_busy(krdc);
	if (krdc->type_flag == 0) {
		/* A resume or enable failed */
		wakeup_busy(krdc);
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
		    uparms->rdc_set->secondary.file);
		return (RDC_EALREADY);
	}

	mutex_exit(&rdc_conf_lock);

	rdc_group_enter(krdc);
	if (rdc_check(krdc, uparms->rdc_set)) {
		rdc_group_exit(krdc);
		spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
		    uparms->rdc_set->secondary.file);
		rc = RDC_EALREADY;
		goto done;
	}

	urdc = &rdc_u_info[index];

	/*
	 * sneak out qstate in urdc->flags
	 * this is harmless because it's value is not used
	 * in urdc->flags. the real qstate is kept in
	 * group->diskq->disk_hdr.h.state
	 */
	if (RDC_IS_DISKQ(krdc->group)) {
		dqp = &krdc->group->diskq;
		if (IS_QSTATE(dqp, RDC_QNOBLOCK))
			urdc->flags |= RDC_QNOBLOCK;
	}

	/* Copy out at the offset of the rdc_set member within rdc_config. */
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		ptr = (char *)arg + offsetof(struct rdc_config32, rdc_set);
		rc = rdc_status_copy32(urdc, ptr, sizeof (struct rdc_set32),
		    mode);
	} else {
		ptr = (char *)arg + offsetof(struct rdc_config, rdc_set);
		rc = ddi_copyout(urdc, ptr, sizeof (struct rdc_set), mode);
	}
	/* clear out qstate from flags */
	urdc->flags &= ~RDC_QNOBLOCK;

	if (rc)
		rc = EFAULT;

	rdc_group_exit(krdc);
done:
	/* Release the busy count taken by set_busy() above. */
	mutex_enter(&rdc_conf_lock);
	wakeup_busy(krdc);
	mutex_exit(&rdc_conf_lock);

	return (rc);
}
5728 5732
/*
 * Overwrite the bitmap with one supplied by the
 * user.
 * Copy into all bitmaps that are tracking this volume.
 *
 * op       - RDC_BITMAPSET to overwrite, otherwise the user bits are
 *            OR-ed into the existing bitmaps (see rdc_installbitmap()).
 * sechost/secdev - identify the set by secondary host and device.
 * bmapaddr/bmapsz - user address and size of the bitmap data.
 * off      - byte offset into the bitmap; both off and bmapsz must be
 *            FBA aligned.
 * mode     - ioctl mode bits for ddi_copyin().
 *
 * Returns 0 or an errno value.
 */

int
rdc_bitmapset(int op, char *sechost, char *secdev, void *bmapaddr, int bmapsz,
    nsc_off_t off, int mode)
{
	int rc;
	rdc_k_info_t *krdc;
	int *indexvec;		/* indices of all sets sharing the volume */
	int index;
	int indexit;		/* number of valid entries in indexvec */
	kmutex_t **grouplocks;	/* group locks acquired so far */
	int i;
	int groupind;		/* number of valid entries in grouplocks */

	if (off % FBA_SIZE(1)) {
		/* Must be modulo FBA */
		cmn_err(CE_WARN, "!bitmapset: Offset is not on an FBA "
		    "boundary %llu", (unsigned long long)off);
		return (EINVAL);
	}
	if (bmapsz % FBA_SIZE(1)) {
		/* Must be modulo FBA */
		cmn_err(CE_WARN, "!bitmapset: Size is not on an FBA "
		    "boundary %d", bmapsz);
		return (EINVAL);
	}

	/* Locate the set; fail if it is unknown or mid-disable. */
	mutex_enter(&rdc_conf_lock);
	index = rdc_lookup_byhostdev(sechost, secdev);
	if (index >= 0) {
		krdc = &rdc_k_info[index];
	}
	if (index < 0 || (krdc->type_flag & RDC_DISABLEPEND)) {
		rc = ENODEV;
		mutex_exit(&rdc_conf_lock);
		return (rc);
	}
	indexvec = kmem_alloc(rdc_max_sets * sizeof (int), KM_SLEEP);
	grouplocks = kmem_alloc(rdc_max_sets * sizeof (kmutex_t *), KM_SLEEP);

	/*
	 * I now have this set, and I want to take the group
	 * lock on it, and all the group locks of all the
	 * sets on the many and multi-hop links.
	 * I have to take the many lock while traversing the
	 * many/multi links.
	 * I think I also need to set the busy count on this
	 * set, otherwise when I drop the conf_lock, what
	 * will stop some other process from coming in and
	 * issuing a disable?
	 */
	set_busy(krdc);
	mutex_exit(&rdc_conf_lock);

retrylock:
	/*
	 * Deadlock avoidance: group locks are taken with
	 * mutex_tryenter(); on any contention, every lock collected so
	 * far is dropped and the whole sequence restarts from here.
	 */
	groupind = 0;
	indexit = 0;
	rdc_many_enter(krdc);
	/*
	 * Take this initial sets group lock first.
	 */
	if (!mutex_tryenter(&krdc->group->lock)) {
		rdc_many_exit(krdc);
		goto retrylock;
	}

	grouplocks[groupind] = &krdc->group->lock;
	groupind++;

	/* Verify the primary set can accept a bitmap of this extent. */
	rc = rdc_checkforbitmap(index, off + bmapsz);
	if (rc) {
		goto done;
	}
	indexvec[indexit] = index;
	indexit++;
	if (IS_MANY(krdc)) {
		rdc_k_info_t *ktmp;

		/* Walk the one-to-many ring, collecting each set. */
		for (ktmp = krdc->many_next; ktmp != krdc;
		    ktmp = ktmp->many_next) {
			/*
			 * attempt to take the group lock,
			 * if we don't already have it.
			 */
			if (ktmp->group == NULL) {
				rc = ENODEV;
				goto done;
			}
			for (i = 0; i < groupind; i++) {
				if (grouplocks[i] == &ktmp->group->lock)
					/* already have the group lock */
					break;
			}
			/*
			 * didn't find our lock in our collection,
			 * attempt to take group lock.
			 */
			if (i >= groupind) {
				if (!mutex_tryenter(&ktmp->group->lock)) {
					/* Contention: unwind and retry. */
					for (i = 0; i < groupind; i++) {
						mutex_exit(grouplocks[i]);
					}
					rdc_many_exit(krdc);
					goto retrylock;
				}
				grouplocks[groupind] = &ktmp->group->lock;
				groupind++;
			}
			rc = rdc_checkforbitmap(ktmp->index, off + bmapsz);
			if (rc == 0) {
				indexvec[indexit] = ktmp->index;
				indexit++;
			} else {
				goto done;
			}
		}
	}
	if (IS_MULTI(krdc)) {
		rdc_k_info_t *kmulti = krdc->multi_next;

		if (kmulti->group == NULL) {
			rc = ENODEV;
			goto done;
		}
		/*
		 * This can't be in our group already.
		 */
		if (!mutex_tryenter(&kmulti->group->lock)) {
			/* Contention: unwind and retry. */
			for (i = 0; i < groupind; i++) {
				mutex_exit(grouplocks[i]);
			}
			rdc_many_exit(krdc);
			goto retrylock;
		}
		grouplocks[groupind] = &kmulti->group->lock;
		groupind++;

		rc = rdc_checkforbitmap(kmulti->index, off + bmapsz);
		if (rc == 0) {
			indexvec[indexit] = kmulti->index;
			indexit++;
		} else {
			goto done;
		}
	}
	/* All sets verified and locked: do the actual copy/merge. */
	rc = rdc_installbitmap(op, bmapaddr, bmapsz, off, mode, indexvec,
	    indexit);
done:
	/* Drop every group lock, the many lock, and the busy count. */
	for (i = 0; i < groupind; i++) {
		mutex_exit(grouplocks[i]);
	}
	rdc_many_exit(krdc);
	mutex_enter(&rdc_conf_lock);
	wakeup_busy(krdc);
	mutex_exit(&rdc_conf_lock);
	kmem_free(indexvec, rdc_max_sets * sizeof (int));
	kmem_free(grouplocks, rdc_max_sets * sizeof (kmutex_t *));
	return (rc);
}
5893 5897
5894 5898 static int
5895 5899 rdc_checkforbitmap(int index, nsc_off_t limit)
5896 5900 {
5897 5901 rdc_k_info_t *krdc;
5898 5902 rdc_u_info_t *urdc;
5899 5903
5900 5904 krdc = &rdc_k_info[index];
5901 5905 urdc = &rdc_u_info[index];
5902 5906
5903 5907 if (!IS_ENABLED(urdc)) {
5904 5908 return (EIO);
5905 5909 }
5906 5910 if (!(rdc_get_vflags(urdc) & RDC_LOGGING)) {
5907 5911 return (ENXIO);
5908 5912 }
5909 5913 if (krdc->dcio_bitmap == NULL) {
5910 5914 cmn_err(CE_WARN, "!checkforbitmap: No bitmap for set (%s:%s)",
5911 5915 urdc->secondary.intf, urdc->secondary.file);
5912 5916 return (ENOENT);
5913 5917 }
5914 5918 if (limit > krdc->bitmap_size) {
5915 5919 cmn_err(CE_WARN, "!checkbitmap: Bitmap exceeded, "
5916 5920 "incore %" NSC_SZFMT " user supplied %" NSC_SZFMT
5917 5921 " for set (%s:%s)", krdc->bitmap_size,
5918 5922 limit, urdc->secondary.intf, urdc->secondary.file);
5919 5923 return (ENOSPC);
5920 5924 }
5921 5925 return (0);
5922 5926 }
5923 5927
5924 5928
5925 5929
/*
 * Copy the user supplied bitmap to this set.
 *
 * op       - RDC_BITMAPSET overwrites the in-core bitmap; any other
 *            value ORs the user bits in via rdc_lor().
 * bmapaddr - user address of the bitmap data; copied in in
 *            RDC_MAXDATA sized chunks.
 * bmapsz   - size in bytes of the user bitmap.
 * off      - byte offset into the bitmap at which to apply the data.
 * mode     - ioctl mode bits for ddi_copyin().
 * vec      - indices of the sets to update (validated and group-locked
 *            by the caller, rdc_bitmapset()).
 * veccnt   - number of entries in vec.
 *
 * Returns 0, EFAULT on copyin failure, or the error from
 * rdc_write_bitmap_fba() if flushing to the bitmap volume fails.
 */
static int
rdc_installbitmap(int op, void *bmapaddr, int bmapsz,
    nsc_off_t off, int mode, int *vec, int veccnt)
{
	int rc;
	nsc_off_t sfba;		/* first FBA touched by this chunk */
	nsc_off_t efba;		/* FBA past the last one touched */
	nsc_off_t fba;
	void *ormem = NULL;	/* kernel staging buffer for copyin */
	int len;		/* size of the current chunk */
	int left;		/* user bytes still to process */
	int copied;		/* user bytes processed so far */
	int index;
	rdc_k_info_t *krdc;
	rdc_u_info_t *urdc;

	rc = 0;
	ormem = kmem_alloc(RDC_MAXDATA, KM_SLEEP);
	left = bmapsz;
	copied = 0;
	/* Stream the user bitmap through the staging buffer in chunks. */
	while (left > 0) {
		if (left > RDC_MAXDATA) {
			len = RDC_MAXDATA;
		} else {
			len = left;
		}
		if (ddi_copyin((char *)bmapaddr + copied, ormem,
		    len, mode)) {
			cmn_err(CE_WARN, "!installbitmap: Copyin failed");
			rc = EFAULT;
			goto out;
		}
		sfba = FBA_NUM(off + copied);
		efba = FBA_NUM(off + copied + len);
		/* Apply this chunk to every set in the vector. */
		for (index = 0; index < veccnt; index++) {
			krdc = &rdc_k_info[vec[index]];
			urdc = &rdc_u_info[vec[index]];

			mutex_enter(&krdc->bmapmutex);
			if (op == RDC_BITMAPSET) {
				bcopy(ormem, krdc->dcio_bitmap + off + copied,
				    len);
			} else {
				rdc_lor(ormem,
				    krdc->dcio_bitmap + off + copied, len);
			}
			/*
			 * Maybe this should be just done once outside of
			 * the loop? (Less work, but leaves a window
			 * where the bits_set doesn't match the bitmap).
			 */
			urdc->bits_set = RDC_COUNT_BITMAP(krdc);
			mutex_exit(&krdc->bmapmutex);
			if (krdc->bitmap_write > 0) {
				/* Flush the touched FBAs to the bitmap vol. */
				for (fba = sfba; fba < efba; fba++) {
					if (rc = rdc_write_bitmap_fba(krdc,
					    fba)) {

						cmn_err(CE_WARN,
						    "!installbitmap: "
						    "write_bitmap_fba failed "
						    "on fba number %" NSC_SZFMT
						    " set %s:%s", fba,
						    urdc->secondary.intf,
						    urdc->secondary.file);
						goto out;
					}
				}
			}
		}
		copied += len;
		left -= len;
	}
out:
	kmem_free(ormem, RDC_MAXDATA);
	return (rc);
}
6006 6010
/*
 * _rdc_config
 *
 * Ioctl entry point for the RDC configuration commands.
 *
 * Copies the user's rdc_config structure into kernel memory using the
 * STRUCT_* access macros (which cope with an ILP32 caller on an LP64
 * kernel), pulls in any embedded address buffers and knetconfig strings
 * with ddi_copyin(), builds a flat kernel-side rdc_config (uap) and then
 * dispatches on uap->command.  Additional status is appended to kstatus;
 * the return value is 0 or an errno/RDC error code.
 */
int
_rdc_config(void *arg, int mode, spcs_s_info_t kstatus, int *rvp)
{
	int rc = 0;
	struct netbuf fsvaddr, tsvaddr;
	struct knetconfig *knconf;
	char *p = NULL, *pf = NULL;
	struct rdc_config *uap;
	STRUCT_DECL(knetconfig, knconf_tmp);
	STRUCT_DECL(rdc_config, uparms);
	int enable, disable;
	int cmd;


	/* handles into the copied-in user structure (data-model aware) */
	STRUCT_HANDLE(rdc_set, rs);
	STRUCT_HANDLE(rdc_addr, pa);
	STRUCT_HANDLE(rdc_addr, sa);

	STRUCT_INIT(uparms, mode);

	bzero(STRUCT_BUF(uparms), STRUCT_SIZE(uparms));
	bzero(&fsvaddr, sizeof (fsvaddr));
	bzero(&tsvaddr, sizeof (tsvaddr));

	knconf = NULL;

	if (ddi_copyin(arg, STRUCT_BUF(uparms), STRUCT_SIZE(uparms), mode)) {
		return (EFAULT);
	}

	STRUCT_SET_HANDLE(rs, mode, STRUCT_FGETP(uparms, rdc_set));
	STRUCT_SET_HANDLE(pa, mode, STRUCT_FADDR(rs, primary));
	STRUCT_SET_HANDLE(sa, mode, STRUCT_FADDR(rs, secondary));
	cmd = STRUCT_FGET(uparms, command);
	/*
	 * Only ENABLE/RESUME carry meaningful primary/secondary address
	 * buffers; copy them in now.  All other commands get zero-length
	 * netbufs (see else branch below).
	 */
	if (cmd == RDC_CMD_ENABLE || cmd == RDC_CMD_RESUME) {
		fsvaddr.len = STRUCT_FGET(pa, addr.len);
		fsvaddr.maxlen = STRUCT_FGET(pa, addr.maxlen);
		fsvaddr.buf = kmem_zalloc(fsvaddr.len, KM_SLEEP);

		if (ddi_copyin(STRUCT_FGETP(pa, addr.buf),
		    fsvaddr.buf, fsvaddr.len, mode)) {
			kmem_free(fsvaddr.buf, fsvaddr.len);
#ifdef DEBUG
			cmn_err(CE_WARN, "!copyin failed primary.addr 2");
#endif
			return (EFAULT);
		}


		tsvaddr.len = STRUCT_FGET(sa, addr.len);
		tsvaddr.maxlen = STRUCT_FGET(sa, addr.maxlen);
		tsvaddr.buf = kmem_zalloc(tsvaddr.len, KM_SLEEP);

		if (ddi_copyin(STRUCT_FGETP(sa, addr.buf),
		    tsvaddr.buf, tsvaddr.len, mode)) {
#ifdef DEBUG
			cmn_err(CE_WARN, "!copyin failed secondary addr");
#endif
			kmem_free(fsvaddr.buf, fsvaddr.len);
			kmem_free(tsvaddr.buf, tsvaddr.len);
			return (EFAULT);
		}
	} else {
		/*
		 * Zero-length allocations; the matching kmem_free(buf, 0)
		 * calls at "out:" pair with these.  NOTE(review): this
		 * presumably relies on kmem_zalloc(0)/kmem_free(p, 0)
		 * being benign on this platform — confirm.
		 */
		fsvaddr.len = 0;
		fsvaddr.maxlen = 0;
		fsvaddr.buf = kmem_zalloc(fsvaddr.len, KM_SLEEP);
		tsvaddr.len = 0;
		tsvaddr.maxlen = 0;
		tsvaddr.buf = kmem_zalloc(tsvaddr.len, KM_SLEEP);
	}

	/*
	 * Copy in the optional knetconfig, including its protofmly and
	 * proto strings (each KNC_STRSIZE), replacing the user-space
	 * string pointers with the kernel copies.
	 */
	if (STRUCT_FGETP(uparms, rdc_set->netconfig) != NULL) {
		STRUCT_INIT(knconf_tmp, mode);
		knconf = kmem_zalloc(sizeof (*knconf), KM_SLEEP);
		if (ddi_copyin(STRUCT_FGETP(uparms, rdc_set->netconfig),
		    STRUCT_BUF(knconf_tmp), STRUCT_SIZE(knconf_tmp), mode)) {
#ifdef DEBUG
			cmn_err(CE_WARN, "!copyin failed netconfig");
#endif
			kmem_free(fsvaddr.buf, fsvaddr.len);
			kmem_free(tsvaddr.buf, tsvaddr.len);
			kmem_free(knconf, sizeof (*knconf));
			return (EFAULT);
		}

		knconf->knc_semantics = STRUCT_FGET(knconf_tmp, knc_semantics);
		knconf->knc_protofmly = STRUCT_FGETP(knconf_tmp, knc_protofmly);
		knconf->knc_proto = STRUCT_FGETP(knconf_tmp, knc_proto);

#ifndef _SunOS_5_6
		/* 32-bit caller: expand the compressed dev_t */
		if ((mode & DATAMODEL_LP64) == 0) {
			knconf->knc_rdev =
			    expldev(STRUCT_FGET(knconf_tmp, knc_rdev));
		} else {
#endif
			knconf->knc_rdev = STRUCT_FGET(knconf_tmp, knc_rdev);
#ifndef _SunOS_5_6
		}
#endif

		pf = kmem_alloc(KNC_STRSIZE, KM_SLEEP);
		p = kmem_alloc(KNC_STRSIZE, KM_SLEEP);
		rc = ddi_copyin(knconf->knc_protofmly, pf, KNC_STRSIZE, mode);
		if (rc) {
#ifdef DEBUG
			cmn_err(CE_WARN, "!copyin failed parms protofmly");
#endif
			rc = EFAULT;
			goto out;
		}
		rc = ddi_copyin(knconf->knc_proto, p, KNC_STRSIZE, mode);
		if (rc) {
#ifdef DEBUG
			cmn_err(CE_WARN, "!copyin failed parms proto");
#endif
			rc = EFAULT;
			goto out;
		}
		knconf->knc_protofmly = pf;
		knconf->knc_proto = p;
	} /* !NULL netconfig */

	uap = kmem_alloc(sizeof (*uap), KM_SLEEP);

	/* copy relevant parts of rdc_config to uap field by field */

	(void) strncpy(uap->rdc_set[0].primary.intf, STRUCT_FGETP(pa, intf),
	    MAX_RDC_HOST_SIZE);
	(void) strncpy(uap->rdc_set[0].primary.file, STRUCT_FGETP(pa, file),
	    NSC_MAXPATH);
	(void) strncpy(uap->rdc_set[0].primary.bitmap, STRUCT_FGETP(pa, bitmap),
	    NSC_MAXPATH);
	uap->rdc_set[0].netconfig = knconf;
	uap->rdc_set[0].flags = STRUCT_FGET(uparms, rdc_set->flags);
	uap->rdc_set[0].index = STRUCT_FGET(uparms, rdc_set->index);
	uap->rdc_set[0].setid = STRUCT_FGET(uparms, rdc_set->setid);
	uap->rdc_set[0].sync_pos = STRUCT_FGET(uparms, rdc_set->sync_pos);
	uap->rdc_set[0].volume_size = STRUCT_FGET(uparms, rdc_set->volume_size);
	uap->rdc_set[0].bits_set = STRUCT_FGET(uparms, rdc_set->bits_set);
	uap->rdc_set[0].autosync = STRUCT_FGET(uparms, rdc_set->autosync);
	uap->rdc_set[0].maxqfbas = STRUCT_FGET(uparms, rdc_set->maxqfbas);
	uap->rdc_set[0].maxqitems = STRUCT_FGET(uparms, rdc_set->maxqitems);
	uap->rdc_set[0].asyncthr = STRUCT_FGET(uparms, rdc_set->asyncthr);
	uap->rdc_set[0].syshostid = STRUCT_FGET(uparms, rdc_set->syshostid);
	uap->rdc_set[0].primary.addr = fsvaddr; /* struct copy */
	uap->rdc_set[0].secondary.addr = tsvaddr; /* struct copy */

	(void) strncpy(uap->rdc_set[0].secondary.intf, STRUCT_FGETP(sa, intf),
	    MAX_RDC_HOST_SIZE);
	(void) strncpy(uap->rdc_set[0].secondary.file, STRUCT_FGETP(sa, file),
	    NSC_MAXPATH);
	(void) strncpy(uap->rdc_set[0].secondary.bitmap,
	    STRUCT_FGETP(sa, bitmap), NSC_MAXPATH);

	(void) strncpy(uap->rdc_set[0].direct_file,
	    STRUCT_FGETP(rs, direct_file), NSC_MAXPATH);

	(void) strncpy(uap->rdc_set[0].group_name, STRUCT_FGETP(rs, group_name),
	    NSC_MAXPATH);

	(void) strncpy(uap->rdc_set[0].disk_queue, STRUCT_FGETP(rs, disk_queue),
	    NSC_MAXPATH);

	uap->command = STRUCT_FGET(uparms, command);
	uap->options = STRUCT_FGET(uparms, options);

	enable = (uap->command == RDC_CMD_ENABLE ||
	    uap->command == RDC_CMD_RESUME);
	disable = (uap->command == RDC_CMD_DISABLE ||
	    uap->command == RDC_CMD_SUSPEND);

	/*
	 * Initialise the threadset if it has not already been done.
	 *
	 * This has to be done now, not in rdcattach(), because
	 * rdcattach() can be called before nskernd is running (eg.
	 * boot -r) in which case the nst_init() would fail and hence
	 * the attach would fail.
	 *
	 * Threadset creation is locked by the rdc_conf_lock,
	 * destruction is inherently single threaded as it is done in
	 * _rdc_unload() which must be the last thing performed by
	 * rdcdetach().
	 */

	if (enable && _rdc_ioset == NULL) {
		mutex_enter(&rdc_conf_lock);

		/* re-check under the lock (double-checked create) */
		if (_rdc_ioset == NULL) {
			rc = rdc_thread_configure();
		}

		mutex_exit(&rdc_conf_lock);

		if (rc || _rdc_ioset == NULL) {
			spcs_s_add(kstatus, RDC_ENOTHREADS);
			rc = RDC_ENOTHREADS;
			goto outuap;
		}
	}
	/* dispatch to the per-command handler */
	switch (uap->command) {
	case RDC_CMD_ENABLE:
		rc = rdc_enable(uap, kstatus);
		break;
	case RDC_CMD_DISABLE:
		rc = rdc_disable(uap, kstatus);
		break;
	case RDC_CMD_COPY:
		rc = rdc_sync(uap, kstatus);
		break;
	case RDC_CMD_LOG:
		rc = rdc_log(uap, kstatus);
		break;
	case RDC_CMD_RECONFIG:
		rc = rdc_reconfig(uap, kstatus);
		break;
	case RDC_CMD_RESUME:
		rc = rdc_resume(uap, kstatus);
		break;
	case RDC_CMD_SUSPEND:
		rc = rdc_suspend(uap, kstatus);
		break;
	case RDC_CMD_TUNABLE:
		rc = rdc_tunable(uap, kstatus);
		break;
	case RDC_CMD_WAIT:
		rc = rdc_wait(uap, kstatus);
		break;
	case RDC_CMD_HEALTH:
		rc = rdc_health(uap, kstatus, rvp);
		break;
	case RDC_CMD_STATUS:
		rc = rdc_status(arg, mode, uap, kstatus);
		break;
	case RDC_CMD_RESET:
		rc = rdc_reset(uap, kstatus);
		break;
	case RDC_CMD_ADDQ:
		rc = rdc_add_diskq(uap, kstatus);
		break;
	case RDC_CMD_REMQ:
		/* REMQ falls through to KILLQ only on success */
		if ((rc = rdc_rem_diskq(uap, kstatus)) != 0)
			break;
		/* FALLTHRU */
	case RDC_CMD_KILLQ:
		rc = rdc_kill_diskq(uap, kstatus);
		break;
	case RDC_CMD_INITQ:
		rc = rdc_init_diskq(uap, kstatus);
		break;

	default:
		rc = EINVAL;
		break;
	}

	/*
	 * Tune the threadset size after a successful rdc_set addition
	 * or removal.
	 */
	if ((enable || disable) && rc == 0) {
		mutex_enter(&rdc_conf_lock);
		rdc_thread_tune(enable ? 2 : -2);
		mutex_exit(&rdc_conf_lock);
	}
outuap:
	kmem_free(uap, sizeof (*uap));
out:
	/* common cleanup: address buffers, knetconfig strings, knetconfig */
	kmem_free(fsvaddr.buf, fsvaddr.len);
	kmem_free(tsvaddr.buf, tsvaddr.len);
	if (pf)
		kmem_free(pf, KNC_STRSIZE);
	if (p)
		kmem_free(p, KNC_STRSIZE);
	if (knconf)
		kmem_free(knconf, sizeof (*knconf));
	return (rc);
}
6288 6292
6289 6293
6290 6294 /*
6291 6295 * krdc->group->lock held on entry to halt_sync()
6292 6296 */
6293 6297 static void
6294 6298 halt_sync(rdc_k_info_t *krdc)
6295 6299 {
6296 6300 rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
6297 6301
6298 6302 ASSERT(MUTEX_HELD(&krdc->group->lock));
6299 6303 ASSERT(IS_ENABLED(urdc));
6300 6304
6301 6305 /*
6302 6306 * If a sync is in progress, halt it
6303 6307 */
6304 6308 if ((rdc_get_vflags(urdc) & RDC_PRIMARY) &&
6305 6309 (krdc->aux_state & RDC_AUXSYNCIP)) {
6306 6310 krdc->disk_status = 1;
6307 6311
6308 6312 while (krdc->disk_status == 1) {
6309 6313 if (cv_wait_sig(&krdc->haltcv, &krdc->group->lock) == 0)
6310 6314 break;
6311 6315 }
6312 6316 }
6313 6317 }
6314 6318
6315 6319 /*
6316 6320 * return size in blocks
6317 6321 */
6318 6322 uint64_t
6319 6323 mirror_getsize(int index)
6320 6324 {
6321 6325 rdc_k_info_t *krdc;
6322 6326 rdc_u_info_t *urdc;
6323 6327 int rc, rs;
6324 6328 nsc_size_t size;
6325 6329
6326 6330 krdc = &rdc_k_info[index];
6327 6331 urdc = &rdc_u_info[index];
6328 6332
6329 6333 rc = _rdc_rsrv_devs(krdc, RDC_RAW, RDC_INTERNAL);
6330 6334 rs = nsc_partsize(RDC_U_FD(krdc), &size);
6331 6335 urdc->volume_size = size;
6332 6336 if (rc == 0)
6333 6337 _rdc_rlse_devs(krdc, RDC_RAW);
6334 6338
6335 6339 return (rs == 0 ? urdc->volume_size : 0);
6336 6340 }
6337 6341
6338 6342
6339 6343 /*
6340 6344 * Create a new dataset for this transfer, and add it to the list
6341 6345 * of datasets via the net_dataset pointer in the krdc.
6342 6346 */
6343 6347 rdc_net_dataset_t *
6344 6348 rdc_net_add_set(int index)
6345 6349 {
6346 6350 rdc_k_info_t *krdc;
6347 6351 rdc_u_info_t *urdc;
6348 6352 rdc_net_dataset_t *dset;
6349 6353
6350 6354 if (index >= rdc_max_sets) {
6351 6355 cmn_err(CE_NOTE, "!rdc_net_add_set: bad index %d", index);
6352 6356 return (NULL);
6353 6357 }
6354 6358 krdc = &rdc_k_info[index];
6355 6359 urdc = &rdc_u_info[index];
6356 6360
6357 6361 dset = kmem_alloc(sizeof (*dset), KM_NOSLEEP);
6358 6362 if (dset == NULL) {
6359 6363 cmn_err(CE_NOTE, "!rdc_net_add_set: kmem_alloc failed");
6360 6364 return (NULL);
6361 6365 }
6362 6366 RDC_DSMEMUSE(sizeof (*dset));
6363 6367 dset->inuse = 1;
6364 6368 dset->nitems = 0;
6365 6369 dset->delpend = 0;
6366 6370 dset->head = NULL;
6367 6371 dset->tail = NULL;
6368 6372 mutex_enter(&krdc->dc_sleep);
6369 6373
6370 6374 if (!IS_ENABLED(urdc)) {
6371 6375 /* raced with a disable command */
6372 6376 kmem_free(dset, sizeof (*dset));
6373 6377 RDC_DSMEMUSE(-sizeof (*dset));
6374 6378 mutex_exit(&krdc->dc_sleep);
6375 6379 return (NULL);
6376 6380 }
6377 6381 /*
6378 6382 * Shared the id generator, (and the locks).
6379 6383 */
6380 6384 mutex_enter(&rdc_net_hnd_id_lock);
6381 6385 if (++rdc_net_hnd_id == 0)
6382 6386 rdc_net_hnd_id = 1;
6383 6387 dset->id = rdc_net_hnd_id;
6384 6388 mutex_exit(&rdc_net_hnd_id_lock);
6385 6389
6386 6390 #ifdef DEBUG
6387 6391 if (krdc->net_dataset != NULL) {
6388 6392 rdc_net_dataset_t *dset2;
6389 6393 for (dset2 = krdc->net_dataset; dset2; dset2 = dset2->next) {
6390 6394 if (dset2->id == dset->id) {
6391 6395 cmn_err(CE_PANIC,
6392 6396 "rdc_net_add_set duplicate id %p:%d %p:%d",
6393 6397 (void *)dset, dset->id,
6394 6398 (void *)dset2, dset2->id);
6395 6399 }
6396 6400 }
6397 6401 }
6398 6402 #endif
6399 6403 dset->next = krdc->net_dataset;
6400 6404 krdc->net_dataset = dset;
6401 6405 mutex_exit(&krdc->dc_sleep);
6402 6406
6403 6407 return (dset);
6404 6408 }
6405 6409
6406 6410 /*
6407 6411 * fetch the previously added dataset.
6408 6412 */
6409 6413 rdc_net_dataset_t *
6410 6414 rdc_net_get_set(int index, int id)
6411 6415 {
6412 6416 rdc_k_info_t *krdc;
6413 6417 rdc_net_dataset_t *dset;
6414 6418
6415 6419 if (index >= rdc_max_sets) {
6416 6420 cmn_err(CE_NOTE, "!rdc_net_get_set: bad index %d", index);
6417 6421 return (NULL);
6418 6422 }
6419 6423 krdc = &rdc_k_info[index];
6420 6424
6421 6425 mutex_enter(&krdc->dc_sleep);
6422 6426
6423 6427 dset = krdc->net_dataset;
6424 6428 while (dset && (dset->id != id))
6425 6429 dset = dset->next;
6426 6430
6427 6431 if (dset) {
6428 6432 dset->inuse++;
6429 6433 }
6430 6434
6431 6435 mutex_exit(&krdc->dc_sleep);
6432 6436 return (dset);
6433 6437 }
6434 6438
6435 6439 /*
6436 6440 * Decrement the inuse counter. Data may be freed.
6437 6441 */
6438 6442 void
6439 6443 rdc_net_put_set(int index, rdc_net_dataset_t *dset)
6440 6444 {
6441 6445 rdc_k_info_t *krdc;
6442 6446
6443 6447 if (index >= rdc_max_sets) {
6444 6448 cmn_err(CE_NOTE, "!rdc_net_put_set: bad index %d", index);
6445 6449 return;
6446 6450 }
6447 6451 krdc = &rdc_k_info[index];
6448 6452
6449 6453 mutex_enter(&krdc->dc_sleep);
6450 6454 dset->inuse--;
6451 6455 ASSERT(dset->inuse >= 0);
6452 6456 if ((dset->inuse == 0) && (dset->delpend)) {
6453 6457 rdc_net_free_set(krdc, dset);
6454 6458 }
6455 6459 mutex_exit(&krdc->dc_sleep);
6456 6460 }
6457 6461
6458 6462 /*
6459 6463 * Mark that we are finished with this set. Decrement inuse
6460 6464 * counter, mark as needing deletion, and
6461 6465 * remove from linked list.
6462 6466 */
6463 6467 void
6464 6468 rdc_net_del_set(int index, rdc_net_dataset_t *dset)
6465 6469 {
6466 6470 rdc_k_info_t *krdc;
6467 6471
6468 6472 if (index >= rdc_max_sets) {
6469 6473 cmn_err(CE_NOTE, "!rdc_net_del_set: bad index %d", index);
6470 6474 return;
6471 6475 }
6472 6476 krdc = &rdc_k_info[index];
6473 6477
6474 6478 mutex_enter(&krdc->dc_sleep);
6475 6479 dset->inuse--;
6476 6480 ASSERT(dset->inuse >= 0);
6477 6481 dset->delpend = 1;
6478 6482 if (dset->inuse == 0) {
6479 6483 rdc_net_free_set(krdc, dset);
6480 6484 }
6481 6485 mutex_exit(&krdc->dc_sleep);
6482 6486 }
6483 6487
6484 6488 /*
6485 6489 * free all the memory associated with this set, and remove from
6486 6490 * list.
6487 6491 * Enters and exits with dc_sleep lock held.
6488 6492 */
6489 6493
6490 6494 void
6491 6495 rdc_net_free_set(rdc_k_info_t *krdc, rdc_net_dataset_t *dset)
6492 6496 {
6493 6497 rdc_net_dataset_t **dsetp;
6494 6498 #ifdef DEBUG
6495 6499 int found = 0;
6496 6500 #endif
6497 6501
6498 6502 ASSERT(MUTEX_HELD(&krdc->dc_sleep));
6499 6503 ASSERT(dset);
6500 6504 for (dsetp = &krdc->net_dataset; *dsetp; dsetp = &((*dsetp)->next)) {
6501 6505 if (*dsetp == dset) {
6502 6506 *dsetp = dset->next;
6503 6507 #ifdef DEBUG
6504 6508 found = 1;
6505 6509 #endif
6506 6510 break;
6507 6511 }
6508 6512 }
6509 6513
6510 6514 #ifdef DEBUG
6511 6515 if (found == 0) {
6512 6516 cmn_err(CE_WARN, "!rdc_net_free_set: Unable to find "
6513 6517 "dataset 0x%p in krdc list", (void *)dset);
6514 6518 }
6515 6519 #endif
6516 6520 /*
6517 6521 * unlinked from list. Free all the data
6518 6522 */
6519 6523 rdc_ditemsfree(dset);
6520 6524 /*
6521 6525 * free my core.
6522 6526 */
6523 6527 kmem_free(dset, sizeof (*dset));
6524 6528 RDC_DSMEMUSE(-sizeof (*dset));
6525 6529 }
6526 6530
6527 6531
6528 6532 /*
6529 6533 * Free all the dataitems and the data it points to.
6530 6534 */
6531 6535 static void
6532 6536 rdc_ditemsfree(rdc_net_dataset_t *dset)
6533 6537 {
6534 6538 rdc_net_dataitem_t *ditem;
6535 6539 rdc_net_dataitem_t *nitem;
6536 6540
6537 6541 ditem = dset->head;
6538 6542
6539 6543 while (ditem) {
6540 6544 nitem = ditem->next;
6541 6545 kmem_free(ditem->dptr, ditem->mlen);
6542 6546 RDC_DSMEMUSE(-ditem->mlen);
6543 6547 dset->nitems--;
6544 6548 kmem_free(ditem, sizeof (*ditem));
6545 6549 RDC_DSMEMUSE(-sizeof (*ditem));
6546 6550 ditem = nitem;
6547 6551 }
6548 6552 ASSERT(dset->nitems == 0);
6549 6553 }
6550 6554
6551 6555 /*
6552 6556 * allocate and initialize a rdc_aio_t
6553 6557 */
6554 6558 rdc_aio_t *
6555 6559 rdc_aio_tbuf_get(void *n, void *h, int pos, int len, int flag, int index, int s)
6556 6560 {
6557 6561 rdc_aio_t *p;
6558 6562
6559 6563 p = kmem_zalloc(sizeof (rdc_aio_t), KM_NOSLEEP);
6560 6564 if (p == NULL) {
6561 6565 #ifdef DEBUG
6562 6566 cmn_err(CE_NOTE, "!_rdcaiotbufget: kmem_alloc failed bp aio");
6563 6567 #endif
6564 6568 return (NULL);
6565 6569 } else {
6566 6570 p->next = n; /* overload */
6567 6571 p->handle = h;
6568 6572 p->pos = pos;
6569 6573 p->qpos = -1;
6570 6574 p->len = len;
6571 6575 p->flag = flag;
6572 6576 p->index = index;
6573 6577 p->iostatus = s; /* overload */
6574 6578 /* set up seq later, in case thr create fails */
6575 6579 }
6576 6580 return (p);
6577 6581 }
6578 6582
6579 6583 /*
6580 6584 * rdc_aio_buf_get
6581 6585 * get an aio_buf
6582 6586 */
6583 6587 aio_buf_t *
6584 6588 rdc_aio_buf_get(rdc_buf_t *h, int index)
6585 6589 {
6586 6590 aio_buf_t *p;
6587 6591
6588 6592 if (index >= rdc_max_sets) {
6589 6593 cmn_err(CE_NOTE, "!rdc: rdc_aio_buf_get bad index %x", index);
6590 6594 return (NULL);
6591 6595 }
6592 6596
6593 6597 mutex_enter(&h->aio_lock);
6594 6598
6595 6599 p = h->rdc_anon;
6596 6600 while (p && (p->kindex != index))
6597 6601 p = p->next;
6598 6602
6599 6603 mutex_exit(&h->aio_lock);
6600 6604 return (p);
6601 6605 }
6602 6606
6603 6607 /*
6604 6608 * rdc_aio_buf_del
6605 6609 * delete a aio_buf
6606 6610 */
6607 6611 void
6608 6612 rdc_aio_buf_del(rdc_buf_t *h, rdc_k_info_t *krdc)
6609 6613 {
6610 6614 aio_buf_t *p, **pp;
6611 6615
6612 6616 mutex_enter(&h->aio_lock);
6613 6617
6614 6618 p = NULL;
6615 6619 for (pp = &h->rdc_anon; *pp; pp = &((*pp)->next)) {
6616 6620 if ((*pp)->kindex == krdc->index) {
6617 6621 p = *pp;
6618 6622 break;
6619 6623 }
6620 6624 }
6621 6625
6622 6626 if (p) {
6623 6627 *pp = p->next;
6624 6628 kmem_free(p, sizeof (*p));
6625 6629 }
6626 6630 mutex_exit(&h->aio_lock);
6627 6631 }
6628 6632
6629 6633 /*
6630 6634 * rdc_aio_buf_add
6631 6635 * Add a aio_buf.
6632 6636 */
6633 6637 aio_buf_t *
6634 6638 rdc_aio_buf_add(int index, rdc_buf_t *h)
6635 6639 {
6636 6640 aio_buf_t *p;
6637 6641
6638 6642 p = kmem_zalloc(sizeof (*p), KM_NOSLEEP);
6639 6643 if (p == NULL) {
6640 6644 cmn_err(CE_NOTE, "!rdc_aio_buf_add: kmem_alloc failed");
6641 6645 return (NULL);
6642 6646 }
6643 6647
6644 6648 p->rdc_abufp = NULL;
6645 6649 p->kindex = index;
6646 6650
6647 6651 mutex_enter(&h->aio_lock);
6648 6652 p->next = h->rdc_anon;
6649 6653 h->rdc_anon = p;
6650 6654 mutex_exit(&h->aio_lock);
6651 6655 return (p);
6652 6656 }
6653 6657
6654 6658 /*
6655 6659 * kmemalloc a new group structure and setup the common
6656 6660 * fields.
6657 6661 */
6658 6662 static rdc_group_t *
6659 6663 rdc_newgroup()
6660 6664 {
6661 6665 rdc_group_t *group;
6662 6666
6663 6667 group = kmem_zalloc(sizeof (rdc_group_t), KM_SLEEP);
6664 6668 group->diskq.lastio = kmem_zalloc(sizeof (rdc_aio_t), KM_SLEEP);
6665 6669 group->count = 1;
6666 6670 group->seq = RDC_NEWSEQ;
6667 6671 group->seqack = RDC_NEWSEQ;
6668 6672 mutex_init(&group->lock, NULL, MUTEX_DRIVER, NULL);
6669 6673 mutex_init(&group->ra_queue.net_qlock, NULL, MUTEX_DRIVER, NULL);
6670 6674 mutex_init(&group->diskqmutex, NULL, MUTEX_DRIVER, NULL);
6671 6675 mutex_init(&group->diskq.disk_qlock, NULL, MUTEX_DRIVER, NULL);
6672 6676 mutex_init(&group->diskq.head_lock, NULL, MUTEX_DRIVER, NULL);
6673 6677 mutex_init(&group->addthrnumlk, NULL, MUTEX_DRIVER, NULL);
6674 6678 cv_init(&group->unregistercv, NULL, CV_DRIVER, NULL);
6675 6679 cv_init(&group->asyncqcv, NULL, CV_DRIVER, NULL);
6676 6680 cv_init(&group->diskq.busycv, NULL, CV_DRIVER, NULL);
6677 6681 cv_init(&group->diskq.qfullcv, NULL, CV_DRIVER, NULL);
6678 6682 cv_init(&group->ra_queue.qfcv, NULL, CV_DRIVER, NULL);
6679 6683 group->ra_queue.qfill_sleeping = RDC_QFILL_DEAD;
6680 6684 group->diskq.busycnt = 0;
6681 6685 ASSERT(group->synccount == 0); /* group was kmem_zalloc'ed */
6682 6686
6683 6687 /*
6684 6688 * add default number of threads to the flusher thread set, plus
6685 6689 * one extra thread for the disk queue flusher
6686 6690 */
6687 6691 if (nst_add_thread(_rdc_flset, 3) != 3)
6688 6692 cmn_err(CE_NOTE, "!rdc_newgroup: nst_add_thread failed");
6689 6693
6690 6694 return (group);
6691 6695 }
6692 6696
6693 6697 void
6694 6698 rdc_delgroup(rdc_group_t *group)
6695 6699 {
6696 6700
6697 6701 ASSERT(group->asyncstall == 0);
6698 6702 ASSERT(group->rdc_thrnum == 0);
6699 6703 ASSERT(group->count == 0);
6700 6704 ASSERT(MUTEX_HELD(&rdc_many_lock));
6701 6705
6702 6706 mutex_enter(&group->ra_queue.net_qlock);
6703 6707 rdc_sleepqdiscard(group);
6704 6708 mutex_exit(&group->ra_queue.net_qlock);
6705 6709
6706 6710 /* try to remove flusher threads that this group added to _rdc_flset */
6707 6711 if (nst_del_thread(_rdc_flset, group->rdc_addthrnum + 3) !=
6708 6712 group->rdc_addthrnum + 3)
6709 6713 cmn_err(CE_NOTE, "!rdc_delgroup: nst_del_thread failed");
6710 6714
6711 6715 mutex_destroy(&group->lock);
6712 6716 mutex_destroy(&group->ra_queue.net_qlock);
6713 6717 mutex_destroy(&group->diskqmutex);
6714 6718 mutex_destroy(&group->diskq.disk_qlock);
6715 6719 mutex_destroy(&group->diskq.head_lock);
6716 6720 mutex_destroy(&group->addthrnumlk);
6717 6721 cv_destroy(&group->unregistercv);
6718 6722 cv_destroy(&group->asyncqcv);
6719 6723 cv_destroy(&group->diskq.busycv);
6720 6724 cv_destroy(&group->diskq.qfullcv);
6721 6725 cv_destroy(&group->ra_queue.qfcv);
6722 6726 kmem_free(group->diskq.lastio, sizeof (rdc_aio_t));
6723 6727 kmem_free(group, sizeof (rdc_group_t));
6724 6728 }
↓ open down ↓ |
1068 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX