8074 need to add FMA event for SSD wearout
--- old/usr/src/cmd/fm/modules/common/zfs-retire/zfs_retire.c
+++ new/usr/src/cmd/fm/modules/common/zfs-retire/zfs_retire.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 /*
26 26 * The ZFS retire agent is responsible for managing hot spares across all pools.
27 27 * When we see a device fault or a device removal, we try to open the associated
28 28 * pool and look for any hot spares. We iterate over any available hot spares
29 29 * and attempt a 'zpool replace' for each one.
30 30 *
31 31 * For vdevs diagnosed as faulty, the agent is also responsible for proactively
32 32 * marking the vdev FAULTY (for I/O errors) or DEGRADED (for checksum errors).
33 33 */
34 34
35 35 #include <fm/fmd_api.h>
36 36 #include <sys/fs/zfs.h>
37 37 #include <sys/fm/protocol.h>
38 38 #include <sys/fm/fs/zfs.h>
39 39 #include <libzfs.h>
40 40 #include <fm/libtopo.h>
41 41 #include <string.h>
42 42
43 43 typedef struct zfs_retire_repaired {
44 44 struct zfs_retire_repaired *zrr_next;
45 45 uint64_t zrr_pool;
46 46 uint64_t zrr_vdev;
47 47 } zfs_retire_repaired_t;
48 48
49 49 typedef struct zfs_retire_data {
50 50 libzfs_handle_t *zrd_hdl;
51 51 zfs_retire_repaired_t *zrd_repaired;
52 52 } zfs_retire_data_t;
53 53
54 54 static void
55 55 zfs_retire_clear_data(fmd_hdl_t *hdl, zfs_retire_data_t *zdp)
56 56 {
57 57 zfs_retire_repaired_t *zrp;
58 58
59 59 while ((zrp = zdp->zrd_repaired) != NULL) {
60 60 zdp->zrd_repaired = zrp->zrr_next;
61 61 fmd_hdl_free(hdl, zrp, sizeof (zfs_retire_repaired_t));
62 62 }
63 63 }
64 64
65 65 /*
66 66 * Find a pool with a matching GUID.
67 67 */
68 68 typedef struct find_cbdata {
69 69 uint64_t cb_guid;
70 70 const char *cb_fru;
71 71 zpool_handle_t *cb_zhp;
72 72 nvlist_t *cb_vdev;
73 73 } find_cbdata_t;
74 74
75 75 static int
76 76 find_pool(zpool_handle_t *zhp, void *data)
77 77 {
78 78 find_cbdata_t *cbp = data;
79 79
80 80 if (cbp->cb_guid ==
81 81 zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL)) {
82 82 cbp->cb_zhp = zhp;
83 83 return (1);
84 84 }
85 85
86 86 zpool_close(zhp);
87 87 return (0);
88 88 }
89 89
90 90 /*
91 91 * Find a vdev within a tree with a matching GUID.
92 92 */
93 93 static nvlist_t *
94 94 find_vdev(libzfs_handle_t *zhdl, nvlist_t *nv, const char *search_fru,
95 95 uint64_t search_guid)
96 96 {
97 97 uint64_t guid;
98 98 nvlist_t **child;
99 99 uint_t c, children;
100 100 nvlist_t *ret;
101 101 char *fru;
102 102
103 103 if (search_fru != NULL) {
104 104 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &fru) == 0 &&
105 105 libzfs_fru_compare(zhdl, fru, search_fru))
106 106 return (nv);
107 107 } else {
108 108 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
109 109 guid == search_guid)
110 110 return (nv);
111 111 }
112 112
113 113 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
114 114 &child, &children) != 0)
115 115 return (NULL);
116 116
117 117 for (c = 0; c < children; c++) {
118 118 if ((ret = find_vdev(zhdl, child[c], search_fru,
119 119 search_guid)) != NULL)
120 120 return (ret);
121 121 }
122 122
123 123 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
124 124 &child, &children) != 0)
125 125 return (NULL);
126 126
127 127 for (c = 0; c < children; c++) {
128 128 if ((ret = find_vdev(zhdl, child[c], search_fru,
129 129 search_guid)) != NULL)
130 130 return (ret);
131 131 }
132 132
133 133 return (NULL);
134 134 }
135 135
136 136 /*
137 137 * Given a (pool, vdev) GUID pair, find the matching pool and vdev.
138 138 */
139 139 static zpool_handle_t *
140 140 find_by_guid(libzfs_handle_t *zhdl, uint64_t pool_guid, uint64_t vdev_guid,
141 141 nvlist_t **vdevp)
142 142 {
143 143 find_cbdata_t cb;
144 144 zpool_handle_t *zhp;
145 145 nvlist_t *config, *nvroot;
146 146
147 147 /*
148 148 * Find the corresponding pool and make sure the vdev still exists.
149 149 */
150 150 cb.cb_guid = pool_guid;
151 151 if (zpool_iter(zhdl, find_pool, &cb) != 1)
152 152 return (NULL);
153 153
154 154 zhp = cb.cb_zhp;
155 155 config = zpool_get_config(zhp, NULL);
156 156 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
157 157 &nvroot) != 0) {
158 158 zpool_close(zhp);
159 159 return (NULL);
160 160 }
161 161
162 162 if (vdev_guid != 0) {
163 163 if ((*vdevp = find_vdev(zhdl, nvroot, NULL,
164 164 vdev_guid)) == NULL) {
165 165 zpool_close(zhp);
166 166 return (NULL);
167 167 }
168 168 }
169 169
170 170 return (zhp);
171 171 }
172 172
173 173 static int
174 174 search_pool(zpool_handle_t *zhp, void *data)
175 175 {
176 176 find_cbdata_t *cbp = data;
177 177 nvlist_t *config;
178 178 nvlist_t *nvroot;
179 179
180 180 config = zpool_get_config(zhp, NULL);
181 181 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
182 182 &nvroot) != 0) {
183 183 zpool_close(zhp);
184 184 return (0);
185 185 }
186 186
187 187 if ((cbp->cb_vdev = find_vdev(zpool_get_handle(zhp), nvroot,
188 188 cbp->cb_fru, 0)) != NULL) {
189 189 cbp->cb_zhp = zhp;
190 190 return (1);
191 191 }
192 192
193 193 zpool_close(zhp);
194 194 return (0);
195 195 }
196 196
197 197 /*
198 198 * Given a FRU FMRI, find the matching pool and vdev.
199 199 */
200 200 static zpool_handle_t *
201 201 find_by_fru(libzfs_handle_t *zhdl, const char *fru, nvlist_t **vdevp)
202 202 {
203 203 find_cbdata_t cb;
204 204
205 205 cb.cb_fru = fru;
206 206 cb.cb_zhp = NULL;
207 207 if (zpool_iter(zhdl, search_pool, &cb) != 1)
208 208 return (NULL);
209 209
210 210 *vdevp = cb.cb_vdev;
211 211 return (cb.cb_zhp);
212 212 }
213 213
214 214 /*
215 215 * Given a vdev, attempt to replace it with every known spare until one
216 216 * succeeds.
217 217 */
218 218 static void
219 219 replace_with_spare(fmd_hdl_t *hdl, zpool_handle_t *zhp, nvlist_t *vdev)
220 220 {
221 221 nvlist_t *config, *nvroot, *replacement;
222 222 nvlist_t **spares;
223 223 uint_t s, nspares;
224 224 char *dev_name;
225 225
226 226 config = zpool_get_config(zhp, NULL);
227 227 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
228 228 &nvroot) != 0)
229 229 return;
230 230
231 231 /*
232 232 * Find out if there are any hot spares available in the pool.
233 233 */
234 234 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
235 235 &spares, &nspares) != 0)
236 236 return;
237 237
238 238 replacement = fmd_nvl_alloc(hdl, FMD_SLEEP);
239 239
240 240 (void) nvlist_add_string(replacement, ZPOOL_CONFIG_TYPE,
241 241 VDEV_TYPE_ROOT);
242 242
243 243 dev_name = zpool_vdev_name(NULL, zhp, vdev, B_FALSE);
244 244
245 245 /*
246 246 * Try to replace each spare, ending when we successfully
247 247 * replace it.
248 248 */
249 249 for (s = 0; s < nspares; s++) {
250 250 char *spare_name;
251 251
252 252 if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
253 253 &spare_name) != 0)
254 254 continue;
255 255
256 256 (void) nvlist_add_nvlist_array(replacement,
257 257 ZPOOL_CONFIG_CHILDREN, &spares[s], 1);
258 258
259 259 if (zpool_vdev_attach(zhp, dev_name, spare_name,
260 260 replacement, B_TRUE) == 0)
261 261 break;
262 262 }
263 263
264 264 free(dev_name);
265 265 nvlist_free(replacement);
266 266 }
267 267
268 268 /*
269 269 * Repair this vdev if we had diagnosed a 'fault.fs.zfs.device' and
270 270 * ASRU is now usable. ZFS has found the device to be present and
271 271 * functioning.
272 272 */
273 273 /*ARGSUSED*/
274 274 void
275 275 zfs_vdev_repair(fmd_hdl_t *hdl, nvlist_t *nvl)
276 276 {
277 277 zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
278 278 zfs_retire_repaired_t *zrp;
279 279 uint64_t pool_guid, vdev_guid;
280 280 nvlist_t *asru;
281 281
282 282 if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
283 283 &pool_guid) != 0 || nvlist_lookup_uint64(nvl,
284 284 FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, &vdev_guid) != 0)
285 285 return;
286 286
287 287 /*
288 288 * Before checking the state of the ASRU, go through and see if we've
289 289 * already made an attempt to repair this ASRU. This list is cleared
290 290 * whenever we receive any kind of list event, and is designed to
291 291 * prevent us from generating a feedback loop when we attempt repairs
292 292 * against a faulted pool. The problem is that checking the unusable
293 293 * state of the ASRU can involve opening the pool, which can post
294 294 * statechange events but otherwise leave the pool in the faulted
295 295 * state. This list allows us to detect when a statechange event is
296 296 * due to our own request.
297 297 */
298 298 for (zrp = zdp->zrd_repaired; zrp != NULL; zrp = zrp->zrr_next) {
299 299 if (zrp->zrr_pool == pool_guid &&
300 300 zrp->zrr_vdev == vdev_guid)
301 301 return;
302 302 }
303 303
304 304 asru = fmd_nvl_alloc(hdl, FMD_SLEEP);
305 305
306 306 (void) nvlist_add_uint8(asru, FM_VERSION, ZFS_SCHEME_VERSION0);
307 307 (void) nvlist_add_string(asru, FM_FMRI_SCHEME, FM_FMRI_SCHEME_ZFS);
308 308 (void) nvlist_add_uint64(asru, FM_FMRI_ZFS_POOL, pool_guid);
309 309 (void) nvlist_add_uint64(asru, FM_FMRI_ZFS_VDEV, vdev_guid);
310 310
311 311 /*
312 312 * We explicitly check for the unusable state here to make sure we
313 313 * aren't responding to a transient state change. As part of opening a
314 314 * vdev, it's possible to see the 'statechange' event, only to be
315 315 * followed by a vdev failure later. If we don't check the current
316 316 * state of the vdev (or pool) before marking it repaired, then we risk
317 317 * generating spurious repair events followed immediately by the same
318 318 * diagnosis.
319 319 *
320 320 * This assumes that the ZFS scheme code associated unusable (i.e.
321 321 * isolated) with its own definition of faulty state. In the case of a
322 322 * DEGRADED leaf vdev (due to checksum errors), this is not the case.
323 323 * This works, however, because the transient state change is not
324 324 * posted in this case. This could be made more explicit by not
325 325 * relying on the scheme's unusable callback and instead directly
326 326 * checking the vdev state, where we could correctly account for
327 327 * DEGRADED state.
328 328 */
329 329 if (!fmd_nvl_fmri_unusable(hdl, asru) && fmd_nvl_fmri_has_fault(hdl,
330 330 asru, FMD_HAS_FAULT_ASRU, NULL)) {
331 331 topo_hdl_t *thp;
332 332 char *fmri = NULL;
333 333 int err;
334 334
335 335 thp = fmd_hdl_topo_hold(hdl, TOPO_VERSION);
336 336 if (topo_fmri_nvl2str(thp, asru, &fmri, &err) == 0)
337 337 (void) fmd_repair_asru(hdl, fmri);
338 338 fmd_hdl_topo_rele(hdl, thp);
339 339
340 340 topo_hdl_strfree(thp, fmri);
341 341 }
342 342 nvlist_free(asru);
343 343 zrp = fmd_hdl_alloc(hdl, sizeof (zfs_retire_repaired_t), FMD_SLEEP);
344 344 zrp->zrr_next = zdp->zrd_repaired;
345 345 zrp->zrr_pool = pool_guid;
346 346 zrp->zrr_vdev = vdev_guid;
347 347 zdp->zrd_repaired = zrp;
348 348 }
349 349
350 350 /*ARGSUSED*/
351 351 static void
352 352 zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
353 353 const char *class)
354 354 {
355 355 uint64_t pool_guid, vdev_guid;
356 356 zpool_handle_t *zhp;
357 357 nvlist_t *resource, *fault, *fru;
358 358 nvlist_t **faults;
359 359 uint_t f, nfaults;
360 360 zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
361 361 libzfs_handle_t *zhdl = zdp->zrd_hdl;
362 362 boolean_t fault_device, degrade_device;
363 363 boolean_t is_repair;
364 364 char *scheme, *fmri;
365 365 nvlist_t *vdev;
366 366 char *uuid;
367 367 int repair_done = 0;
368 368 boolean_t retire;
369 369 boolean_t is_disk;
370 370 vdev_aux_t aux;
371 371 topo_hdl_t *thp;
372 372 int err;
373 373
374 374 /*
375 375 * If this is a resource notifying us of device removal, then simply
376 376 * check for an available spare and continue.
377 377 */
378 378 if (strcmp(class, "resource.fs.zfs.removed") == 0) {
379 379 if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
380 380 &pool_guid) != 0 ||
381 381 nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,
382 382 &vdev_guid) != 0)
383 383 return;
384 384
385 385 if ((zhp = find_by_guid(zhdl, pool_guid, vdev_guid,
386 386 &vdev)) == NULL)
387 387 return;
388 388
389 389 if (fmd_prop_get_int32(hdl, "spare_on_remove"))
390 390 replace_with_spare(hdl, zhp, vdev);
391 391 zpool_close(zhp);
392 392 return;
393 393 }
394 394
395 395 if (strcmp(class, FM_LIST_RESOLVED_CLASS) == 0)
396 396 return;
397 397
398 398 if (strcmp(class, "resource.fs.zfs.statechange") == 0 ||
399 399 strcmp(class,
400 400 "resource.sysevent.EC_zfs.ESC_ZFS_vdev_remove") == 0) {
401 401 zfs_vdev_repair(hdl, nvl);
402 402 return;
403 403 }
404 404
405 405 zfs_retire_clear_data(hdl, zdp);
406 406
407 407 if (strcmp(class, FM_LIST_REPAIRED_CLASS) == 0)
408 408 is_repair = B_TRUE;
409 409 else
410 410 is_repair = B_FALSE;
411 411
412 412 /*
413 413 * We subscribe to zfs faults as well as all repair events.
414 414 */
415 415 if (nvlist_lookup_nvlist_array(nvl, FM_SUSPECT_FAULT_LIST,
416 416 &faults, &nfaults) != 0)
417 417 return;
418 418
419 419 for (f = 0; f < nfaults; f++) {
420 420 fault = faults[f];
421 421
422 422 fault_device = B_FALSE;
423 423 degrade_device = B_FALSE;
424 424 is_disk = B_FALSE;
425 425
426 426 if (nvlist_lookup_boolean_value(fault, FM_SUSPECT_RETIRE,
427 427 &retire) == 0 && retire == 0)
428 428 continue;
429 429
430 + if (fmd_nvl_class_match(hdl, fault,
431 + "fault.io.disk.ssm-wearout") &&
432 + fmd_prop_get_int32(hdl, "ssm_wearout_skip_retire") ==
433 + FMD_B_TRUE) {
434 + fmd_hdl_debug(hdl, "zfs-retire: ignoring SSM fault");
435 + continue;
436 + }
437 +
430 438 /*
431 439 * While we subscribe to fault.fs.zfs.*, we only take action
432 440 * for faults targeting a specific vdev (open failure or SERD
433 441 * failure). We also subscribe to fault.io.* events, so that
434 442 * faulty disks will be faulted in the ZFS configuration.
435 443 */
436 444 if (fmd_nvl_class_match(hdl, fault, "fault.fs.zfs.vdev.io")) {
437 445 fault_device = B_TRUE;
438 446 } else if (fmd_nvl_class_match(hdl, fault,
439 447 "fault.fs.zfs.vdev.checksum")) {
440 448 degrade_device = B_TRUE;
441 449 } else if (fmd_nvl_class_match(hdl, fault,
442 450 "fault.fs.zfs.device")) {
443 451 fault_device = B_FALSE;
444 452 } else if (fmd_nvl_class_match(hdl, fault, "fault.io.*")) {
445 453 is_disk = B_TRUE;
446 454 fault_device = B_TRUE;
447 455 } else {
448 456 continue;
449 457 }
450 458
451 459 if (is_disk) {
452 460 /*
453 461 * This is a disk fault. Lookup the FRU, convert it to
454 462 * an FMRI string, and attempt to find a matching vdev.
455 463 */
456 464 if (nvlist_lookup_nvlist(fault, FM_FAULT_FRU,
457 465 &fru) != 0 ||
458 466 nvlist_lookup_string(fru, FM_FMRI_SCHEME,
459 467 &scheme) != 0)
460 468 continue;
461 469
462 470 if (strcmp(scheme, FM_FMRI_SCHEME_HC) != 0)
463 471 continue;
464 472
465 473 thp = fmd_hdl_topo_hold(hdl, TOPO_VERSION);
466 474 if (topo_fmri_nvl2str(thp, fru, &fmri, &err) != 0) {
467 475 fmd_hdl_topo_rele(hdl, thp);
468 476 continue;
469 477 }
470 478
471 479 zhp = find_by_fru(zhdl, fmri, &vdev);
472 480 topo_hdl_strfree(thp, fmri);
473 481 fmd_hdl_topo_rele(hdl, thp);
474 482
475 483 if (zhp == NULL)
476 484 continue;
477 485
478 486 (void) nvlist_lookup_uint64(vdev,
479 487 ZPOOL_CONFIG_GUID, &vdev_guid);
480 488 aux = VDEV_AUX_EXTERNAL;
481 489 } else {
482 490 /*
483 491 * This is a ZFS fault. Lookup the resource, and
484 492 * attempt to find the matching vdev.
485 493 */
486 494 if (nvlist_lookup_nvlist(fault, FM_FAULT_RESOURCE,
487 495 &resource) != 0 ||
488 496 nvlist_lookup_string(resource, FM_FMRI_SCHEME,
489 497 &scheme) != 0)
490 498 continue;
491 499
492 500 if (strcmp(scheme, FM_FMRI_SCHEME_ZFS) != 0)
493 501 continue;
494 502
495 503 if (nvlist_lookup_uint64(resource, FM_FMRI_ZFS_POOL,
496 504 &pool_guid) != 0)
497 505 continue;
498 506
499 507 if (nvlist_lookup_uint64(resource, FM_FMRI_ZFS_VDEV,
500 508 &vdev_guid) != 0) {
501 509 if (is_repair)
502 510 vdev_guid = 0;
503 511 else
504 512 continue;
505 513 }
506 514
507 515 if ((zhp = find_by_guid(zhdl, pool_guid, vdev_guid,
508 516 &vdev)) == NULL)
509 517 continue;
510 518
511 519 aux = VDEV_AUX_ERR_EXCEEDED;
512 520 }
513 521
514 522 if (vdev_guid == 0) {
515 523 /*
516 524 * For pool-level repair events, clear the entire pool.
517 525 */
518 526 (void) zpool_clear(zhp, NULL, NULL);
519 527 zpool_close(zhp);
520 528 continue;
521 529 }
522 530
523 531 /*
524 532 * If this is a repair event, then mark the vdev as repaired and
525 533 * continue.
526 534 */
527 535 if (is_repair) {
528 536 repair_done = 1;
529 537 (void) zpool_vdev_clear(zhp, vdev_guid);
530 538 zpool_close(zhp);
531 539 continue;
532 540 }
533 541
534 542 /*
535 543 * Actively fault the device if needed.
536 544 */
537 545 if (fault_device)
538 546 (void) zpool_vdev_fault(zhp, vdev_guid, aux);
539 547 if (degrade_device)
540 548 (void) zpool_vdev_degrade(zhp, vdev_guid, aux);
541 549
542 550 /*
543 551 * Attempt to substitute a hot spare.
544 552 */
545 553 replace_with_spare(hdl, zhp, vdev);
546 554 zpool_close(zhp);
547 555 }
548 556
549 557 if (strcmp(class, FM_LIST_REPAIRED_CLASS) == 0 && repair_done &&
550 558 nvlist_lookup_string(nvl, FM_SUSPECT_UUID, &uuid) == 0)
551 559 fmd_case_uuresolved(hdl, uuid);
552 560 }
553 561
 553 561
554 562 static const fmd_hdl_ops_t fmd_ops = {
555 563 zfs_retire_recv, /* fmdo_recv */
556 564 NULL, /* fmdo_timeout */
557 565 NULL, /* fmdo_close */
558 566 NULL, /* fmdo_stats */
559 567 NULL, /* fmdo_gc */
560 568 };
561 569
562 570 static const fmd_prop_t fmd_props[] = {
563 571 { "spare_on_remove", FMD_TYPE_BOOL, "true" },
572 + { "ssm_wearout_skip_retire", FMD_TYPE_BOOL, "true"},
564 573 { NULL, 0, NULL }
565 574 };
566 575
567 576 static const fmd_hdl_info_t fmd_info = {
568 577 "ZFS Retire Agent", "1.0", &fmd_ops, fmd_props
569 578 };
570 579
571 580 void
572 581 _fmd_init(fmd_hdl_t *hdl)
573 582 {
574 583 zfs_retire_data_t *zdp;
575 584 libzfs_handle_t *zhdl;
576 585
577 586 if ((zhdl = libzfs_init()) == NULL)
578 587 return;
579 588
580 589 if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0) {
581 590 libzfs_fini(zhdl);
582 591 return;
583 592 }
584 593
585 594 zdp = fmd_hdl_zalloc(hdl, sizeof (zfs_retire_data_t), FMD_SLEEP);
586 595 zdp->zrd_hdl = zhdl;
587 596
588 597 fmd_hdl_setspecific(hdl, zdp);
589 598 }
590 599
591 600 void
592 601 _fmd_fini(fmd_hdl_t *hdl)
593 602 {
594 603 zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
595 604
596 605 if (zdp != NULL) {
597 606 zfs_retire_clear_data(hdl, zdp);
598 607 libzfs_fini(zdp->zrd_hdl);
599 608 fmd_hdl_free(hdl, zdp, sizeof (zfs_retire_data_t));
600 609 }
601 610 }
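
The change above adds one check in zfs_retire_recv(): suspects of class fault.io.disk.ssm-wearout are skipped (no vdev fault, no hot-spare activation) whenever the new boolean module property ssm_wearout_skip_retire is true, which is its default. The intent is that an SSD that is merely approaching its wear limit is reported through FMA but keeps servicing the pool until it is replaced deliberately.

A minimal sketch of how the default could be overridden, assuming the standard fmd(1M) per-module configuration mechanism (a setprop directive in the plugin's .conf file) applies here; the file below is hypothetical, as this change does not add a zfs-retire.conf:

    # /usr/lib/fm/fmd/plugins/zfs-retire.conf
    #
    # Let fault.io.disk.ssm-wearout suspects fault the vdev and pull in a
    # hot spare, instead of being ignored by the zfs-retire agent.
    setprop ssm_wearout_skip_retire false

After editing the file, restart fmd (svcadm restart svc:/system/fmd:default) so the property is re-read.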