mirror of https://git.freebsd.org/src.git (synced 2026-01-11 19:57:22 +00:00)
zfs: merge openzfs/zfs@89f729dcc
Notable upstream pull request merges:
 #17932 1f3444f2b zpool: fix special vdev -v -o conflict
 #17934 -multiple  Remove libuutil
 #17941 88d012a1d Fix snapshot automount expiry cancellation deadlock
 #17942 36e4f1888 Fix taskq NULL pointer dereference on timer race
 #17946 39303feba chksum: run 256K benchmark on demand, preserve chksum_stat_data
 #17948 -multiple  Remove libtpool
 #17957 e37937f42 ztest: fix broken random call
 #17960 928eccc5b DDT: Reduce global DDT lock scope during writes
 #17961 48f33c1ef DDT: Make children writes inherit allocator
 #17975 7f7d4934c FreeBSD: Fix uninitialized variable error
 #17980 a5b665df3 DDT: Switch to using wmsums for lookup stats
 #18004 ffaea0831 FreeBSD: Remove HAVE_INLINE_FLSL use

Obtained from:	OpenZFS
OpenZFS commit:	89f729dcca
commit 66e8575559
78 changed files with 1235 additions and 8282 deletions
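
The largest part of this merge is the libuutil removal (#17934), which moves the user-space tools from the uu_avl_*/uu_list_* containers to the in-tree libavl and list_t APIs, plus the libtpool removal (#17948), which swaps tpool_* for taskq_*. The sketch below is illustrative only — the name_node_t type, the sample data, and main() are invented, not taken from the tree — but it shows the avl replacement pattern the hunks below apply: avl_create() in place of uu_avl_pool_create()/uu_avl_create(), avl_find()/avl_insert() in place of uu_avl_find()/uu_avl_insert(), and avl_destroy_nodes() in place of the robust uu_avl_walk teardown.

/*
 * Minimal sketch of the post-#17934 pattern, assuming the in-tree
 * sys/avl.h (libavl); not code from this diff.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <sys/avl.h>

typedef struct name_node {
	const char *nn_name;
	avl_node_t nn_avlnode;	/* was: uu_avl_node_t plus a uu_avl_pool_t */
} name_node_t;

static int
name_compare(const void *larg, const void *rarg)
{
	const name_node_t *l = larg;
	const name_node_t *r = rarg;
	/* avl comparators must return only -1, 0 or 1. */
	return (TREE_ISIGN(strcmp(l->nn_name, r->nn_name)));
}

int
main(void)
{
	avl_tree_t tree;
	const char *names[] = { "tank", "backup", "scratch" };
	name_node_t *node;
	void *cookie = NULL;

	/* was: uu_avl_pool_create() + uu_avl_create() */
	avl_create(&tree, name_compare, sizeof (name_node_t),
	    offsetof(name_node_t, nn_avlnode));

	for (int i = 0; i < 3; i++) {
		avl_index_t idx;

		node = malloc(sizeof (*node));
		node->nn_name = names[i];
		/* was: uu_avl_node_init() + uu_avl_find()/uu_avl_insert() */
		if (avl_find(&tree, node, &idx) == NULL)
			avl_insert(&tree, node, idx);
		else
			free(node);
	}

	/* was: uu_avl_first()/uu_avl_next() */
	for (name_node_t *n = avl_first(&tree); n != NULL;
	    n = AVL_NEXT(&tree, n))
		printf("%s\n", n->nn_name);

	/* was: uu_avl_walk_start()/uu_avl_walk_next()/uu_avl_walk_end() */
	while ((node = avl_destroy_nodes(&tree, &cookie)) != NULL)
		free(node);
	avl_destroy(&tree);
	return (0);
}
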
@@ -181,7 +181,7 @@ function freebsd() {
 echo "##[endgroup]"

 echo "##[group]Build"
-run gmake -j$(sysctl -n hw.ncpu)
+run gmake -j$(nproc)
 echo "##[endgroup]"

 echo "##[group]Install"
@@ -42,7 +42,10 @@ function test_install {
 sudo sed -i "s;baseurl=http://download.zfsonlinux.org;baseurl=$host;g" /etc/yum.repos.d/zfs.repo
 fi

-sudo dnf -y install $args zfs zfs-test
+if ! sudo dnf -y install $args zfs zfs-test ; then
+echo "$repo ${package}...[FAILED] $baseurl" >> $SUMMARY
+return
+fi

 # Load modules and create a simple pool as a sanity test.
 sudo /usr/share/zfs/zfs.sh -r
@@ -70,16 +73,19 @@ almalinux*)
 name=$(curl -Ls $url | grep 'dnf install' | grep -Eo 'zfs-release-[0-9]+-[0-9]+')
 sudo dnf -y install https://zfsonlinux.org/epel/$name$(rpm --eval "%{dist}").noarch.rpm 2>&1
 sudo rpm -qi zfs-release
-test_install zfs $ALTHOST
-test_install zfs-kmod $ALTHOST
-test_install zfs-testing $ALTHOST
-test_install zfs-testing-kmod $ALTHOST
+for i in zfs zfs-kmod zfs-testing zfs-testing-kmod zfs-latest \
+zfs-latest-kmod zfs-legacy zfs-legacy-kmod zfs-2.2 \
+zfs-2.2-kmod zfs-2.3 zfs-2.3-kmod ; do
+test_install $i $ALTHOST
+done
 ;;
 fedora*)
 url='https://raw.githubusercontent.com/openzfs/openzfs-docs/refs/heads/master/docs/Getting%20Started/Fedora/index.rst'
 name=$(curl -Ls $url | grep 'dnf install' | grep -Eo 'zfs-release-[0-9]+-[0-9]+')
 sudo dnf -y install -y https://zfsonlinux.org/fedora/$name$(rpm --eval "%{dist}").noarch.rpm
-test_install zfs $ALTHOST
+for i in zfs zfs-latest zfs-legacy zfs-2.2 zfs-2.3 ; do
+test_install $i $ALTHOST
+done
 ;;
 esac
 echo "##[endgroup]"
52 sys/contrib/openzfs/.github/workflows/smatch.yml (vendored, new file)
@@ -0,0 +1,52 @@
+name: smatch
+
+on:
+  push:
+  pull_request:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
+jobs:
+  smatch:
+    runs-on: ubuntu-24.04
+    steps:
+      - name: Checkout smatch
+        uses: actions/checkout@v4
+        with:
+          repository: error27/smatch
+          ref: master
+          path: smatch
+      - name: Install smatch dependencies
+        run: |
+          sudo apt-get install -y llvm gcc make sqlite3 libsqlite3-dev libdbd-sqlite3-perl libssl-dev libtry-tiny-perl
+      - name: Make smatch
+        run: |
+          cd $GITHUB_WORKSPACE/smatch
+          make -j$(nproc)
+      - name: Checkout OpenZFS
+        uses: actions/checkout@v4
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
+          path: zfs
+      - name: Install OpenZFS dependencies
+        run: |
+          cd $GITHUB_WORKSPACE/zfs
+          sudo apt-get purge -y snapd google-chrome-stable firefox
+          ONLY_DEPS=1 .github/workflows/scripts/qemu-3-deps-vm.sh ubuntu24
+      - name: Autogen.sh OpenZFS
+        run: |
+          cd $GITHUB_WORKSPACE/zfs
+          ./autogen.sh
+      - name: Configure OpenZFS
+        run: |
+          cd $GITHUB_WORKSPACE/zfs
+          ./configure --enable-debug
+      - name: Make OpenZFS
+        run: |
+          cd $GITHUB_WORKSPACE/zfs
+          make -j$(nproc) CHECK="$GITHUB_WORKSPACE/smatch/smatch" CC=$GITHUB_WORKSPACE/smatch/cgcc | tee $GITHUB_WORKSPACE/smatch.log
+      - name: Smatch results log
+        run: |
+          grep -E 'error:|warn:|warning:' $GITHUB_WORKSPACE/smatch.log
@@ -73,7 +73,7 @@ jobs:
 .github/workflows/scripts/qemu-3-deps.sh ${{ matrix.os }}

 - name: Build modules or Test repo
-timeout-minutes: 30
+timeout-minutes: 60
 run: |
 set -e
 if [ "${{ github.event.inputs.test_type }}" == "Test repo" ] ; then
@@ -265,9 +265,21 @@ cmp_data(raidz_test_opts_t *opts, raidz_map_t *rm)

 static int
 init_rand(void *data, size_t size, void *private)
 {
+size_t *offsetp = (size_t *)private;
+size_t offset = *offsetp;
+
+VERIFY3U(offset + size, <=, SPA_MAXBLOCKSIZE);
+memcpy(data, (char *)rand_data + offset, size);
+*offsetp = offset + size;
+return (0);
+}
+
+static int
+corrupt_rand_fill(void *data, size_t size, void *private)
+{
 (void) private;
-memcpy(data, rand_data, size);
+memset(data, 0xAA, size);
 return (0);
 }

@@ -279,7 +291,7 @@ corrupt_colums(raidz_map_t *rm, const int *tgts, const int cnt)
 for (int i = 0; i < cnt; i++) {
 raidz_col_t *col = &rr->rr_col[tgts[i]];
 abd_iterate_func(col->rc_abd, 0, col->rc_size,
-init_rand, NULL);
+corrupt_rand_fill, NULL);
 }
 }
 }
@@ -287,7 +299,8 @@ corrupt_colums(raidz_map_t *rm, const int *tgts, const int cnt)
 void
 init_zio_abd(zio_t *zio)
 {
-abd_iterate_func(zio->io_abd, 0, zio->io_size, init_rand, NULL);
+size_t offset = 0;
+abd_iterate_func(zio->io_abd, 0, zio->io_size, init_rand, &offset);
 }

 static void
@@ -374,7 +387,7 @@ init_raidz_map(raidz_test_opts_t *opts, zio_t **zio, const int parity)

 *zio = umem_zalloc(sizeof (zio_t), UMEM_NOFAIL);

-(*zio)->io_offset = 0;
+(*zio)->io_offset = opts->rto_offset;
 (*zio)->io_size = alloc_dsize;
 (*zio)->io_abd = raidz_alloc(alloc_dsize);
 init_zio_abd(*zio);
@@ -835,6 +848,8 @@ main(int argc, char **argv)
 err = run_test(NULL);
 }

+mprotect(rand_data, SPA_MAXBLOCKSIZE, PROT_READ | PROT_WRITE);
+
 umem_free(rand_data, SPA_MAXBLOCKSIZE);
 kernel_fini();

@@ -72,7 +72,7 @@ typedef struct raidz_test_opts {

 static const raidz_test_opts_t rto_opts_defaults = {
 .rto_ashift = 9,
-.rto_offset = 1ULL << 0,
+.rto_offset = 0,
 .rto_dcols = 8,
 .rto_dsize = 1<<19,
 .rto_v = D_ALL,
@@ -37,8 +37,7 @@ zed_SOURCES = \
 zed_LDADD = \
 libzfs.la \
 libzfs_core.la \
-libnvpair.la \
-libuutil.la
+libnvpair.la

 zed_LDADD += -lrt $(LIBATOMIC_LIBS) $(LIBUDEV_LIBS) $(LIBUUID_LIBS)
 zed_LDFLAGS = -pthread
@@ -29,7 +29,6 @@

 #include <stddef.h>
 #include <string.h>
-#include <libuutil.h>
 #include <libzfs.h>
 #include <sys/types.h>
 #include <sys/time.h>
@@ -96,7 +95,7 @@ typedef struct zfs_case {
 uint32_t zc_version;
 zfs_case_data_t zc_data;
 fmd_case_t *zc_case;
-uu_list_node_t zc_node;
+list_node_t zc_node;
 id_t zc_remove_timer;
 char *zc_fru;
 er_timeval_t zc_when;
@@ -126,8 +125,7 @@ zfs_de_stats_t zfs_stats = {
 /* wait 15 seconds after a removal */
 static hrtime_t zfs_remove_timeout = SEC2NSEC(15);

-uu_list_pool_t *zfs_case_pool;
-uu_list_t *zfs_cases;
+static list_t zfs_cases;

 #define ZFS_MAKE_RSRC(type) \
 FM_RSRC_CLASS "." ZFS_ERROR_CLASS "." type
@ -174,8 +172,8 @@ zfs_case_unserialize(fmd_hdl_t *hdl, fmd_case_t *cp)
|
|||
zcp->zc_remove_timer = fmd_timer_install(hdl, zcp,
|
||||
NULL, zfs_remove_timeout);
|
||||
|
||||
uu_list_node_init(zcp, &zcp->zc_node, zfs_case_pool);
|
||||
(void) uu_list_insert_before(zfs_cases, NULL, zcp);
|
||||
list_link_init(&zcp->zc_node);
|
||||
list_insert_head(&zfs_cases, zcp);
|
||||
|
||||
fmd_case_setspecific(hdl, cp, zcp);
|
||||
|
||||
|
|
@ -206,8 +204,8 @@ zfs_other_serd_cases(fmd_hdl_t *hdl, const zfs_case_data_t *zfs_case)
|
|||
next_check = gethrestime_sec() + CASE_GC_TIMEOUT_SECS;
|
||||
}
|
||||
|
||||
for (zcp = uu_list_first(zfs_cases); zcp != NULL;
|
||||
zcp = uu_list_next(zfs_cases, zcp)) {
|
||||
for (zcp = list_head(&zfs_cases); zcp != NULL;
|
||||
zcp = list_next(&zfs_cases, zcp)) {
|
||||
zfs_case_data_t *zcd = &zcp->zc_data;
|
||||
|
||||
/*
|
||||
|
|
@ -257,8 +255,8 @@ zfs_mark_vdev(uint64_t pool_guid, nvlist_t *vd, er_timeval_t *loaded)
|
|||
/*
|
||||
* Mark any cases associated with this (pool, vdev) pair.
|
||||
*/
|
||||
for (zcp = uu_list_first(zfs_cases); zcp != NULL;
|
||||
zcp = uu_list_next(zfs_cases, zcp)) {
|
||||
for (zcp = list_head(&zfs_cases); zcp != NULL;
|
||||
zcp = list_next(&zfs_cases, zcp)) {
|
||||
if (zcp->zc_data.zc_pool_guid == pool_guid &&
|
||||
zcp->zc_data.zc_vdev_guid == vdev_guid) {
|
||||
zcp->zc_present = B_TRUE;
|
||||
|
|
@ -304,8 +302,8 @@ zfs_mark_pool(zpool_handle_t *zhp, void *unused)
|
|||
/*
|
||||
* Mark any cases associated with just this pool.
|
||||
*/
|
||||
for (zcp = uu_list_first(zfs_cases); zcp != NULL;
|
||||
zcp = uu_list_next(zfs_cases, zcp)) {
|
||||
for (zcp = list_head(&zfs_cases); zcp != NULL;
|
||||
zcp = list_next(&zfs_cases, zcp)) {
|
||||
if (zcp->zc_data.zc_pool_guid == pool_guid &&
|
||||
zcp->zc_data.zc_vdev_guid == 0)
|
||||
zcp->zc_present = B_TRUE;
|
||||
|
|
@ -321,8 +319,8 @@ zfs_mark_pool(zpool_handle_t *zhp, void *unused)
|
|||
if (nelem == 2) {
|
||||
loaded.ertv_sec = tod[0];
|
||||
loaded.ertv_nsec = tod[1];
|
||||
for (zcp = uu_list_first(zfs_cases); zcp != NULL;
|
||||
zcp = uu_list_next(zfs_cases, zcp)) {
|
||||
for (zcp = list_head(&zfs_cases); zcp != NULL;
|
||||
zcp = list_next(&zfs_cases, zcp)) {
|
||||
if (zcp->zc_data.zc_pool_guid == pool_guid &&
|
||||
zcp->zc_data.zc_vdev_guid == 0) {
|
||||
zcp->zc_when = loaded;
|
||||
|
|
@ -389,8 +387,7 @@ zpool_find_load_time(zpool_handle_t *zhp, void *arg)
|
|||
static void
|
||||
zfs_purge_cases(fmd_hdl_t *hdl)
|
||||
{
|
||||
zfs_case_t *zcp;
|
||||
uu_list_walk_t *walk;
|
||||
zfs_case_t *zcp, *next;
|
||||
libzfs_handle_t *zhdl = fmd_hdl_getspecific(hdl);
|
||||
|
||||
/*
|
||||
|
|
@ -410,8 +407,8 @@ zfs_purge_cases(fmd_hdl_t *hdl)
|
|||
/*
|
||||
* Mark the cases as not present.
|
||||
*/
|
||||
for (zcp = uu_list_first(zfs_cases); zcp != NULL;
|
||||
zcp = uu_list_next(zfs_cases, zcp))
|
||||
for (zcp = list_head(&zfs_cases); zcp != NULL;
|
||||
zcp = list_next(&zfs_cases, zcp))
|
||||
zcp->zc_present = B_FALSE;
|
||||
|
||||
/*
|
||||
|
|
@ -425,12 +422,11 @@ zfs_purge_cases(fmd_hdl_t *hdl)
|
|||
/*
|
||||
* Remove those cases which were not found.
|
||||
*/
|
||||
walk = uu_list_walk_start(zfs_cases, UU_WALK_ROBUST);
|
||||
while ((zcp = uu_list_walk_next(walk)) != NULL) {
|
||||
for (zcp = list_head(&zfs_cases); zcp != NULL; zcp = next) {
|
||||
next = list_next(&zfs_cases, zcp);
|
||||
if (!zcp->zc_present)
|
||||
fmd_case_close(hdl, zcp->zc_case);
|
||||
}
|
||||
uu_list_walk_end(walk);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -660,8 +656,8 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
|
|||
|
||||
zfs_ereport_when(hdl, nvl, &er_when);
|
||||
|
||||
for (zcp = uu_list_first(zfs_cases); zcp != NULL;
|
||||
zcp = uu_list_next(zfs_cases, zcp)) {
|
||||
for (zcp = list_head(&zfs_cases); zcp != NULL;
|
||||
zcp = list_next(&zfs_cases, zcp)) {
|
||||
if (zcp->zc_data.zc_pool_guid == pool_guid) {
|
||||
pool_found = B_TRUE;
|
||||
pool_load = zcp->zc_when;
|
||||
|
|
@ -867,8 +863,8 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
|
|||
* Pool level fault. Before solving the case, go through and
|
||||
* close any open device cases that may be pending.
|
||||
*/
|
||||
for (dcp = uu_list_first(zfs_cases); dcp != NULL;
|
||||
dcp = uu_list_next(zfs_cases, dcp)) {
|
||||
for (dcp = list_head(&zfs_cases); dcp != NULL;
|
||||
dcp = list_next(&zfs_cases, dcp)) {
|
||||
if (dcp->zc_data.zc_pool_guid ==
|
||||
zcp->zc_data.zc_pool_guid &&
|
||||
dcp->zc_data.zc_vdev_guid != 0)
|
||||
|
|
@ -1088,8 +1084,7 @@ zfs_fm_close(fmd_hdl_t *hdl, fmd_case_t *cs)
|
|||
if (zcp->zc_data.zc_has_remove_timer)
|
||||
fmd_timer_remove(hdl, zcp->zc_remove_timer);
|
||||
|
||||
uu_list_remove(zfs_cases, zcp);
|
||||
uu_list_node_fini(zcp, &zcp->zc_node, zfs_case_pool);
|
||||
list_remove(&zfs_cases, zcp);
|
||||
fmd_hdl_free(hdl, zcp, sizeof (zfs_case_t));
|
||||
}
|
||||
|
||||
|
|
@ -1117,23 +1112,11 @@ _zfs_diagnosis_init(fmd_hdl_t *hdl)
|
|||
if ((zhdl = libzfs_init()) == NULL)
|
||||
return;
|
||||
|
||||
if ((zfs_case_pool = uu_list_pool_create("zfs_case_pool",
|
||||
sizeof (zfs_case_t), offsetof(zfs_case_t, zc_node),
|
||||
NULL, UU_LIST_POOL_DEBUG)) == NULL) {
|
||||
libzfs_fini(zhdl);
|
||||
return;
|
||||
}
|
||||
|
||||
if ((zfs_cases = uu_list_create(zfs_case_pool, NULL,
|
||||
UU_LIST_DEBUG)) == NULL) {
|
||||
uu_list_pool_destroy(zfs_case_pool);
|
||||
libzfs_fini(zhdl);
|
||||
return;
|
||||
}
|
||||
list_create(&zfs_cases,
|
||||
sizeof (zfs_case_t), offsetof(zfs_case_t, zc_node));
|
||||
|
||||
if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0) {
|
||||
uu_list_destroy(zfs_cases);
|
||||
uu_list_pool_destroy(zfs_case_pool);
|
||||
list_destroy(&zfs_cases);
|
||||
libzfs_fini(zhdl);
|
||||
return;
|
||||
}
|
||||
|
|
@ -1148,24 +1131,18 @@ void
|
|||
_zfs_diagnosis_fini(fmd_hdl_t *hdl)
|
||||
{
|
||||
zfs_case_t *zcp;
|
||||
uu_list_walk_t *walk;
|
||||
libzfs_handle_t *zhdl;
|
||||
|
||||
/*
|
||||
* Remove all active cases.
|
||||
*/
|
||||
walk = uu_list_walk_start(zfs_cases, UU_WALK_ROBUST);
|
||||
while ((zcp = uu_list_walk_next(walk)) != NULL) {
|
||||
while ((zcp = list_remove_head(&zfs_cases)) != NULL) {
|
||||
fmd_hdl_debug(hdl, "removing case ena %llu",
|
||||
(long long unsigned)zcp->zc_data.zc_ena);
|
||||
uu_list_remove(zfs_cases, zcp);
|
||||
uu_list_node_fini(zcp, &zcp->zc_node, zfs_case_pool);
|
||||
fmd_hdl_free(hdl, zcp, sizeof (zfs_case_t));
|
||||
}
|
||||
uu_list_walk_end(walk);
|
||||
|
||||
uu_list_destroy(zfs_cases);
|
||||
uu_list_pool_destroy(zfs_case_pool);
|
||||
list_destroy(&zfs_cases);
|
||||
|
||||
zhdl = fmd_hdl_getspecific(hdl);
|
||||
libzfs_fini(zhdl);
|
||||
|
|
|
|||
|
|
@ -82,7 +82,7 @@
|
|||
#include <sys/sunddi.h>
|
||||
#include <sys/sysevent/eventdefs.h>
|
||||
#include <sys/sysevent/dev.h>
|
||||
#include <thread_pool.h>
|
||||
#include <sys/taskq.h>
|
||||
#include <pthread.h>
|
||||
#include <unistd.h>
|
||||
#include <errno.h>
|
||||
|
|
@ -98,7 +98,7 @@ typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);
|
|||
libzfs_handle_t *g_zfshdl;
|
||||
list_t g_pool_list; /* list of unavailable pools at initialization */
|
||||
list_t g_device_list; /* list of disks with asynchronous label request */
|
||||
tpool_t *g_tpool;
|
||||
taskq_t *g_taskq;
|
||||
boolean_t g_enumeration_done;
|
||||
pthread_t g_zfs_tid; /* zfs_enum_pools() thread */
|
||||
|
||||
|
|
@ -749,8 +749,8 @@ zfs_iter_pool(zpool_handle_t *zhp, void *data)
|
|||
continue;
|
||||
if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
|
||||
list_remove(&g_pool_list, pool);
|
||||
(void) tpool_dispatch(g_tpool, zfs_enable_ds,
|
||||
pool);
|
||||
(void) taskq_dispatch(g_taskq, zfs_enable_ds,
|
||||
pool, TQ_SLEEP);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
|
@ -1347,9 +1347,9 @@ zfs_slm_fini(void)
|
|||
/* wait for zfs_enum_pools thread to complete */
|
||||
(void) pthread_join(g_zfs_tid, NULL);
|
||||
/* destroy the thread pool */
|
||||
if (g_tpool != NULL) {
|
||||
tpool_wait(g_tpool);
|
||||
tpool_destroy(g_tpool);
|
||||
if (g_taskq != NULL) {
|
||||
taskq_wait(g_taskq);
|
||||
taskq_destroy(g_taskq);
|
||||
}
|
||||
|
||||
while ((pool = list_remove_head(&g_pool_list)) != NULL) {
|
||||
|
|
|
|||
|
|
@ -12,8 +12,7 @@ zfs_SOURCES = \
|
|||
zfs_LDADD = \
|
||||
libzfs.la \
|
||||
libzfs_core.la \
|
||||
libnvpair.la \
|
||||
libuutil.la
|
||||
libnvpair.la
|
||||
|
||||
zfs_LDADD += $(LTLIBINTL)
|
||||
|
||||
|
|
|
|||
|
|
@ -28,7 +28,6 @@
|
|||
*/
|
||||
|
||||
#include <libintl.h>
|
||||
#include <libuutil.h>
|
||||
#include <stddef.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
|
@ -50,14 +49,16 @@
|
|||
* When finished, we have an AVL tree of ZFS handles. We go through and execute
|
||||
* the provided callback for each one, passing whatever data the user supplied.
|
||||
*/
|
||||
typedef struct callback_data callback_data_t;
|
||||
|
||||
typedef struct zfs_node {
|
||||
zfs_handle_t *zn_handle;
|
||||
uu_avl_node_t zn_avlnode;
|
||||
callback_data_t *zn_callback;
|
||||
avl_node_t zn_avlnode;
|
||||
} zfs_node_t;
|
||||
|
||||
typedef struct callback_data {
|
||||
uu_avl_t *cb_avl;
|
||||
struct callback_data {
|
||||
avl_tree_t cb_avl;
|
||||
int cb_flags;
|
||||
zfs_type_t cb_types;
|
||||
zfs_sort_column_t *cb_sortcol;
|
||||
|
|
@ -65,9 +66,7 @@ typedef struct callback_data {
|
|||
int cb_depth_limit;
|
||||
int cb_depth;
|
||||
uint8_t cb_props_table[ZFS_NUM_PROPS];
|
||||
} callback_data_t;
|
||||
|
||||
uu_avl_pool_t *avl_pool;
|
||||
};
|
||||
|
||||
/*
|
||||
* Include snaps if they were requested or if this a zfs list where types
|
||||
|
|
@ -99,13 +98,12 @@ zfs_callback(zfs_handle_t *zhp, void *data)
|
|||
|
||||
if ((zfs_get_type(zhp) & cb->cb_types) ||
|
||||
((zfs_get_type(zhp) == ZFS_TYPE_SNAPSHOT) && include_snaps)) {
|
||||
uu_avl_index_t idx;
|
||||
avl_index_t idx;
|
||||
zfs_node_t *node = safe_malloc(sizeof (zfs_node_t));
|
||||
|
||||
node->zn_handle = zhp;
|
||||
uu_avl_node_init(node, &node->zn_avlnode, avl_pool);
|
||||
if (uu_avl_find(cb->cb_avl, node, cb->cb_sortcol,
|
||||
&idx) == NULL) {
|
||||
node->zn_callback = cb;
|
||||
if (avl_find(&cb->cb_avl, node, &idx) == NULL) {
|
||||
if (cb->cb_proplist) {
|
||||
if ((*cb->cb_proplist) &&
|
||||
!(*cb->cb_proplist)->pl_all)
|
||||
|
|
@ -120,7 +118,7 @@ zfs_callback(zfs_handle_t *zhp, void *data)
|
|||
return (-1);
|
||||
}
|
||||
}
|
||||
uu_avl_insert(cb->cb_avl, node, idx);
|
||||
avl_insert(&cb->cb_avl, node, idx);
|
||||
should_close = B_FALSE;
|
||||
} else {
|
||||
free(node);
|
||||
|
|
@ -286,7 +284,7 @@ zfs_compare(const void *larg, const void *rarg)
|
|||
if (rat != NULL)
|
||||
*rat = '\0';
|
||||
|
||||
ret = strcmp(lname, rname);
|
||||
ret = TREE_ISIGN(strcmp(lname, rname));
|
||||
if (ret == 0 && (lat != NULL || rat != NULL)) {
|
||||
/*
|
||||
* If we're comparing a dataset to one of its snapshots, we
|
||||
|
|
@ -340,11 +338,11 @@ zfs_compare(const void *larg, const void *rarg)
|
|||
* with snapshots grouped under their parents.
|
||||
*/
|
||||
static int
|
||||
zfs_sort(const void *larg, const void *rarg, void *data)
|
||||
zfs_sort(const void *larg, const void *rarg)
|
||||
{
|
||||
zfs_handle_t *l = ((zfs_node_t *)larg)->zn_handle;
|
||||
zfs_handle_t *r = ((zfs_node_t *)rarg)->zn_handle;
|
||||
zfs_sort_column_t *sc = (zfs_sort_column_t *)data;
|
||||
zfs_sort_column_t *sc = ((zfs_node_t *)larg)->zn_callback->cb_sortcol;
|
||||
zfs_sort_column_t *psc;
|
||||
|
||||
for (psc = sc; psc != NULL; psc = psc->sc_next) {
|
||||
|
|
@ -414,7 +412,7 @@ zfs_sort(const void *larg, const void *rarg, void *data)
|
|||
return (-1);
|
||||
|
||||
if (lstr)
|
||||
ret = strcmp(lstr, rstr);
|
||||
ret = TREE_ISIGN(strcmp(lstr, rstr));
|
||||
else if (lnum < rnum)
|
||||
ret = -1;
|
||||
else if (lnum > rnum)
|
||||
|
|
@ -438,13 +436,6 @@ zfs_for_each(int argc, char **argv, int flags, zfs_type_t types,
|
|||
callback_data_t cb = {0};
|
||||
int ret = 0;
|
||||
zfs_node_t *node;
|
||||
uu_avl_walk_t *walk;
|
||||
|
||||
avl_pool = uu_avl_pool_create("zfs_pool", sizeof (zfs_node_t),
|
||||
offsetof(zfs_node_t, zn_avlnode), zfs_sort, UU_DEFAULT);
|
||||
|
||||
if (avl_pool == NULL)
|
||||
nomem();
|
||||
|
||||
cb.cb_sortcol = sortcol;
|
||||
cb.cb_flags = flags;
|
||||
|
|
@ -489,8 +480,8 @@ zfs_for_each(int argc, char **argv, int flags, zfs_type_t types,
|
|||
sizeof (cb.cb_props_table));
|
||||
}
|
||||
|
||||
if ((cb.cb_avl = uu_avl_create(avl_pool, NULL, UU_DEFAULT)) == NULL)
|
||||
nomem();
|
||||
avl_create(&cb.cb_avl, zfs_sort,
|
||||
sizeof (zfs_node_t), offsetof(zfs_node_t, zn_avlnode));
|
||||
|
||||
if (argc == 0) {
|
||||
/*
|
||||
|
|
@ -531,25 +522,20 @@ zfs_for_each(int argc, char **argv, int flags, zfs_type_t types,
|
|||
* At this point we've got our AVL tree full of zfs handles, so iterate
|
||||
* over each one and execute the real user callback.
|
||||
*/
|
||||
for (node = uu_avl_first(cb.cb_avl); node != NULL;
|
||||
node = uu_avl_next(cb.cb_avl, node))
|
||||
for (node = avl_first(&cb.cb_avl); node != NULL;
|
||||
node = AVL_NEXT(&cb.cb_avl, node))
|
||||
ret |= callback(node->zn_handle, data);
|
||||
|
||||
/*
|
||||
* Finally, clean up the AVL tree.
|
||||
*/
|
||||
if ((walk = uu_avl_walk_start(cb.cb_avl, UU_WALK_ROBUST)) == NULL)
|
||||
nomem();
|
||||
|
||||
while ((node = uu_avl_walk_next(walk)) != NULL) {
|
||||
uu_avl_remove(cb.cb_avl, node);
|
||||
void *cookie = NULL;
|
||||
while ((node = avl_destroy_nodes(&cb.cb_avl, &cookie)) != NULL) {
|
||||
zfs_close(node->zn_handle);
|
||||
free(node);
|
||||
}
|
||||
|
||||
uu_avl_walk_end(walk);
|
||||
uu_avl_destroy(cb.cb_avl);
|
||||
uu_avl_pool_destroy(avl_pool);
|
||||
avl_destroy(&cb.cb_avl);
|
||||
|
||||
return (ret);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -42,7 +42,6 @@
|
|||
#include <getopt.h>
|
||||
#include <libgen.h>
|
||||
#include <libintl.h>
|
||||
#include <libuutil.h>
|
||||
#include <libnvpair.h>
|
||||
#include <locale.h>
|
||||
#include <stddef.h>
|
||||
|
|
@ -2846,31 +2845,27 @@ static int us_type_bits[] = {
|
|||
static const char *const us_type_names[] = { "posixgroup", "posixuser",
|
||||
"smbgroup", "smbuser", "all" };
|
||||
|
||||
typedef struct us_cbdata us_cbdata_t;
|
||||
typedef struct us_node {
|
||||
nvlist_t *usn_nvl;
|
||||
uu_avl_node_t usn_avlnode;
|
||||
uu_list_node_t usn_listnode;
|
||||
us_cbdata_t *usn_cbdata;
|
||||
avl_node_t usn_avlnode;
|
||||
list_node_t usn_listnode;
|
||||
} us_node_t;
|
||||
|
||||
typedef struct us_cbdata {
|
||||
struct us_cbdata {
|
||||
nvlist_t **cb_nvlp;
|
||||
uu_avl_pool_t *cb_avl_pool;
|
||||
uu_avl_t *cb_avl;
|
||||
avl_tree_t cb_avl;
|
||||
boolean_t cb_numname;
|
||||
boolean_t cb_nicenum;
|
||||
boolean_t cb_sid2posix;
|
||||
zfs_userquota_prop_t cb_prop;
|
||||
zfs_sort_column_t *cb_sortcol;
|
||||
size_t cb_width[USFIELD_LAST];
|
||||
} us_cbdata_t;
|
||||
};
|
||||
|
||||
static boolean_t us_populated = B_FALSE;
|
||||
|
||||
typedef struct {
|
||||
zfs_sort_column_t *si_sortcol;
|
||||
boolean_t si_numname;
|
||||
} us_sort_info_t;
|
||||
|
||||
static int
|
||||
us_field_index(const char *field)
|
||||
{
|
||||
|
|
@ -2883,13 +2878,12 @@ us_field_index(const char *field)
|
|||
}
|
||||
|
||||
static int
|
||||
us_compare(const void *larg, const void *rarg, void *unused)
|
||||
us_compare(const void *larg, const void *rarg)
|
||||
{
|
||||
const us_node_t *l = larg;
|
||||
const us_node_t *r = rarg;
|
||||
us_sort_info_t *si = (us_sort_info_t *)unused;
|
||||
zfs_sort_column_t *sortcol = si->si_sortcol;
|
||||
boolean_t numname = si->si_numname;
|
||||
zfs_sort_column_t *sortcol = l->usn_cbdata->cb_sortcol;
|
||||
boolean_t numname = l->usn_cbdata->cb_numname;
|
||||
nvlist_t *lnvl = l->usn_nvl;
|
||||
nvlist_t *rnvl = r->usn_nvl;
|
||||
int rc = 0;
|
||||
|
|
@ -3023,25 +3017,22 @@ userspace_cb(void *arg, const char *domain, uid_t rid, uint64_t space,
|
|||
const char *propname;
|
||||
char sizebuf[32];
|
||||
us_node_t *node;
|
||||
uu_avl_pool_t *avl_pool = cb->cb_avl_pool;
|
||||
uu_avl_t *avl = cb->cb_avl;
|
||||
uu_avl_index_t idx;
|
||||
avl_tree_t *avl = &cb->cb_avl;
|
||||
avl_index_t idx;
|
||||
nvlist_t *props;
|
||||
us_node_t *n;
|
||||
zfs_sort_column_t *sortcol = cb->cb_sortcol;
|
||||
unsigned type = 0;
|
||||
const char *typestr;
|
||||
size_t namelen;
|
||||
size_t typelen;
|
||||
size_t sizelen;
|
||||
int typeidx, nameidx, sizeidx;
|
||||
us_sort_info_t sortinfo = { sortcol, cb->cb_numname };
|
||||
boolean_t smbentity = B_FALSE;
|
||||
|
||||
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
|
||||
nomem();
|
||||
node = safe_malloc(sizeof (us_node_t));
|
||||
uu_avl_node_init(node, &node->usn_avlnode, avl_pool);
|
||||
node->usn_cbdata = cb;
|
||||
node->usn_nvl = props;
|
||||
|
||||
if (domain != NULL && domain[0] != '\0') {
|
||||
|
|
@ -3143,8 +3134,8 @@ userspace_cb(void *arg, const char *domain, uid_t rid, uint64_t space,
|
|||
* Check if this type/name combination is in the list and update it;
|
||||
* otherwise add new node to the list.
|
||||
*/
|
||||
if ((n = uu_avl_find(avl, node, &sortinfo, &idx)) == NULL) {
|
||||
uu_avl_insert(avl, node, idx);
|
||||
if ((n = avl_find(avl, node, &idx)) == NULL) {
|
||||
avl_insert(avl, node, idx);
|
||||
} else {
|
||||
nvlist_free(props);
|
||||
free(node);
|
||||
|
|
@ -3318,7 +3309,7 @@ print_us_node(boolean_t scripted, boolean_t parsable, int *fields, int types,
|
|||
|
||||
static void
|
||||
print_us(boolean_t scripted, boolean_t parsable, int *fields, int types,
|
||||
size_t *width, boolean_t rmnode, uu_avl_t *avl)
|
||||
size_t *width, boolean_t rmnode, avl_tree_t *avl)
|
||||
{
|
||||
us_node_t *node;
|
||||
const char *col;
|
||||
|
|
@ -3343,7 +3334,7 @@ print_us(boolean_t scripted, boolean_t parsable, int *fields, int types,
|
|||
(void) printf("\n");
|
||||
}
|
||||
|
||||
for (node = uu_avl_first(avl); node; node = uu_avl_next(avl, node)) {
|
||||
for (node = avl_first(avl); node; node = AVL_NEXT(avl, node)) {
|
||||
print_us_node(scripted, parsable, fields, types, width, node);
|
||||
if (rmnode)
|
||||
nvlist_free(node->usn_nvl);
|
||||
|
|
@ -3355,9 +3346,6 @@ zfs_do_userspace(int argc, char **argv)
|
|||
{
|
||||
zfs_handle_t *zhp;
|
||||
zfs_userquota_prop_t p;
|
||||
uu_avl_pool_t *avl_pool;
|
||||
uu_avl_t *avl_tree;
|
||||
uu_avl_walk_t *walk;
|
||||
char *delim;
|
||||
char deffields[] = "type,name,used,quota,objused,objquota";
|
||||
char *ofield = NULL;
|
||||
|
|
@ -3376,10 +3364,8 @@ zfs_do_userspace(int argc, char **argv)
|
|||
us_cbdata_t cb;
|
||||
us_node_t *node;
|
||||
us_node_t *rmnode;
|
||||
uu_list_pool_t *listpool;
|
||||
uu_list_t *list;
|
||||
uu_avl_index_t idx = 0;
|
||||
uu_list_index_t idx2 = 0;
|
||||
list_t list;
|
||||
avl_index_t idx = 0;
|
||||
|
||||
if (argc < 2)
|
||||
usage(B_FALSE);
|
||||
|
|
@ -3513,12 +3499,6 @@ zfs_do_userspace(int argc, char **argv)
|
|||
return (1);
|
||||
}
|
||||
|
||||
if ((avl_pool = uu_avl_pool_create("us_avl_pool", sizeof (us_node_t),
|
||||
offsetof(us_node_t, usn_avlnode), us_compare, UU_DEFAULT)) == NULL)
|
||||
nomem();
|
||||
if ((avl_tree = uu_avl_create(avl_pool, NULL, UU_DEFAULT)) == NULL)
|
||||
nomem();
|
||||
|
||||
/* Always add default sorting columns */
|
||||
(void) zfs_add_sort_column(&sortcol, "type", B_FALSE);
|
||||
(void) zfs_add_sort_column(&sortcol, "name", B_FALSE);
|
||||
|
|
@ -3526,10 +3506,12 @@ zfs_do_userspace(int argc, char **argv)
|
|||
cb.cb_sortcol = sortcol;
|
||||
cb.cb_numname = prtnum;
|
||||
cb.cb_nicenum = !parsable;
|
||||
cb.cb_avl_pool = avl_pool;
|
||||
cb.cb_avl = avl_tree;
|
||||
cb.cb_sid2posix = sid2posix;
|
||||
|
||||
avl_create(&cb.cb_avl, us_compare,
|
||||
sizeof (us_node_t), offsetof(us_node_t, usn_avlnode));
|
||||
|
||||
|
||||
for (i = 0; i < USFIELD_LAST; i++)
|
||||
cb.cb_width[i] = strlen(gettext(us_field_hdr[i]));
|
||||
|
||||
|
|
@ -3544,59 +3526,52 @@ zfs_do_userspace(int argc, char **argv)
|
|||
cb.cb_prop = p;
|
||||
if ((ret = zfs_userspace(zhp, p, userspace_cb, &cb)) != 0) {
|
||||
zfs_close(zhp);
|
||||
avl_destroy(&cb.cb_avl);
|
||||
return (ret);
|
||||
}
|
||||
}
|
||||
zfs_close(zhp);
|
||||
|
||||
/* Sort the list */
|
||||
if ((node = uu_avl_first(avl_tree)) == NULL)
|
||||
if ((node = avl_first(&cb.cb_avl)) == NULL) {
|
||||
avl_destroy(&cb.cb_avl);
|
||||
return (0);
|
||||
}
|
||||
|
||||
us_populated = B_TRUE;
|
||||
|
||||
listpool = uu_list_pool_create("tmplist", sizeof (us_node_t),
|
||||
offsetof(us_node_t, usn_listnode), NULL, UU_DEFAULT);
|
||||
list = uu_list_create(listpool, NULL, UU_DEFAULT);
|
||||
uu_list_node_init(node, &node->usn_listnode, listpool);
|
||||
list_create(&list, sizeof (us_node_t),
|
||||
offsetof(us_node_t, usn_listnode));
|
||||
list_link_init(&node->usn_listnode);
|
||||
|
||||
while (node != NULL) {
|
||||
rmnode = node;
|
||||
node = uu_avl_next(avl_tree, node);
|
||||
uu_avl_remove(avl_tree, rmnode);
|
||||
if (uu_list_find(list, rmnode, NULL, &idx2) == NULL)
|
||||
uu_list_insert(list, rmnode, idx2);
|
||||
node = AVL_NEXT(&cb.cb_avl, node);
|
||||
avl_remove(&cb.cb_avl, rmnode);
|
||||
list_insert_head(&list, rmnode);
|
||||
}
|
||||
|
||||
for (node = uu_list_first(list); node != NULL;
|
||||
node = uu_list_next(list, node)) {
|
||||
us_sort_info_t sortinfo = { sortcol, cb.cb_numname };
|
||||
|
||||
if (uu_avl_find(avl_tree, node, &sortinfo, &idx) == NULL)
|
||||
uu_avl_insert(avl_tree, node, idx);
|
||||
for (node = list_head(&list); node != NULL;
|
||||
node = list_next(&list, node)) {
|
||||
if (avl_find(&cb.cb_avl, node, &idx) == NULL)
|
||||
avl_insert(&cb.cb_avl, node, idx);
|
||||
}
|
||||
|
||||
uu_list_destroy(list);
|
||||
uu_list_pool_destroy(listpool);
|
||||
while ((node = list_remove_head(&list)) != NULL) { }
|
||||
list_destroy(&list);
|
||||
|
||||
/* Print and free node nvlist memory */
|
||||
print_us(scripted, parsable, fields, types, cb.cb_width, B_TRUE,
|
||||
cb.cb_avl);
|
||||
&cb.cb_avl);
|
||||
|
||||
zfs_free_sort_columns(sortcol);
|
||||
|
||||
/* Clean up the AVL tree */
|
||||
if ((walk = uu_avl_walk_start(cb.cb_avl, UU_WALK_ROBUST)) == NULL)
|
||||
nomem();
|
||||
|
||||
while ((node = uu_avl_walk_next(walk)) != NULL) {
|
||||
uu_avl_remove(cb.cb_avl, node);
|
||||
void *cookie = NULL;
|
||||
while ((node = avl_destroy_nodes(&cb.cb_avl, &cookie)) != NULL) {
|
||||
free(node);
|
||||
}
|
||||
|
||||
uu_avl_walk_end(walk);
|
||||
uu_avl_destroy(avl_tree);
|
||||
uu_avl_pool_destroy(avl_pool);
|
||||
avl_destroy(&cb.cb_avl);
|
||||
|
||||
return (ret);
|
||||
}
|
||||
|
|
@ -5401,7 +5376,7 @@ typedef struct deleg_perm {
|
|||
typedef struct deleg_perm_node {
|
||||
deleg_perm_t dpn_perm;
|
||||
|
||||
uu_avl_node_t dpn_avl_node;
|
||||
avl_node_t dpn_avl_node;
|
||||
} deleg_perm_node_t;
|
||||
|
||||
typedef struct fs_perm fs_perm_t;
|
||||
|
|
@ -5413,13 +5388,13 @@ typedef struct who_perm {
|
|||
char who_ug_name[256]; /* user/group name */
|
||||
fs_perm_t *who_fsperm; /* uplink */
|
||||
|
||||
uu_avl_t *who_deleg_perm_avl; /* permissions */
|
||||
avl_tree_t who_deleg_perm_avl; /* permissions */
|
||||
} who_perm_t;
|
||||
|
||||
/* */
|
||||
typedef struct who_perm_node {
|
||||
who_perm_t who_perm;
|
||||
uu_avl_node_t who_avl_node;
|
||||
avl_node_t who_avl_node;
|
||||
} who_perm_node_t;
|
||||
|
||||
typedef struct fs_perm_set fs_perm_set_t;
|
||||
|
|
@ -5427,8 +5402,8 @@ typedef struct fs_perm_set fs_perm_set_t;
|
|||
struct fs_perm {
|
||||
const char *fsp_name;
|
||||
|
||||
uu_avl_t *fsp_sc_avl; /* sets,create */
|
||||
uu_avl_t *fsp_uge_avl; /* user,group,everyone */
|
||||
avl_tree_t fsp_sc_avl; /* sets,create */
|
||||
avl_tree_t fsp_uge_avl; /* user,group,everyone */
|
||||
|
||||
fs_perm_set_t *fsp_set; /* uplink */
|
||||
};
|
||||
|
|
@ -5436,19 +5411,14 @@ struct fs_perm {
|
|||
/* */
|
||||
typedef struct fs_perm_node {
|
||||
fs_perm_t fspn_fsperm;
|
||||
uu_avl_t *fspn_avl;
|
||||
avl_tree_t fspn_avl;
|
||||
|
||||
uu_list_node_t fspn_list_node;
|
||||
list_node_t fspn_list_node;
|
||||
} fs_perm_node_t;
|
||||
|
||||
/* top level structure */
|
||||
struct fs_perm_set {
|
||||
uu_list_pool_t *fsps_list_pool;
|
||||
uu_list_t *fsps_list; /* list of fs_perms */
|
||||
|
||||
uu_avl_pool_t *fsps_named_set_avl_pool;
|
||||
uu_avl_pool_t *fsps_who_perm_avl_pool;
|
||||
uu_avl_pool_t *fsps_deleg_perm_avl_pool;
|
||||
list_t fsps_list; /* list of fs_perms */
|
||||
};
|
||||
|
||||
static inline const char *
|
||||
|
|
@ -5511,9 +5481,8 @@ who_type2weight(zfs_deleg_who_type_t who_type)
|
|||
}
|
||||
|
||||
static int
|
||||
who_perm_compare(const void *larg, const void *rarg, void *unused)
|
||||
who_perm_compare(const void *larg, const void *rarg)
|
||||
{
|
||||
(void) unused;
|
||||
const who_perm_node_t *l = larg;
|
||||
const who_perm_node_t *r = rarg;
|
||||
zfs_deleg_who_type_t ltype = l->who_perm.who_type;
|
||||
|
|
@ -5524,63 +5493,24 @@ who_perm_compare(const void *larg, const void *rarg, void *unused)
|
|||
if (res == 0)
|
||||
res = strncmp(l->who_perm.who_name, r->who_perm.who_name,
|
||||
ZFS_MAX_DELEG_NAME-1);
|
||||
|
||||
if (res == 0)
|
||||
return (0);
|
||||
if (res > 0)
|
||||
return (1);
|
||||
else
|
||||
return (-1);
|
||||
return (TREE_ISIGN(res));
|
||||
}
|
||||
|
||||
static int
|
||||
deleg_perm_compare(const void *larg, const void *rarg, void *unused)
|
||||
deleg_perm_compare(const void *larg, const void *rarg)
|
||||
{
|
||||
(void) unused;
|
||||
const deleg_perm_node_t *l = larg;
|
||||
const deleg_perm_node_t *r = rarg;
|
||||
int res = strncmp(l->dpn_perm.dp_name, r->dpn_perm.dp_name,
|
||||
ZFS_MAX_DELEG_NAME-1);
|
||||
|
||||
if (res == 0)
|
||||
return (0);
|
||||
|
||||
if (res > 0)
|
||||
return (1);
|
||||
else
|
||||
return (-1);
|
||||
return (TREE_ISIGN(strncmp(l->dpn_perm.dp_name, r->dpn_perm.dp_name,
|
||||
ZFS_MAX_DELEG_NAME-1)));
|
||||
}
|
||||
|
||||
static inline void
|
||||
fs_perm_set_init(fs_perm_set_t *fspset)
|
||||
{
|
||||
memset(fspset, 0, sizeof (fs_perm_set_t));
|
||||
|
||||
if ((fspset->fsps_list_pool = uu_list_pool_create("fsps_list_pool",
|
||||
sizeof (fs_perm_node_t), offsetof(fs_perm_node_t, fspn_list_node),
|
||||
NULL, UU_DEFAULT)) == NULL)
|
||||
nomem();
|
||||
if ((fspset->fsps_list = uu_list_create(fspset->fsps_list_pool, NULL,
|
||||
UU_DEFAULT)) == NULL)
|
||||
nomem();
|
||||
|
||||
if ((fspset->fsps_named_set_avl_pool = uu_avl_pool_create(
|
||||
"named_set_avl_pool", sizeof (who_perm_node_t), offsetof(
|
||||
who_perm_node_t, who_avl_node), who_perm_compare,
|
||||
UU_DEFAULT)) == NULL)
|
||||
nomem();
|
||||
|
||||
if ((fspset->fsps_who_perm_avl_pool = uu_avl_pool_create(
|
||||
"who_perm_avl_pool", sizeof (who_perm_node_t), offsetof(
|
||||
who_perm_node_t, who_avl_node), who_perm_compare,
|
||||
UU_DEFAULT)) == NULL)
|
||||
nomem();
|
||||
|
||||
if ((fspset->fsps_deleg_perm_avl_pool = uu_avl_pool_create(
|
||||
"deleg_perm_avl_pool", sizeof (deleg_perm_node_t), offsetof(
|
||||
deleg_perm_node_t, dpn_avl_node), deleg_perm_compare, UU_DEFAULT))
|
||||
== NULL)
|
||||
nomem();
|
||||
list_create(&fspset->fsps_list, sizeof (fs_perm_node_t),
|
||||
offsetof(fs_perm_node_t, fspn_list_node));
|
||||
}
|
||||
|
||||
static inline void fs_perm_fini(fs_perm_t *);
|
||||
|
|
@ -5589,21 +5519,13 @@ static inline void who_perm_fini(who_perm_t *);
|
|||
static inline void
|
||||
fs_perm_set_fini(fs_perm_set_t *fspset)
|
||||
{
|
||||
fs_perm_node_t *node = uu_list_first(fspset->fsps_list);
|
||||
|
||||
while (node != NULL) {
|
||||
fs_perm_node_t *next_node =
|
||||
uu_list_next(fspset->fsps_list, node);
|
||||
fs_perm_node_t *node;
|
||||
while ((node = list_remove_head(&fspset->fsps_list)) != NULL) {
|
||||
fs_perm_t *fsperm = &node->fspn_fsperm;
|
||||
fs_perm_fini(fsperm);
|
||||
uu_list_remove(fspset->fsps_list, node);
|
||||
free(node);
|
||||
node = next_node;
|
||||
}
|
||||
|
||||
uu_avl_pool_destroy(fspset->fsps_named_set_avl_pool);
|
||||
uu_avl_pool_destroy(fspset->fsps_who_perm_avl_pool);
|
||||
uu_avl_pool_destroy(fspset->fsps_deleg_perm_avl_pool);
|
||||
list_destroy(&fspset->fsps_list);
|
||||
}
|
||||
|
||||
static inline void
|
||||
|
|
@ -5618,14 +5540,11 @@ static inline void
|
|||
who_perm_init(who_perm_t *who_perm, fs_perm_t *fsperm,
|
||||
zfs_deleg_who_type_t type, const char *name)
|
||||
{
|
||||
uu_avl_pool_t *pool;
|
||||
pool = fsperm->fsp_set->fsps_deleg_perm_avl_pool;
|
||||
|
||||
memset(who_perm, 0, sizeof (who_perm_t));
|
||||
|
||||
if ((who_perm->who_deleg_perm_avl = uu_avl_create(pool, NULL,
|
||||
UU_DEFAULT)) == NULL)
|
||||
nomem();
|
||||
avl_create(&who_perm->who_deleg_perm_avl, deleg_perm_compare,
|
||||
sizeof (deleg_perm_node_t),
|
||||
offsetof(deleg_perm_node_t, dpn_avl_node));
|
||||
|
||||
who_perm->who_type = type;
|
||||
who_perm->who_name = name;
|
||||
|
|
@ -5635,35 +5554,26 @@ who_perm_init(who_perm_t *who_perm, fs_perm_t *fsperm,
|
|||
static inline void
|
||||
who_perm_fini(who_perm_t *who_perm)
|
||||
{
|
||||
deleg_perm_node_t *node = uu_avl_first(who_perm->who_deleg_perm_avl);
|
||||
deleg_perm_node_t *node;
|
||||
void *cookie = NULL;
|
||||
|
||||
while (node != NULL) {
|
||||
deleg_perm_node_t *next_node =
|
||||
uu_avl_next(who_perm->who_deleg_perm_avl, node);
|
||||
|
||||
uu_avl_remove(who_perm->who_deleg_perm_avl, node);
|
||||
while ((node = avl_destroy_nodes(&who_perm->who_deleg_perm_avl,
|
||||
&cookie)) != NULL) {
|
||||
free(node);
|
||||
node = next_node;
|
||||
}
|
||||
|
||||
uu_avl_destroy(who_perm->who_deleg_perm_avl);
|
||||
avl_destroy(&who_perm->who_deleg_perm_avl);
|
||||
}
|
||||
|
||||
static inline void
|
||||
fs_perm_init(fs_perm_t *fsperm, fs_perm_set_t *fspset, const char *fsname)
|
||||
{
|
||||
uu_avl_pool_t *nset_pool = fspset->fsps_named_set_avl_pool;
|
||||
uu_avl_pool_t *who_pool = fspset->fsps_who_perm_avl_pool;
|
||||
|
||||
memset(fsperm, 0, sizeof (fs_perm_t));
|
||||
|
||||
if ((fsperm->fsp_sc_avl = uu_avl_create(nset_pool, NULL, UU_DEFAULT))
|
||||
== NULL)
|
||||
nomem();
|
||||
|
||||
if ((fsperm->fsp_uge_avl = uu_avl_create(who_pool, NULL, UU_DEFAULT))
|
||||
== NULL)
|
||||
nomem();
|
||||
avl_create(&fsperm->fsp_sc_avl, who_perm_compare,
|
||||
sizeof (who_perm_node_t), offsetof(who_perm_node_t, who_avl_node));
|
||||
avl_create(&fsperm->fsp_uge_avl, who_perm_compare,
|
||||
sizeof (who_perm_node_t), offsetof(who_perm_node_t, who_avl_node));
|
||||
|
||||
fsperm->fsp_set = fspset;
|
||||
fsperm->fsp_name = fsname;
|
||||
|
|
@ -5672,46 +5582,41 @@ fs_perm_init(fs_perm_t *fsperm, fs_perm_set_t *fspset, const char *fsname)
|
|||
static inline void
|
||||
fs_perm_fini(fs_perm_t *fsperm)
|
||||
{
|
||||
who_perm_node_t *node = uu_avl_first(fsperm->fsp_sc_avl);
|
||||
while (node != NULL) {
|
||||
who_perm_node_t *next_node = uu_avl_next(fsperm->fsp_sc_avl,
|
||||
node);
|
||||
who_perm_node_t *node;
|
||||
void *cookie = NULL;
|
||||
|
||||
while ((node = avl_destroy_nodes(&fsperm->fsp_sc_avl,
|
||||
&cookie)) != NULL) {
|
||||
who_perm_t *who_perm = &node->who_perm;
|
||||
who_perm_fini(who_perm);
|
||||
uu_avl_remove(fsperm->fsp_sc_avl, node);
|
||||
free(node);
|
||||
node = next_node;
|
||||
}
|
||||
|
||||
node = uu_avl_first(fsperm->fsp_uge_avl);
|
||||
while (node != NULL) {
|
||||
who_perm_node_t *next_node = uu_avl_next(fsperm->fsp_uge_avl,
|
||||
node);
|
||||
cookie = NULL;
|
||||
while ((node = avl_destroy_nodes(&fsperm->fsp_uge_avl,
|
||||
&cookie)) != NULL) {
|
||||
who_perm_t *who_perm = &node->who_perm;
|
||||
who_perm_fini(who_perm);
|
||||
uu_avl_remove(fsperm->fsp_uge_avl, node);
|
||||
free(node);
|
||||
node = next_node;
|
||||
}
|
||||
|
||||
uu_avl_destroy(fsperm->fsp_sc_avl);
|
||||
uu_avl_destroy(fsperm->fsp_uge_avl);
|
||||
avl_destroy(&fsperm->fsp_sc_avl);
|
||||
avl_destroy(&fsperm->fsp_uge_avl);
|
||||
}
|
||||
|
||||
static void
|
||||
set_deleg_perm_node(uu_avl_t *avl, deleg_perm_node_t *node,
|
||||
set_deleg_perm_node(avl_tree_t *avl, deleg_perm_node_t *node,
|
||||
zfs_deleg_who_type_t who_type, const char *name, char locality)
|
||||
{
|
||||
uu_avl_index_t idx = 0;
|
||||
avl_index_t idx = 0;
|
||||
|
||||
deleg_perm_node_t *found_node = NULL;
|
||||
deleg_perm_t *deleg_perm = &node->dpn_perm;
|
||||
|
||||
deleg_perm_init(deleg_perm, who_type, name);
|
||||
|
||||
if ((found_node = uu_avl_find(avl, node, NULL, &idx))
|
||||
== NULL)
|
||||
uu_avl_insert(avl, node, idx);
|
||||
if ((found_node = avl_find(avl, node, &idx)) == NULL)
|
||||
avl_insert(avl, node, idx);
|
||||
else {
|
||||
node = found_node;
|
||||
deleg_perm = &node->dpn_perm;
|
||||
|
|
@ -5736,20 +5641,17 @@ static inline int
|
|||
parse_who_perm(who_perm_t *who_perm, nvlist_t *nvl, char locality)
|
||||
{
|
||||
nvpair_t *nvp = NULL;
|
||||
fs_perm_set_t *fspset = who_perm->who_fsperm->fsp_set;
|
||||
uu_avl_t *avl = who_perm->who_deleg_perm_avl;
|
||||
avl_tree_t *avl = &who_perm->who_deleg_perm_avl;
|
||||
zfs_deleg_who_type_t who_type = who_perm->who_type;
|
||||
|
||||
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
|
||||
const char *name = nvpair_name(nvp);
|
||||
data_type_t type = nvpair_type(nvp);
|
||||
uu_avl_pool_t *avl_pool = fspset->fsps_deleg_perm_avl_pool;
|
||||
deleg_perm_node_t *node =
|
||||
safe_malloc(sizeof (deleg_perm_node_t));
|
||||
|
||||
VERIFY(type == DATA_TYPE_BOOLEAN);
|
||||
|
||||
uu_avl_node_init(node, &node->dpn_avl_node, avl_pool);
|
||||
set_deleg_perm_node(avl, node, who_type, name, locality);
|
||||
}
|
||||
|
||||
|
|
@ -5760,13 +5662,11 @@ static inline int
|
|||
parse_fs_perm(fs_perm_t *fsperm, nvlist_t *nvl)
|
||||
{
|
||||
nvpair_t *nvp = NULL;
|
||||
fs_perm_set_t *fspset = fsperm->fsp_set;
|
||||
|
||||
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
|
||||
nvlist_t *nvl2 = NULL;
|
||||
const char *name = nvpair_name(nvp);
|
||||
uu_avl_t *avl = NULL;
|
||||
uu_avl_pool_t *avl_pool = NULL;
|
||||
avl_tree_t *avl = NULL;
|
||||
zfs_deleg_who_type_t perm_type = name[0];
|
||||
char perm_locality = name[1];
|
||||
const char *perm_name = name + 3;
|
||||
|
|
@ -5782,8 +5682,7 @@ parse_fs_perm(fs_perm_t *fsperm, nvlist_t *nvl)
|
|||
case ZFS_DELEG_CREATE_SETS:
|
||||
case ZFS_DELEG_NAMED_SET:
|
||||
case ZFS_DELEG_NAMED_SET_SETS:
|
||||
avl_pool = fspset->fsps_named_set_avl_pool;
|
||||
avl = fsperm->fsp_sc_avl;
|
||||
avl = &fsperm->fsp_sc_avl;
|
||||
break;
|
||||
case ZFS_DELEG_USER:
|
||||
case ZFS_DELEG_USER_SETS:
|
||||
|
|
@ -5791,8 +5690,7 @@ parse_fs_perm(fs_perm_t *fsperm, nvlist_t *nvl)
|
|||
case ZFS_DELEG_GROUP_SETS:
|
||||
case ZFS_DELEG_EVERYONE:
|
||||
case ZFS_DELEG_EVERYONE_SETS:
|
||||
avl_pool = fspset->fsps_who_perm_avl_pool;
|
||||
avl = fsperm->fsp_uge_avl;
|
||||
avl = &fsperm->fsp_uge_avl;
|
||||
break;
|
||||
|
||||
default:
|
||||
|
|
@ -5803,14 +5701,12 @@ parse_fs_perm(fs_perm_t *fsperm, nvlist_t *nvl)
|
|||
who_perm_node_t *node = safe_malloc(
|
||||
sizeof (who_perm_node_t));
|
||||
who_perm = &node->who_perm;
|
||||
uu_avl_index_t idx = 0;
|
||||
avl_index_t idx = 0;
|
||||
|
||||
uu_avl_node_init(node, &node->who_avl_node, avl_pool);
|
||||
who_perm_init(who_perm, fsperm, perm_type, perm_name);
|
||||
|
||||
if ((found_node = uu_avl_find(avl, node, NULL, &idx))
|
||||
== NULL) {
|
||||
if (avl == fsperm->fsp_uge_avl) {
|
||||
if ((found_node = avl_find(avl, node, &idx)) == NULL) {
|
||||
if (avl == &fsperm->fsp_uge_avl) {
|
||||
uid_t rid = 0;
|
||||
struct passwd *p = NULL;
|
||||
struct group *g = NULL;
|
||||
|
|
@ -5849,7 +5745,7 @@ parse_fs_perm(fs_perm_t *fsperm, nvlist_t *nvl)
|
|||
}
|
||||
}
|
||||
|
||||
uu_avl_insert(avl, node, idx);
|
||||
avl_insert(avl, node, idx);
|
||||
} else {
|
||||
node = found_node;
|
||||
who_perm = &node->who_perm;
|
||||
|
|
@ -5866,7 +5762,6 @@ static inline int
|
|||
parse_fs_perm_set(fs_perm_set_t *fspset, nvlist_t *nvl)
|
||||
{
|
||||
nvpair_t *nvp = NULL;
|
||||
uu_avl_index_t idx = 0;
|
||||
|
||||
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
|
||||
nvlist_t *nvl2 = NULL;
|
||||
|
|
@ -5879,10 +5774,6 @@ parse_fs_perm_set(fs_perm_set_t *fspset, nvlist_t *nvl)
|
|||
|
||||
VERIFY(DATA_TYPE_NVLIST == type);
|
||||
|
||||
uu_list_node_init(node, &node->fspn_list_node,
|
||||
fspset->fsps_list_pool);
|
||||
|
||||
idx = uu_list_numnodes(fspset->fsps_list);
|
||||
fs_perm_init(fsperm, fspset, fsname);
|
||||
|
||||
if (nvpair_value_nvlist(nvp, &nvl2) != 0)
|
||||
|
|
@ -5890,7 +5781,7 @@ parse_fs_perm_set(fs_perm_set_t *fspset, nvlist_t *nvl)
|
|||
|
||||
(void) parse_fs_perm(fsperm, nvl2);
|
||||
|
||||
uu_list_insert(fspset->fsps_list, node, idx);
|
||||
list_insert_tail(&fspset->fsps_list, node);
|
||||
}
|
||||
|
||||
return (0);
|
||||
|
|
@ -6442,7 +6333,7 @@ construct_fsacl_list(boolean_t un, struct allow_opts *opts, nvlist_t **nvlp)
|
|||
}
|
||||
|
||||
static void
|
||||
print_set_creat_perms(uu_avl_t *who_avl)
|
||||
print_set_creat_perms(avl_tree_t *who_avl)
|
||||
{
|
||||
const char *sc_title[] = {
|
||||
gettext("Permission sets:\n"),
|
||||
|
|
@ -6452,9 +6343,9 @@ print_set_creat_perms(uu_avl_t *who_avl)
|
|||
who_perm_node_t *who_node = NULL;
|
||||
int prev_weight = -1;
|
||||
|
||||
for (who_node = uu_avl_first(who_avl); who_node != NULL;
|
||||
who_node = uu_avl_next(who_avl, who_node)) {
|
||||
uu_avl_t *avl = who_node->who_perm.who_deleg_perm_avl;
|
||||
for (who_node = avl_first(who_avl); who_node != NULL;
|
||||
who_node = AVL_NEXT(who_avl, who_node)) {
|
||||
avl_tree_t *avl = &who_node->who_perm.who_deleg_perm_avl;
|
||||
zfs_deleg_who_type_t who_type = who_node->who_perm.who_type;
|
||||
const char *who_name = who_node->who_perm.who_name;
|
||||
int weight = who_type2weight(who_type);
|
||||
|
|
@ -6471,8 +6362,8 @@ print_set_creat_perms(uu_avl_t *who_avl)
|
|||
else
|
||||
(void) printf("\t%s ", who_name);
|
||||
|
||||
for (deleg_node = uu_avl_first(avl); deleg_node != NULL;
|
||||
deleg_node = uu_avl_next(avl, deleg_node)) {
|
||||
for (deleg_node = avl_first(avl); deleg_node != NULL;
|
||||
deleg_node = AVL_NEXT(avl, deleg_node)) {
|
||||
if (first) {
|
||||
(void) printf("%s",
|
||||
deleg_node->dpn_perm.dp_name);
|
||||
|
|
@ -6487,28 +6378,24 @@ print_set_creat_perms(uu_avl_t *who_avl)
|
|||
}
|
||||
|
||||
static void
|
||||
print_uge_deleg_perms(uu_avl_t *who_avl, boolean_t local, boolean_t descend,
|
||||
print_uge_deleg_perms(avl_tree_t *who_avl, boolean_t local, boolean_t descend,
|
||||
const char *title)
|
||||
{
|
||||
who_perm_node_t *who_node = NULL;
|
||||
boolean_t prt_title = B_TRUE;
|
||||
uu_avl_walk_t *walk;
|
||||
|
||||
if ((walk = uu_avl_walk_start(who_avl, UU_WALK_ROBUST)) == NULL)
|
||||
nomem();
|
||||
|
||||
while ((who_node = uu_avl_walk_next(walk)) != NULL) {
|
||||
for (who_node = avl_first(who_avl); who_node != NULL;
|
||||
who_node = AVL_NEXT(who_avl, who_node)) {
|
||||
const char *who_name = who_node->who_perm.who_name;
|
||||
const char *nice_who_name = who_node->who_perm.who_ug_name;
|
||||
uu_avl_t *avl = who_node->who_perm.who_deleg_perm_avl;
|
||||
avl_tree_t *avl = &who_node->who_perm.who_deleg_perm_avl;
|
||||
zfs_deleg_who_type_t who_type = who_node->who_perm.who_type;
|
||||
char delim = ' ';
|
||||
deleg_perm_node_t *deleg_node;
|
||||
boolean_t prt_who = B_TRUE;
|
||||
|
||||
for (deleg_node = uu_avl_first(avl);
|
||||
deleg_node != NULL;
|
||||
deleg_node = uu_avl_next(avl, deleg_node)) {
|
||||
for (deleg_node = avl_first(avl); deleg_node != NULL;
|
||||
deleg_node = AVL_NEXT(avl, deleg_node)) {
|
||||
if (local != deleg_node->dpn_perm.dp_local ||
|
||||
descend != deleg_node->dpn_perm.dp_descend)
|
||||
continue;
|
||||
|
|
@ -6558,8 +6445,6 @@ print_uge_deleg_perms(uu_avl_t *who_avl, boolean_t local, boolean_t descend,
|
|||
if (!prt_who)
|
||||
(void) printf("\n");
|
||||
}
|
||||
|
||||
uu_avl_walk_end(walk);
|
||||
}
|
||||
|
||||
static void
|
||||
|
|
@ -6569,10 +6454,10 @@ print_fs_perms(fs_perm_set_t *fspset)
|
|||
char buf[MAXNAMELEN + 32];
|
||||
const char *dsname = buf;
|
||||
|
||||
for (node = uu_list_first(fspset->fsps_list); node != NULL;
|
||||
node = uu_list_next(fspset->fsps_list, node)) {
|
||||
uu_avl_t *sc_avl = node->fspn_fsperm.fsp_sc_avl;
|
||||
uu_avl_t *uge_avl = node->fspn_fsperm.fsp_uge_avl;
|
||||
for (node = list_head(&fspset->fsps_list); node != NULL;
|
||||
node = list_next(&fspset->fsps_list, node)) {
|
||||
avl_tree_t *sc_avl = &node->fspn_fsperm.fsp_sc_avl;
|
||||
avl_tree_t *uge_avl = &node->fspn_fsperm.fsp_uge_avl;
|
||||
int left = 0;
|
||||
|
||||
(void) snprintf(buf, sizeof (buf),
|
||||
|
|
@ -6594,7 +6479,7 @@ print_fs_perms(fs_perm_set_t *fspset)
|
|||
}
|
||||
}
|
||||
|
||||
static fs_perm_set_t fs_perm_set = { NULL, NULL, NULL, NULL };
|
||||
static fs_perm_set_t fs_perm_set = {};
|
||||
|
||||
struct deleg_perms {
|
||||
boolean_t un;
|
||||
|
|
@ -7726,17 +7611,16 @@ zfs_do_share(int argc, char **argv)
|
|||
typedef struct unshare_unmount_node {
|
||||
zfs_handle_t *un_zhp;
|
||||
char *un_mountp;
|
||||
uu_avl_node_t un_avlnode;
|
||||
avl_node_t un_avlnode;
|
||||
} unshare_unmount_node_t;
|
||||
|
||||
static int
|
||||
unshare_unmount_compare(const void *larg, const void *rarg, void *unused)
|
||||
unshare_unmount_compare(const void *larg, const void *rarg)
|
||||
{
|
||||
(void) unused;
|
||||
const unshare_unmount_node_t *l = larg;
|
||||
const unshare_unmount_node_t *r = rarg;
|
||||
|
||||
return (strcmp(l->un_mountp, r->un_mountp));
|
||||
return (TREE_ISIGN(strcmp(l->un_mountp, r->un_mountp)));
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -7918,11 +7802,9 @@ unshare_unmount(int op, int argc, char **argv)
|
|||
*/
|
||||
FILE *mnttab;
|
||||
struct mnttab entry;
|
||||
uu_avl_pool_t *pool;
|
||||
uu_avl_t *tree = NULL;
|
||||
avl_tree_t tree;
|
||||
unshare_unmount_node_t *node;
|
||||
uu_avl_index_t idx;
|
||||
uu_avl_walk_t *walk;
|
||||
avl_index_t idx;
|
||||
enum sa_protocol *protocol = NULL,
|
||||
single_protocol[] = {SA_NO_PROTOCOL, SA_NO_PROTOCOL};
|
||||
|
||||
|
|
@ -7938,16 +7820,12 @@ unshare_unmount(int op, int argc, char **argv)
|
|||
usage(B_FALSE);
|
||||
}
|
||||
|
||||
if (((pool = uu_avl_pool_create("unmount_pool",
|
||||
avl_create(&tree, unshare_unmount_compare,
|
||||
sizeof (unshare_unmount_node_t),
|
||||
offsetof(unshare_unmount_node_t, un_avlnode),
|
||||
unshare_unmount_compare, UU_DEFAULT)) == NULL) ||
|
||||
((tree = uu_avl_create(pool, NULL, UU_DEFAULT)) == NULL))
|
||||
nomem();
|
||||
offsetof(unshare_unmount_node_t, un_avlnode));
|
||||
|
||||
if ((mnttab = fopen(MNTTAB, "re")) == NULL) {
|
||||
uu_avl_destroy(tree);
|
||||
uu_avl_pool_destroy(pool);
|
||||
avl_destroy(&tree);
|
||||
return (ENOENT);
|
||||
}
|
||||
|
||||
|
|
@ -8012,10 +7890,8 @@ unshare_unmount(int op, int argc, char **argv)
|
|||
node->un_zhp = zhp;
|
||||
node->un_mountp = safe_strdup(entry.mnt_mountp);
|
||||
|
||||
uu_avl_node_init(node, &node->un_avlnode, pool);
|
||||
|
||||
if (uu_avl_find(tree, node, NULL, &idx) == NULL) {
|
||||
uu_avl_insert(tree, node, idx);
|
||||
if (avl_find(&tree, node, &idx) == NULL) {
|
||||
avl_insert(&tree, node, idx);
|
||||
} else {
|
||||
zfs_close(node->un_zhp);
|
||||
free(node->un_mountp);
|
||||
|
|
@ -8028,14 +7904,10 @@ unshare_unmount(int op, int argc, char **argv)
|
|||
* Walk the AVL tree in reverse, unmounting each filesystem and
|
||||
* removing it from the AVL tree in the process.
|
||||
*/
|
||||
if ((walk = uu_avl_walk_start(tree,
|
||||
UU_WALK_REVERSE | UU_WALK_ROBUST)) == NULL)
|
||||
nomem();
|
||||
|
||||
while ((node = uu_avl_walk_next(walk)) != NULL) {
|
||||
while ((node = avl_last(&tree)) != NULL) {
|
||||
const char *mntarg = NULL;
|
||||
|
||||
uu_avl_remove(tree, node);
|
||||
avl_remove(&tree, node);
|
||||
switch (op) {
|
||||
case OP_SHARE:
|
||||
if (zfs_unshare(node->un_zhp,
|
||||
|
|
@ -8058,9 +7930,7 @@ unshare_unmount(int op, int argc, char **argv)
|
|||
if (op == OP_SHARE)
|
||||
zfs_commit_shares(protocol);
|
||||
|
||||
uu_avl_walk_end(walk);
|
||||
uu_avl_destroy(tree);
|
||||
uu_avl_pool_destroy(pool);
|
||||
avl_destroy(&tree);
|
||||
|
||||
} else {
|
||||
if (argc != 1) {
|
||||
|
|
|
|||
|
|
@ -28,7 +28,6 @@ zpool_LDADD = \
|
|||
libzfs.la \
|
||||
libzfs_core.la \
|
||||
libnvpair.la \
|
||||
libuutil.la \
|
||||
libzutil.la
|
||||
|
||||
zpool_LDADD += $(LTLIBINTL)
|
||||
|
|
|
|||
|
|
@ -30,12 +30,10 @@
|
|||
*/
|
||||
|
||||
#include <libintl.h>
|
||||
#include <libuutil.h>
|
||||
#include <stddef.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <thread_pool.h>
|
||||
|
||||
#include <libzfs.h>
|
||||
#include <libzutil.h>
|
||||
|
|
@ -52,30 +50,28 @@
|
|||
|
||||
typedef struct zpool_node {
|
||||
zpool_handle_t *zn_handle;
|
||||
uu_avl_node_t zn_avlnode;
|
||||
avl_node_t zn_avlnode;
|
||||
hrtime_t zn_last_refresh;
|
||||
} zpool_node_t;
|
||||
|
||||
struct zpool_list {
|
||||
boolean_t zl_findall;
|
||||
boolean_t zl_literal;
|
||||
uu_avl_t *zl_avl;
|
||||
uu_avl_pool_t *zl_pool;
|
||||
avl_tree_t zl_avl;
|
||||
zprop_list_t **zl_proplist;
|
||||
zfs_type_t zl_type;
hrtime_t zl_last_refresh;
};

static int
zpool_compare(const void *larg, const void *rarg, void *unused)
zpool_compare(const void *larg, const void *rarg)
{
(void) unused;
zpool_handle_t *l = ((zpool_node_t *)larg)->zn_handle;
zpool_handle_t *r = ((zpool_node_t *)rarg)->zn_handle;
const char *lname = zpool_get_name(l);
const char *rname = zpool_get_name(r);

return (strcmp(lname, rname));
return (TREE_ISIGN(strcmp(lname, rname)));
}

/*

@@ -86,12 +82,11 @@ static int
add_pool(zpool_handle_t *zhp, zpool_list_t *zlp)
{
zpool_node_t *node, *new = safe_malloc(sizeof (zpool_node_t));
uu_avl_index_t idx;
avl_index_t idx;

new->zn_handle = zhp;
uu_avl_node_init(new, &new->zn_avlnode, zlp->zl_pool);

node = uu_avl_find(zlp->zl_avl, new, NULL, &idx);
node = avl_find(&zlp->zl_avl, new, &idx);
if (node == NULL) {
if (zlp->zl_proplist &&
zpool_expand_proplist(zhp, zlp->zl_proplist,

@@ -101,7 +96,7 @@ add_pool(zpool_handle_t *zhp, zpool_list_t *zlp)
return (-1);
}
new->zn_last_refresh = zlp->zl_last_refresh;
uu_avl_insert(zlp->zl_avl, new, idx);
avl_insert(&zlp->zl_avl, new, idx);
} else {
zpool_refresh_stats_from_handle(node->zn_handle, zhp);
node->zn_last_refresh = zlp->zl_last_refresh;

@@ -139,15 +134,8 @@ pool_list_get(int argc, char **argv, zprop_list_t **proplist, zfs_type_t type,

zlp = safe_malloc(sizeof (zpool_list_t));

zlp->zl_pool = uu_avl_pool_create("zfs_pool", sizeof (zpool_node_t),
offsetof(zpool_node_t, zn_avlnode), zpool_compare, UU_DEFAULT);

if (zlp->zl_pool == NULL)
zpool_no_memory();

if ((zlp->zl_avl = uu_avl_create(zlp->zl_pool, NULL,
UU_DEFAULT)) == NULL)
zpool_no_memory();
avl_create(&zlp->zl_avl, zpool_compare,
sizeof (zpool_node_t), offsetof(zpool_node_t, zn_avlnode));

zlp->zl_proplist = proplist;
zlp->zl_type = type;

@@ -194,8 +182,8 @@ pool_list_refresh(zpool_list_t *zlp)
* state.
*/
int navail = 0;
for (zpool_node_t *node = uu_avl_first(zlp->zl_avl);
node != NULL; node = uu_avl_next(zlp->zl_avl, node)) {
for (zpool_node_t *node = avl_first(&zlp->zl_avl);
node != NULL; node = AVL_NEXT(&zlp->zl_avl, node)) {
boolean_t missing;
zpool_refresh_stats(node->zn_handle, &missing);
navail += !missing;

@@ -209,8 +197,8 @@ pool_list_refresh(zpool_list_t *zlp)

/* Walk the list of existing pools, and update or remove them. */
zpool_node_t *node, *next;
for (node = uu_avl_first(zlp->zl_avl); node != NULL; node = next) {
next = uu_avl_next(zlp->zl_avl, node);
for (node = avl_first(&zlp->zl_avl); node != NULL; node = next) {
next = AVL_NEXT(&zlp->zl_avl, node);

/*
* Skip any that were refreshed and are online; they were added

@@ -224,7 +212,7 @@ pool_list_refresh(zpool_list_t *zlp)
boolean_t missing;
zpool_refresh_stats(node->zn_handle, &missing);
if (missing) {
uu_avl_remove(zlp->zl_avl, node);
avl_remove(&zlp->zl_avl, node);
zpool_close(node->zn_handle);
free(node);
} else {

@@ -232,7 +220,7 @@ pool_list_refresh(zpool_list_t *zlp)
}
}

return (uu_avl_numnodes(zlp->zl_avl));
return (avl_numnodes(&zlp->zl_avl));
}

/*

@@ -245,8 +233,8 @@ pool_list_iter(zpool_list_t *zlp, int unavail, zpool_iter_f func,
zpool_node_t *node, *next_node;
int ret = 0;

for (node = uu_avl_first(zlp->zl_avl); node != NULL; node = next_node) {
next_node = uu_avl_next(zlp->zl_avl, node);
for (node = avl_first(&zlp->zl_avl); node != NULL; node = next_node) {
next_node = AVL_NEXT(&zlp->zl_avl, node);
if (zpool_get_state(node->zn_handle) != POOL_STATE_UNAVAIL ||
unavail)
ret |= func(node->zn_handle, data);

@@ -261,25 +249,15 @@ pool_list_iter(zpool_list_t *zlp, int unavail, zpool_iter_f func,
void
pool_list_free(zpool_list_t *zlp)
{
uu_avl_walk_t *walk;
zpool_node_t *node;
void *cookie = NULL;

if ((walk = uu_avl_walk_start(zlp->zl_avl, UU_WALK_ROBUST)) == NULL) {
(void) fprintf(stderr,
gettext("internal error: out of memory"));
exit(1);
}

while ((node = uu_avl_walk_next(walk)) != NULL) {
uu_avl_remove(zlp->zl_avl, node);
while ((node = avl_destroy_nodes(&zlp->zl_avl, &cookie)) != NULL) {
zpool_close(node->zn_handle);
free(node);
}

uu_avl_walk_end(walk);
uu_avl_destroy(zlp->zl_avl);
uu_avl_pool_destroy(zlp->zl_pool);

avl_destroy(&zlp->zl_avl);
free(zlp);
}

@@ -289,7 +267,7 @@ pool_list_free(zpool_list_t *zlp)
int
pool_list_count(zpool_list_t *zlp)
{
return (uu_avl_numnodes(zlp->zl_avl));
return (avl_numnodes(&zlp->zl_avl));
}

/*

@@ -674,21 +652,21 @@ all_pools_for_each_vdev_gather_cb(zpool_handle_t *zhp, void *cb_vcdl)
static void
all_pools_for_each_vdev_run_vcdl(vdev_cmd_data_list_t *vcdl)
{
tpool_t *t;

t = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN), 0, NULL);
if (t == NULL)
taskq_t *tq = taskq_create("vdev_run_cmd",
5 * sysconf(_SC_NPROCESSORS_ONLN), minclsyspri, 1, INT_MAX,
TASKQ_DYNAMIC);
if (tq == NULL)
return;

/* Spawn off the command for each vdev */
for (int i = 0; i < vcdl->count; i++) {
(void) tpool_dispatch(t, vdev_run_cmd_thread,
(void *) &vcdl->data[i]);
(void) taskq_dispatch(tq, vdev_run_cmd_thread,
(void *) &vcdl->data[i], TQ_SLEEP);
}

/* Wait for threads to finish */
tpool_wait(t);
tpool_destroy(t);
taskq_wait(tq);
taskq_destroy(tq);
}

/*

@@ -46,14 +46,12 @@
#include <inttypes.h>
#include <libgen.h>
#include <libintl.h>
#include <libuutil.h>
#include <locale.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <termios.h>
#include <thread_pool.h>
#include <time.h>
#include <unistd.h>
#include <pwd.h>

@@ -2390,7 +2388,7 @@ zpool_do_destroy(int argc, char **argv)
}

typedef struct export_cbdata {
tpool_t *tpool;
taskq_t *taskq;
pthread_mutex_t mnttab_lock;
boolean_t force;
boolean_t hardforce;

@@ -2415,12 +2413,12 @@ zpool_export_one(zpool_handle_t *zhp, void *data)
* zpool_disable_datasets() is not thread-safe for mnttab access.
* So we serialize access here for 'zpool export -a' parallel case.
*/
if (cb->tpool != NULL)
if (cb->taskq != NULL)
(void) pthread_mutex_lock(&cb->mnttab_lock);

int retval = zpool_disable_datasets(zhp, cb->force);

if (cb->tpool != NULL)
if (cb->taskq != NULL)
(void) pthread_mutex_unlock(&cb->mnttab_lock);

if (retval)

@@ -2464,7 +2462,7 @@ zpool_export_task(void *arg)
static int
zpool_export_one_async(zpool_handle_t *zhp, void *data)
{
tpool_t *tpool = ((export_cbdata_t *)data)->tpool;
taskq_t *tq = ((export_cbdata_t *)data)->taskq;
async_export_args_t *aea = safe_malloc(sizeof (async_export_args_t));

/* save pool name since zhp will go out of scope */

@@ -2472,7 +2470,8 @@ zpool_export_one_async(zpool_handle_t *zhp, void *data)
aea->aea_cbdata = data;

/* ship off actual export to another thread */
if (tpool_dispatch(tpool, zpool_export_task, (void *)aea) != 0)
if (taskq_dispatch(tq, zpool_export_task, (void *)aea,
TQ_SLEEP) == TASKQID_INVALID)
return (errno); /* unlikely */
else
return (0);

@@ -2518,7 +2517,7 @@ zpool_do_export(int argc, char **argv)

cb.force = force;
cb.hardforce = hardforce;
cb.tpool = NULL;
cb.taskq = NULL;
cb.retval = 0;
argc -= optind;
argv += optind;

@@ -2532,16 +2531,17 @@ zpool_do_export(int argc, char **argv)
usage(B_FALSE);
}

cb.tpool = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN),
0, NULL);
cb.taskq = taskq_create("zpool_export",
5 * sysconf(_SC_NPROCESSORS_ONLN), minclsyspri, 1, INT_MAX,
TASKQ_DYNAMIC);
(void) pthread_mutex_init(&cb.mnttab_lock, NULL);

/* Asynchronously call zpool_export_one using thread pool */
ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
B_FALSE, zpool_export_one_async, &cb);

tpool_wait(cb.tpool);
tpool_destroy(cb.tpool);
taskq_wait(cb.taskq);
taskq_destroy(cb.taskq);
(void) pthread_mutex_destroy(&cb.mnttab_lock);

return (ret | cb.retval);

@@ -3946,10 +3946,11 @@ import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags,
uint_t npools = 0;

tpool_t *tp = NULL;
taskq_t *tq = NULL;
if (import->do_all) {
tp = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN),
0, NULL);
tq = taskq_create("zpool_import_all",
5 * sysconf(_SC_NPROCESSORS_ONLN), minclsyspri, 1, INT_MAX,
TASKQ_DYNAMIC);
}

/*

@@ -3998,8 +3999,8 @@ import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags,
ip->ip_mntthreads = mount_tp_nthr / npools;
ip->ip_err = &err;

(void) tpool_dispatch(tp, do_import_task,
(void *)ip);
(void) taskq_dispatch(tq, do_import_task,
(void *)ip, TQ_SLEEP);
} else {
/*
* If we're importing from cachefile, then

@@ -4048,8 +4049,8 @@ import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags,
}
}
if (import->do_all) {
tpool_wait(tp);
tpool_destroy(tp);
taskq_wait(tq);
taskq_destroy(tq);
}

/*

@@ -6746,10 +6747,12 @@ typedef struct list_cbdata {

/*
* Given a list of columns to display, output appropriate headers for each one.
* Given a list of columns to display, print an appropriate line. If
* `vdev_name` is not NULL, we print `vdev_name` followed by a line of dashes.
* If `vdev_name` is NULL, we print a line of the headers.
*/
static void
print_header(list_cbdata_t *cb)
print_line(list_cbdata_t *cb, const char *vdev_name)
{
zprop_list_t *pl = cb->cb_proplist;
char headerbuf[ZPOOL_MAXPROPLEN];

@@ -6758,6 +6761,8 @@ print_header(list_cbdata_t *cb)
boolean_t right_justify;
size_t width = 0;

boolean_t print_header = (vdev_name == NULL);

for (; pl != NULL; pl = pl->pl_next) {
width = pl->pl_width;
if (first && cb->cb_verbose) {

@@ -6770,20 +6775,36 @@ print_header(list_cbdata_t *cb)

if (!first)
(void) fputs(" ", stdout);
else
first = B_FALSE;

right_justify = B_FALSE;
if (pl->pl_prop != ZPROP_USERPROP) {
header = zpool_prop_column_name(pl->pl_prop);
right_justify = zpool_prop_align_right(pl->pl_prop);
} else {
int i;
if (print_header) {
right_justify = B_FALSE;
if (pl->pl_prop != ZPROP_USERPROP) {
header = zpool_prop_column_name(pl->pl_prop);
right_justify = zpool_prop_align_right(
pl->pl_prop);
} else {
int i;

for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
headerbuf[i] = toupper(pl->pl_user_prop[i]);
headerbuf[i] = '\0';
header = headerbuf;
for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
headerbuf[i] = toupper(
pl->pl_user_prop[i]);
headerbuf[i] = '\0';
header = headerbuf;
}

}
/*
* If `print_header` is false, we want to print a line of
* dashes.
*/
else {
if (first) {
header = vdev_name;
right_justify = B_FALSE;
} else {
header = "-";
right_justify = B_TRUE;
}
}

if (pl->pl_next == NULL && !right_justify)

@@ -6792,6 +6813,9 @@ print_header(list_cbdata_t *cb)
(void) printf("%*s", (int)width, header);
else
(void) printf("%-*s", (int)width, header);

if (first)
first = B_FALSE;
}

(void) fputc('\n', stdout);

@@ -6995,8 +7019,6 @@ collect_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
uint64_t islog = B_FALSE;
nvlist_t *props, *ent, *ch, *obj, *l2c, *sp;
props = ent = ch = obj = sp = l2c = NULL;
const char *dashes = "%-*s - - - - "
"- - - - -\n";

verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);

@@ -7208,9 +7230,7 @@ collect_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
continue;

if (!printed && !cb->cb_json) {
/* LINTED E_SEC_PRINTF_VAR_FMT */
(void) printf(dashes, cb->cb_namewidth,
class_name[n]);
print_line(cb, class_name[n]);
printed = B_TRUE;
}
vname = zpool_vdev_name(g_zfs, zhp, child[c],

@@ -7231,8 +7251,7 @@ collect_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
if (cb->cb_json) {
l2c = fnvlist_alloc();
} else {
/* LINTED E_SEC_PRINTF_VAR_FMT */
(void) printf(dashes, cb->cb_namewidth, "cache");
print_line(cb, "cache");
}
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, zhp, child[c],

@@ -7253,8 +7272,7 @@ collect_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
if (cb->cb_json) {
sp = fnvlist_alloc();
} else {
/* LINTED E_SEC_PRINTF_VAR_FMT */
(void) printf(dashes, cb->cb_namewidth, "spare");
print_line(cb, "spare");
}
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, zhp, child[c],

@@ -7497,7 +7515,7 @@ zpool_do_list(int argc, char **argv)

if (!cb.cb_scripted && (first || cb.cb_verbose) &&
!cb.cb_json) {
print_header(&cb);
print_line(&cb, NULL);
first = B_FALSE;
}
ret = pool_list_iter(list, B_TRUE, list_callback, &cb);

@@ -8143,7 +8143,7 @@ ztest_raidz_expand_run(ztest_shared_t *zs, spa_t *spa)
/* Setup a 1 MiB buffer of random data */
uint64_t bufsize = 1024 * 1024;
void *buffer = umem_alloc(bufsize, UMEM_NOFAIL);
random_get_pseudo_bytes((uint8_t *)&buffer, bufsize);
random_get_pseudo_bytes((uint8_t *)buffer, bufsize);

/*
* Put some data in the pool and then attach a vdev to initiate

@@ -57,22 +57,21 @@ deb-utils: deb-local rpm-utils-initramfs
debarch=`$(DPKG) --print-architecture`; \
pkg1=$${name}-$${version}.$${arch}.rpm; \
pkg2=libnvpair3-$${version}.$${arch}.rpm; \
pkg3=libuutil3-$${version}.$${arch}.rpm; \
pkg4=libzfs7-$${version}.$${arch}.rpm; \
pkg5=libzpool7-$${version}.$${arch}.rpm; \
pkg6=libzfs7-devel-$${version}.$${arch}.rpm; \
pkg7=$${name}-test-$${version}.$${arch}.rpm; \
pkg8=$${name}-dracut-$${version}.noarch.rpm; \
pkg9=$${name}-initramfs-$${version}.$${arch}.rpm; \
pkg10=`ls python3-pyzfs-$${version}.noarch.rpm 2>/dev/null`; \
pkg11=`ls pam_zfs_key-$${version}.$${arch}.rpm 2>/dev/null`; \
pkg3=libzfs7-$${version}.$${arch}.rpm; \
pkg4=libzpool7-$${version}.$${arch}.rpm; \
pkg5=libzfs7-devel-$${version}.$${arch}.rpm; \
pkg6=$${name}-test-$${version}.$${arch}.rpm; \
pkg7=$${name}-dracut-$${version}.noarch.rpm; \
pkg8=$${name}-initramfs-$${version}.$${arch}.rpm; \
pkg9=`ls python3-pyzfs-$${version}.noarch.rpm 2>/dev/null`; \
pkg10=`ls pam_zfs_key-$${version}.$${arch}.rpm 2>/dev/null`; \
## Arguments need to be passed to dh_shlibdeps. Alien provides no mechanism
## to do this, so we install a shim onto the path which calls the real
## dh_shlibdeps with the required arguments.
path_prepend=`mktemp -d /tmp/intercept.XXXXXX`; \
echo "#!$(SHELL)" > $${path_prepend}/dh_shlibdeps; \
echo "`which dh_shlibdeps` -- \
-xlibuutil3linux -xlibnvpair3linux -xlibzfs7linux -xlibzpool7linux" \
-xlibnvpair3linux -xlibzfs7linux -xlibzpool7linux" \
>> $${path_prepend}/dh_shlibdeps; \
## These -x arguments are passed to dpkg-shlibdeps, which exclude the
## Debianized packages from the auto-generated dependencies of the new debs,

@@ -7,7 +7,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_KMAP_ATOMIC_ARGS], [
ZFS_LINUX_TEST_SRC([kmap_atomic], [
#include <linux/pagemap.h>
],[
struct page page;
struct page page = {};
kmap_atomic(&page);
])
])

@@ -10,8 +10,6 @@ dist_noinst_DATA += %D%/openzfs-libnvpair3.install.in
dist_noinst_DATA += %D%/openzfs-libpam-zfs.install
dist_noinst_DATA += %D%/openzfs-libpam-zfs.postinst
dist_noinst_DATA += %D%/openzfs-libpam-zfs.prerm
dist_noinst_DATA += %D%/openzfs-libuutil3.docs
dist_noinst_DATA += %D%/openzfs-libuutil3.install.in
dist_noinst_DATA += %D%/openzfs-libzfs7.docs
dist_noinst_DATA += %D%/openzfs-libzfs7.install.in
dist_noinst_DATA += %D%/openzfs-libzfsbootenv1.docs

@@ -5,7 +5,6 @@ contrib/pyzfs/libzfs_core/__pycache__/
contrib/pyzfs/libzfs_core/bindings/__pycache__/
contrib/pyzfs/pyzfs.egg-info/
debian/openzfs-libnvpair3.install
debian/openzfs-libuutil3.install
debian/openzfs-libzfs7.install
debian/openzfs-libzfs-dev.install
debian/openzfs-libzpool7.install

@@ -55,42 +55,23 @@ Description: PAM module for managing encryption keys for ZFS
This provides a Pluggable Authentication Module (PAM) that automatically
unlocks encrypted ZFS datasets upon login.

Package: openzfs-libuutil3
Section: contrib/libs
Architecture: linux-any
Depends: ${misc:Depends}, ${shlibs:Depends}
Breaks: libuutil1, libuutil3
Replaces: libuutil1, libuutil3, libuutil3linux
Conflicts: libuutil3linux
Description: Solaris userland utility library for Linux
This library provides a variety of glue functions for ZFS on Linux:
* libspl: The Solaris Porting Layer userland library, which provides APIs
that make it possible to run Solaris user code in a Linux environment
with relatively minimal modification.
* libavl: The Adelson-Velskii Landis balanced binary tree manipulation
library.
* libefi: The Extensible Firmware Interface library for GUID disk
partitioning.
* libshare: NFS, SMB, and iSCSI service integration for ZFS.

Package: openzfs-libzfs-dev
Section: contrib/libdevel
Architecture: linux-any
Depends: libssl-dev | libssl1.0-dev,
openzfs-libnvpair3 (= ${binary:Version}),
openzfs-libuutil3 (= ${binary:Version}),
openzfs-libzfs7 (= ${binary:Version}),
openzfs-libzfsbootenv1 (= ${binary:Version}),
openzfs-libzpool7 (= ${binary:Version}),
${misc:Depends}
Replaces: libzfslinux-dev
Conflicts: libzfslinux-dev
Provides: libnvpair-dev, libuutil-dev
Provides: libnvpair-dev
Description: OpenZFS filesystem development files for Linux
Header files and static libraries for compiling software against
libraries of OpenZFS filesystem.
.
This package includes the development files of libnvpair3, libuutil3,
This package includes the development files of libnvpair3,
libzpool7 and libzfs7.

Package: openzfs-libzfs7

@@ -246,7 +227,6 @@ Section: contrib/admin
Architecture: linux-any
Pre-Depends: ${misc:Pre-Depends}
Depends: openzfs-libnvpair3 (= ${binary:Version}),
openzfs-libuutil3 (= ${binary:Version}),
openzfs-libzfs7 (= ${binary:Version}),
openzfs-libzpool7 (= ${binary:Version}),
python3,

@@ -1,2 +0,0 @@
COPYRIGHT
LICENSE

@@ -1 +0,0 @@
usr/lib/@DEB_HOST_MULTIARCH@/libuutil.so.*

@@ -7,7 +7,6 @@ pammodule_LTLIBRARIES = %D%/pam_zfs_key.la

%C%_pam_zfs_key_la_LIBADD = \
libnvpair.la \
libuutil.la \
libzfs.la \
libzfs_core.la

@@ -184,16 +184,12 @@ KERNEL_H = \

USER_H = \
libnvpair.h \
libuutil.h \
libuutil_common.h \
libuutil_impl.h \
libzdb.h \
libzfs.h \
libzfs_core.h \
libzfsbootenv.h \
libzpool.h \
libzutil.h \
thread_pool.h
libzutil.h

if CONFIG_USER

@@ -1,327 +0,0 @@
|
|||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
* The contents of this file are subject to the terms of the
|
||||
* Common Development and Distribution License (the "License").
|
||||
* You may not use this file except in compliance with the License.
|
||||
*
|
||||
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
|
||||
* or https://opensource.org/licenses/CDDL-1.0.
|
||||
* See the License for the specific language governing permissions
|
||||
* and limitations under the License.
|
||||
*
|
||||
* When distributing Covered Code, include this CDDL HEADER in each
|
||||
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
|
||||
* If applicable, add the following below this CDDL HEADER, with the
|
||||
* fields enclosed by brackets "[]" replaced with your own identifying
|
||||
* information: Portions Copyright [yyyy] [name of copyright owner]
|
||||
*
|
||||
* CDDL HEADER END
|
||||
*/
|
||||
/*
|
||||
* Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
*/
|
||||
|
||||
#ifndef _LIBUUTIL_H
|
||||
#define _LIBUUTIL_H
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <stdarg.h>
|
||||
#include <stdio.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Standard flags codes.
|
||||
*/
|
||||
#define UU_DEFAULT 0
|
||||
|
||||
/*
|
||||
* Standard error codes.
|
||||
*/
|
||||
#define UU_ERROR_NONE 0 /* no error */
|
||||
#define UU_ERROR_INVALID_ARGUMENT 1 /* invalid argument */
|
||||
#define UU_ERROR_UNKNOWN_FLAG 2 /* passed flag invalid */
|
||||
#define UU_ERROR_NO_MEMORY 3 /* out of memory */
|
||||
#define UU_ERROR_CALLBACK_FAILED 4 /* callback-initiated error */
|
||||
#define UU_ERROR_NOT_SUPPORTED 5 /* operation not supported */
|
||||
#define UU_ERROR_EMPTY 6 /* no value provided */
|
||||
#define UU_ERROR_UNDERFLOW 7 /* value is too small */
|
||||
#define UU_ERROR_OVERFLOW 8 /* value is too value */
|
||||
#define UU_ERROR_INVALID_CHAR 9 /* value contains unexpected char */
|
||||
#define UU_ERROR_INVALID_DIGIT 10 /* value contains digit not in base */
|
||||
|
||||
#define UU_ERROR_SYSTEM 99 /* underlying system error */
|
||||
#define UU_ERROR_UNKNOWN 100 /* error status not known */
|
||||
|
||||
/*
|
||||
* Exit status profiles.
|
||||
*/
|
||||
#define UU_PROFILE_DEFAULT 0
|
||||
#define UU_PROFILE_LAUNCHER 1
|
||||
|
||||
/*
|
||||
* Error reporting functions.
|
||||
*/
|
||||
uint32_t uu_error(void);
|
||||
const char *uu_strerror(uint32_t);
|
||||
|
||||
/*
|
||||
* Identifier test flags and function.
|
||||
*/
|
||||
#define UU_NAME_DOMAIN 0x1 /* allow SUNW, or com.sun, prefix */
|
||||
#define UU_NAME_PATH 0x2 /* allow '/'-delimited paths */
|
||||
|
||||
int uu_check_name(const char *, uint_t);
|
||||
|
||||
/*
|
||||
* Convenience functions.
|
||||
*/
|
||||
#define UU_NELEM(a) (sizeof (a) / sizeof ((a)[0]))
|
||||
|
||||
extern char *uu_msprintf(const char *format, ...)
|
||||
__attribute__((format(printf, 1, 2)));
|
||||
extern void *uu_zalloc(size_t);
|
||||
extern char *uu_strdup(const char *);
|
||||
extern void uu_free(void *);
|
||||
|
||||
extern boolean_t uu_strcaseeq(const char *a, const char *b);
|
||||
extern boolean_t uu_streq(const char *a, const char *b);
|
||||
extern char *uu_strndup(const char *s, size_t n);
|
||||
extern boolean_t uu_strbw(const char *a, const char *b);
|
||||
extern void *uu_memdup(const void *buf, size_t sz);
|
||||
|
||||
/*
|
||||
* Comparison function type definition.
|
||||
* Developers should be careful in their use of the _private argument. If you
|
||||
* break interface guarantees, you get undefined behavior.
|
||||
*/
|
||||
typedef int uu_compare_fn_t(const void *__left, const void *__right,
|
||||
void *__private);
|
||||
|
||||
/*
|
||||
* Walk variant flags.
|
||||
* A data structure need not provide support for all variants and
|
||||
* combinations. Refer to the appropriate documentation.
|
||||
*/
|
||||
#define UU_WALK_ROBUST 0x00000001 /* walk can survive removes */
|
||||
#define UU_WALK_REVERSE 0x00000002 /* reverse walk order */
|
||||
|
||||
#define UU_WALK_PREORDER 0x00000010 /* walk tree in pre-order */
|
||||
#define UU_WALK_POSTORDER 0x00000020 /* walk tree in post-order */
|
||||
|
||||
/*
|
||||
* Walk callback function return codes.
|
||||
*/
|
||||
#define UU_WALK_ERROR -1
|
||||
#define UU_WALK_NEXT 0
|
||||
#define UU_WALK_DONE 1
|
||||
|
||||
/*
|
||||
* Walk callback function type definition.
|
||||
*/
|
||||
typedef int uu_walk_fn_t(void *_elem, void *_private);
|
||||
|
||||
/*
|
||||
* lists: opaque structures
|
||||
*/
|
||||
typedef struct uu_list_pool uu_list_pool_t;
|
||||
typedef struct uu_list uu_list_t;
|
||||
|
||||
typedef struct uu_list_node {
|
||||
uintptr_t uln_opaque[2];
|
||||
} uu_list_node_t;
|
||||
|
||||
typedef struct uu_list_walk uu_list_walk_t;
|
||||
|
||||
typedef uintptr_t uu_list_index_t;
|
||||
|
||||
/*
|
||||
* lists: interface
|
||||
*
|
||||
* basic usage:
|
||||
* typedef struct foo {
|
||||
* ...
|
||||
* uu_list_node_t foo_node;
|
||||
* ...
|
||||
* } foo_t;
|
||||
*
|
||||
* static int
|
||||
* foo_compare(void *l_arg, void *r_arg, void *private)
|
||||
* {
|
||||
* foo_t *l = l_arg;
|
||||
* foo_t *r = r_arg;
|
||||
*
|
||||
* if (... l greater than r ...)
|
||||
* return (1);
|
||||
* if (... l less than r ...)
|
||||
* return (-1);
|
||||
* return (0);
|
||||
* }
|
||||
*
|
||||
* ...
|
||||
* // at initialization time
|
||||
* foo_pool = uu_list_pool_create("foo_pool",
|
||||
* sizeof (foo_t), offsetof(foo_t, foo_node), foo_compare,
|
||||
* debugging? 0 : UU_AVL_POOL_DEBUG);
|
||||
* ...
|
||||
*/
|
||||
uu_list_pool_t *uu_list_pool_create(const char *, size_t, size_t,
|
||||
uu_compare_fn_t *, uint32_t);
|
||||
#define UU_LIST_POOL_DEBUG 0x00000001
|
||||
|
||||
void uu_list_pool_destroy(uu_list_pool_t *);
|
||||
|
||||
/*
|
||||
* usage:
|
||||
*
|
||||
* foo_t *a;
|
||||
* a = malloc(sizeof (*a));
|
||||
* uu_list_node_init(a, &a->foo_list, pool);
|
||||
* ...
|
||||
* uu_list_node_fini(a, &a->foo_list, pool);
|
||||
* free(a);
|
||||
*/
|
||||
void uu_list_node_init(void *, uu_list_node_t *, uu_list_pool_t *);
|
||||
void uu_list_node_fini(void *, uu_list_node_t *, uu_list_pool_t *);
|
||||
|
||||
uu_list_t *uu_list_create(uu_list_pool_t *, void *_parent, uint32_t);
|
||||
#define UU_LIST_DEBUG 0x00000001
|
||||
#define UU_LIST_SORTED 0x00000002 /* list is sorted */
|
||||
|
||||
void uu_list_destroy(uu_list_t *); /* list must be empty */
|
||||
|
||||
size_t uu_list_numnodes(uu_list_t *);
|
||||
|
||||
void *uu_list_first(uu_list_t *);
|
||||
void *uu_list_last(uu_list_t *);
|
||||
|
||||
void *uu_list_next(uu_list_t *, void *);
|
||||
void *uu_list_prev(uu_list_t *, void *);
|
||||
|
||||
int uu_list_walk(uu_list_t *, uu_walk_fn_t *, void *, uint32_t);
|
||||
|
||||
uu_list_walk_t *uu_list_walk_start(uu_list_t *, uint32_t);
|
||||
void *uu_list_walk_next(uu_list_walk_t *);
|
||||
void uu_list_walk_end(uu_list_walk_t *);
|
||||
|
||||
void *uu_list_find(uu_list_t *, void *, void *, uu_list_index_t *);
|
||||
void uu_list_insert(uu_list_t *, void *, uu_list_index_t);
|
||||
|
||||
void *uu_list_nearest_next(uu_list_t *, uu_list_index_t);
|
||||
void *uu_list_nearest_prev(uu_list_t *, uu_list_index_t);
|
||||
|
||||
void *uu_list_teardown(uu_list_t *, void **);
|
||||
|
||||
void uu_list_remove(uu_list_t *, void *);
|
||||
|
||||
/*
|
||||
* lists: interfaces for non-sorted lists only
|
||||
*/
|
||||
int uu_list_insert_before(uu_list_t *, void *_target, void *_elem);
|
||||
int uu_list_insert_after(uu_list_t *, void *_target, void *_elem);
|
||||
|
||||
/*
|
||||
* avl trees: opaque structures
|
||||
*/
|
||||
typedef struct uu_avl_pool uu_avl_pool_t;
|
||||
typedef struct uu_avl uu_avl_t;
|
||||
|
||||
typedef struct uu_avl_node {
|
||||
#ifdef _LP64
|
||||
uintptr_t uan_opaque[3];
|
||||
#else
|
||||
uintptr_t uan_opaque[4];
|
||||
#endif
|
||||
} uu_avl_node_t;
|
||||
|
||||
typedef struct uu_avl_walk uu_avl_walk_t;
|
||||
|
||||
typedef uintptr_t uu_avl_index_t;
|
||||
|
||||
/*
|
||||
* avl trees: interface
|
||||
*
|
||||
* basic usage:
|
||||
* typedef struct foo {
|
||||
* ...
|
||||
* uu_avl_node_t foo_node;
|
||||
* ...
|
||||
* } foo_t;
|
||||
*
|
||||
* static int
|
||||
* foo_compare(void *l_arg, void *r_arg, void *private)
|
||||
* {
|
||||
* foo_t *l = l_arg;
|
||||
* foo_t *r = r_arg;
|
||||
*
|
||||
* if (... l greater than r ...)
|
||||
* return (1);
|
||||
* if (... l less than r ...)
|
||||
* return (-1);
|
||||
* return (0);
|
||||
* }
|
||||
*
|
||||
* ...
|
||||
* // at initialization time
|
||||
* foo_pool = uu_avl_pool_create("foo_pool",
|
||||
* sizeof (foo_t), offsetof(foo_t, foo_node), foo_compare,
|
||||
* debugging? 0 : UU_AVL_POOL_DEBUG);
|
||||
* ...
|
||||
*/
|
||||
uu_avl_pool_t *uu_avl_pool_create(const char *, size_t, size_t,
|
||||
uu_compare_fn_t *, uint32_t);
|
||||
#define UU_AVL_POOL_DEBUG 0x00000001
|
||||
|
||||
void uu_avl_pool_destroy(uu_avl_pool_t *);
|
||||
|
||||
/*
|
||||
* usage:
|
||||
*
|
||||
* foo_t *a;
|
||||
* a = malloc(sizeof (*a));
|
||||
* uu_avl_node_init(a, &a->foo_avl, pool);
|
||||
* ...
|
||||
* uu_avl_node_fini(a, &a->foo_avl, pool);
|
||||
* free(a);
|
||||
*/
|
||||
void uu_avl_node_init(void *, uu_avl_node_t *, uu_avl_pool_t *);
|
||||
void uu_avl_node_fini(void *, uu_avl_node_t *, uu_avl_pool_t *);
|
||||
|
||||
uu_avl_t *uu_avl_create(uu_avl_pool_t *, void *_parent, uint32_t);
|
||||
#define UU_AVL_DEBUG 0x00000001
|
||||
|
||||
void uu_avl_destroy(uu_avl_t *); /* list must be empty */
|
||||
|
||||
size_t uu_avl_numnodes(uu_avl_t *);
|
||||
|
||||
void *uu_avl_first(uu_avl_t *);
|
||||
void *uu_avl_last(uu_avl_t *);
|
||||
|
||||
void *uu_avl_next(uu_avl_t *, void *);
|
||||
void *uu_avl_prev(uu_avl_t *, void *);
|
||||
|
||||
int uu_avl_walk(uu_avl_t *, uu_walk_fn_t *, void *, uint32_t);
|
||||
|
||||
uu_avl_walk_t *uu_avl_walk_start(uu_avl_t *, uint32_t);
|
||||
void *uu_avl_walk_next(uu_avl_walk_t *);
|
||||
void uu_avl_walk_end(uu_avl_walk_t *);
|
||||
|
||||
void *uu_avl_find(uu_avl_t *, void *, void *, uu_avl_index_t *);
|
||||
void uu_avl_insert(uu_avl_t *, void *, uu_avl_index_t);
|
||||
|
||||
void *uu_avl_nearest_next(uu_avl_t *, uu_avl_index_t);
|
||||
void *uu_avl_nearest_prev(uu_avl_t *, uu_avl_index_t);
|
||||
|
||||
void *uu_avl_teardown(uu_avl_t *, void **);
|
||||
|
||||
void uu_avl_remove(uu_avl_t *, void *);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* _LIBUUTIL_H */
|
||||
|
|
@@ -1,36 +0,0 @@
|
|||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
* The contents of this file are subject to the terms of the
|
||||
* Common Development and Distribution License (the "License").
|
||||
* You may not use this file except in compliance with the License.
|
||||
*
|
||||
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
|
||||
* or https://opensource.org/licenses/CDDL-1.0.
|
||||
* See the License for the specific language governing permissions
|
||||
* and limitations under the License.
|
||||
*
|
||||
* When distributing Covered Code, include this CDDL HEADER in each
|
||||
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
|
||||
* If applicable, add the following below this CDDL HEADER, with the
|
||||
* fields enclosed by brackets "[]" replaced with your own identifying
|
||||
* information: Portions Copyright [yyyy] [name of copyright owner]
|
||||
*
|
||||
* CDDL HEADER END
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
|
||||
* Use is subject to license terms.
|
||||
*/
|
||||
|
||||
#ifndef _LIBUUTIL_COMMON_H
|
||||
#define _LIBUUTIL_COMMON_H
|
||||
|
||||
|
||||
|
||||
#include <libuutil.h>
|
||||
#include <libuutil_impl.h>
|
||||
|
||||
#endif /* _LIBUUTIL_COMMON_H */
|
||||
|
|
@@ -1,157 +0,0 @@
|
|||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
* The contents of this file are subject to the terms of the
|
||||
* Common Development and Distribution License, Version 1.0 only
|
||||
* (the "License"). You may not use this file except in compliance
|
||||
* with the License.
|
||||
*
|
||||
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
|
||||
* or https://opensource.org/licenses/CDDL-1.0.
|
||||
* See the License for the specific language governing permissions
|
||||
* and limitations under the License.
|
||||
*
|
||||
* When distributing Covered Code, include this CDDL HEADER in each
|
||||
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
|
||||
* If applicable, add the following below this CDDL HEADER, with the
|
||||
* fields enclosed by brackets "[]" replaced with your own identifying
|
||||
* information: Portions Copyright [yyyy] [name of copyright owner]
|
||||
*
|
||||
* CDDL HEADER END
|
||||
*/
|
||||
/*
|
||||
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
|
||||
* Use is subject to license terms.
|
||||
*/
|
||||
|
||||
#ifndef _LIBUUTIL_IMPL_H
|
||||
#define _LIBUUTIL_IMPL_H
|
||||
|
||||
|
||||
|
||||
#include <libuutil.h>
|
||||
#include <pthread.h>
|
||||
|
||||
#include <sys/avl_impl.h>
|
||||
#include <sys/byteorder.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
void uu_set_error(uint_t);
|
||||
|
||||
|
||||
__attribute__((format(printf, 1, 2), __noreturn__))
|
||||
void uu_panic(const char *format, ...);
|
||||
|
||||
|
||||
/*
|
||||
* uu_list structures
|
||||
*/
|
||||
typedef struct uu_list_node_impl {
|
||||
struct uu_list_node_impl *uln_next;
|
||||
struct uu_list_node_impl *uln_prev;
|
||||
} uu_list_node_impl_t;
|
||||
|
||||
struct uu_list_walk {
|
||||
uu_list_walk_t *ulw_next;
|
||||
uu_list_walk_t *ulw_prev;
|
||||
|
||||
uu_list_t *ulw_list;
|
||||
int8_t ulw_dir;
|
||||
uint8_t ulw_robust;
|
||||
uu_list_node_impl_t *ulw_next_result;
|
||||
};
|
||||
|
||||
struct uu_list {
|
||||
uu_list_t *ul_next;
|
||||
uu_list_t *ul_prev;
|
||||
|
||||
uu_list_pool_t *ul_pool;
|
||||
void *ul_parent;
|
||||
size_t ul_offset;
|
||||
size_t ul_numnodes;
|
||||
uint8_t ul_debug;
|
||||
uint8_t ul_sorted;
|
||||
uint8_t ul_index; /* mark for uu_list_index_ts */
|
||||
|
||||
uu_list_node_impl_t ul_null_node;
|
||||
uu_list_walk_t ul_null_walk; /* for robust walkers */
|
||||
};
|
||||
|
||||
#define UU_LIST_POOL_MAXNAME 64
|
||||
|
||||
struct uu_list_pool {
|
||||
uu_list_pool_t *ulp_next;
|
||||
uu_list_pool_t *ulp_prev;
|
||||
|
||||
char ulp_name[UU_LIST_POOL_MAXNAME];
|
||||
size_t ulp_nodeoffset;
|
||||
size_t ulp_objsize;
|
||||
uu_compare_fn_t *ulp_cmp;
|
||||
uint8_t ulp_debug;
|
||||
uint8_t ulp_last_index;
|
||||
pthread_mutex_t ulp_lock; /* protects null_list */
|
||||
uu_list_t ulp_null_list;
|
||||
};
|
||||
|
||||
/*
|
||||
* uu_avl structures
|
||||
*/
|
||||
typedef struct avl_node uu_avl_node_impl_t;
|
||||
|
||||
struct uu_avl_walk {
|
||||
uu_avl_walk_t *uaw_next;
|
||||
uu_avl_walk_t *uaw_prev;
|
||||
|
||||
uu_avl_t *uaw_avl;
|
||||
void *uaw_next_result;
|
||||
int8_t uaw_dir;
|
||||
uint8_t uaw_robust;
|
||||
};
|
||||
|
||||
struct uu_avl {
|
||||
uu_avl_t *ua_next;
|
||||
uu_avl_t *ua_prev;
|
||||
|
||||
uu_avl_pool_t *ua_pool;
|
||||
void *ua_parent;
|
||||
uint8_t ua_debug;
|
||||
uint8_t ua_index; /* mark for uu_avl_index_ts */
|
||||
|
||||
struct avl_tree ua_tree;
|
||||
uu_avl_walk_t ua_null_walk;
|
||||
};
|
||||
|
||||
#define UU_AVL_POOL_MAXNAME 64
|
||||
|
||||
struct uu_avl_pool {
|
||||
uu_avl_pool_t *uap_next;
|
||||
uu_avl_pool_t *uap_prev;
|
||||
|
||||
char uap_name[UU_AVL_POOL_MAXNAME];
|
||||
size_t uap_nodeoffset;
|
||||
size_t uap_objsize;
|
||||
uu_compare_fn_t *uap_cmp;
|
||||
uint8_t uap_debug;
|
||||
uint8_t uap_last_index;
|
||||
pthread_mutex_t uap_lock; /* protects null_avl */
|
||||
uu_avl_t uap_null_avl;
|
||||
};
|
||||
|
||||
/*
|
||||
* atfork() handlers
|
||||
*/
|
||||
void uu_avl_lockup(void);
|
||||
void uu_avl_release(void);
|
||||
|
||||
void uu_list_lockup(void);
|
||||
void uu_list_release(void);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* _LIBUUTIL_IMPL_H */
|
||||
|
|
@@ -290,80 +290,11 @@ extern unsigned char bcd_to_byte[256];
|
|||
#define offsetof(type, field) __offsetof(type, field)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Find highest one bit set.
|
||||
* Returns bit number + 1 of highest bit that is set, otherwise returns 0.
|
||||
* High order bit is 31 (or 63 in _LP64 kernel).
|
||||
*/
|
||||
static __inline int
|
||||
highbit(ulong_t i)
|
||||
{
|
||||
#if defined(HAVE_INLINE_FLSL)
|
||||
return (flsl(i));
|
||||
#else
|
||||
int h = 1;
|
||||
#define highbit(x) flsl(x)
|
||||
#define lowbit(x) ffsl(x)
|
||||
|
||||
if (i == 0)
|
||||
return (0);
|
||||
#ifdef _LP64
|
||||
if (i & 0xffffffff00000000ul) {
|
||||
h += 32; i >>= 32;
|
||||
}
|
||||
#endif
|
||||
if (i & 0xffff0000) {
|
||||
h += 16; i >>= 16;
|
||||
}
|
||||
if (i & 0xff00) {
|
||||
h += 8; i >>= 8;
|
||||
}
|
||||
if (i & 0xf0) {
|
||||
h += 4; i >>= 4;
|
||||
}
|
||||
if (i & 0xc) {
|
||||
h += 2; i >>= 2;
|
||||
}
|
||||
if (i & 0x2) {
|
||||
h += 1;
|
||||
}
|
||||
return (h);
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* Find highest one bit set.
|
||||
* Returns bit number + 1 of highest bit that is set, otherwise returns 0.
|
||||
*/
|
||||
static __inline int
|
||||
highbit64(uint64_t i)
|
||||
{
|
||||
#if defined(HAVE_INLINE_FLSLL)
|
||||
return (flsll(i));
|
||||
#else
|
||||
int h = 1;
|
||||
|
||||
if (i == 0)
|
||||
return (0);
|
||||
if (i & 0xffffffff00000000ULL) {
|
||||
h += 32; i >>= 32;
|
||||
}
|
||||
if (i & 0xffff0000) {
|
||||
h += 16; i >>= 16;
|
||||
}
|
||||
if (i & 0xff00) {
|
||||
h += 8; i >>= 8;
|
||||
}
|
||||
if (i & 0xf0) {
|
||||
h += 4; i >>= 4;
|
||||
}
|
||||
if (i & 0xc) {
|
||||
h += 2; i >>= 2;
|
||||
}
|
||||
if (i & 0x2) {
|
||||
h += 1;
|
||||
}
|
||||
return (h);
|
||||
#endif
|
||||
}
|
||||
#define highbit64(x) flsll(x)
|
||||
#define lowbit64(x) ffsll(x)
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -107,7 +107,7 @@ extern void taskq_destroy(taskq_t *);
extern void taskq_wait_id(taskq_t *, taskqid_t);
extern void taskq_wait_outstanding(taskq_t *, taskqid_t);
extern void taskq_wait(taskq_t *);
extern int taskq_cancel_id(taskq_t *, taskqid_t);
extern int taskq_cancel_id(taskq_t *, taskqid_t, boolean_t);
extern int taskq_member(taskq_t *, kthread_t *);
extern taskq_t *taskq_of_curthread(void);
void taskq_suspend(taskq_t *);

@@ -198,7 +198,7 @@ extern void taskq_destroy(taskq_t *);
extern void taskq_wait_id(taskq_t *, taskqid_t);
extern void taskq_wait_outstanding(taskq_t *, taskqid_t);
extern void taskq_wait(taskq_t *);
extern int taskq_cancel_id(taskq_t *, taskqid_t);
extern int taskq_cancel_id(taskq_t *, taskqid_t, boolean_t);
extern int taskq_member(taskq_t *, kthread_t *);
extern taskq_t *taskq_of_curthread(void);

@@ -33,6 +33,7 @@
|
|||
#include <sys/fs/zfs.h>
|
||||
#include <sys/zio.h>
|
||||
#include <sys/dmu.h>
|
||||
#include <sys/wmsum.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
|
|
@ -218,6 +219,9 @@ typedef enum {
|
|||
* because its relatively rarely used.
|
||||
*/
|
||||
typedef struct {
|
||||
/* protects dde_phys, dde_orig_phys and dde_lead_zio during I/O */
|
||||
kmutex_t dde_io_lock;
|
||||
|
||||
/* copy of data after a repair read, to be rewritten */
|
||||
abd_t *dde_repair_abd;
|
||||
|
||||
|
|
@ -296,6 +300,20 @@ typedef struct {
|
|||
|
||||
kstat_t *ddt_ksp; /* kstats context */
|
||||
|
||||
/* wmsums for hot-path lookup counters */
|
||||
wmsum_t ddt_kstat_dds_lookup;
|
||||
wmsum_t ddt_kstat_dds_lookup_live_hit;
|
||||
wmsum_t ddt_kstat_dds_lookup_live_wait;
|
||||
wmsum_t ddt_kstat_dds_lookup_live_miss;
|
||||
wmsum_t ddt_kstat_dds_lookup_existing;
|
||||
wmsum_t ddt_kstat_dds_lookup_new;
|
||||
wmsum_t ddt_kstat_dds_lookup_log_hit;
|
||||
wmsum_t ddt_kstat_dds_lookup_log_active_hit;
|
||||
wmsum_t ddt_kstat_dds_lookup_log_flushing_hit;
|
||||
wmsum_t ddt_kstat_dds_lookup_log_miss;
|
||||
wmsum_t ddt_kstat_dds_lookup_stored_hit;
|
||||
wmsum_t ddt_kstat_dds_lookup_stored_miss;
|
||||
|
||||
enum zio_checksum ddt_checksum; /* checksum algorithm in use */
|
||||
spa_t *ddt_spa; /* pool this ddt is on */
|
||||
objset_t *ddt_os; /* ddt objset (always MOS) */
|
||||
|
|
|
|||
|
|
@@ -1,56 +0,0 @@
|
|||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
* The contents of this file are subject to the terms of the
|
||||
* Common Development and Distribution License (the "License").
|
||||
* You may not use this file except in compliance with the License.
|
||||
*
|
||||
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
|
||||
* or https://opensource.org/licenses/CDDL-1.0.
|
||||
* See the License for the specific language governing permissions
|
||||
* and limitations under the License.
|
||||
*
|
||||
* When distributing Covered Code, include this CDDL HEADER in each
|
||||
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
|
||||
* If applicable, add the following below this CDDL HEADER, with the
|
||||
* fields enclosed by brackets "[]" replaced with your own identifying
|
||||
* information: Portions Copyright [yyyy] [name of copyright owner]
|
||||
*
|
||||
* CDDL HEADER END
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
|
||||
* Use is subject to license terms.
|
||||
*/
|
||||
|
||||
#ifndef _THREAD_POOL_H_
|
||||
#define _THREAD_POOL_H_ extern __attribute__((visibility("default")))
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <pthread.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef struct tpool tpool_t; /* opaque thread pool descriptor */
|
||||
|
||||
_THREAD_POOL_H_ tpool_t *tpool_create(uint_t min_threads, uint_t max_threads,
|
||||
uint_t linger, pthread_attr_t *attr);
|
||||
_THREAD_POOL_H_ int tpool_dispatch(tpool_t *tpool,
|
||||
void (*func)(void *), void *arg);
|
||||
_THREAD_POOL_H_ void tpool_destroy(tpool_t *tpool);
|
||||
_THREAD_POOL_H_ void tpool_abandon(tpool_t *tpool);
|
||||
_THREAD_POOL_H_ void tpool_wait(tpool_t *tpool);
|
||||
_THREAD_POOL_H_ void tpool_suspend(tpool_t *tpool);
|
||||
_THREAD_POOL_H_ int tpool_suspended(tpool_t *tpool);
|
||||
_THREAD_POOL_H_ void tpool_resume(tpool_t *tpool);
|
||||
_THREAD_POOL_H_ int tpool_member(tpool_t *tpool);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* _THREAD_POOL_H_ */
|
||||
|
|
@@ -16,18 +16,18 @@
|
|||
# |--libzdb--zdb | |
|
||||
# | | |
|
||||
# libzpool libzfs* ----------------+
|
||||
# | | | \ / | | |
|
||||
# libicp --/ | | \ / | | \------- libshare
|
||||
# | | \ / | |
|
||||
# libzstd ---/ | \ / | \--------- libuutil
|
||||
# | \ / \ | |
|
||||
# libunicode --/ \ / \ | |
|
||||
# \ / \ | |
|
||||
# | | | \ / | | |\
|
||||
# libicp --/ | | \ / | \ | \----- libshare
|
||||
# | | \ / | \ \
|
||||
# libzstd ---/ | \ / | \ \-------\
|
||||
# | \ / \ \ \
|
||||
# libunicode --/ \ / \ \-------\ \
|
||||
# \ / \ \ |
|
||||
# libzutil libzfs_core* | |
|
||||
# | | | | \ | | | |
|
||||
# | | | | | | | | |
|
||||
# | | | | | | | | |
|
||||
# libtpool -------------/ | | | \---- libnvpair* | | |
|
||||
# | | | \ | | | |
|
||||
# | | | | | | | |
|
||||
# | | | | | | | |
|
||||
# | | | \---- libnvpair* | | |
|
||||
# | | | | | |
|
||||
# libefi -----------------/ | \------ libavl* --------/ |
|
||||
# | | |
|
||||
|
|
@ -41,8 +41,7 @@
|
|||
# when performing an ABI check the following options are applied:
|
||||
#
|
||||
# --no-unreferenced-symbols: Exclude symbols which are not referenced by
|
||||
# any debug information. Without this _init() and _fini() are incorrectly
|
||||
# reported on CentOS7 for libuutil.so.
|
||||
# any debug information.
|
||||
#
|
||||
# --headers-dir1: Limit ABI checks to public OpenZFS headers, otherwise
|
||||
# changes in public system headers are also reported.
|
||||
|
|
@ -59,9 +58,7 @@ include $(srcdir)/%D%/libicp/Makefile.am
|
|||
include $(srcdir)/%D%/libnvpair/Makefile.am
|
||||
include $(srcdir)/%D%/libshare/Makefile.am
|
||||
include $(srcdir)/%D%/libspl/Makefile.am
|
||||
include $(srcdir)/%D%/libtpool/Makefile.am
|
||||
include $(srcdir)/%D%/libunicode/Makefile.am
|
||||
include $(srcdir)/%D%/libuutil/Makefile.am
|
||||
include $(srcdir)/%D%/libzdb/Makefile.am
|
||||
include $(srcdir)/%D%/libzfs_core/Makefile.am
|
||||
include $(srcdir)/%D%/libzfs/Makefile.am
|
||||
|
|
|
|||
|
|
@@ -31,6 +31,8 @@
|
|||
|
||||
#include <pthread.h>
|
||||
#include <stdint.h>
|
||||
#include <sys/kmem.h>
|
||||
#include <sys/thread.h>
|
||||
#include <sys/mutex.h>
|
||||
#include <sys/rwlock.h>
|
||||
#include <sys/condvar.h>
|
||||
|
|
@ -112,7 +114,7 @@ extern void taskq_wait_id(taskq_t *, taskqid_t);
|
|||
extern void taskq_wait_outstanding(taskq_t *, taskqid_t);
|
||||
extern int taskq_member(taskq_t *, kthread_t *);
|
||||
extern taskq_t *taskq_of_curthread(void);
|
||||
extern int taskq_cancel_id(taskq_t *, taskqid_t);
|
||||
extern int taskq_cancel_id(taskq_t *, taskqid_t, boolean_t);
|
||||
extern void system_taskq_init(void);
|
||||
extern void system_taskq_fini(void);
|
||||
|
||||
|
|
|
|||
|
|
@ -54,4 +54,15 @@ typedef int projid_t;
|
|||
typedef off_t loff_t;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* On musl, loff_t is a macro within fcntl.h when _GNU_SOURCE is defined.
|
||||
* If no macro is defined, a typedef fallback is provided.
|
||||
*/
|
||||
#if defined(__linux__) && !defined(__GLIBC__)
|
||||
#include <fcntl.h>
|
||||
#ifndef loff_t
|
||||
typedef off_t loff_t;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
|
|
|||
|
|
@@ -35,6 +35,10 @@
|
|||
#include <sys/thread.h>
|
||||
#include <sys/taskq.h>
|
||||
#include <sys/kmem.h>
|
||||
#include <pthread.h>
|
||||
|
||||
static pthread_key_t taskq_tsd;
|
||||
static pthread_once_t taskq_tsd_once = PTHREAD_ONCE_INIT;
|
||||
|
||||
static taskq_t *__system_taskq = NULL;
|
||||
static taskq_t *__system_delay_taskq = NULL;
|
||||
|
|
@ -51,8 +55,6 @@ taskq_t
|
|||
return (__system_delay_taskq);
|
||||
}
|
||||
|
||||
static pthread_key_t taskq_tsd;
|
||||
|
||||
#define TASKQ_ACTIVE 0x00010000
|
||||
|
||||
static taskq_ent_t *
|
||||
|
|
@ -223,6 +225,12 @@ taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
|
|||
taskq_wait(tq);
|
||||
}
|
||||
|
||||
static void
|
||||
taskq_tsd_init(void)
|
||||
{
|
||||
VERIFY0(pthread_key_create(&taskq_tsd, NULL));
|
||||
}
|
||||
|
||||
static __attribute__((noreturn)) void
|
||||
taskq_thread(void *arg)
|
||||
{
|
||||
|
|
@ -230,6 +238,7 @@ taskq_thread(void *arg)
|
|||
taskq_ent_t *t;
|
||||
boolean_t prealloc;
|
||||
|
||||
pthread_once(&taskq_tsd_once, taskq_tsd_init);
|
||||
VERIFY0(pthread_setspecific(taskq_tsd, tq));
|
||||
|
||||
mutex_enter(&tq->tq_lock);
|
||||
|
|
@ -398,16 +407,15 @@ taskq_of_curthread(void)
|
|||
}
|
||||
|
||||
int
|
||||
taskq_cancel_id(taskq_t *tq, taskqid_t id)
|
||||
taskq_cancel_id(taskq_t *tq, taskqid_t id, boolean_t wait)
|
||||
{
|
||||
(void) tq, (void) id;
|
||||
(void) tq, (void) id, (void) wait;
|
||||
return (ENOENT);
|
||||
}
|
||||
|
||||
void
|
||||
system_taskq_init(void)
|
||||
{
|
||||
VERIFY0(pthread_key_create(&taskq_tsd, NULL));
|
||||
__system_taskq = taskq_create("system_taskq", 64, maxclsyspri, 4, 512,
|
||||
TASKQ_DYNAMIC | TASKQ_PREPOPULATE);
|
||||
__system_delay_taskq = taskq_create("delay_taskq", 4, maxclsyspri, 4,
|
||||
|
|
@ -421,5 +429,4 @@ system_taskq_fini(void)
|
|||
__system_taskq = NULL; /* defensive */
|
||||
taskq_destroy(__system_delay_taskq);
|
||||
__system_delay_taskq = NULL;
|
||||
VERIFY0(pthread_key_delete(taskq_tsd));
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -124,10 +124,12 @@ zfs_tunable_parse_int(const char *val, intmax_t *np,
{
intmax_t n;
char *end;
int err;

errno = 0;
n = strtoimax(val, &end, 0);
if (errno != 0)
return (errno);
if ((err = errno) != 0)
return (err);
if (*end != '\0')
return (EINVAL);
if (n < min || n > max)

@@ -142,10 +144,12 @@ zfs_tunable_parse_uint(const char *val, uintmax_t *np,
{
uintmax_t n;
char *end;
int err;

errno = 0;
n = strtoumax(val, &end, 0);
if (errno != 0)
return (errno);
if ((err = errno) != 0)
return (err);
if (*end != '\0')
return (EINVAL);
if (strchr(val, '-'))

@@ -1,11 +0,0 @@
libtpool_la_CFLAGS = $(AM_CFLAGS) $(LIBRARY_CFLAGS)
libtpool_la_CFLAGS += -fvisibility=hidden
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61118
libtpool_la_CFLAGS += $(NO_CLOBBERED)

noinst_LTLIBRARIES += libtpool.la
CPPCHECKTARGETS += libtpool.la

libtpool_la_SOURCES = \
%D%/thread_pool.c \
%D%/thread_pool_impl.h

@@ -1,612 +0,0 @@
|
|||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
* The contents of this file are subject to the terms of the
|
||||
* Common Development and Distribution License (the "License").
|
||||
* You may not use this file except in compliance with the License.
|
||||
*
|
||||
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
|
||||
* or https://opensource.org/licenses/CDDL-1.0.
|
||||
* See the License for the specific language governing permissions
|
||||
* and limitations under the License.
|
||||
*
|
||||
* When distributing Covered Code, include this CDDL HEADER in each
|
||||
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
|
||||
* If applicable, add the following below this CDDL HEADER, with the
|
||||
* fields enclosed by brackets "[]" replaced with your own identifying
|
||||
* information: Portions Copyright [yyyy] [name of copyright owner]
|
||||
*
|
||||
* CDDL HEADER END
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
|
||||
* Use is subject to license terms.
|
||||
*/
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <signal.h>
|
||||
#include <errno.h>
|
||||
#include <assert.h>
|
||||
#include <limits.h>
|
||||
#include "thread_pool_impl.h"
|
||||
|
||||
static pthread_mutex_t thread_pool_lock = PTHREAD_MUTEX_INITIALIZER;
|
||||
static tpool_t *thread_pools = NULL;
|
||||
|
||||
static void
|
||||
delete_pool(tpool_t *tpool)
|
||||
{
|
||||
tpool_job_t *job;
|
||||
|
||||
ASSERT(tpool->tp_current == 0 && tpool->tp_active == NULL);
|
||||
|
||||
/*
|
||||
* Unlink the pool from the global list of all pools.
|
||||
*/
|
||||
(void) pthread_mutex_lock(&thread_pool_lock);
|
||||
if (thread_pools == tpool)
|
||||
thread_pools = tpool->tp_forw;
|
||||
if (thread_pools == tpool)
|
||||
thread_pools = NULL;
|
||||
else {
|
||||
tpool->tp_back->tp_forw = tpool->tp_forw;
|
||||
tpool->tp_forw->tp_back = tpool->tp_back;
|
||||
}
|
||||
pthread_mutex_unlock(&thread_pool_lock);
|
||||
|
||||
/*
|
||||
* There should be no pending jobs, but just in case...
|
||||
*/
|
||||
for (job = tpool->tp_head; job != NULL; job = tpool->tp_head) {
|
||||
tpool->tp_head = job->tpj_next;
|
||||
free(job);
|
||||
}
|
||||
(void) pthread_attr_destroy(&tpool->tp_attr);
|
||||
free(tpool);
|
||||
}
|
||||
|
||||
/*
|
||||
* Worker thread is terminating.
|
||||
*/
|
||||
static void
|
||||
worker_cleanup(void *arg)
|
||||
{
|
||||
tpool_t *tpool = (tpool_t *)arg;
|
||||
|
||||
if (--tpool->tp_current == 0 &&
|
||||
(tpool->tp_flags & (TP_DESTROY | TP_ABANDON))) {
|
||||
if (tpool->tp_flags & TP_ABANDON) {
|
||||
pthread_mutex_unlock(&tpool->tp_mutex);
|
||||
delete_pool(tpool);
|
||||
return;
|
||||
}
|
||||
if (tpool->tp_flags & TP_DESTROY)
|
||||
(void) pthread_cond_broadcast(&tpool->tp_busycv);
|
||||
}
|
||||
pthread_mutex_unlock(&tpool->tp_mutex);
|
||||
}
|
||||
|
||||
static void
|
||||
notify_waiters(tpool_t *tpool)
|
||||
{
|
||||
if (tpool->tp_head == NULL && tpool->tp_active == NULL) {
|
||||
tpool->tp_flags &= ~TP_WAIT;
|
||||
(void) pthread_cond_broadcast(&tpool->tp_waitcv);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Called by a worker thread on return from a tpool_dispatch()d job.
|
||||
*/
|
||||
static void
|
||||
job_cleanup(void *arg)
|
||||
{
|
||||
tpool_t *tpool = (tpool_t *)arg;
|
||||
|
||||
pthread_t my_tid = pthread_self();
|
||||
tpool_active_t *activep;
|
||||
tpool_active_t **activepp;
|
||||
|
||||
pthread_mutex_lock(&tpool->tp_mutex);
|
||||
for (activepp = &tpool->tp_active; ; activepp = &activep->tpa_next) {
|
||||
activep = *activepp;
|
||||
if (activep->tpa_tid == my_tid) {
|
||||
*activepp = activep->tpa_next;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (tpool->tp_flags & TP_WAIT)
|
||||
notify_waiters(tpool);
|
||||
}
|
||||
|
||||
static void *
|
||||
tpool_worker(void *arg)
|
||||
{
|
||||
tpool_t *tpool = (tpool_t *)arg;
|
||||
int elapsed;
|
||||
tpool_job_t *job;
|
||||
void (*func)(void *);
|
||||
tpool_active_t active;
|
||||
|
||||
pthread_mutex_lock(&tpool->tp_mutex);
|
||||
pthread_cleanup_push(worker_cleanup, tpool);
|
||||
|
||||
/*
|
||||
* This is the worker's main loop.
|
||||
* It will only be left if a timeout or an error has occurred.
|
||||
*/
|
||||
active.tpa_tid = pthread_self();
|
||||
for (;;) {
|
||||
elapsed = 0;
|
||||
tpool->tp_idle++;
|
||||
if (tpool->tp_flags & TP_WAIT)
|
||||
notify_waiters(tpool);
|
||||
while ((tpool->tp_head == NULL ||
|
||||
(tpool->tp_flags & TP_SUSPEND)) &&
|
||||
!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON))) {
|
||||
if (tpool->tp_current <= tpool->tp_minimum ||
|
||||
tpool->tp_linger == 0) {
|
||||
(void) pthread_cond_wait(&tpool->tp_workcv,
|
||||
&tpool->tp_mutex);
|
||||
} else {
|
||||
struct timespec ts;
|
||||
|
||||
clock_gettime(CLOCK_REALTIME, &ts);
|
||||
ts.tv_sec += tpool->tp_linger;
|
||||
|
||||
if (pthread_cond_timedwait(&tpool->tp_workcv,
|
||||
&tpool->tp_mutex, &ts) != 0) {
|
||||
elapsed = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
tpool->tp_idle--;
|
||||
if (tpool->tp_flags & TP_DESTROY)
|
||||
break;
|
||||
if (tpool->tp_flags & TP_ABANDON) {
|
||||
/* can't abandon a suspended pool */
|
||||
if (tpool->tp_flags & TP_SUSPEND) {
|
||||
tpool->tp_flags &= ~TP_SUSPEND;
|
||||
(void) pthread_cond_broadcast(
|
||||
&tpool->tp_workcv);
|
||||
}
|
||||
if (tpool->tp_head == NULL)
|
||||
break;
|
||||
}
|
||||
if ((job = tpool->tp_head) != NULL &&
|
||||
!(tpool->tp_flags & TP_SUSPEND)) {
|
||||
elapsed = 0;
|
||||
func = job->tpj_func;
|
||||
arg = job->tpj_arg;
|
||||
tpool->tp_head = job->tpj_next;
|
||||
if (job == tpool->tp_tail)
|
||||
tpool->tp_tail = NULL;
|
||||
tpool->tp_njobs--;
|
||||
active.tpa_next = tpool->tp_active;
|
||||
tpool->tp_active = &active;
|
||||
pthread_mutex_unlock(&tpool->tp_mutex);
|
||||
pthread_cleanup_push(job_cleanup, tpool);
|
||||
free(job);
|
||||
|
||||
sigset_t maskset;
|
||||
(void) pthread_sigmask(SIG_SETMASK, NULL, &maskset);
|
||||
|
||||
/*
|
||||
* Call the specified function.
|
||||
*/
|
||||
func(arg);
|
||||
/*
|
||||
* We don't know what this thread has been doing,
|
||||
* so we reset its signal mask and cancellation
|
||||
* state back to the values prior to calling func().
|
||||
*/
|
||||
(void) pthread_sigmask(SIG_SETMASK, &maskset, NULL);
|
||||
(void) pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED,
|
||||
NULL);
|
||||
(void) pthread_setcancelstate(PTHREAD_CANCEL_ENABLE,
|
||||
NULL);
|
||||
pthread_cleanup_pop(1);
|
||||
}
|
||||
if (elapsed && tpool->tp_current > tpool->tp_minimum) {
|
||||
/*
|
||||
* We timed out and there is no work to be done
|
||||
* and the number of workers exceeds the minimum.
|
||||
* Exit now to reduce the size of the pool.
|
||||
*/
|
||||
break;
|
||||
}
|
||||
}
|
||||
pthread_cleanup_pop(1);
|
||||
return (arg);
|
||||
}
|
||||
|
||||
/*
|
||||
* Create a worker thread, with default signals blocked.
|
||||
*/
|
||||
static int
|
||||
create_worker(tpool_t *tpool)
|
||||
{
|
||||
pthread_t thread;
|
||||
sigset_t oset;
|
||||
int error;
|
||||
|
||||
(void) pthread_sigmask(SIG_SETMASK, NULL, &oset);
|
||||
error = pthread_create(&thread, &tpool->tp_attr, tpool_worker, tpool);
|
||||
(void) pthread_sigmask(SIG_SETMASK, &oset, NULL);
|
||||
return (error);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* pthread_attr_clone: make a copy of a pthread_attr_t. When old_attr
|
||||
* is NULL initialize the cloned attr using default values.
|
||||
*/
|
||||
static int
|
||||
pthread_attr_clone(pthread_attr_t *attr, const pthread_attr_t *old_attr)
|
||||
{
|
||||
int error;
|
||||
|
||||
error = pthread_attr_init(attr);
|
||||
if (error || (old_attr == NULL))
|
||||
return (error);
|
||||
|
||||
#ifdef __GLIBC__
|
||||
cpu_set_t cpuset;
|
||||
size_t cpusetsize = sizeof (cpuset);
|
||||
error = pthread_attr_getaffinity_np(old_attr, cpusetsize, &cpuset);
|
||||
if (error == 0)
|
||||
error = pthread_attr_setaffinity_np(attr, cpusetsize, &cpuset);
|
||||
if (error)
|
||||
goto error;
|
||||
#endif /* __GLIBC__ */
|
||||
|
||||
int detachstate;
|
||||
error = pthread_attr_getdetachstate(old_attr, &detachstate);
|
||||
if (error == 0)
|
||||
error = pthread_attr_setdetachstate(attr, detachstate);
|
||||
if (error)
|
||||
goto error;
|
||||
|
||||
size_t guardsize;
|
||||
error = pthread_attr_getguardsize(old_attr, &guardsize);
|
||||
if (error == 0)
|
||||
error = pthread_attr_setguardsize(attr, guardsize);
|
||||
if (error)
|
||||
goto error;
|
||||
|
||||
int inheritsched;
|
||||
error = pthread_attr_getinheritsched(old_attr, &inheritsched);
|
||||
if (error == 0)
|
||||
error = pthread_attr_setinheritsched(attr, inheritsched);
|
||||
if (error)
|
||||
goto error;
|
||||
|
||||
struct sched_param param;
|
||||
error = pthread_attr_getschedparam(old_attr, ¶m);
|
||||
if (error == 0)
|
||||
error = pthread_attr_setschedparam(attr, ¶m);
|
||||
if (error)
|
||||
goto error;
|
||||
|
||||
int policy;
|
||||
error = pthread_attr_getschedpolicy(old_attr, &policy);
|
||||
if (error == 0)
|
||||
error = pthread_attr_setschedpolicy(attr, policy);
|
||||
if (error)
|
||||
goto error;
|
||||
|
||||
int scope;
|
||||
error = pthread_attr_getscope(old_attr, &scope);
|
||||
if (error == 0)
|
||||
error = pthread_attr_setscope(attr, scope);
|
||||
if (error)
|
||||
goto error;
|
||||
|
||||
void *stackaddr;
|
||||
size_t stacksize;
|
||||
error = pthread_attr_getstack(old_attr, &stackaddr, &stacksize);
|
||||
if (error == 0)
|
||||
error = pthread_attr_setstack(attr, stackaddr, stacksize);
|
||||
if (error)
|
||||
goto error;
|
||||
|
||||
return (0);
|
||||
error:
|
||||
pthread_attr_destroy(attr);
|
||||
return (error);
|
||||
}
|
||||
|
||||
tpool_t *
|
||||
tpool_create(uint_t min_threads, uint_t max_threads, uint_t linger,
|
||||
pthread_attr_t *attr)
|
||||
{
|
||||
tpool_t *tpool;
|
||||
void *stackaddr;
|
||||
size_t stacksize;
|
||||
size_t minstack;
|
||||
int error;
|
||||
|
||||
if (min_threads > max_threads || max_threads < 1) {
|
||||
errno = EINVAL;
|
||||
return (NULL);
|
||||
}
|
||||
if (attr != NULL) {
|
||||
if (pthread_attr_getstack(attr, &stackaddr, &stacksize) != 0) {
|
||||
errno = EINVAL;
|
||||
return (NULL);
|
||||
}
|
||||
/*
|
||||
* Allow only one thread in the pool with a specified stack.
|
||||
* Require threads to have at least the minimum stack size.
|
||||
*/
|
||||
minstack = PTHREAD_STACK_MIN;
|
||||
if (stackaddr != NULL) {
|
||||
if (stacksize < minstack || max_threads != 1) {
|
||||
errno = EINVAL;
|
||||
return (NULL);
|
||||
}
|
||||
} else if (stacksize != 0 && stacksize < minstack) {
|
||||
errno = EINVAL;
|
||||
return (NULL);
|
||||
}
|
||||
}
|
||||
|
||||
tpool = calloc(1, sizeof (*tpool));
|
||||
if (tpool == NULL) {
|
||||
errno = ENOMEM;
|
||||
return (NULL);
|
||||
}
|
||||
(void) pthread_mutex_init(&tpool->tp_mutex, NULL);
|
||||
(void) pthread_cond_init(&tpool->tp_busycv, NULL);
|
||||
(void) pthread_cond_init(&tpool->tp_workcv, NULL);
|
||||
(void) pthread_cond_init(&tpool->tp_waitcv, NULL);
|
||||
tpool->tp_minimum = min_threads;
|
||||
tpool->tp_maximum = max_threads;
|
||||
tpool->tp_linger = linger;
|
||||
|
||||
/*
|
||||
* We cannot just copy the attribute pointer.
|
||||
* We need to initialize a new pthread_attr_t structure
|
||||
* with the values from the user-supplied pthread_attr_t.
|
||||
* If the attribute pointer is NULL, we need to initialize
|
||||
* the new pthread_attr_t structure with default values.
|
||||
*/
|
||||
error = pthread_attr_clone(&tpool->tp_attr, attr);
|
||||
if (error) {
|
||||
free(tpool);
|
||||
errno = error;
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
/* make all pool threads be detached daemon threads */
|
||||
(void) pthread_attr_setdetachstate(&tpool->tp_attr,
|
||||
PTHREAD_CREATE_DETACHED);
|
||||
|
||||
/* insert into the global list of all thread pools */
|
||||
pthread_mutex_lock(&thread_pool_lock);
|
||||
if (thread_pools == NULL) {
|
||||
tpool->tp_forw = tpool;
|
||||
tpool->tp_back = tpool;
|
||||
thread_pools = tpool;
|
||||
} else {
|
||||
thread_pools->tp_back->tp_forw = tpool;
|
||||
tpool->tp_forw = thread_pools;
|
||||
tpool->tp_back = thread_pools->tp_back;
|
||||
thread_pools->tp_back = tpool;
|
||||
}
|
||||
pthread_mutex_unlock(&thread_pool_lock);
|
||||
|
||||
return (tpool);
|
||||
}
|
||||
|
||||
/*
|
||||
* Dispatch a work request to the thread pool.
|
||||
* If there are idle workers, awaken one.
|
||||
* Else, if the maximum number of workers has
|
||||
* not been reached, spawn a new worker thread.
|
||||
* Else just return with the job added to the queue.
|
||||
*/
|
||||
int
|
||||
tpool_dispatch(tpool_t *tpool, void (*func)(void *), void *arg)
|
||||
{
|
||||
tpool_job_t *job;
|
||||
|
||||
ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));
|
||||
|
||||
if ((job = calloc(1, sizeof (*job))) == NULL)
|
||||
return (-1);
|
||||
job->tpj_next = NULL;
|
||||
job->tpj_func = func;
|
||||
job->tpj_arg = arg;
|
||||
|
||||
pthread_mutex_lock(&tpool->tp_mutex);
|
||||
|
||||
if (!(tpool->tp_flags & TP_SUSPEND)) {
|
||||
if (tpool->tp_idle > 0)
|
||||
(void) pthread_cond_signal(&tpool->tp_workcv);
|
||||
else if (tpool->tp_current >= tpool->tp_maximum) {
|
||||
/* At worker limit. Leave task on queue */
|
||||
} else {
|
||||
if (create_worker(tpool) == 0) {
|
||||
/* Started a new worker thread */
|
||||
tpool->tp_current++;
|
||||
} else if (tpool->tp_current > 0) {
|
||||
/* Leave task on queue */
|
||||
} else {
|
||||
/* Cannot start a single worker! */
|
||||
pthread_mutex_unlock(&tpool->tp_mutex);
|
||||
free(job);
|
||||
return (-1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (tpool->tp_head == NULL)
|
||||
tpool->tp_head = job;
|
||||
else
|
||||
tpool->tp_tail->tpj_next = job;
|
||||
tpool->tp_tail = job;
|
||||
tpool->tp_njobs++;
|
||||
|
||||
pthread_mutex_unlock(&tpool->tp_mutex);
|
||||
return (0);
|
||||
}
|
||||
|
||||
static void
|
||||
tpool_cleanup(void *arg)
|
||||
{
|
||||
tpool_t *tpool = (tpool_t *)arg;
|
||||
|
||||
pthread_mutex_unlock(&tpool->tp_mutex);
|
||||
}
|
||||
|
||||
/*
|
||||
* Assumes: by the time tpool_destroy() is called no one will use this
|
||||
* thread pool in any way and no one will try to dispatch entries to it.
|
||||
* Calling tpool_destroy() from a job in the pool will cause deadlock.
|
||||
*/
|
||||
void
|
||||
tpool_destroy(tpool_t *tpool)
|
||||
{
|
||||
tpool_active_t *activep;
|
||||
|
||||
ASSERT(!tpool_member(tpool));
|
||||
ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));
|
||||
|
||||
pthread_mutex_lock(&tpool->tp_mutex);
|
||||
pthread_cleanup_push(tpool_cleanup, tpool);
|
||||
|
||||
/* mark the pool as being destroyed; wakeup idle workers */
|
||||
tpool->tp_flags |= TP_DESTROY;
|
||||
tpool->tp_flags &= ~TP_SUSPEND;
|
||||
(void) pthread_cond_broadcast(&tpool->tp_workcv);
|
||||
|
||||
/* cancel all active workers */
|
||||
for (activep = tpool->tp_active; activep; activep = activep->tpa_next)
|
||||
(void) pthread_cancel(activep->tpa_tid);
|
||||
|
||||
/* wait for all active workers to finish */
|
||||
while (tpool->tp_active != NULL) {
|
||||
tpool->tp_flags |= TP_WAIT;
|
||||
(void) pthread_cond_wait(&tpool->tp_waitcv, &tpool->tp_mutex);
|
||||
}
|
||||
|
||||
/* the last worker to terminate will wake us up */
|
||||
while (tpool->tp_current != 0)
|
||||
(void) pthread_cond_wait(&tpool->tp_busycv, &tpool->tp_mutex);
|
||||
|
||||
pthread_cleanup_pop(1); /* pthread_mutex_unlock(&tpool->tp_mutex); */
|
||||
delete_pool(tpool);
|
||||
}
|
||||
|
||||
/*
|
||||
* Like tpool_destroy(), but don't cancel workers or wait for them to finish.
|
||||
* The last worker to terminate will delete the pool.
|
||||
*/
|
||||
void
|
||||
tpool_abandon(tpool_t *tpool)
|
||||
{
|
||||
ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));
|
||||
|
||||
pthread_mutex_lock(&tpool->tp_mutex);
|
||||
if (tpool->tp_current == 0) {
|
||||
/* no workers, just delete the pool */
|
||||
pthread_mutex_unlock(&tpool->tp_mutex);
|
||||
delete_pool(tpool);
|
||||
} else {
|
||||
/* wake up all workers, last one will delete the pool */
|
||||
tpool->tp_flags |= TP_ABANDON;
|
||||
tpool->tp_flags &= ~TP_SUSPEND;
|
||||
(void) pthread_cond_broadcast(&tpool->tp_workcv);
|
||||
pthread_mutex_unlock(&tpool->tp_mutex);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Wait for all jobs to complete.
|
||||
* Calling tpool_wait() from a job in the pool will cause deadlock.
|
||||
*/
|
||||
void
|
||||
tpool_wait(tpool_t *tpool)
|
||||
{
|
||||
ASSERT(!tpool_member(tpool));
|
||||
ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));
|
||||
|
||||
pthread_mutex_lock(&tpool->tp_mutex);
|
||||
pthread_cleanup_push(tpool_cleanup, tpool);
|
||||
while (tpool->tp_head != NULL || tpool->tp_active != NULL) {
|
||||
tpool->tp_flags |= TP_WAIT;
|
||||
(void) pthread_cond_wait(&tpool->tp_waitcv, &tpool->tp_mutex);
|
||||
ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));
|
||||
}
|
||||
pthread_cleanup_pop(1); /* pthread_mutex_unlock(&tpool->tp_mutex); */
|
||||
}
|
||||
|
||||
void
|
||||
tpool_suspend(tpool_t *tpool)
|
||||
{
|
||||
ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));
|
||||
|
||||
pthread_mutex_lock(&tpool->tp_mutex);
|
||||
tpool->tp_flags |= TP_SUSPEND;
|
||||
pthread_mutex_unlock(&tpool->tp_mutex);
|
||||
}
|
||||
|
||||
int
|
||||
tpool_suspended(tpool_t *tpool)
|
||||
{
|
||||
int suspended;
|
||||
|
||||
ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));
|
||||
|
||||
pthread_mutex_lock(&tpool->tp_mutex);
|
||||
suspended = (tpool->tp_flags & TP_SUSPEND) != 0;
|
||||
pthread_mutex_unlock(&tpool->tp_mutex);
|
||||
|
||||
return (suspended);
|
||||
}
|
||||
|
||||
void
|
||||
tpool_resume(tpool_t *tpool)
|
||||
{
|
||||
int excess;
|
||||
|
||||
ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));
|
||||
|
||||
pthread_mutex_lock(&tpool->tp_mutex);
|
||||
if (!(tpool->tp_flags & TP_SUSPEND)) {
|
||||
pthread_mutex_unlock(&tpool->tp_mutex);
|
||||
return;
|
||||
}
|
||||
tpool->tp_flags &= ~TP_SUSPEND;
|
||||
(void) pthread_cond_broadcast(&tpool->tp_workcv);
|
||||
excess = tpool->tp_njobs - tpool->tp_idle;
|
||||
while (excess-- > 0 && tpool->tp_current < tpool->tp_maximum) {
|
||||
if (create_worker(tpool) != 0)
|
||||
break; /* pthread_create() failed */
|
||||
tpool->tp_current++;
|
||||
}
|
||||
pthread_mutex_unlock(&tpool->tp_mutex);
|
||||
}
|
||||
|
||||
int
|
||||
tpool_member(tpool_t *tpool)
|
||||
{
|
||||
pthread_t my_tid = pthread_self();
|
||||
tpool_active_t *activep;
|
||||
|
||||
ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));
|
||||
|
||||
pthread_mutex_lock(&tpool->tp_mutex);
|
||||
for (activep = tpool->tp_active; activep; activep = activep->tpa_next) {
|
||||
if (activep->tpa_tid == my_tid) {
|
||||
pthread_mutex_unlock(&tpool->tp_mutex);
|
||||
return (1);
|
||||
}
|
||||
}
|
||||
pthread_mutex_unlock(&tpool->tp_mutex);
|
||||
return (0);
|
||||
}
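For context on the libtpool removal, a minimal usage sketch of the interfaces deleted above (not part of the removed sources; the job function, worker counts, and linger time are illustrative only):

#include <thread_pool.h>
#include <stdio.h>
#include <stdint.h>

static void
print_job(void *arg)
{
	/* Runs on one of the pool's detached worker threads. */
	(void) printf("job %lu done\n", (unsigned long)(uintptr_t)arg);
}

static int
run_pool(void)
{
	/* At most 4 workers; idle workers linger 10 seconds before exiting. */
	tpool_t *tp = tpool_create(1, 4, 10, NULL);

	if (tp == NULL)
		return (-1);

	for (uintptr_t i = 0; i < 16; i++) {
		/* Wakes an idle worker, spawns one, or just queues the job. */
		if (tpool_dispatch(tp, print_job, (void *)i) != 0)
			break;
	}

	tpool_wait(tp);		/* block until queue and active jobs drain */
	tpool_destroy(tp);	/* reap the workers and free the pool */
	return (0);
}

Per the comments above, tpool_wait() and tpool_destroy() must not be called from a job running inside the pool, or the caller deadlocks.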
|
||||
|
|
@@ -1,94 +0,0 @@
|
|||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
* The contents of this file are subject to the terms of the
|
||||
* Common Development and Distribution License (the "License").
|
||||
* You may not use this file except in compliance with the License.
|
||||
*
|
||||
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
|
||||
* or https://opensource.org/licenses/CDDL-1.0.
|
||||
* See the License for the specific language governing permissions
|
||||
* and limitations under the License.
|
||||
*
|
||||
* When distributing Covered Code, include this CDDL HEADER in each
|
||||
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
|
||||
* If applicable, add the following below this CDDL HEADER, with the
|
||||
* fields enclosed by brackets "[]" replaced with your own identifying
|
||||
* information: Portions Copyright [yyyy] [name of copyright owner]
|
||||
*
|
||||
* CDDL HEADER END
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
|
||||
* Use is subject to license terms.
|
||||
*/
|
||||
|
||||
#ifndef _THREAD_POOL_IMPL_H
|
||||
#define _THREAD_POOL_IMPL_H
|
||||
|
||||
#include <thread_pool.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Thread pool implementation definitions.
|
||||
* See <thread_pool.h> for interface declarations.
|
||||
*/
|
||||
|
||||
/*
|
||||
* FIFO queued job
|
||||
*/
|
||||
typedef struct tpool_job tpool_job_t;
|
||||
struct tpool_job {
|
||||
tpool_job_t *tpj_next; /* list of jobs */
|
||||
void (*tpj_func)(void *); /* function to call */
|
||||
void *tpj_arg; /* its argument */
|
||||
};
|
||||
|
||||
/*
|
||||
* List of active threads, linked through their stacks.
|
||||
*/
|
||||
typedef struct tpool_active tpool_active_t;
|
||||
struct tpool_active {
|
||||
tpool_active_t *tpa_next; /* list of active threads */
|
||||
pthread_t tpa_tid; /* active thread id */
|
||||
};
|
||||
|
||||
/*
|
||||
* The thread pool.
|
||||
*/
|
||||
struct tpool {
|
||||
tpool_t *tp_forw; /* circular list of all thread pools */
|
||||
tpool_t *tp_back;
|
||||
pthread_mutex_t tp_mutex; /* protects the pool data */
|
||||
pthread_cond_t tp_busycv; /* synchronization in tpool_dispatch */
|
||||
pthread_cond_t tp_workcv; /* synchronization with workers */
|
||||
pthread_cond_t tp_waitcv; /* synchronization in tpool_wait() */
|
||||
tpool_active_t *tp_active; /* threads performing work */
|
||||
tpool_job_t *tp_head; /* FIFO job queue */
|
||||
tpool_job_t *tp_tail;
|
||||
pthread_attr_t tp_attr; /* attributes of the workers */
|
||||
int tp_flags; /* see below */
|
||||
uint_t tp_linger; /* seconds before idle workers exit */
|
||||
int tp_njobs; /* number of jobs in job queue */
|
||||
int tp_minimum; /* minimum number of worker threads */
|
||||
int tp_maximum; /* maximum number of worker threads */
|
||||
int tp_current; /* current number of worker threads */
|
||||
int tp_idle; /* number of idle workers */
|
||||
};
|
||||
|
||||
/* tp_flags */
|
||||
#define TP_WAIT 0x01 /* waiting in tpool_wait() */
|
||||
#define TP_SUSPEND 0x02 /* pool is being suspended */
|
||||
#define TP_DESTROY 0x04 /* pool is being destroyed */
|
||||
#define TP_ABANDON 0x08 /* pool is abandoned (auto-destroy) */
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* _THREAD_POOL_IMPL_H */
|
||||
|
|
@@ -1,28 +0,0 @@
|
|||
libuutil_la_CFLAGS = $(AM_CFLAGS) $(LIBRARY_CFLAGS)
|
||||
|
||||
lib_LTLIBRARIES += libuutil.la
|
||||
CPPCHECKTARGETS += libuutil.la
|
||||
|
||||
libuutil_la_SOURCES = \
|
||||
%D%/uu_alloc.c \
|
||||
%D%/uu_avl.c \
|
||||
%D%/uu_ident.c \
|
||||
%D%/uu_list.c \
|
||||
%D%/uu_misc.c \
|
||||
%D%/uu_string.c
|
||||
|
||||
libuutil_la_LIBADD = \
|
||||
libavl.la \
|
||||
libspl.la
|
||||
|
||||
libuutil_la_LIBADD += $(LTLIBINTL)
|
||||
|
||||
libuutil_la_LDFLAGS = -pthread
|
||||
|
||||
if !ASAN_ENABLED
|
||||
libuutil_la_LDFLAGS += -Wl,-z,defs
|
||||
endif
|
||||
|
||||
libuutil_la_LDFLAGS += -version-info 3:0:0
|
||||
|
||||
dist_noinst_DATA += %D%/libuutil.abi %D%/libuutil.suppr
|
||||
File diff suppressed because it is too large
|
|
@@ -1,2 +0,0 @@
|
|||
[suppress_type]
|
||||
name = FILE*
|
||||
|
|
@@ -1,136 +0,0 @@
|
|||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
* The contents of this file are subject to the terms of the
|
||||
* Common Development and Distribution License (the "License").
|
||||
* You may not use this file except in compliance with the License.
|
||||
*
|
||||
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
|
||||
* or https://opensource.org/licenses/CDDL-1.0.
|
||||
* See the License for the specific language governing permissions
|
||||
* and limitations under the License.
|
||||
*
|
||||
* When distributing Covered Code, include this CDDL HEADER in each
|
||||
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
|
||||
* If applicable, add the following below this CDDL HEADER, with the
|
||||
* fields enclosed by brackets "[]" replaced with your own identifying
|
||||
* information: Portions Copyright [yyyy] [name of copyright owner]
|
||||
*
|
||||
* CDDL HEADER END
|
||||
*/
|
||||
/*
|
||||
* Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
*/
|
||||
|
||||
#include "libuutil_common.h"
|
||||
|
||||
#include <stdarg.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
void *
|
||||
uu_zalloc(size_t n)
|
||||
{
|
||||
void *p = malloc(n);
|
||||
|
||||
if (p == NULL) {
|
||||
uu_set_error(UU_ERROR_SYSTEM);
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
(void) memset(p, 0, n);
|
||||
|
||||
return (p);
|
||||
}
|
||||
|
||||
void
|
||||
uu_free(void *p)
|
||||
{
|
||||
free(p);
|
||||
}
|
||||
|
||||
char *
|
||||
uu_strdup(const char *str)
|
||||
{
|
||||
char *buf = NULL;
|
||||
|
||||
if (str != NULL) {
|
||||
size_t sz;
|
||||
|
||||
sz = strlen(str) + 1;
|
||||
buf = uu_zalloc(sz);
|
||||
if (buf != NULL)
|
||||
(void) memcpy(buf, str, sz);
|
||||
}
|
||||
return (buf);
|
||||
}
|
||||
|
||||
/*
|
||||
* Duplicate up to n bytes of a string. Kind of sort of like
|
||||
* strdup(strlcpy(s, n)).
|
||||
*/
|
||||
char *
|
||||
uu_strndup(const char *s, size_t n)
|
||||
{
|
||||
size_t len;
|
||||
char *p;
|
||||
|
||||
len = strnlen(s, n);
|
||||
p = uu_zalloc(len + 1);
|
||||
if (p == NULL)
|
||||
return (NULL);
|
||||
|
||||
if (len > 0)
|
||||
(void) memcpy(p, s, len);
|
||||
p[len] = '\0';
|
||||
|
||||
return (p);
|
||||
}
|
||||
|
||||
/*
|
||||
* Duplicate a block of memory. Combines malloc with memcpy, much as
|
||||
* strdup combines malloc, strlen, and strcpy.
|
||||
*/
|
||||
void *
|
||||
uu_memdup(const void *buf, size_t sz)
|
||||
{
|
||||
void *p;
|
||||
|
||||
p = uu_zalloc(sz);
|
||||
if (p == NULL)
|
||||
return (NULL);
|
||||
(void) memcpy(p, buf, sz);
|
||||
return (p);
|
||||
}
|
||||
|
||||
char *
|
||||
uu_msprintf(const char *format, ...)
|
||||
{
|
||||
va_list args;
|
||||
char attic[1];
|
||||
uint_t M, m;
|
||||
char *b;
|
||||
|
||||
va_start(args, format);
|
||||
M = vsnprintf(attic, 1, format, args);
|
||||
va_end(args);
|
||||
|
||||
for (;;) {
|
||||
m = M;
|
||||
if ((b = uu_zalloc(m + 1)) == NULL)
|
||||
return (NULL);
|
||||
|
||||
va_start(args, format);
|
||||
M = vsnprintf(b, m + 1, format, args);
|
||||
va_end(args);
|
||||
|
||||
if (M == m)
|
||||
break; /* sizes match */
|
||||
|
||||
uu_free(b);
|
||||
}
|
||||
|
||||
return (b);
|
||||
}
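A small illustrative sketch of the allocation helpers removed above, assuming the public <libuutil.h> header; the strings and lengths are made up. uu_msprintf() measures the result with a first vsnprintf() pass and retries until the measured and formatted lengths agree.

#include <libuutil.h>
#include <stdio.h>

static void
alloc_demo(void)
{
	/* uu_msprintf() allocates exactly enough space for the result. */
	char *msg = uu_msprintf("%s has %d entries", "tank", 3);

	/* uu_strndup() copies at most 4 bytes and always NUL-terminates. */
	char *prefix = uu_strndup("tank/home", 4);

	if (msg != NULL)
		(void) puts(msg);
	if (prefix != NULL)
		(void) puts(prefix);	/* prints "tank" */

	uu_free(msg);		/* thin wrapper around free(); NULL is fine */
	uu_free(prefix);
}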
|
||||
|
|
@@ -1,569 +0,0 @@
|
|||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
* The contents of this file are subject to the terms of the
|
||||
* Common Development and Distribution License (the "License").
|
||||
* You may not use this file except in compliance with the License.
|
||||
*
|
||||
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
|
||||
* or https://opensource.org/licenses/CDDL-1.0.
|
||||
* See the License for the specific language governing permissions
|
||||
* and limitations under the License.
|
||||
*
|
||||
* When distributing Covered Code, include this CDDL HEADER in each
|
||||
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
|
||||
* If applicable, add the following below this CDDL HEADER, with the
|
||||
* fields enclosed by brackets "[]" replaced with your own identifying
|
||||
* information: Portions Copyright [yyyy] [name of copyright owner]
|
||||
*
|
||||
* CDDL HEADER END
|
||||
*/
|
||||
/*
|
||||
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
|
||||
* Use is subject to license terms.
|
||||
*/
|
||||
|
||||
|
||||
|
||||
#include "libuutil_common.h"
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <unistd.h>
|
||||
#include <sys/avl.h>
|
||||
|
||||
static uu_avl_pool_t uu_null_apool = { &uu_null_apool, &uu_null_apool };
|
||||
static pthread_mutex_t uu_apool_list_lock = PTHREAD_MUTEX_INITIALIZER;
|
||||
|
||||
/*
|
||||
* The index mark changes on every insert and delete, to catch stale
|
||||
* references.
|
||||
*
|
||||
* We leave the low bit alone, since the avl code uses it.
|
||||
*/
|
||||
#define INDEX_MAX (sizeof (uintptr_t) - 2)
|
||||
#define INDEX_NEXT(m) (((m) == INDEX_MAX)? 2 : ((m) + 2) & INDEX_MAX)
|
||||
|
||||
#define INDEX_DECODE(i) ((i) & ~INDEX_MAX)
|
||||
#define INDEX_ENCODE(p, n) (((n) & ~INDEX_MAX) | (p)->ua_index)
|
||||
#define INDEX_VALID(p, i) (((i) & INDEX_MAX) == (p)->ua_index)
|
||||
#define INDEX_CHECK(i) (((i) & INDEX_MAX) != 0)
|
||||
|
||||
/*
|
||||
* When an element is inactive (not in a tree), we keep a marked pointer to
|
||||
* its containing pool in its first word, and a NULL pointer in its second.
|
||||
*
|
||||
* On insert, we use these to verify that it comes from the correct pool.
|
||||
*/
|
||||
#define NODE_ARRAY(p, n) ((uintptr_t *)((uintptr_t)(n) + \
|
||||
(pp)->uap_nodeoffset))
|
||||
|
||||
#define POOL_TO_MARKER(pp) (((uintptr_t)(pp) | 1))
|
||||
|
||||
#define DEAD_MARKER 0xc4
|
||||
|
||||
uu_avl_pool_t *
|
||||
uu_avl_pool_create(const char *name, size_t objsize, size_t nodeoffset,
|
||||
uu_compare_fn_t *compare_func, uint32_t flags)
|
||||
{
|
||||
uu_avl_pool_t *pp, *next, *prev;
|
||||
|
||||
if (name == NULL ||
|
||||
uu_check_name(name, UU_NAME_DOMAIN) == -1 ||
|
||||
nodeoffset + sizeof (uu_avl_node_t) > objsize ||
|
||||
compare_func == NULL) {
|
||||
uu_set_error(UU_ERROR_INVALID_ARGUMENT);
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
if (flags & ~UU_AVL_POOL_DEBUG) {
|
||||
uu_set_error(UU_ERROR_UNKNOWN_FLAG);
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
pp = uu_zalloc(sizeof (uu_avl_pool_t));
|
||||
if (pp == NULL) {
|
||||
uu_set_error(UU_ERROR_NO_MEMORY);
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
(void) strlcpy(pp->uap_name, name, sizeof (pp->uap_name));
|
||||
pp->uap_nodeoffset = nodeoffset;
|
||||
pp->uap_objsize = objsize;
|
||||
pp->uap_cmp = compare_func;
|
||||
if (flags & UU_AVL_POOL_DEBUG)
|
||||
pp->uap_debug = 1;
|
||||
pp->uap_last_index = 0;
|
||||
|
||||
(void) pthread_mutex_init(&pp->uap_lock, NULL);
|
||||
|
||||
pp->uap_null_avl.ua_next = &pp->uap_null_avl;
|
||||
pp->uap_null_avl.ua_prev = &pp->uap_null_avl;
|
||||
|
||||
(void) pthread_mutex_lock(&uu_apool_list_lock);
|
||||
pp->uap_next = next = &uu_null_apool;
|
||||
pp->uap_prev = prev = next->uap_prev;
|
||||
next->uap_prev = pp;
|
||||
prev->uap_next = pp;
|
||||
(void) pthread_mutex_unlock(&uu_apool_list_lock);
|
||||
|
||||
return (pp);
|
||||
}
|
||||
|
||||
void
|
||||
uu_avl_pool_destroy(uu_avl_pool_t *pp)
|
||||
{
|
||||
if (pp->uap_debug) {
|
||||
if (pp->uap_null_avl.ua_next != &pp->uap_null_avl ||
|
||||
pp->uap_null_avl.ua_prev != &pp->uap_null_avl) {
|
||||
uu_panic("uu_avl_pool_destroy: Pool \"%.*s\" (%p) has "
|
||||
"outstanding avls, or is corrupt.\n",
|
||||
(int)sizeof (pp->uap_name), pp->uap_name,
|
||||
(void *)pp);
|
||||
}
|
||||
}
|
||||
(void) pthread_mutex_lock(&uu_apool_list_lock);
|
||||
pp->uap_next->uap_prev = pp->uap_prev;
|
||||
pp->uap_prev->uap_next = pp->uap_next;
|
||||
(void) pthread_mutex_unlock(&uu_apool_list_lock);
|
||||
(void) pthread_mutex_destroy(&pp->uap_lock);
|
||||
pp->uap_prev = NULL;
|
||||
pp->uap_next = NULL;
|
||||
uu_free(pp);
|
||||
}
|
||||
|
||||
void
|
||||
uu_avl_node_init(void *base, uu_avl_node_t *np, uu_avl_pool_t *pp)
|
||||
{
|
||||
uintptr_t *na = (uintptr_t *)np;
|
||||
|
||||
if (pp->uap_debug) {
|
||||
uintptr_t offset = (uintptr_t)np - (uintptr_t)base;
|
||||
if (offset + sizeof (*np) > pp->uap_objsize) {
|
||||
uu_panic("uu_avl_node_init(%p, %p, %p (\"%s\")): "
|
||||
"offset %ld doesn't fit in object (size %ld)\n",
|
||||
base, (void *)np, (void *)pp, pp->uap_name,
|
||||
(long)offset, (long)pp->uap_objsize);
|
||||
}
|
||||
if (offset != pp->uap_nodeoffset) {
|
||||
uu_panic("uu_avl_node_init(%p, %p, %p (\"%s\")): "
|
||||
"offset %ld doesn't match pool's offset (%ld)\n",
|
||||
base, (void *)np, (void *)pp, pp->uap_name,
|
||||
(long)offset, (long)pp->uap_objsize);
|
||||
}
|
||||
}
|
||||
|
||||
na[0] = POOL_TO_MARKER(pp);
|
||||
na[1] = 0;
|
||||
}
|
||||
|
||||
void
|
||||
uu_avl_node_fini(void *base, uu_avl_node_t *np, uu_avl_pool_t *pp)
|
||||
{
|
||||
uintptr_t *na = (uintptr_t *)np;
|
||||
|
||||
if (pp->uap_debug) {
|
||||
if (na[0] == DEAD_MARKER && na[1] == DEAD_MARKER) {
|
||||
uu_panic("uu_avl_node_fini(%p, %p, %p (\"%s\")): "
|
||||
"node already finied\n",
|
||||
base, (void *)np, (void *)pp, pp->uap_name);
|
||||
}
|
||||
if (na[0] != POOL_TO_MARKER(pp) || na[1] != 0) {
|
||||
uu_panic("uu_avl_node_fini(%p, %p, %p (\"%s\")): "
|
||||
"node corrupt, in tree, or in different pool\n",
|
||||
base, (void *)np, (void *)pp, pp->uap_name);
|
||||
}
|
||||
}
|
||||
|
||||
na[0] = DEAD_MARKER;
|
||||
na[1] = DEAD_MARKER;
|
||||
na[2] = DEAD_MARKER;
|
||||
}
|
||||
|
||||
struct uu_avl_node_compare_info {
|
||||
uu_compare_fn_t *ac_compare;
|
||||
void *ac_private;
|
||||
void *ac_right;
|
||||
void *ac_found;
|
||||
};
|
||||
|
||||
static int
|
||||
uu_avl_node_compare(const void *l, const void *r)
|
||||
{
|
||||
struct uu_avl_node_compare_info *info =
|
||||
(struct uu_avl_node_compare_info *)l;
|
||||
|
||||
int res = info->ac_compare(r, info->ac_right, info->ac_private);
|
||||
|
||||
if (res == 0) {
|
||||
if (info->ac_found == NULL)
|
||||
info->ac_found = (void *)r;
|
||||
return (-1);
|
||||
}
|
||||
if (res < 0)
|
||||
return (1);
|
||||
return (-1);
|
||||
}
|
||||
|
||||
uu_avl_t *
|
||||
uu_avl_create(uu_avl_pool_t *pp, void *parent, uint32_t flags)
|
||||
{
|
||||
uu_avl_t *ap, *next, *prev;
|
||||
|
||||
if (flags & ~UU_AVL_DEBUG) {
|
||||
uu_set_error(UU_ERROR_UNKNOWN_FLAG);
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
ap = uu_zalloc(sizeof (*ap));
|
||||
if (ap == NULL) {
|
||||
uu_set_error(UU_ERROR_NO_MEMORY);
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
ap->ua_pool = pp;
|
||||
ap->ua_parent = parent;
|
||||
ap->ua_debug = pp->uap_debug || (flags & UU_AVL_DEBUG);
|
||||
ap->ua_index = (pp->uap_last_index = INDEX_NEXT(pp->uap_last_index));
|
||||
|
||||
avl_create(&ap->ua_tree, &uu_avl_node_compare, pp->uap_objsize,
|
||||
pp->uap_nodeoffset);
|
||||
|
||||
ap->ua_null_walk.uaw_next = &ap->ua_null_walk;
|
||||
ap->ua_null_walk.uaw_prev = &ap->ua_null_walk;
|
||||
|
||||
(void) pthread_mutex_lock(&pp->uap_lock);
|
||||
next = &pp->uap_null_avl;
|
||||
prev = next->ua_prev;
|
||||
ap->ua_next = next;
|
||||
ap->ua_prev = prev;
|
||||
next->ua_prev = ap;
|
||||
prev->ua_next = ap;
|
||||
(void) pthread_mutex_unlock(&pp->uap_lock);
|
||||
|
||||
return (ap);
|
||||
}
|
||||
|
||||
void
|
||||
uu_avl_destroy(uu_avl_t *ap)
|
||||
{
|
||||
uu_avl_pool_t *pp = ap->ua_pool;
|
||||
|
||||
if (ap->ua_debug) {
|
||||
if (avl_numnodes(&ap->ua_tree) != 0) {
|
||||
uu_panic("uu_avl_destroy(%p): tree not empty\n",
|
||||
(void *)ap);
|
||||
}
|
||||
if (ap->ua_null_walk.uaw_next != &ap->ua_null_walk ||
|
||||
ap->ua_null_walk.uaw_prev != &ap->ua_null_walk) {
|
||||
uu_panic("uu_avl_destroy(%p): outstanding walkers\n",
|
||||
(void *)ap);
|
||||
}
|
||||
}
|
||||
(void) pthread_mutex_lock(&pp->uap_lock);
|
||||
ap->ua_next->ua_prev = ap->ua_prev;
|
||||
ap->ua_prev->ua_next = ap->ua_next;
|
||||
(void) pthread_mutex_unlock(&pp->uap_lock);
|
||||
ap->ua_prev = NULL;
|
||||
ap->ua_next = NULL;
|
||||
|
||||
ap->ua_pool = NULL;
|
||||
avl_destroy(&ap->ua_tree);
|
||||
|
||||
uu_free(ap);
|
||||
}
|
||||
|
||||
size_t
|
||||
uu_avl_numnodes(uu_avl_t *ap)
|
||||
{
|
||||
return (avl_numnodes(&ap->ua_tree));
|
||||
}
|
||||
|
||||
void *
|
||||
uu_avl_first(uu_avl_t *ap)
|
||||
{
|
||||
return (avl_first(&ap->ua_tree));
|
||||
}
|
||||
|
||||
void *
|
||||
uu_avl_last(uu_avl_t *ap)
|
||||
{
|
||||
return (avl_last(&ap->ua_tree));
|
||||
}
|
||||
|
||||
void *
|
||||
uu_avl_next(uu_avl_t *ap, void *node)
|
||||
{
|
||||
return (AVL_NEXT(&ap->ua_tree, node));
|
||||
}
|
||||
|
||||
void *
|
||||
uu_avl_prev(uu_avl_t *ap, void *node)
|
||||
{
|
||||
return (AVL_PREV(&ap->ua_tree, node));
|
||||
}
|
||||
|
||||
static void
|
||||
_avl_walk_init(uu_avl_walk_t *wp, uu_avl_t *ap, uint32_t flags)
|
||||
{
|
||||
uu_avl_walk_t *next, *prev;
|
||||
|
||||
int robust = (flags & UU_WALK_ROBUST);
|
||||
int direction = (flags & UU_WALK_REVERSE)? -1 : 1;
|
||||
|
||||
(void) memset(wp, 0, sizeof (*wp));
|
||||
wp->uaw_avl = ap;
|
||||
wp->uaw_robust = robust;
|
||||
wp->uaw_dir = direction;
|
||||
|
||||
if (direction > 0)
|
||||
wp->uaw_next_result = avl_first(&ap->ua_tree);
|
||||
else
|
||||
wp->uaw_next_result = avl_last(&ap->ua_tree);
|
||||
|
||||
if (ap->ua_debug || robust) {
|
||||
wp->uaw_next = next = &ap->ua_null_walk;
|
||||
wp->uaw_prev = prev = next->uaw_prev;
|
||||
next->uaw_prev = wp;
|
||||
prev->uaw_next = wp;
|
||||
}
|
||||
}
|
||||
|
||||
static void *
|
||||
_avl_walk_advance(uu_avl_walk_t *wp, uu_avl_t *ap)
|
||||
{
|
||||
void *np = wp->uaw_next_result;
|
||||
|
||||
avl_tree_t *t = &ap->ua_tree;
|
||||
|
||||
if (np == NULL)
|
||||
return (NULL);
|
||||
|
||||
wp->uaw_next_result = (wp->uaw_dir > 0)? AVL_NEXT(t, np) :
|
||||
AVL_PREV(t, np);
|
||||
|
||||
return (np);
|
||||
}
|
||||
|
||||
static void
|
||||
_avl_walk_fini(uu_avl_walk_t *wp)
|
||||
{
|
||||
if (wp->uaw_next != NULL) {
|
||||
wp->uaw_next->uaw_prev = wp->uaw_prev;
|
||||
wp->uaw_prev->uaw_next = wp->uaw_next;
|
||||
wp->uaw_next = NULL;
|
||||
wp->uaw_prev = NULL;
|
||||
}
|
||||
wp->uaw_avl = NULL;
|
||||
wp->uaw_next_result = NULL;
|
||||
}
|
||||
|
||||
uu_avl_walk_t *
|
||||
uu_avl_walk_start(uu_avl_t *ap, uint32_t flags)
|
||||
{
|
||||
uu_avl_walk_t *wp;
|
||||
|
||||
if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
|
||||
uu_set_error(UU_ERROR_UNKNOWN_FLAG);
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
wp = uu_zalloc(sizeof (*wp));
|
||||
if (wp == NULL) {
|
||||
uu_set_error(UU_ERROR_NO_MEMORY);
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
_avl_walk_init(wp, ap, flags);
|
||||
return (wp);
|
||||
}
|
||||
|
||||
void *
|
||||
uu_avl_walk_next(uu_avl_walk_t *wp)
|
||||
{
|
||||
return (_avl_walk_advance(wp, wp->uaw_avl));
|
||||
}
|
||||
|
||||
void
|
||||
uu_avl_walk_end(uu_avl_walk_t *wp)
|
||||
{
|
||||
_avl_walk_fini(wp);
|
||||
uu_free(wp);
|
||||
}
|
||||
|
||||
int
|
||||
uu_avl_walk(uu_avl_t *ap, uu_walk_fn_t *func, void *private, uint32_t flags)
|
||||
{
|
||||
void *e;
|
||||
uu_avl_walk_t my_walk;
|
||||
|
||||
int status = UU_WALK_NEXT;
|
||||
|
||||
if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
|
||||
uu_set_error(UU_ERROR_UNKNOWN_FLAG);
|
||||
return (-1);
|
||||
}
|
||||
|
||||
_avl_walk_init(&my_walk, ap, flags);
|
||||
while (status == UU_WALK_NEXT &&
|
||||
(e = _avl_walk_advance(&my_walk, ap)) != NULL)
|
||||
status = (*func)(e, private);
|
||||
_avl_walk_fini(&my_walk);
|
||||
|
||||
if (status >= 0)
|
||||
return (0);
|
||||
uu_set_error(UU_ERROR_CALLBACK_FAILED);
|
||||
return (-1);
|
||||
}
|
||||
|
||||
void
|
||||
uu_avl_remove(uu_avl_t *ap, void *elem)
|
||||
{
|
||||
uu_avl_walk_t *wp;
|
||||
uu_avl_pool_t *pp = ap->ua_pool;
|
||||
uintptr_t *na = NODE_ARRAY(pp, elem);
|
||||
|
||||
if (ap->ua_debug) {
|
||||
/*
|
||||
* invalidate outstanding uu_avl_index_ts.
|
||||
*/
|
||||
ap->ua_index = INDEX_NEXT(ap->ua_index);
|
||||
}
|
||||
|
||||
/*
|
||||
* Robust walkers must be advanced if we are removing the node
|
||||
* they are currently using. In debug mode, non-robust walkers
|
||||
* are also on the walker list.
|
||||
*/
|
||||
for (wp = ap->ua_null_walk.uaw_next; wp != &ap->ua_null_walk;
|
||||
wp = wp->uaw_next) {
|
||||
if (wp->uaw_robust) {
|
||||
if (elem == wp->uaw_next_result)
|
||||
(void) _avl_walk_advance(wp, ap);
|
||||
} else if (wp->uaw_next_result != NULL) {
|
||||
uu_panic("uu_avl_remove(%p, %p): active non-robust "
|
||||
"walker\n", (void *)ap, elem);
|
||||
}
|
||||
}
|
||||
|
||||
avl_remove(&ap->ua_tree, elem);
|
||||
|
||||
na[0] = POOL_TO_MARKER(pp);
|
||||
na[1] = 0;
|
||||
}
|
||||
|
||||
void *
|
||||
uu_avl_teardown(uu_avl_t *ap, void **cookie)
|
||||
{
|
||||
void *elem = avl_destroy_nodes(&ap->ua_tree, cookie);
|
||||
|
||||
if (elem != NULL) {
|
||||
uu_avl_pool_t *pp = ap->ua_pool;
|
||||
uintptr_t *na = NODE_ARRAY(pp, elem);
|
||||
|
||||
na[0] = POOL_TO_MARKER(pp);
|
||||
na[1] = 0;
|
||||
}
|
||||
return (elem);
|
||||
}
|
||||
|
||||
void *
|
||||
uu_avl_find(uu_avl_t *ap, void *elem, void *private, uu_avl_index_t *out)
|
||||
{
|
||||
struct uu_avl_node_compare_info info;
|
||||
void *result;
|
||||
|
||||
info.ac_compare = ap->ua_pool->uap_cmp;
|
||||
info.ac_private = private;
|
||||
info.ac_right = elem;
|
||||
info.ac_found = NULL;
|
||||
|
||||
result = avl_find(&ap->ua_tree, &info, out);
|
||||
if (out != NULL)
|
||||
*out = INDEX_ENCODE(ap, *out);
|
||||
|
||||
if (ap->ua_debug && result != NULL)
|
||||
uu_panic("uu_avl_find: internal error: avl_find succeeded\n");
|
||||
|
||||
return (info.ac_found);
|
||||
}
|
||||
|
||||
void
|
||||
uu_avl_insert(uu_avl_t *ap, void *elem, uu_avl_index_t idx)
|
||||
{
|
||||
if (ap->ua_debug) {
|
||||
uu_avl_pool_t *pp = ap->ua_pool;
|
||||
uintptr_t *na = NODE_ARRAY(pp, elem);
|
||||
|
||||
if (na[1] != 0)
|
||||
uu_panic("uu_avl_insert(%p, %p, %p): node already "
|
||||
"in tree, or corrupt\n",
|
||||
(void *)ap, elem, (void *)idx);
|
||||
if (na[0] == 0)
|
||||
uu_panic("uu_avl_insert(%p, %p, %p): node not "
|
||||
"initialized\n",
|
||||
(void *)ap, elem, (void *)idx);
|
||||
if (na[0] != POOL_TO_MARKER(pp))
|
||||
uu_panic("uu_avl_insert(%p, %p, %p): node from "
|
||||
"other pool, or corrupt\n",
|
||||
(void *)ap, elem, (void *)idx);
|
||||
|
||||
if (!INDEX_VALID(ap, idx))
|
||||
uu_panic("uu_avl_insert(%p, %p, %p): %s\n",
|
||||
(void *)ap, elem, (void *)idx,
|
||||
INDEX_CHECK(idx)? "outdated index" :
|
||||
"invalid index");
|
||||
|
||||
/*
|
||||
* invalidate outstanding uu_avl_index_ts.
|
||||
*/
|
||||
ap->ua_index = INDEX_NEXT(ap->ua_index);
|
||||
}
|
||||
avl_insert(&ap->ua_tree, elem, INDEX_DECODE(idx));
|
||||
}
|
||||
|
||||
void *
|
||||
uu_avl_nearest_next(uu_avl_t *ap, uu_avl_index_t idx)
|
||||
{
|
||||
if (ap->ua_debug && !INDEX_VALID(ap, idx))
|
||||
uu_panic("uu_avl_nearest_next(%p, %p): %s\n",
|
||||
(void *)ap, (void *)idx, INDEX_CHECK(idx)?
|
||||
"outdated index" : "invalid index");
|
||||
return (avl_nearest(&ap->ua_tree, INDEX_DECODE(idx), AVL_AFTER));
|
||||
}
|
||||
|
||||
void *
|
||||
uu_avl_nearest_prev(uu_avl_t *ap, uu_avl_index_t idx)
|
||||
{
|
||||
if (ap->ua_debug && !INDEX_VALID(ap, idx))
|
||||
uu_panic("uu_avl_nearest_prev(%p, %p): %s\n",
|
||||
(void *)ap, (void *)idx, INDEX_CHECK(idx)?
|
||||
"outdated index" : "invalid index");
|
||||
return (avl_nearest(&ap->ua_tree, INDEX_DECODE(idx), AVL_BEFORE));
|
||||
}
|
||||
|
||||
/*
|
||||
* called from uu_lockup() and uu_release(), as part of our fork1()-safety.
|
||||
*/
|
||||
void
|
||||
uu_avl_lockup(void)
|
||||
{
|
||||
uu_avl_pool_t *pp;
|
||||
|
||||
(void) pthread_mutex_lock(&uu_apool_list_lock);
|
||||
for (pp = uu_null_apool.uap_next; pp != &uu_null_apool;
|
||||
pp = pp->uap_next)
|
||||
(void) pthread_mutex_lock(&pp->uap_lock);
|
||||
}
|
||||
|
||||
void
|
||||
uu_avl_release(void)
|
||||
{
|
||||
uu_avl_pool_t *pp;
|
||||
|
||||
for (pp = uu_null_apool.uap_next; pp != &uu_null_apool;
|
||||
pp = pp->uap_next)
|
||||
(void) pthread_mutex_unlock(&pp->uap_lock);
|
||||
(void) pthread_mutex_unlock(&uu_apool_list_lock);
|
||||
}
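A sketch of the usual find-or-insert pattern with the AVL wrappers removed above. The node_t structure, comparator, and pool name are illustrative, and UU_DEFAULT is assumed to request no debug flags; uu_avl_find() returns NULL for a missing element but still fills in the uu_avl_index_t that uu_avl_insert() consumes.

#include <libuutil.h>
#include <stddef.h>
#include <string.h>

typedef struct node {
	const char	*n_name;
	uu_avl_node_t	n_link;			/* embedded AVL linkage */
} node_t;

/* libuutil comparator: compares two elements, third argument is private. */
static int
node_cmp(const void *l, const void *r, void *private)
{
	(void) private;
	return (strcmp(((const node_t *)l)->n_name,
	    ((const node_t *)r)->n_name));
}

static uu_avl_pool_t *
node_pool_create(void)
{
	/* The pool records object size, node offset, and the comparator. */
	return (uu_avl_pool_create("example,node", sizeof (node_t),
	    offsetof(node_t, n_link), node_cmp, UU_DEFAULT));
}

static void
insert_unique(uu_avl_pool_t *pool, uu_avl_t *tree, node_t *n)
{
	uu_avl_index_t where;

	uu_avl_node_init(n, &n->n_link, pool);
	/* find() returns NULL when absent but still fills in `where'. */
	if (uu_avl_find(tree, n, NULL, &where) == NULL)
		uu_avl_insert(tree, n, where);
}

The tree itself would come from uu_avl_create(pool, NULL, UU_DEFAULT), and teardown mirrors creation: uu_avl_destroy() once the nodes are removed, then uu_avl_pool_destroy().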
|
||||
|
|
@@ -1,123 +0,0 @@
|
|||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
* The contents of this file are subject to the terms of the
|
||||
* Common Development and Distribution License, Version 1.0 only
|
||||
* (the "License"). You may not use this file except in compliance
|
||||
* with the License.
|
||||
*
|
||||
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
|
||||
* or https://opensource.org/licenses/CDDL-1.0.
|
||||
* See the License for the specific language governing permissions
|
||||
* and limitations under the License.
|
||||
*
|
||||
* When distributing Covered Code, include this CDDL HEADER in each
|
||||
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
|
||||
* If applicable, add the following below this CDDL HEADER, with the
|
||||
* fields enclosed by brackets "[]" replaced with your own identifying
|
||||
* information: Portions Copyright [yyyy] [name of copyright owner]
|
||||
*
|
||||
* CDDL HEADER END
|
||||
*/
|
||||
/*
|
||||
* Copyright 2004 Sun Microsystems, Inc. All rights reserved.
|
||||
* Use is subject to license terms.
|
||||
*/
|
||||
|
||||
|
||||
|
||||
#include "libuutil_common.h"
|
||||
|
||||
#include <string.h>
|
||||
|
||||
/*
|
||||
* We require names of the form:
|
||||
* [provider,]identifier[/[provider,]identifier]...
|
||||
*
|
||||
* Where provider is either a stock symbol (SUNW) or a java-style reversed
|
||||
* domain name (com.sun).
|
||||
*
|
||||
* Both providers and identifiers must start with a letter, and may
|
||||
* only contain alphanumerics, dashes, and underlines. Providers
|
||||
* may also contain periods.
|
||||
*
|
||||
* Note that we do _not_ use the macros in <ctype.h>, since they are affected
|
||||
* by the current locale settings.
|
||||
*/
|
||||
|
||||
#define IS_ALPHA(c) \
|
||||
(((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z'))
|
||||
|
||||
#define IS_DIGIT(c) \
|
||||
((c) >= '0' && (c) <= '9')
|
||||
|
||||
static int
|
||||
is_valid_ident(const char *s, const char *e, int allowdot)
|
||||
{
|
||||
char c;
|
||||
|
||||
if (s >= e)
|
||||
return (0); /* name is empty */
|
||||
|
||||
c = *s++;
|
||||
if (!IS_ALPHA(c))
|
||||
return (0); /* does not start with letter */
|
||||
|
||||
while (s < e && (c = *s++) != 0) {
|
||||
if (IS_ALPHA(c) || IS_DIGIT(c) || c == '-' || c == '_' ||
|
||||
(allowdot && c == '.'))
|
||||
continue;
|
||||
return (0); /* invalid character */
|
||||
}
|
||||
return (1);
|
||||
}
|
||||
|
||||
static int
|
||||
is_valid_component(const char *b, const char *e, uint_t flags)
|
||||
{
|
||||
char *sp;
|
||||
|
||||
if (flags & UU_NAME_DOMAIN) {
|
||||
sp = strchr(b, ',');
|
||||
if (sp != NULL && sp < e) {
|
||||
if (!is_valid_ident(b, sp, 1))
|
||||
return (0);
|
||||
b = sp + 1;
|
||||
}
|
||||
}
|
||||
|
||||
return (is_valid_ident(b, e, 0));
|
||||
}
|
||||
|
||||
int
|
||||
uu_check_name(const char *name, uint_t flags)
|
||||
{
|
||||
const char *end = name + strlen(name);
|
||||
const char *p;
|
||||
|
||||
if (flags & ~(UU_NAME_DOMAIN | UU_NAME_PATH)) {
|
||||
uu_set_error(UU_ERROR_UNKNOWN_FLAG);
|
||||
return (-1);
|
||||
}
|
||||
|
||||
if (!(flags & UU_NAME_PATH)) {
|
||||
if (!is_valid_component(name, end, flags))
|
||||
goto bad;
|
||||
return (0);
|
||||
}
|
||||
|
||||
while ((p = strchr(name, '/')) != NULL) {
|
||||
if (!is_valid_component(name, p - 1, flags))
|
||||
goto bad;
|
||||
name = p + 1;
|
||||
}
|
||||
if (!is_valid_component(name, end, flags))
|
||||
goto bad;
|
||||
|
||||
return (0);
|
||||
|
||||
bad:
|
||||
uu_set_error(UU_ERROR_INVALID_ARGUMENT);
|
||||
return (-1);
|
||||
}
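To make the name grammar described at the top of uu_ident.c concrete, a few illustrative calls (the names themselves are made up):

#include <libuutil.h>

static void
name_checks(void)
{
	/* Plain identifier: accepted (returns 0). */
	(void) uu_check_name("myapp", 0);

	/* Provider-prefixed identifier, stock-symbol style: accepted. */
	(void) uu_check_name("SUNW,myapp", UU_NAME_DOMAIN);

	/* Reversed-domain provider plus a path of components: accepted. */
	(void) uu_check_name("com.example,svc/instance",
	    UU_NAME_DOMAIN | UU_NAME_PATH);

	/* Starts with a digit: rejected with UU_ERROR_INVALID_ARGUMENT. */
	(void) uu_check_name("9lives", 0);
}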
|
||||
|
|
@@ -1,723 +0,0 @@
|
|||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
* The contents of this file are subject to the terms of the
|
||||
* Common Development and Distribution License (the "License").
|
||||
* You may not use this file except in compliance with the License.
|
||||
*
|
||||
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
|
||||
* or https://opensource.org/licenses/CDDL-1.0.
|
||||
* See the License for the specific language governing permissions
|
||||
* and limitations under the License.
|
||||
*
|
||||
* When distributing Covered Code, include this CDDL HEADER in each
|
||||
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
|
||||
* If applicable, add the following below this CDDL HEADER, with the
|
||||
* fields enclosed by brackets "[]" replaced with your own identifying
|
||||
* information: Portions Copyright [yyyy] [name of copyright owner]
|
||||
*
|
||||
* CDDL HEADER END
|
||||
*/
|
||||
/*
|
||||
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
|
||||
* Use is subject to license terms.
|
||||
*/
|
||||
|
||||
|
||||
|
||||
#include "libuutil_common.h"
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <unistd.h>
|
||||
#include <sys/time.h>
|
||||
|
||||
#define ELEM_TO_NODE(lp, e) \
|
||||
((uu_list_node_impl_t *)((uintptr_t)(e) + (lp)->ul_offset))
|
||||
|
||||
#define NODE_TO_ELEM(lp, n) \
|
||||
((void *)((uintptr_t)(n) - (lp)->ul_offset))
|
||||
|
||||
/*
|
||||
* uu_list_index_ts define a location for insertion. They are simply a
|
||||
* pointer to the object after the insertion point. We store a mark
|
||||
* in the low-bits of the index, to help prevent mistakes.
|
||||
*
|
||||
* When debugging, the index mark changes on every insert and delete, to
|
||||
* catch stale references.
|
||||
*/
|
||||
#define INDEX_MAX (sizeof (uintptr_t) - 1)
|
||||
#define INDEX_NEXT(m) (((m) == INDEX_MAX)? 1 : ((m) + 1) & INDEX_MAX)
|
||||
|
||||
#define INDEX_TO_NODE(i) ((uu_list_node_impl_t *)((i) & ~INDEX_MAX))
|
||||
#define NODE_TO_INDEX(p, n) (((uintptr_t)(n) & ~INDEX_MAX) | (p)->ul_index)
|
||||
#define INDEX_VALID(p, i) (((i) & INDEX_MAX) == (p)->ul_index)
|
||||
#define INDEX_CHECK(i) (((i) & INDEX_MAX) != 0)
|
||||
|
||||
#define POOL_TO_MARKER(pp) ((void *)((uintptr_t)(pp) | 1))
|
||||
|
||||
static uu_list_pool_t uu_null_lpool = { &uu_null_lpool, &uu_null_lpool };
|
||||
static pthread_mutex_t uu_lpool_list_lock = PTHREAD_MUTEX_INITIALIZER;
|
||||
|
||||
uu_list_pool_t *
|
||||
uu_list_pool_create(const char *name, size_t objsize,
|
||||
size_t nodeoffset, uu_compare_fn_t *compare_func, uint32_t flags)
|
||||
{
|
||||
uu_list_pool_t *pp, *next, *prev;
|
||||
|
||||
if (name == NULL ||
|
||||
uu_check_name(name, UU_NAME_DOMAIN) == -1 ||
|
||||
nodeoffset + sizeof (uu_list_node_t) > objsize) {
|
||||
uu_set_error(UU_ERROR_INVALID_ARGUMENT);
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
if (flags & ~UU_LIST_POOL_DEBUG) {
|
||||
uu_set_error(UU_ERROR_UNKNOWN_FLAG);
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
pp = uu_zalloc(sizeof (uu_list_pool_t));
|
||||
if (pp == NULL) {
|
||||
uu_set_error(UU_ERROR_NO_MEMORY);
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
(void) strlcpy(pp->ulp_name, name, sizeof (pp->ulp_name));
|
||||
pp->ulp_nodeoffset = nodeoffset;
|
||||
pp->ulp_objsize = objsize;
|
||||
pp->ulp_cmp = compare_func;
|
||||
if (flags & UU_LIST_POOL_DEBUG)
|
||||
pp->ulp_debug = 1;
|
||||
pp->ulp_last_index = 0;
|
||||
|
||||
(void) pthread_mutex_init(&pp->ulp_lock, NULL);
|
||||
|
||||
pp->ulp_null_list.ul_next = &pp->ulp_null_list;
|
||||
pp->ulp_null_list.ul_prev = &pp->ulp_null_list;
|
||||
|
||||
(void) pthread_mutex_lock(&uu_lpool_list_lock);
|
||||
pp->ulp_next = next = &uu_null_lpool;
|
||||
pp->ulp_prev = prev = next->ulp_prev;
|
||||
next->ulp_prev = pp;
|
||||
prev->ulp_next = pp;
|
||||
(void) pthread_mutex_unlock(&uu_lpool_list_lock);
|
||||
|
||||
return (pp);
|
||||
}
|
||||
|
||||
void
|
||||
uu_list_pool_destroy(uu_list_pool_t *pp)
|
||||
{
|
||||
if (pp->ulp_debug) {
|
||||
if (pp->ulp_null_list.ul_next != &pp->ulp_null_list ||
|
||||
pp->ulp_null_list.ul_prev != &pp->ulp_null_list) {
|
||||
uu_panic("uu_list_pool_destroy: Pool \"%.*s\" (%p) has "
|
||||
"outstanding lists, or is corrupt.\n",
|
||||
(int)sizeof (pp->ulp_name), pp->ulp_name,
|
||||
(void *)pp);
|
||||
}
|
||||
}
|
||||
(void) pthread_mutex_lock(&uu_lpool_list_lock);
|
||||
pp->ulp_next->ulp_prev = pp->ulp_prev;
|
||||
pp->ulp_prev->ulp_next = pp->ulp_next;
|
||||
(void) pthread_mutex_unlock(&uu_lpool_list_lock);
|
||||
pp->ulp_prev = NULL;
|
||||
pp->ulp_next = NULL;
|
||||
uu_free(pp);
|
||||
}
|
||||
|
||||
void
|
||||
uu_list_node_init(void *base, uu_list_node_t *np_arg, uu_list_pool_t *pp)
|
||||
{
|
||||
uu_list_node_impl_t *np = (uu_list_node_impl_t *)np_arg;
|
||||
|
||||
if (pp->ulp_debug) {
|
||||
uintptr_t offset = (uintptr_t)np - (uintptr_t)base;
|
||||
if (offset + sizeof (*np) > pp->ulp_objsize) {
|
||||
uu_panic("uu_list_node_init(%p, %p, %p (\"%s\")): "
|
||||
"offset %ld doesn't fit in object (size %ld)\n",
|
||||
base, (void *)np, (void *)pp, pp->ulp_name,
|
||||
(long)offset, (long)pp->ulp_objsize);
|
||||
}
|
||||
if (offset != pp->ulp_nodeoffset) {
|
||||
uu_panic("uu_list_node_init(%p, %p, %p (\"%s\")): "
|
||||
"offset %ld doesn't match pool's offset (%ld)\n",
|
||||
base, (void *)np, (void *)pp, pp->ulp_name,
|
||||
(long)offset, (long)pp->ulp_objsize);
|
||||
}
|
||||
}
|
||||
np->uln_next = POOL_TO_MARKER(pp);
|
||||
np->uln_prev = NULL;
|
||||
}
|
||||
|
||||
void
|
||||
uu_list_node_fini(void *base, uu_list_node_t *np_arg, uu_list_pool_t *pp)
|
||||
{
|
||||
uu_list_node_impl_t *np = (uu_list_node_impl_t *)np_arg;
|
||||
|
||||
if (pp->ulp_debug) {
|
||||
if (np->uln_next == NULL &&
|
||||
np->uln_prev == NULL) {
|
||||
uu_panic("uu_list_node_fini(%p, %p, %p (\"%s\")): "
|
||||
"node already finied\n",
|
||||
base, (void *)np_arg, (void *)pp, pp->ulp_name);
|
||||
}
|
||||
if (np->uln_next != POOL_TO_MARKER(pp) ||
|
||||
np->uln_prev != NULL) {
|
||||
uu_panic("uu_list_node_fini(%p, %p, %p (\"%s\")): "
|
||||
"node corrupt or on list\n",
|
||||
base, (void *)np_arg, (void *)pp, pp->ulp_name);
|
||||
}
|
||||
}
|
||||
np->uln_next = NULL;
|
||||
np->uln_prev = NULL;
|
||||
}
|
||||
|
||||
uu_list_t *
|
||||
uu_list_create(uu_list_pool_t *pp, void *parent, uint32_t flags)
|
||||
{
|
||||
uu_list_t *lp, *next, *prev;
|
||||
|
||||
if (flags & ~(UU_LIST_DEBUG | UU_LIST_SORTED)) {
|
||||
uu_set_error(UU_ERROR_UNKNOWN_FLAG);
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
if ((flags & UU_LIST_SORTED) && pp->ulp_cmp == NULL) {
|
||||
if (pp->ulp_debug)
|
||||
uu_panic("uu_list_create(%p, ...): requested "
|
||||
"UU_LIST_SORTED, but pool has no comparison func\n",
|
||||
(void *)pp);
|
||||
uu_set_error(UU_ERROR_NOT_SUPPORTED);
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
lp = uu_zalloc(sizeof (*lp));
|
||||
if (lp == NULL) {
|
||||
uu_set_error(UU_ERROR_NO_MEMORY);
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
lp->ul_pool = pp;
|
||||
lp->ul_parent = parent;
|
||||
lp->ul_offset = pp->ulp_nodeoffset;
|
||||
lp->ul_debug = pp->ulp_debug || (flags & UU_LIST_DEBUG);
|
||||
lp->ul_sorted = (flags & UU_LIST_SORTED);
|
||||
lp->ul_numnodes = 0;
|
||||
lp->ul_index = (pp->ulp_last_index = INDEX_NEXT(pp->ulp_last_index));
|
||||
|
||||
lp->ul_null_node.uln_next = &lp->ul_null_node;
|
||||
lp->ul_null_node.uln_prev = &lp->ul_null_node;
|
||||
|
||||
lp->ul_null_walk.ulw_next = &lp->ul_null_walk;
|
||||
lp->ul_null_walk.ulw_prev = &lp->ul_null_walk;
|
||||
|
||||
(void) pthread_mutex_lock(&pp->ulp_lock);
|
||||
next = &pp->ulp_null_list;
|
||||
prev = next->ul_prev;
|
||||
lp->ul_next = next;
|
||||
lp->ul_prev = prev;
|
||||
next->ul_prev = lp;
|
||||
prev->ul_next = lp;
|
||||
(void) pthread_mutex_unlock(&pp->ulp_lock);
|
||||
|
||||
return (lp);
|
||||
}
|
||||
|
||||
void
|
||||
uu_list_destroy(uu_list_t *lp)
|
||||
{
|
||||
uu_list_pool_t *pp = lp->ul_pool;
|
||||
|
||||
if (lp->ul_debug) {
|
||||
if (lp->ul_null_node.uln_next != &lp->ul_null_node ||
|
||||
lp->ul_null_node.uln_prev != &lp->ul_null_node) {
|
||||
uu_panic("uu_list_destroy(%p): list not empty\n",
|
||||
(void *)lp);
|
||||
}
|
||||
if (lp->ul_numnodes != 0) {
|
||||
uu_panic("uu_list_destroy(%p): numnodes is nonzero, "
|
||||
"but list is empty\n", (void *)lp);
|
||||
}
|
||||
if (lp->ul_null_walk.ulw_next != &lp->ul_null_walk ||
|
||||
lp->ul_null_walk.ulw_prev != &lp->ul_null_walk) {
|
||||
uu_panic("uu_list_destroy(%p): outstanding walkers\n",
|
||||
(void *)lp);
|
||||
}
|
||||
}
|
||||
|
||||
(void) pthread_mutex_lock(&pp->ulp_lock);
|
||||
lp->ul_next->ul_prev = lp->ul_prev;
|
||||
lp->ul_prev->ul_next = lp->ul_next;
|
||||
(void) pthread_mutex_unlock(&pp->ulp_lock);
|
||||
lp->ul_prev = NULL;
|
||||
lp->ul_next = NULL;
|
||||
lp->ul_pool = NULL;
|
||||
uu_free(lp);
|
||||
}
|
||||
|
||||
static void
|
||||
list_insert(uu_list_t *lp, uu_list_node_impl_t *np, uu_list_node_impl_t *prev,
|
||||
uu_list_node_impl_t *next)
|
||||
{
|
||||
if (lp->ul_debug) {
|
||||
if (next->uln_prev != prev || prev->uln_next != next)
|
||||
uu_panic("insert(%p): internal error: %p and %p not "
|
||||
"neighbors\n", (void *)lp, (void *)next,
|
||||
(void *)prev);
|
||||
|
||||
if (np->uln_next != POOL_TO_MARKER(lp->ul_pool) ||
|
||||
np->uln_prev != NULL) {
|
||||
uu_panic("insert(%p): elem %p node %p corrupt, "
|
||||
"not initialized, or already in a list.\n",
|
||||
(void *)lp, NODE_TO_ELEM(lp, np), (void *)np);
|
||||
}
|
||||
/*
|
||||
* invalidate outstanding uu_list_index_ts.
|
||||
*/
|
||||
lp->ul_index = INDEX_NEXT(lp->ul_index);
|
||||
}
|
||||
np->uln_next = next;
|
||||
np->uln_prev = prev;
|
||||
next->uln_prev = np;
|
||||
prev->uln_next = np;
|
||||
|
||||
lp->ul_numnodes++;
|
||||
}
|
||||
|
||||
void
|
||||
uu_list_insert(uu_list_t *lp, void *elem, uu_list_index_t idx)
|
||||
{
|
||||
uu_list_node_impl_t *np;
|
||||
|
||||
np = INDEX_TO_NODE(idx);
|
||||
if (np == NULL)
|
||||
np = &lp->ul_null_node;
|
||||
|
||||
if (lp->ul_debug) {
|
||||
if (!INDEX_VALID(lp, idx))
|
||||
uu_panic("uu_list_insert(%p, %p, %p): %s\n",
|
||||
(void *)lp, elem, (void *)idx,
|
||||
INDEX_CHECK(idx)? "outdated index" :
|
||||
"invalid index");
|
||||
if (np->uln_prev == NULL)
|
||||
uu_panic("uu_list_insert(%p, %p, %p): out-of-date "
|
||||
"index\n", (void *)lp, elem, (void *)idx);
|
||||
}
|
||||
|
||||
list_insert(lp, ELEM_TO_NODE(lp, elem), np->uln_prev, np);
|
||||
}
|
||||
|
||||
void *
|
||||
uu_list_find(uu_list_t *lp, void *elem, void *private, uu_list_index_t *out)
|
||||
{
|
||||
int sorted = lp->ul_sorted;
|
||||
uu_compare_fn_t *func = lp->ul_pool->ulp_cmp;
|
||||
uu_list_node_impl_t *np;
|
||||
|
||||
if (func == NULL) {
|
||||
if (out != NULL)
|
||||
*out = 0;
|
||||
uu_set_error(UU_ERROR_NOT_SUPPORTED);
|
||||
return (NULL);
|
||||
}
|
||||
for (np = lp->ul_null_node.uln_next; np != &lp->ul_null_node;
|
||||
np = np->uln_next) {
|
||||
void *ep = NODE_TO_ELEM(lp, np);
|
||||
int cmp = func(ep, elem, private);
|
||||
if (cmp == 0) {
|
||||
if (out != NULL)
|
||||
*out = NODE_TO_INDEX(lp, np);
|
||||
return (ep);
|
||||
}
|
||||
if (sorted && cmp > 0) {
|
||||
if (out != NULL)
|
||||
*out = NODE_TO_INDEX(lp, np);
|
||||
return (NULL);
|
||||
}
|
||||
}
|
||||
if (out != NULL)
|
||||
*out = NODE_TO_INDEX(lp, 0);
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
void *
|
||||
uu_list_nearest_next(uu_list_t *lp, uu_list_index_t idx)
|
||||
{
|
||||
uu_list_node_impl_t *np = INDEX_TO_NODE(idx);
|
||||
|
||||
if (np == NULL)
|
||||
np = &lp->ul_null_node;
|
||||
|
||||
if (lp->ul_debug) {
|
||||
if (!INDEX_VALID(lp, idx))
|
||||
uu_panic("uu_list_nearest_next(%p, %p): %s\n",
|
||||
(void *)lp, (void *)idx,
|
||||
INDEX_CHECK(idx)? "outdated index" :
|
||||
"invalid index");
|
||||
if (np->uln_prev == NULL)
|
||||
uu_panic("uu_list_nearest_next(%p, %p): out-of-date "
|
||||
"index\n", (void *)lp, (void *)idx);
|
||||
}
|
||||
|
||||
if (np == &lp->ul_null_node)
|
||||
return (NULL);
|
||||
else
|
||||
return (NODE_TO_ELEM(lp, np));
|
||||
}
|
||||
|
||||
void *
|
||||
uu_list_nearest_prev(uu_list_t *lp, uu_list_index_t idx)
|
||||
{
|
||||
uu_list_node_impl_t *np = INDEX_TO_NODE(idx);
|
||||
|
||||
if (np == NULL)
|
||||
np = &lp->ul_null_node;
|
||||
|
||||
if (lp->ul_debug) {
|
||||
if (!INDEX_VALID(lp, idx))
|
||||
uu_panic("uu_list_nearest_prev(%p, %p): %s\n",
|
||||
(void *)lp, (void *)idx, INDEX_CHECK(idx)?
|
||||
"outdated index" : "invalid index");
|
||||
if (np->uln_prev == NULL)
|
||||
uu_panic("uu_list_nearest_prev(%p, %p): out-of-date "
|
||||
"index\n", (void *)lp, (void *)idx);
|
||||
}
|
||||
|
||||
if ((np = np->uln_prev) == &lp->ul_null_node)
|
||||
return (NULL);
|
||||
else
|
||||
return (NODE_TO_ELEM(lp, np));
|
||||
}
|
||||
|
||||
static void
|
||||
list_walk_init(uu_list_walk_t *wp, uu_list_t *lp, uint32_t flags)
|
||||
{
|
||||
uu_list_walk_t *next, *prev;
|
||||
|
||||
int robust = (flags & UU_WALK_ROBUST);
|
||||
int direction = (flags & UU_WALK_REVERSE)? -1 : 1;
|
||||
|
||||
(void) memset(wp, 0, sizeof (*wp));
|
||||
wp->ulw_list = lp;
|
||||
wp->ulw_robust = robust;
|
||||
wp->ulw_dir = direction;
|
||||
if (direction > 0)
|
||||
wp->ulw_next_result = lp->ul_null_node.uln_next;
|
||||
else
|
||||
wp->ulw_next_result = lp->ul_null_node.uln_prev;
|
||||
|
||||
if (lp->ul_debug || robust) {
|
||||
/*
|
||||
* Add this walker to the list's list of walkers so
|
||||
* uu_list_remove() can advance us if somebody tries to
|
||||
* remove ulw_next_result.
|
||||
*/
|
||||
wp->ulw_next = next = &lp->ul_null_walk;
|
||||
wp->ulw_prev = prev = next->ulw_prev;
|
||||
next->ulw_prev = wp;
|
||||
prev->ulw_next = wp;
|
||||
}
|
||||
}
|
||||
|
||||
static uu_list_node_impl_t *
|
||||
list_walk_advance(uu_list_walk_t *wp, uu_list_t *lp)
|
||||
{
|
||||
uu_list_node_impl_t *np = wp->ulw_next_result;
|
||||
uu_list_node_impl_t *next;
|
||||
|
||||
if (np == &lp->ul_null_node)
|
||||
return (NULL);
|
||||
|
||||
next = (wp->ulw_dir > 0)? np->uln_next : np->uln_prev;
|
||||
|
||||
wp->ulw_next_result = next;
|
||||
return (np);
|
||||
}
|
||||
|
||||
static void
|
||||
list_walk_fini(uu_list_walk_t *wp)
|
||||
{
|
||||
/* GLXXX debugging? */
|
||||
if (wp->ulw_next != NULL) {
|
||||
wp->ulw_next->ulw_prev = wp->ulw_prev;
|
||||
wp->ulw_prev->ulw_next = wp->ulw_next;
|
||||
wp->ulw_next = NULL;
|
||||
wp->ulw_prev = NULL;
|
||||
}
|
||||
wp->ulw_list = NULL;
|
||||
wp->ulw_next_result = NULL;
|
||||
}
|
||||
|
||||
uu_list_walk_t *
|
||||
uu_list_walk_start(uu_list_t *lp, uint32_t flags)
|
||||
{
|
||||
uu_list_walk_t *wp;
|
||||
|
||||
if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
|
||||
uu_set_error(UU_ERROR_UNKNOWN_FLAG);
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
wp = uu_zalloc(sizeof (*wp));
|
||||
if (wp == NULL) {
|
||||
uu_set_error(UU_ERROR_NO_MEMORY);
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
list_walk_init(wp, lp, flags);
|
||||
return (wp);
|
||||
}
|
||||
|
||||
void *
|
||||
uu_list_walk_next(uu_list_walk_t *wp)
|
||||
{
|
||||
uu_list_t *lp = wp->ulw_list;
|
||||
uu_list_node_impl_t *np = list_walk_advance(wp, lp);
|
||||
|
||||
if (np == NULL)
|
||||
return (NULL);
|
||||
|
||||
return (NODE_TO_ELEM(lp, np));
|
||||
}
|
||||
|
||||
void
|
||||
uu_list_walk_end(uu_list_walk_t *wp)
|
||||
{
|
||||
list_walk_fini(wp);
|
||||
uu_free(wp);
|
||||
}
|
||||
|
||||
int
|
||||
uu_list_walk(uu_list_t *lp, uu_walk_fn_t *func, void *private, uint32_t flags)
|
||||
{
|
||||
uu_list_node_impl_t *np;
|
||||
|
||||
int status = UU_WALK_NEXT;
|
||||
|
||||
int robust = (flags & UU_WALK_ROBUST);
|
||||
int reverse = (flags & UU_WALK_REVERSE);
|
||||
|
||||
if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
|
||||
uu_set_error(UU_ERROR_UNKNOWN_FLAG);
|
||||
return (-1);
|
||||
}
|
||||
|
||||
if (lp->ul_debug || robust) {
|
||||
uu_list_walk_t *my_walk;
|
||||
void *e;
|
||||
|
||||
my_walk = uu_zalloc(sizeof (*my_walk));
|
||||
if (my_walk == NULL)
|
||||
return (-1);
|
||||
|
||||
list_walk_init(my_walk, lp, flags);
|
||||
while (status == UU_WALK_NEXT &&
|
||||
(e = uu_list_walk_next(my_walk)) != NULL)
|
||||
status = (*func)(e, private);
|
||||
list_walk_fini(my_walk);
|
||||
|
||||
uu_free(my_walk);
|
||||
} else {
|
||||
if (!reverse) {
|
||||
for (np = lp->ul_null_node.uln_next;
|
||||
status == UU_WALK_NEXT && np != &lp->ul_null_node;
|
||||
np = np->uln_next) {
|
||||
status = (*func)(NODE_TO_ELEM(lp, np), private);
|
||||
}
|
||||
} else {
|
||||
for (np = lp->ul_null_node.uln_prev;
|
||||
status == UU_WALK_NEXT && np != &lp->ul_null_node;
|
||||
np = np->uln_prev) {
|
||||
status = (*func)(NODE_TO_ELEM(lp, np), private);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (status >= 0)
|
||||
return (0);
|
||||
uu_set_error(UU_ERROR_CALLBACK_FAILED);
|
||||
return (-1);
|
||||
}
|
||||
|
||||
void
|
||||
uu_list_remove(uu_list_t *lp, void *elem)
|
||||
{
|
||||
uu_list_node_impl_t *np = ELEM_TO_NODE(lp, elem);
|
||||
uu_list_walk_t *wp;
|
||||
|
||||
if (lp->ul_debug) {
|
||||
if (np->uln_prev == NULL)
|
||||
uu_panic("uu_list_remove(%p, %p): elem not on list\n",
|
||||
(void *)lp, elem);
|
||||
/*
|
||||
* invalidate outstanding uu_list_index_ts.
|
||||
*/
|
||||
lp->ul_index = INDEX_NEXT(lp->ul_index);
|
||||
}
|
||||
|
||||
/*
|
||||
* robust walkers must be advanced. In debug mode, non-robust
|
||||
* walkers are also on the list. If there are any, it's an error.
|
||||
*/
|
||||
for (wp = lp->ul_null_walk.ulw_next; wp != &lp->ul_null_walk;
|
||||
wp = wp->ulw_next) {
|
||||
if (wp->ulw_robust) {
|
||||
if (np == wp->ulw_next_result)
|
||||
(void) list_walk_advance(wp, lp);
|
||||
} else if (wp->ulw_next_result != NULL) {
|
||||
uu_panic("uu_list_remove(%p, %p): active non-robust "
|
||||
"walker\n", (void *)lp, elem);
|
||||
}
|
||||
}
|
||||
|
||||
np->uln_next->uln_prev = np->uln_prev;
|
||||
np->uln_prev->uln_next = np->uln_next;
|
||||
|
||||
lp->ul_numnodes--;
|
||||
|
||||
np->uln_next = POOL_TO_MARKER(lp->ul_pool);
|
||||
np->uln_prev = NULL;
|
||||
}
|
||||
|
||||
void *
|
||||
uu_list_teardown(uu_list_t *lp, void **cookie)
|
||||
{
|
||||
void *ep;
|
||||
|
||||
/*
|
||||
* XXX: disable list modification until list is empty
|
||||
*/
|
||||
if (lp->ul_debug && *cookie != NULL)
|
||||
uu_panic("uu_list_teardown(%p, %p): unexpected cookie\n",
|
||||
(void *)lp, (void *)cookie);
|
||||
|
||||
ep = uu_list_first(lp);
|
||||
if (ep)
|
||||
uu_list_remove(lp, ep);
|
||||
return (ep);
|
||||
}
|
||||
|
||||
int
|
||||
uu_list_insert_before(uu_list_t *lp, void *target, void *elem)
|
||||
{
|
||||
uu_list_node_impl_t *np = ELEM_TO_NODE(lp, target);
|
||||
|
||||
if (target == NULL)
|
||||
np = &lp->ul_null_node;
|
||||
|
||||
if (lp->ul_debug) {
|
||||
if (np->uln_prev == NULL)
|
||||
uu_panic("uu_list_insert_before(%p, %p, %p): %p is "
|
||||
"not currently on a list\n",
|
||||
(void *)lp, target, elem, target);
|
||||
}
|
||||
if (lp->ul_sorted) {
|
||||
if (lp->ul_debug)
|
||||
uu_panic("uu_list_insert_before(%p, ...): list is "
|
||||
"UU_LIST_SORTED\n", (void *)lp);
|
||||
uu_set_error(UU_ERROR_NOT_SUPPORTED);
|
||||
return (-1);
|
||||
}
|
||||
|
||||
list_insert(lp, ELEM_TO_NODE(lp, elem), np->uln_prev, np);
|
||||
return (0);
|
||||
}
|
||||
|
||||
int
|
||||
uu_list_insert_after(uu_list_t *lp, void *target, void *elem)
|
||||
{
|
||||
uu_list_node_impl_t *np = ELEM_TO_NODE(lp, target);
|
||||
|
||||
if (target == NULL)
|
||||
np = &lp->ul_null_node;
|
||||
|
||||
if (lp->ul_debug) {
|
||||
if (np->uln_prev == NULL)
|
||||
uu_panic("uu_list_insert_after(%p, %p, %p): %p is "
|
||||
"not currently on a list\n",
|
||||
(void *)lp, target, elem, target);
|
||||
}
|
||||
if (lp->ul_sorted) {
|
||||
if (lp->ul_debug)
|
||||
uu_panic("uu_list_insert_after(%p, ...): list is "
|
||||
"UU_LIST_SORTED\n", (void *)lp);
|
||||
uu_set_error(UU_ERROR_NOT_SUPPORTED);
|
||||
return (-1);
|
||||
}
|
||||
|
||||
list_insert(lp, ELEM_TO_NODE(lp, elem), np, np->uln_next);
|
||||
return (0);
|
||||
}
|
||||
|
||||
size_t
|
||||
uu_list_numnodes(uu_list_t *lp)
|
||||
{
|
||||
return (lp->ul_numnodes);
|
||||
}
|
||||
|
||||
void *
|
||||
uu_list_first(uu_list_t *lp)
|
||||
{
|
||||
uu_list_node_impl_t *n = lp->ul_null_node.uln_next;
|
||||
if (n == &lp->ul_null_node)
|
||||
return (NULL);
|
||||
return (NODE_TO_ELEM(lp, n));
|
||||
}
|
||||
|
||||
void *
|
||||
uu_list_last(uu_list_t *lp)
|
||||
{
|
||||
uu_list_node_impl_t *n = lp->ul_null_node.uln_prev;
|
||||
if (n == &lp->ul_null_node)
|
||||
return (NULL);
|
||||
return (NODE_TO_ELEM(lp, n));
|
||||
}
|
||||
|
||||
void *
|
||||
uu_list_next(uu_list_t *lp, void *elem)
|
||||
{
|
||||
uu_list_node_impl_t *n = ELEM_TO_NODE(lp, elem);
|
||||
|
||||
n = n->uln_next;
|
||||
if (n == &lp->ul_null_node)
|
||||
return (NULL);
|
||||
return (NODE_TO_ELEM(lp, n));
|
||||
}
|
||||
|
||||
void *
|
||||
uu_list_prev(uu_list_t *lp, void *elem)
|
||||
{
|
||||
uu_list_node_impl_t *n = ELEM_TO_NODE(lp, elem);
|
||||
|
||||
n = n->uln_prev;
|
||||
if (n == &lp->ul_null_node)
|
||||
return (NULL);
|
||||
return (NODE_TO_ELEM(lp, n));
|
||||
}
|
||||
|
||||
/*
|
||||
* called from uu_lockup() and uu_release(), as part of our fork1()-safety.
|
||||
*/
|
||||
void
|
||||
uu_list_lockup(void)
|
||||
{
|
||||
uu_list_pool_t *pp;
|
||||
|
||||
(void) pthread_mutex_lock(&uu_lpool_list_lock);
|
||||
for (pp = uu_null_lpool.ulp_next; pp != &uu_null_lpool;
|
||||
pp = pp->ulp_next)
|
||||
(void) pthread_mutex_lock(&pp->ulp_lock);
|
||||
}
|
||||
|
||||
void
|
||||
uu_list_release(void)
|
||||
{
|
||||
uu_list_pool_t *pp;
|
||||
|
||||
for (pp = uu_null_lpool.ulp_next; pp != &uu_null_lpool;
|
||||
pp = pp->ulp_next)
|
||||
(void) pthread_mutex_unlock(&pp->ulp_lock);
|
||||
(void) pthread_mutex_unlock(&uu_lpool_list_lock);
|
||||
}
|
||||
|
|
@ -1,255 +0,0 @@
|
|||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
* The contents of this file are subject to the terms of the
|
||||
* Common Development and Distribution License (the "License").
|
||||
* You may not use this file except in compliance with the License.
|
||||
*
|
||||
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
|
||||
* or https://opensource.org/licenses/CDDL-1.0.
|
||||
* See the License for the specific language governing permissions
|
||||
* and limitations under the License.
|
||||
*
|
||||
* When distributing Covered Code, include this CDDL HEADER in each
|
||||
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
|
||||
* If applicable, add the following below this CDDL HEADER, with the
|
||||
* fields enclosed by brackets "[]" replaced with your own identifying
|
||||
* information: Portions Copyright [yyyy] [name of copyright owner]
|
||||
*
|
||||
* CDDL HEADER END
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
*/
|
||||
|
||||
#include "libuutil_common.h"
|
||||
|
||||
#include <assert.h>
|
||||
#include <errno.h>
|
||||
#include <libintl.h>
|
||||
#include <pthread.h>
|
||||
#include <stdarg.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <sys/debug.h>
|
||||
#include <unistd.h>
|
||||
#include <ctype.h>
|
||||
|
||||
#if !defined(TEXT_DOMAIN)
|
||||
#define TEXT_DOMAIN "SYS_TEST"
|
||||
#endif
|
||||
|
||||
/*
|
||||
* All of the old code under !defined(PTHREAD_ONCE_KEY_NP)
|
||||
* is here to enable the building of a native version of
|
||||
* libuutil.so when the build machine has not yet been upgraded
|
||||
* to a version of libc that provides pthread_key_create_once_np().
|
||||
* It should all be deleted when solaris_nevada ships.
|
||||
* The code is not MT-safe in a relaxed memory model.
|
||||
*/
|
||||
|
||||
#if defined(PTHREAD_ONCE_KEY_NP)
|
||||
static pthread_key_t uu_error_key = PTHREAD_ONCE_KEY_NP;
|
||||
#else /* PTHREAD_ONCE_KEY_NP */
|
||||
static pthread_key_t uu_error_key = 0;
|
||||
static pthread_mutex_t uu_key_lock = PTHREAD_MUTEX_INITIALIZER;
|
||||
#endif /* PTHREAD_ONCE_KEY_NP */
|
||||
|
||||
static int uu_error_key_setup = 0;
|
||||
|
||||
static pthread_mutex_t uu_panic_lock = PTHREAD_MUTEX_INITIALIZER;
|
||||
/* LINTED static unused */
|
||||
static const char *uu_panic_format;
|
||||
/* LINTED static unused */
|
||||
static va_list uu_panic_args;
|
||||
static pthread_t uu_panic_thread;
|
||||
|
||||
static uint32_t _uu_main_error;
|
||||
static __thread int _uu_main_thread = 0;
|
||||
|
||||
void
|
||||
uu_set_error(uint_t code)
|
||||
{
|
||||
if (_uu_main_thread) {
|
||||
_uu_main_error = code;
|
||||
return;
|
||||
}
|
||||
#if defined(PTHREAD_ONCE_KEY_NP)
|
||||
if (pthread_key_create_once_np(&uu_error_key, NULL) != 0)
|
||||
uu_error_key_setup = -1;
|
||||
else
|
||||
uu_error_key_setup = 1;
|
||||
#else /* PTHREAD_ONCE_KEY_NP */
|
||||
if (uu_error_key_setup == 0) {
|
||||
(void) pthread_mutex_lock(&uu_key_lock);
|
||||
if (uu_error_key_setup == 0) {
|
||||
if (pthread_key_create(&uu_error_key, NULL) != 0)
|
||||
uu_error_key_setup = -1;
|
||||
else
|
||||
uu_error_key_setup = 1;
|
||||
}
|
||||
(void) pthread_mutex_unlock(&uu_key_lock);
|
||||
}
|
||||
#endif /* PTHREAD_ONCE_KEY_NP */
|
||||
if (uu_error_key_setup > 0)
|
||||
(void) pthread_setspecific(uu_error_key,
|
||||
(void *)(uintptr_t)code);
|
||||
}
|
||||
|
||||
uint32_t
|
||||
uu_error(void)
|
||||
{
|
||||
if (_uu_main_thread)
|
||||
return (_uu_main_error);
|
||||
|
||||
if (uu_error_key_setup < 0) /* can't happen? */
|
||||
return (UU_ERROR_UNKNOWN);
|
||||
|
||||
/*
|
||||
* Because UU_ERROR_NONE == 0, if uu_set_error() was
|
||||
* never called, then this will return UU_ERROR_NONE:
|
||||
*/
|
||||
return ((uint32_t)(uintptr_t)pthread_getspecific(uu_error_key));
|
||||
}
|
||||
|
||||
const char *
|
||||
uu_strerror(uint32_t code)
|
||||
{
|
||||
const char *str;
|
||||
|
||||
switch (code) {
|
||||
case UU_ERROR_NONE:
|
||||
str = dgettext(TEXT_DOMAIN, "No error");
|
||||
break;
|
||||
|
||||
case UU_ERROR_INVALID_ARGUMENT:
|
||||
str = dgettext(TEXT_DOMAIN, "Invalid argument");
|
||||
break;
|
||||
|
||||
case UU_ERROR_UNKNOWN_FLAG:
|
||||
str = dgettext(TEXT_DOMAIN, "Unknown flag passed");
|
||||
break;
|
||||
|
||||
case UU_ERROR_NO_MEMORY:
|
||||
str = dgettext(TEXT_DOMAIN, "Out of memory");
|
||||
break;
|
||||
|
||||
case UU_ERROR_CALLBACK_FAILED:
|
||||
str = dgettext(TEXT_DOMAIN, "Callback-initiated failure");
|
||||
break;
|
||||
|
||||
case UU_ERROR_NOT_SUPPORTED:
|
||||
str = dgettext(TEXT_DOMAIN, "Operation not supported");
|
||||
break;
|
||||
|
||||
case UU_ERROR_EMPTY:
|
||||
str = dgettext(TEXT_DOMAIN, "No value provided");
|
||||
break;
|
||||
|
||||
case UU_ERROR_UNDERFLOW:
|
||||
str = dgettext(TEXT_DOMAIN, "Value too small");
|
||||
break;
|
||||
|
||||
case UU_ERROR_OVERFLOW:
|
||||
str = dgettext(TEXT_DOMAIN, "Value too large");
|
||||
break;
|
||||
|
||||
case UU_ERROR_INVALID_CHAR:
|
||||
str = dgettext(TEXT_DOMAIN,
|
||||
"Value contains unexpected character");
|
||||
break;
|
||||
|
||||
case UU_ERROR_INVALID_DIGIT:
|
||||
str = dgettext(TEXT_DOMAIN,
|
||||
"Value contains digit not in base");
|
||||
break;
|
||||
|
||||
case UU_ERROR_SYSTEM:
|
||||
str = dgettext(TEXT_DOMAIN, "Underlying system error");
|
||||
break;
|
||||
|
||||
case UU_ERROR_UNKNOWN:
|
||||
str = dgettext(TEXT_DOMAIN, "Error status not known");
|
||||
break;
|
||||
|
||||
default:
|
||||
errno = ESRCH;
|
||||
str = NULL;
|
||||
break;
|
||||
}
|
||||
return (str);
|
||||
}
|
||||
|
||||
void
|
||||
uu_panic(const char *format, ...)
|
||||
{
|
||||
va_list args;
|
||||
|
||||
va_start(args, format);
|
||||
|
||||
(void) pthread_mutex_lock(&uu_panic_lock);
|
||||
if (uu_panic_thread == 0) {
|
||||
uu_panic_thread = pthread_self();
|
||||
uu_panic_format = format;
|
||||
va_copy(uu_panic_args, args);
|
||||
}
|
||||
(void) pthread_mutex_unlock(&uu_panic_lock);
|
||||
|
||||
(void) vfprintf(stderr, format, args);
|
||||
|
||||
va_end(args);
|
||||
|
||||
if (uu_panic_thread == pthread_self())
|
||||
abort();
|
||||
else
|
||||
for (;;)
|
||||
(void) pause();
|
||||
}
|
||||
|
||||
static void
|
||||
uu_lockup(void)
|
||||
{
|
||||
(void) pthread_mutex_lock(&uu_panic_lock);
|
||||
#if !defined(PTHREAD_ONCE_KEY_NP)
|
||||
(void) pthread_mutex_lock(&uu_key_lock);
|
||||
#endif
|
||||
uu_avl_lockup();
|
||||
uu_list_lockup();
|
||||
}
|
||||
|
||||
static void
|
||||
uu_release(void)
|
||||
{
|
||||
(void) pthread_mutex_unlock(&uu_panic_lock);
|
||||
#if !defined(PTHREAD_ONCE_KEY_NP)
|
||||
(void) pthread_mutex_unlock(&uu_key_lock);
|
||||
#endif
|
||||
uu_avl_release();
|
||||
uu_list_release();
|
||||
}
|
||||
|
||||
static void
|
||||
uu_release_child(void)
|
||||
{
|
||||
uu_panic_format = NULL;
|
||||
uu_panic_thread = 0;
|
||||
|
||||
uu_release();
|
||||
}
|
||||
|
||||
#ifdef __GNUC__
|
||||
static void
|
||||
uu_init(void) __attribute__((constructor));
|
||||
#else
|
||||
#pragma init(uu_init)
|
||||
#endif
|
||||
|
||||
static void
|
||||
uu_init(void)
|
||||
{
|
||||
_uu_main_thread = 1;
|
||||
(void) pthread_atfork(uu_lockup, uu_release, uu_release_child);
|
||||
}
|
||||
|
|
@ -1,55 +0,0 @@
|
|||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
* The contents of this file are subject to the terms of the
|
||||
* Common Development and Distribution License (the "License").
|
||||
* You may not use this file except in compliance with the License.
|
||||
*
|
||||
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
|
||||
* or https://opensource.org/licenses/CDDL-1.0.
|
||||
* See the License for the specific language governing permissions
|
||||
* and limitations under the License.
|
||||
*
|
||||
* When distributing Covered Code, include this CDDL HEADER in each
|
||||
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
|
||||
* If applicable, add the following below this CDDL HEADER, with the
|
||||
* fields enclosed by brackets "[]" replaced with your own identifying
|
||||
* information: Portions Copyright [yyyy] [name of copyright owner]
|
||||
*
|
||||
* CDDL HEADER END
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
*/
|
||||
|
||||
/*
|
||||
* String helper functions
|
||||
*/
|
||||
|
||||
#include <string.h>
|
||||
#include <sys/types.h>
|
||||
#include <ctype.h>
|
||||
#include "libuutil.h"
|
||||
|
||||
/* Return true if strings are equal */
|
||||
boolean_t
|
||||
uu_streq(const char *a, const char *b)
|
||||
{
|
||||
return (strcmp(a, b) == 0);
|
||||
}
|
||||
|
||||
/* Return true if strings are equal, case-insensitively */
|
||||
boolean_t
|
||||
uu_strcaseeq(const char *a, const char *b)
|
||||
{
|
||||
return (strcasecmp(a, b) == 0);
|
||||
}
|
||||
|
||||
/* Return true if string a Begins With string b */
|
||||
boolean_t
|
||||
uu_strbw(const char *a, const char *b)
|
||||
{
|
||||
return (strncmp(a, b, strlen(b)) == 0);
|
||||
}
|
||||
|
|
@@ -55,8 +55,7 @@ libzfs_la_LIBADD = \
 	libshare.la \
 	libzfs_core.la \
 	libnvpair.la \
-	libzutil.la \
-	libuutil.la
+	libzutil.la
 
 libzfs_la_LIBADD += -lrt -lm $(LIBCRYPTO_LIBS) $(ZLIB_LIBS) $(LIBFETCH_LIBS) $(LTLIBINTL)
 
File diff suppressed because it is too large
@ -31,12 +31,12 @@
|
|||
*/
|
||||
|
||||
#include <libintl.h>
|
||||
#include <libuutil.h>
|
||||
#include <stddef.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <unistd.h>
|
||||
#include <zone.h>
|
||||
#include <sys/avl.h>
|
||||
|
||||
#include <libzfs.h>
|
||||
|
||||
|
|
@ -70,15 +70,14 @@ typedef struct prop_changenode {
|
|||
int cn_mounted;
|
||||
int cn_zoned;
|
||||
boolean_t cn_needpost; /* is postfix() needed? */
|
||||
uu_avl_node_t cn_treenode;
|
||||
avl_node_t cn_treenode;
|
||||
} prop_changenode_t;
|
||||
|
||||
struct prop_changelist {
|
||||
zfs_prop_t cl_prop;
|
||||
zfs_prop_t cl_realprop;
|
||||
zfs_prop_t cl_shareprop; /* used with sharenfs/sharesmb */
|
||||
uu_avl_pool_t *cl_pool;
|
||||
uu_avl_t *cl_tree;
|
||||
avl_tree_t cl_tree;
|
||||
boolean_t cl_waslegacy;
|
||||
boolean_t cl_allchildren;
|
||||
boolean_t cl_alldependents;
|
||||
|
|
@ -97,7 +96,6 @@ int
|
|||
changelist_prefix(prop_changelist_t *clp)
|
||||
{
|
||||
prop_changenode_t *cn;
|
||||
uu_avl_walk_t *walk;
|
||||
int ret = 0;
|
||||
const enum sa_protocol smb[] = {SA_PROTOCOL_SMB, SA_NO_PROTOCOL};
|
||||
boolean_t commit_smb_shares = B_FALSE;
|
||||
|
|
@ -115,10 +113,8 @@ changelist_prefix(prop_changelist_t *clp)
|
|||
if (clp->cl_gflags & CL_GATHER_DONT_UNMOUNT)
|
||||
return (0);
|
||||
|
||||
if ((walk = uu_avl_walk_start(clp->cl_tree, UU_WALK_ROBUST)) == NULL)
|
||||
return (-1);
|
||||
|
||||
while ((cn = uu_avl_walk_next(walk)) != NULL) {
|
||||
for (cn = avl_first(&clp->cl_tree); cn != NULL;
|
||||
cn = AVL_NEXT(&clp->cl_tree, cn)) {
|
||||
|
||||
/* if a previous loop failed, set the remaining to false */
|
||||
if (ret == -1) {
|
||||
|
|
@ -159,7 +155,6 @@ changelist_prefix(prop_changelist_t *clp)
|
|||
|
||||
if (commit_smb_shares)
|
||||
zfs_commit_shares(smb);
|
||||
uu_avl_walk_end(walk);
|
||||
|
||||
if (ret == -1)
|
||||
(void) changelist_postfix(clp);
|
||||
|
|
@ -179,7 +174,6 @@ int
|
|||
changelist_postfix(prop_changelist_t *clp)
|
||||
{
|
||||
prop_changenode_t *cn;
|
||||
uu_avl_walk_t *walk;
|
||||
char shareopts[ZFS_MAXPROPLEN];
|
||||
boolean_t commit_smb_shares = B_FALSE;
|
||||
boolean_t commit_nfs_shares = B_FALSE;
|
||||
|
|
@ -199,7 +193,7 @@ changelist_postfix(prop_changelist_t *clp)
|
|||
* location), or have explicit mountpoints set (in which case they won't
|
||||
* be in the changelist).
|
||||
*/
|
||||
if ((cn = uu_avl_last(clp->cl_tree)) == NULL)
|
||||
if ((cn = avl_last(&clp->cl_tree)) == NULL)
|
||||
return (0);
|
||||
|
||||
if (clp->cl_prop == ZFS_PROP_MOUNTPOINT &&
|
||||
|
|
@ -211,11 +205,8 @@ changelist_postfix(prop_changelist_t *clp)
|
|||
* datasets before mounting the children. We walk all datasets even if
|
||||
* there are errors.
|
||||
*/
|
||||
if ((walk = uu_avl_walk_start(clp->cl_tree,
|
||||
UU_WALK_REVERSE | UU_WALK_ROBUST)) == NULL)
|
||||
return (-1);
|
||||
|
||||
while ((cn = uu_avl_walk_next(walk)) != NULL) {
|
||||
for (cn = avl_last(&clp->cl_tree); cn != NULL;
|
||||
cn = AVL_PREV(&clp->cl_tree, cn)) {
|
||||
|
||||
boolean_t sharenfs;
|
||||
boolean_t sharesmb;
|
||||
|
|
@ -299,7 +290,6 @@ changelist_postfix(prop_changelist_t *clp)
|
|||
*p++ = SA_PROTOCOL_SMB;
|
||||
*p++ = SA_NO_PROTOCOL;
|
||||
zfs_commit_shares(proto);
|
||||
uu_avl_walk_end(walk);
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
|
@ -334,13 +324,10 @@ void
|
|||
changelist_rename(prop_changelist_t *clp, const char *src, const char *dst)
|
||||
{
|
||||
prop_changenode_t *cn;
|
||||
uu_avl_walk_t *walk;
|
||||
char newname[ZFS_MAX_DATASET_NAME_LEN];
|
||||
|
||||
if ((walk = uu_avl_walk_start(clp->cl_tree, UU_WALK_ROBUST)) == NULL)
|
||||
return;
|
||||
|
||||
while ((cn = uu_avl_walk_next(walk)) != NULL) {
|
||||
for (cn = avl_first(&clp->cl_tree); cn != NULL;
|
||||
cn = AVL_NEXT(&clp->cl_tree, cn)) {
|
||||
/*
|
||||
* Do not rename a clone that's not in the source hierarchy.
|
||||
*/
|
||||
|
|
@ -359,8 +346,6 @@ changelist_rename(prop_changelist_t *clp, const char *src, const char *dst)
|
|||
(void) strlcpy(cn->cn_handle->zfs_name, newname,
|
||||
sizeof (cn->cn_handle->zfs_name));
|
||||
}
|
||||
|
||||
uu_avl_walk_end(walk);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -371,24 +356,20 @@ int
|
|||
changelist_unshare(prop_changelist_t *clp, const enum sa_protocol *proto)
|
||||
{
|
||||
prop_changenode_t *cn;
|
||||
uu_avl_walk_t *walk;
|
||||
int ret = 0;
|
||||
|
||||
if (clp->cl_prop != ZFS_PROP_SHARENFS &&
|
||||
clp->cl_prop != ZFS_PROP_SHARESMB)
|
||||
return (0);
|
||||
|
||||
if ((walk = uu_avl_walk_start(clp->cl_tree, UU_WALK_ROBUST)) == NULL)
|
||||
return (-1);
|
||||
|
||||
while ((cn = uu_avl_walk_next(walk)) != NULL) {
|
||||
for (cn = avl_first(&clp->cl_tree); cn != NULL;
|
||||
cn = AVL_NEXT(&clp->cl_tree, cn)) {
|
||||
if (zfs_unshare(cn->cn_handle, NULL, proto) != 0)
|
||||
ret = -1;
|
||||
}
|
||||
|
||||
for (const enum sa_protocol *p = proto; *p != SA_NO_PROTOCOL; ++p)
|
||||
sa_commit_shares(*p);
|
||||
uu_avl_walk_end(walk);
|
||||
|
||||
return (ret);
|
||||
}
|
||||
|
|
@ -411,22 +392,16 @@ void
|
|||
changelist_remove(prop_changelist_t *clp, const char *name)
|
||||
{
|
||||
prop_changenode_t *cn;
|
||||
uu_avl_walk_t *walk;
|
||||
|
||||
if ((walk = uu_avl_walk_start(clp->cl_tree, UU_WALK_ROBUST)) == NULL)
|
||||
return;
|
||||
|
||||
while ((cn = uu_avl_walk_next(walk)) != NULL) {
|
||||
for (cn = avl_first(&clp->cl_tree); cn != NULL;
|
||||
cn = AVL_NEXT(&clp->cl_tree, cn)) {
|
||||
if (strcmp(cn->cn_handle->zfs_name, name) == 0) {
|
||||
uu_avl_remove(clp->cl_tree, cn);
|
||||
avl_remove(&clp->cl_tree, cn);
|
||||
zfs_close(cn->cn_handle);
|
||||
free(cn);
|
||||
uu_avl_walk_end(walk);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
uu_avl_walk_end(walk);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -436,26 +411,14 @@ void
|
|||
changelist_free(prop_changelist_t *clp)
|
||||
{
|
||||
prop_changenode_t *cn;
|
||||
void *cookie = NULL;
|
||||
|
||||
if (clp->cl_tree) {
|
||||
uu_avl_walk_t *walk;
|
||||
|
||||
if ((walk = uu_avl_walk_start(clp->cl_tree,
|
||||
UU_WALK_ROBUST)) == NULL)
|
||||
return;
|
||||
|
||||
while ((cn = uu_avl_walk_next(walk)) != NULL) {
|
||||
uu_avl_remove(clp->cl_tree, cn);
|
||||
zfs_close(cn->cn_handle);
|
||||
free(cn);
|
||||
}
|
||||
|
||||
uu_avl_walk_end(walk);
|
||||
uu_avl_destroy(clp->cl_tree);
|
||||
while ((cn = avl_destroy_nodes(&clp->cl_tree, &cookie)) != NULL) {
|
||||
zfs_close(cn->cn_handle);
|
||||
free(cn);
|
||||
}
|
||||
if (clp->cl_pool)
|
||||
uu_avl_pool_destroy(clp->cl_pool);
|
||||
|
||||
avl_destroy(&clp->cl_tree);
|
||||
free(clp);
|
||||
}
|
||||
|
||||
|
|
@ -467,7 +430,7 @@ changelist_add_mounted(zfs_handle_t *zhp, void *data)
|
|||
{
|
||||
prop_changelist_t *clp = data;
|
||||
prop_changenode_t *cn;
|
||||
uu_avl_index_t idx;
|
||||
avl_index_t idx;
|
||||
|
||||
ASSERT3U(clp->cl_prop, ==, ZFS_PROP_MOUNTPOINT);
|
||||
|
||||
|
|
@ -483,10 +446,8 @@ changelist_add_mounted(zfs_handle_t *zhp, void *data)
|
|||
if (getzoneid() == GLOBAL_ZONEID && cn->cn_zoned)
|
||||
clp->cl_haszonedchild = B_TRUE;
|
||||
|
||||
uu_avl_node_init(cn, &cn->cn_treenode, clp->cl_pool);
|
||||
|
||||
if (uu_avl_find(clp->cl_tree, cn, NULL, &idx) == NULL) {
|
||||
uu_avl_insert(clp->cl_tree, cn, idx);
|
||||
if (avl_find(&clp->cl_tree, cn, &idx) == NULL) {
|
||||
avl_insert(&clp->cl_tree, cn, idx);
|
||||
} else {
|
||||
free(cn);
|
||||
zfs_close(zhp);
|
||||
|
|
@ -553,12 +514,9 @@ change_one(zfs_handle_t *zhp, void *data)
|
|||
if (getzoneid() == GLOBAL_ZONEID && cn->cn_zoned)
|
||||
clp->cl_haszonedchild = B_TRUE;
|
||||
|
||||
uu_avl_node_init(cn, &cn->cn_treenode, clp->cl_pool);
|
||||
|
||||
uu_avl_index_t idx;
|
||||
|
||||
if (uu_avl_find(clp->cl_tree, cn, NULL, &idx) == NULL) {
|
||||
uu_avl_insert(clp->cl_tree, cn, idx);
|
||||
avl_index_t idx;
|
||||
if (avl_find(&clp->cl_tree, cn, &idx) == NULL) {
|
||||
avl_insert(&clp->cl_tree, cn, idx);
|
||||
} else {
|
||||
free(cn);
|
||||
cn = NULL;
|
||||
|
|
@ -610,11 +568,11 @@ compare_props(const void *a, const void *b, zfs_prop_t prop)
|
|||
else if (!haspropa && !haspropb)
|
||||
return (0);
|
||||
else
|
||||
return (strcmp(propb, propa));
|
||||
return (TREE_ISIGN(strcmp(propb, propa)));
|
||||
}
|
||||
|
||||
static int
|
||||
compare_mountpoints(const void *a, const void *b, void *unused)
|
||||
compare_mountpoints(const void *a, const void *b)
|
||||
{
|
||||
/*
|
||||
* When unsharing or unmounting filesystems, we need to do it in
|
||||
|
|
@ -622,14 +580,12 @@ compare_mountpoints(const void *a, const void *b, void *unused)
|
|||
* hierarchy that is different from the dataset hierarchy, and still
|
||||
* allow it to be changed.
|
||||
*/
|
||||
(void) unused;
|
||||
return (compare_props(a, b, ZFS_PROP_MOUNTPOINT));
|
||||
}
|
||||
|
||||
static int
|
||||
compare_dataset_names(const void *a, const void *b, void *unused)
|
||||
compare_dataset_names(const void *a, const void *b)
|
||||
{
|
||||
(void) unused;
|
||||
return (compare_props(a, b, ZFS_PROP_NAME));
|
||||
}
|
||||
|
||||
|
|
@ -671,28 +627,14 @@ changelist_gather(zfs_handle_t *zhp, zfs_prop_t prop, int gather_flags,
|
|||
}
|
||||
}
|
||||
|
||||
clp->cl_pool = uu_avl_pool_create("changelist_pool",
|
||||
avl_create(&clp->cl_tree,
|
||||
legacy ? compare_dataset_names : compare_mountpoints,
|
||||
sizeof (prop_changenode_t),
|
||||
offsetof(prop_changenode_t, cn_treenode),
|
||||
legacy ? compare_dataset_names : compare_mountpoints, 0);
|
||||
if (clp->cl_pool == NULL) {
|
||||
assert(uu_error() == UU_ERROR_NO_MEMORY);
|
||||
(void) zfs_error(zhp->zfs_hdl, EZFS_NOMEM, "internal error");
|
||||
changelist_free(clp);
|
||||
return (NULL);
|
||||
}
|
||||
offsetof(prop_changenode_t, cn_treenode));
|
||||
|
||||
clp->cl_tree = uu_avl_create(clp->cl_pool, NULL, UU_DEFAULT);
|
||||
clp->cl_gflags = gather_flags;
|
||||
clp->cl_mflags = mnt_flags;
|
||||
|
||||
if (clp->cl_tree == NULL) {
|
||||
assert(uu_error() == UU_ERROR_NO_MEMORY);
|
||||
(void) zfs_error(zhp->zfs_hdl, EZFS_NOMEM, "internal error");
|
||||
changelist_free(clp);
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
/*
|
||||
* If this is a rename or the 'zoned' property, we pretend we're
|
||||
* changing the mountpoint and flag it so we can catch all children in
|
||||
|
|
@ -778,10 +720,9 @@ changelist_gather(zfs_handle_t *zhp, zfs_prop_t prop, int gather_flags,
|
|||
cn->cn_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
|
||||
cn->cn_needpost = B_TRUE;
|
||||
|
||||
uu_avl_node_init(cn, &cn->cn_treenode, clp->cl_pool);
|
||||
uu_avl_index_t idx;
|
||||
if (uu_avl_find(clp->cl_tree, cn, NULL, &idx) == NULL) {
|
||||
uu_avl_insert(clp->cl_tree, cn, idx);
|
||||
avl_index_t idx;
|
||||
if (avl_find(&clp->cl_tree, cn, &idx) == NULL) {
|
||||
avl_insert(&clp->cl_tree, cn, idx);
|
||||
} else {
|
||||
free(cn);
|
||||
zfs_close(temp);
|
||||
|
|
|
|||
|
|
@ -47,55 +47,37 @@
|
|||
#include <string.h>
|
||||
#include <unistd.h>
|
||||
#include <libintl.h>
|
||||
#include <libuutil.h>
|
||||
|
||||
#include "libzfs_impl.h"
|
||||
|
||||
typedef struct config_node {
|
||||
char *cn_name;
|
||||
nvlist_t *cn_config;
|
||||
uu_avl_node_t cn_avl;
|
||||
avl_node_t cn_avl;
|
||||
} config_node_t;
|
||||
|
||||
static int
|
||||
config_node_compare(const void *a, const void *b, void *unused)
|
||||
config_node_compare(const void *a, const void *b)
|
||||
{
|
||||
(void) unused;
|
||||
const config_node_t *ca = (config_node_t *)a;
|
||||
const config_node_t *cb = (config_node_t *)b;
|
||||
|
||||
int ret = strcmp(ca->cn_name, cb->cn_name);
|
||||
|
||||
if (ret < 0)
|
||||
return (-1);
|
||||
else if (ret > 0)
|
||||
return (1);
|
||||
else
|
||||
return (0);
|
||||
return (TREE_ISIGN(strcmp(ca->cn_name, cb->cn_name)));
|
||||
}
|
||||
|
||||
void
|
||||
namespace_clear(libzfs_handle_t *hdl)
|
||||
{
|
||||
if (hdl->libzfs_ns_avl) {
|
||||
config_node_t *cn;
|
||||
void *cookie = NULL;
|
||||
config_node_t *cn;
|
||||
void *cookie = NULL;
|
||||
|
||||
while ((cn = uu_avl_teardown(hdl->libzfs_ns_avl,
|
||||
&cookie)) != NULL) {
|
||||
nvlist_free(cn->cn_config);
|
||||
free(cn->cn_name);
|
||||
free(cn);
|
||||
}
|
||||
|
||||
uu_avl_destroy(hdl->libzfs_ns_avl);
|
||||
hdl->libzfs_ns_avl = NULL;
|
||||
while ((cn = avl_destroy_nodes(&hdl->libzfs_ns_avl, &cookie)) != NULL) {
|
||||
nvlist_free(cn->cn_config);
|
||||
free(cn->cn_name);
|
||||
free(cn);
|
||||
}
|
||||
|
||||
if (hdl->libzfs_ns_avlpool) {
|
||||
uu_avl_pool_destroy(hdl->libzfs_ns_avlpool);
|
||||
hdl->libzfs_ns_avlpool = NULL;
|
||||
}
|
||||
avl_destroy(&hdl->libzfs_ns_avl);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -111,20 +93,8 @@ namespace_reload(libzfs_handle_t *hdl)
|
|||
void *cookie;
|
||||
|
||||
if (hdl->libzfs_ns_gen == 0) {
|
||||
/*
|
||||
* This is the first time we've accessed the configuration
|
||||
* cache. Initialize the AVL tree and then fall through to the
|
||||
* common code.
|
||||
*/
|
||||
if ((hdl->libzfs_ns_avlpool = uu_avl_pool_create("config_pool",
|
||||
sizeof (config_node_t),
|
||||
offsetof(config_node_t, cn_avl),
|
||||
config_node_compare, UU_DEFAULT)) == NULL)
|
||||
return (no_memory(hdl));
|
||||
|
||||
if ((hdl->libzfs_ns_avl = uu_avl_create(hdl->libzfs_ns_avlpool,
|
||||
NULL, UU_DEFAULT)) == NULL)
|
||||
return (no_memory(hdl));
|
||||
avl_create(&hdl->libzfs_ns_avl, config_node_compare,
|
||||
sizeof (config_node_t), offsetof(config_node_t, cn_avl));
|
||||
}
|
||||
|
||||
zcmd_alloc_dst_nvlist(hdl, &zc, 0);
|
||||
|
|
@ -167,7 +137,7 @@ namespace_reload(libzfs_handle_t *hdl)
|
|||
* Clear out any existing configuration information.
|
||||
*/
|
||||
cookie = NULL;
|
||||
while ((cn = uu_avl_teardown(hdl->libzfs_ns_avl, &cookie)) != NULL) {
|
||||
while ((cn = avl_destroy_nodes(&hdl->libzfs_ns_avl, &cookie)) != NULL) {
|
||||
nvlist_free(cn->cn_config);
|
||||
free(cn->cn_name);
|
||||
free(cn);
|
||||
|
|
@ -176,7 +146,7 @@ namespace_reload(libzfs_handle_t *hdl)
|
|||
elem = NULL;
|
||||
while ((elem = nvlist_next_nvpair(config, elem)) != NULL) {
|
||||
nvlist_t *child;
|
||||
uu_avl_index_t where;
|
||||
avl_index_t where;
|
||||
|
||||
cn = zfs_alloc(hdl, sizeof (config_node_t));
|
||||
cn->cn_name = zfs_strdup(hdl, nvpair_name(elem));
|
||||
|
|
@ -187,10 +157,9 @@ namespace_reload(libzfs_handle_t *hdl)
|
|||
nvlist_free(config);
|
||||
return (no_memory(hdl));
|
||||
}
|
||||
verify(uu_avl_find(hdl->libzfs_ns_avl, cn, NULL, &where)
|
||||
== NULL);
|
||||
verify(avl_find(&hdl->libzfs_ns_avl, cn, &where) == NULL);
|
||||
|
||||
uu_avl_insert(hdl->libzfs_ns_avl, cn, where);
|
||||
avl_insert(&hdl->libzfs_ns_avl, cn, where);
|
||||
}
|
||||
|
||||
nvlist_free(config);
|
||||
|
|
@ -400,8 +369,8 @@ zpool_iter(libzfs_handle_t *hdl, zpool_iter_f func, void *data)
|
|||
return (-1);
|
||||
|
||||
hdl->libzfs_pool_iter++;
|
||||
for (cn = uu_avl_first(hdl->libzfs_ns_avl); cn != NULL;
|
||||
cn = uu_avl_next(hdl->libzfs_ns_avl, cn)) {
|
||||
for (cn = avl_first(&hdl->libzfs_ns_avl); cn != NULL;
|
||||
cn = AVL_NEXT(&hdl->libzfs_ns_avl, cn)) {
|
||||
|
||||
if (zpool_skip_pool(cn->cn_name))
|
||||
continue;
|
||||
|
|
@ -438,8 +407,8 @@ zfs_iter_root(libzfs_handle_t *hdl, zfs_iter_f func, void *data)
|
|||
if (namespace_reload(hdl) != 0)
|
||||
return (-1);
|
||||
|
||||
for (cn = uu_avl_first(hdl->libzfs_ns_avl); cn != NULL;
|
||||
cn = uu_avl_next(hdl->libzfs_ns_avl, cn)) {
|
||||
for (cn = avl_first(&hdl->libzfs_ns_avl); cn != NULL;
|
||||
cn = AVL_NEXT(&hdl->libzfs_ns_avl, cn)) {
|
||||
|
||||
if (zpool_skip_pool(cn->cn_name))
|
||||
continue;
|
||||
|
|
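Both converted comparators above reduce strcmp() through TREE_ISIGN() so the result is a strict -1/0/1, which is what the kernel-style avl_find()/avl_insert() interface expects. The following is an illustrative sketch only, not part of this merge: node_t, node_compare and node_tree_init are invented names, while avl_create() and TREE_ISIGN() are the <sys/avl.h> interfaces already used in the hunks above.

/*
 * Illustrative sketch only, not part of this commit.
 */
#include <stddef.h>
#include <string.h>
#include <sys/avl.h>

typedef struct node {
	const char	*n_name;
	avl_node_t	n_link;
} node_t;

static int
node_compare(const void *a, const void *b)
{
	const node_t *na = a;
	const node_t *nb = b;

	/* Clamp strcmp() to -1/0/1, as the converted comparators do. */
	return (TREE_ISIGN(strcmp(na->n_name, nb->n_name)));
}

static void
node_tree_init(avl_tree_t *tree)
{
	avl_create(tree, node_compare, sizeof (node_t),
	    offsetof(node_t, n_link));
}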
|
|||
|
|
@ -36,7 +36,6 @@
|
|||
#include <sys/zfs_ioctl.h>
|
||||
#include <regex.h>
|
||||
|
||||
#include <libuutil.h>
|
||||
#include <libzfs.h>
|
||||
#include <libshare.h>
|
||||
#include <libzfs_core.h>
|
||||
|
|
@ -51,8 +50,7 @@ struct libzfs_handle {
|
|||
int libzfs_error;
|
||||
int libzfs_fd;
|
||||
zpool_handle_t *libzfs_pool_handles;
|
||||
uu_avl_pool_t *libzfs_ns_avlpool;
|
||||
uu_avl_t *libzfs_ns_avl;
|
||||
avl_tree_t libzfs_ns_avl;
|
||||
uint64_t libzfs_ns_gen;
|
||||
int libzfs_desc_active;
|
||||
char libzfs_action[1024];
|
||||
|
|
|
|||
|
|
@ -78,7 +78,6 @@
|
|||
#include <libzutil.h>
|
||||
|
||||
#include "libzfs_impl.h"
|
||||
#include <thread_pool.h>
|
||||
|
||||
#include <libshare.h>
|
||||
#include <sys/systeminfo.h>
|
||||
|
|
@ -1071,7 +1070,7 @@ non_descendant_idx(zfs_handle_t **handles, size_t num_handles, int idx)
|
|||
|
||||
typedef struct mnt_param {
|
||||
libzfs_handle_t *mnt_hdl;
|
||||
tpool_t *mnt_tp;
|
||||
taskq_t *mnt_tq;
|
||||
zfs_handle_t **mnt_zhps; /* filesystems to mount */
|
||||
size_t mnt_num_handles;
|
||||
int mnt_idx; /* Index of selected entry to mount */
|
||||
|
|
@ -1085,19 +1084,20 @@ typedef struct mnt_param {
|
|||
*/
|
||||
static void
|
||||
zfs_dispatch_mount(libzfs_handle_t *hdl, zfs_handle_t **handles,
|
||||
size_t num_handles, int idx, zfs_iter_f func, void *data, tpool_t *tp)
|
||||
size_t num_handles, int idx, zfs_iter_f func, void *data, taskq_t *tq)
|
||||
{
|
||||
mnt_param_t *mnt_param = zfs_alloc(hdl, sizeof (mnt_param_t));
|
||||
|
||||
mnt_param->mnt_hdl = hdl;
|
||||
mnt_param->mnt_tp = tp;
|
||||
mnt_param->mnt_tq = tq;
|
||||
mnt_param->mnt_zhps = handles;
|
||||
mnt_param->mnt_num_handles = num_handles;
|
||||
mnt_param->mnt_idx = idx;
|
||||
mnt_param->mnt_func = func;
|
||||
mnt_param->mnt_data = data;
|
||||
|
||||
if (tpool_dispatch(tp, zfs_mount_task, (void*)mnt_param)) {
|
||||
if (taskq_dispatch(tq, zfs_mount_task, (void*)mnt_param,
|
||||
TQ_SLEEP) == TASKQID_INVALID) {
|
||||
/* Could not dispatch to thread pool; execute directly */
|
||||
zfs_mount_task((void*)mnt_param);
|
||||
}
|
||||
|
|
@ -1188,7 +1188,7 @@ zfs_mount_task(void *arg)
|
|||
if (!libzfs_path_contains(mountpoint, child))
|
||||
break; /* not a descendant, return */
|
||||
zfs_dispatch_mount(mp->mnt_hdl, handles, num_handles, i,
|
||||
mp->mnt_func, mp->mnt_data, mp->mnt_tp);
|
||||
mp->mnt_func, mp->mnt_data, mp->mnt_tq);
|
||||
}
|
||||
|
||||
out:
|
||||
|
|
@ -1246,7 +1246,8 @@ zfs_foreach_mountpoint(libzfs_handle_t *hdl, zfs_handle_t **handles,
|
|||
* Issue the callback function for each dataset using a parallel
|
||||
* algorithm that uses a thread pool to manage threads.
|
||||
*/
|
||||
tpool_t *tp = tpool_create(1, nthr, 0, NULL);
|
||||
taskq_t *tq = taskq_create("zfs_foreach_mountpoint", nthr, minclsyspri,
|
||||
1, INT_MAX, TASKQ_DYNAMIC);
|
||||
|
||||
/*
|
||||
* There may be multiple "top level" mountpoints outside of the pool's
|
||||
|
|
@ -1264,11 +1265,11 @@ zfs_foreach_mountpoint(libzfs_handle_t *hdl, zfs_handle_t **handles,
|
|||
zfs_prop_get_int(handles[i], ZFS_PROP_ZONED))
|
||||
break;
|
||||
zfs_dispatch_mount(hdl, handles, num_handles, i, func, data,
|
||||
tp);
|
||||
tq);
|
||||
}
|
||||
|
||||
tpool_wait(tp); /* wait for all scheduled mounts to complete */
|
||||
tpool_destroy(tp);
|
||||
taskq_wait(tq); /* wait for all scheduled mounts to complete */
|
||||
taskq_destroy(tq);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
|||
|
|
@@ -49,7 +49,6 @@
 #include <libzfs.h>
 
 #include "../../libzfs_impl.h"
-#include <thread_pool.h>
 
 #define ZS_COMMENT 0x00000000 /* comment */
 #define ZS_ZFSUTIL 0x00000001 /* caller is zfs(8) */
|
|||
|
|
@ -2004,14 +2004,17 @@
|
|||
<typedef-decl name='kthread_t' type-id='4051f5e7' id='9bccee1a'/>
|
||||
<typedef-decl name='uintptr_t' type-id='7359adad' id='e475ab95'/>
|
||||
<typedef-decl name='pthread_key_t' type-id='f0981eeb' id='2de5383b'/>
|
||||
<typedef-decl name='pthread_once_t' type-id='95e97e5e' id='2568d84b'/>
|
||||
<pointer-type-def type-id='9bccee1a' size-in-bits='64' id='6ae5a80d'/>
|
||||
<pointer-type-def type-id='6ae5a80d' size-in-bits='64' id='6e87b565'/>
|
||||
<pointer-type-def type-id='6e87b565' size-in-bits='64' id='4ea26b5d'/>
|
||||
<pointer-type-def type-id='2de5383b' size-in-bits='64' id='ce04b822'/>
|
||||
<pointer-type-def type-id='2568d84b' size-in-bits='64' id='d9bab700'/>
|
||||
<pointer-type-def type-id='d8481e1f' size-in-bits='64' id='41cce5ce'/>
|
||||
<pointer-type-def type-id='cfda1b05' size-in-bits='64' id='67918d75'/>
|
||||
<pointer-type-def type-id='65d297d1' size-in-bits='64' id='3a4f23d4'/>
|
||||
<pointer-type-def type-id='ef507f03' size-in-bits='64' id='4f8ed29a'/>
|
||||
<pointer-type-def type-id='ee076206' size-in-bits='64' id='953b12f8'/>
|
||||
<pointer-type-def type-id='c5c76c9c' size-in-bits='64' id='b7f9d8e6'/>
|
||||
<function-decl name='zk_thread_create' mangled-name='zk_thread_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zk_thread_create'>
|
||||
<parameter type-id='80f4b756'/>
|
||||
|
|
@ -2025,15 +2028,16 @@
|
|||
<parameter type-id='eaa32e2f'/>
|
||||
<return type-id='48b5725f'/>
|
||||
</function-decl>
|
||||
<function-decl name='pthread_once' visibility='default' binding='global' size-in-bits='64'>
|
||||
<parameter type-id='d9bab700'/>
|
||||
<parameter type-id='953b12f8'/>
|
||||
<return type-id='95e97e5e'/>
|
||||
</function-decl>
|
||||
<function-decl name='pthread_key_create' visibility='default' binding='global' size-in-bits='64'>
|
||||
<parameter type-id='ce04b822'/>
|
||||
<parameter type-id='b7f9d8e6'/>
|
||||
<return type-id='95e97e5e'/>
|
||||
</function-decl>
|
||||
<function-decl name='pthread_key_delete' visibility='default' binding='global' size-in-bits='64'>
|
||||
<parameter type-id='2de5383b'/>
|
||||
<return type-id='95e97e5e'/>
|
||||
</function-decl>
|
||||
<function-decl name='pthread_getspecific' visibility='default' binding='global' size-in-bits='64'>
|
||||
<parameter type-id='2de5383b'/>
|
||||
<return type-id='eaa32e2f'/>
|
||||
|
|
@ -2123,6 +2127,7 @@
|
|||
<function-decl name='taskq_cancel_id' mangled-name='taskq_cancel_id' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='taskq_cancel_id'>
|
||||
<parameter type-id='4f8ed29a' name='tq'/>
|
||||
<parameter type-id='de0ea20e' name='id'/>
|
||||
<parameter type-id='c19b74c3' name='wait'/>
|
||||
<return type-id='95e97e5e'/>
|
||||
</function-decl>
|
||||
<function-decl name='system_taskq_init' mangled-name='system_taskq_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='system_taskq_init'>
|
||||
|
|
@ -2131,6 +2136,9 @@
|
|||
<function-decl name='system_taskq_fini' mangled-name='system_taskq_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='system_taskq_fini'>
|
||||
<return type-id='48b5725f'/>
|
||||
</function-decl>
|
||||
<function-type size-in-bits='64' id='ee076206'>
|
||||
<return type-id='48b5725f'/>
|
||||
</function-type>
|
||||
<function-type size-in-bits='64' id='c5c76c9c'>
|
||||
<parameter type-id='eaa32e2f'/>
|
||||
<return type-id='48b5725f'/>
|
||||
|
|
|
|||
|
|
@@ -30,7 +30,6 @@ endif
 
 libzutil_la_LIBADD = \
 	libavl.la \
-	libtpool.la \
 	libnvpair.la \
 	libspl.la
 
|
|
|||
|
|
@@ -62,7 +62,6 @@
 #include <fcntl.h>
 
 #include <sys/efi_partition.h>
-#include <thread_pool.h>
 #include <libgeom.h>
 
 #include <sys/vdev_impl.h>
|
|||
|
|
@@ -63,7 +63,6 @@
 #include <sys/vdev_impl.h>
 #include <sys/fs/zfs.h>
 
-#include <thread_pool.h>
 #include <libzutil.h>
 #include <libnvpair.h>
 #include <libzfs.h>
|
|
|||
|
|
@@ -65,8 +65,8 @@
 #include <sys/dktp/fdisk.h>
 #include <sys/vdev_impl.h>
 #include <sys/fs/zfs.h>
+#include <sys/taskq.h>
 
-#include <thread_pool.h>
 #include <libzutil.h>
 #include <libnvpair.h>
 
|
|
@@ -1457,7 +1457,7 @@ zpool_find_import_impl(libpc_handle_t *hdl, importargs_t *iarg,
 	name_entry_t *ne, *nenext;
 	rdsk_node_t *slice;
 	void *cookie;
-	tpool_t *t;
+	taskq_t *tq;
 
 	verify(iarg->poolname == NULL || iarg->guid == 0);
 
@@ -1480,13 +1480,14 @@ zpool_find_import_impl(libpc_handle_t *hdl, importargs_t *iarg,
 	threads = MIN(threads, am / VDEV_LABELS);
 #endif
 #endif
-	t = tpool_create(1, threads, 0, NULL);
+	tq = taskq_create("zpool_find_import", threads, minclsyspri, 1, INT_MAX,
+	    TASKQ_DYNAMIC);
 	for (slice = avl_first(cache); slice;
 	    (slice = avl_walk(cache, slice, AVL_AFTER)))
-		(void) tpool_dispatch(t, zpool_open_func, slice);
+		(void) taskq_dispatch(tq, zpool_open_func, slice, TQ_SLEEP);
 
-	tpool_wait(t);
-	tpool_destroy(t);
+	taskq_wait(tq);
+	taskq_destroy(tq);
 
 	/*
 	 * Process the cache, filtering out any entries which are not
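With libtpool removed, userspace callers follow the create/dispatch/wait/destroy pattern shown in the hunk above through the userspace taskq shim. Below is a minimal sketch of that pattern, assuming the same <sys/taskq.h> interface, minclsyspri, and flags used in this diff; scan_one and scan_all are hypothetical names and this is illustrative code, not part of the merge.

/*
 * Illustrative sketch only, not part of this commit.
 */
#include <sys/taskq.h>

static void
scan_one(void *arg)
{
	(void) arg;		/* per-item work would go here */
}

static void
scan_all(void **items, int nitems, int nthreads)
{
	taskq_t *tq = taskq_create("example_scan", nthreads, minclsyspri,
	    1, INT_MAX, TASKQ_DYNAMIC);

	for (int i = 0; i < nitems; i++)
		(void) taskq_dispatch(tq, scan_one, items[i], TQ_SLEEP);

	taskq_wait(tq);		/* wait for every dispatched item */
	taskq_destroy(tq);
}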
|
|
|||
|
|
@@ -2,7 +2,7 @@
 # first. This ensures its module initialization function is run before
 # any of the other module initialization functions which depend on it.
 
-ZFS_MODULE_CFLAGS += -std=gnu99 -Wno-declaration-after-statement
+ZFS_MODULE_CFLAGS += -std=gnu11 -Wno-declaration-after-statement
 ZFS_MODULE_CFLAGS += -Wmissing-prototypes
 ZFS_MODULE_CFLAGS += @KERNEL_DEBUG_CFLAGS@ @KERNEL_NO_FORMAT_ZERO_LENGTH@
 
|
|
|||
|
|
@@ -351,7 +351,7 @@ taskq_free(taskq_ent_t *task)
 }
 
 int
-taskq_cancel_id(taskq_t *tq, taskqid_t tid)
+taskq_cancel_id(taskq_t *tq, taskqid_t tid, boolean_t wait)
 {
 	uint32_t pend;
 	int rc;
@@ -362,12 +362,12 @@ taskq_cancel_id(taskq_t *tq, taskqid_t tid)
 
 	if (ent->tqent_type == NORMAL_TASK) {
 		rc = taskqueue_cancel(tq->tq_queue, &ent->tqent_task, &pend);
-		if (rc == EBUSY)
+		if (rc == EBUSY && wait)
 			taskqueue_drain(tq->tq_queue, &ent->tqent_task);
 	} else {
 		rc = taskqueue_cancel_timeout(tq->tq_queue,
 		    &ent->tqent_timeout_task, &pend);
-		if (rc == EBUSY) {
+		if (rc == EBUSY && wait) {
 			taskqueue_drain_timeout(tq->tq_queue,
 			    &ent->tqent_timeout_task);
 		}
@@ -381,6 +381,13 @@ taskq_cancel_id(taskq_t *tq, taskqid_t tid)
 	}
 	/* Free the extra reference we added with taskq_lookup. */
 	taskq_free(ent);
+
+	/*
+	 * If task was running and we didn't wait, return EBUSY.
+	 * Otherwise return 0 if cancelled or ENOENT if not found.
+	 */
+	if (rc == EBUSY && !wait)
+		return (EBUSY);
 	return (pend ? 0 : ENOENT);
 }
 
|
|
|||
|
|
@@ -840,7 +840,7 @@ spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
 	id = skc->skc_taskqid;
 	spin_unlock(&skc->skc_lock);
 
-	taskq_cancel_id(spl_kmem_cache_taskq, id);
+	taskq_cancel_id(spl_kmem_cache_taskq, id, B_TRUE);
 
 	/*
 	 * Wait until all current callers complete, this is mainly
|
|||
|
|
@@ -598,13 +598,22 @@ taskq_of_curthread(void)
 EXPORT_SYMBOL(taskq_of_curthread);
 
 /*
- * Cancel an already dispatched task given the task id. Still pending tasks
- * will be immediately canceled, and if the task is active the function will
- * block until it completes. Preallocated tasks which are canceled must be
- * freed by the caller.
+ * Cancel a dispatched task. Pending tasks are cancelled immediately.
+ * If the task is running, behavior depends on wait parameter:
+ *  - wait=B_TRUE: Block until task completes
+ *  - wait=B_FALSE: Return EBUSY immediately
+ *
+ * Return values:
+ *   0 - Cancelled before execution. Caller must release resources.
+ *   EBUSY - Task running (wait=B_FALSE only). Will self-cleanup.
+ *   ENOENT - Not found, or completed after waiting. Already cleaned up.
+ *
+ * Note: wait=B_TRUE returns ENOENT (not EBUSY) after waiting because
+ * the task no longer exists. This distinguishes "cancelled before run"
+ * from "completed naturally" for proper resource management.
 */
 int
-taskq_cancel_id(taskq_t *tq, taskqid_t id)
+taskq_cancel_id(taskq_t *tq, taskqid_t id, boolean_t wait)
 {
 	taskq_ent_t *t;
 	int rc = ENOENT;
@@ -633,14 +642,31 @@ taskq_cancel_id(taskq_t *tq, taskqid_t id)
 
 		/*
 		 * The task_expire() function takes the tq->tq_lock so drop
-		 * drop the lock before synchronously cancelling the timer.
+		 * the lock before synchronously cancelling the timer.
+		 *
+		 * Always call timer_delete_sync() unconditionally. A
+		 * timer_pending() check would be insufficient and unsafe.
+		 * When a timer expires, it is immediately dequeued from the
+		 * timer wheel (timer_pending() returns FALSE), but the
+		 * callback (task_expire) may not run until later.
+		 *
+		 * The race window:
+		 *   1) Timer expires and is dequeued - timer_pending() now
+		 *      returns FALSE
+		 *   2) task_done() is called below, freeing the task, sets
+		 *      tqent_func = NULL and clears flags including CANCEL
+		 *   3) Timer callback finally runs, sees no CANCEL flag,
+		 *      queues task to prio_list
+		 *   4) Worker thread attempts to execute NULL tqent_func
+		 *      and panics
+		 *
+		 * timer_delete_sync() prevents this by ensuring the timer
+		 * callback completes before the task is freed.
 		 */
-		if (timer_pending(&t->tqent_timer)) {
-			spin_unlock_irqrestore(&tq->tq_lock, flags);
-			timer_delete_sync(&t->tqent_timer);
-			spin_lock_irqsave_nested(&tq->tq_lock, flags,
-			    tq->tq_lock_class);
-		}
+		spin_unlock_irqrestore(&tq->tq_lock, flags);
+		timer_delete_sync(&t->tqent_timer);
+		spin_lock_irqsave_nested(&tq->tq_lock, flags,
+		    tq->tq_lock_class);
 
 		if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
 			task_done(tq, t);
@@ -650,8 +676,12 @@ taskq_cancel_id(taskq_t *tq, taskqid_t id)
 	spin_unlock_irqrestore(&tq->tq_lock, flags);
 
 	if (t == ERR_PTR(-EBUSY)) {
-		taskq_wait_id(tq, id);
-		rc = EBUSY;
+		if (wait) {
+			taskq_wait_id(tq, id);
+			rc = ENOENT;	/* Completed, no longer exists */
+		} else {
+			rc = EBUSY;	/* Still running */
+		}
 	}
 
 	return (rc);
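The rewritten comment above changes who is responsible for releasing a cancelled task's argument, so callers now branch on the return value. A hedged sketch of that calling pattern follows; it is not part of the merge, my_arg_t and example_cancel are invented names, and only taskq_cancel_id() and kmem_free() are real interfaces.

/*
 * Illustrative sketch only, not part of this commit.
 */
#include <sys/taskq.h>
#include <sys/kmem.h>

typedef struct my_arg {
	int	ma_value;
} my_arg_t;

static int
example_cancel(taskq_t *tq, taskqid_t id, my_arg_t *arg)
{
	/* Non-blocking attempt: do not wait for a running task. */
	int err = taskq_cancel_id(tq, id, B_FALSE);

	if (err == 0) {
		/* Cancelled before it ran; the caller still owns arg. */
		kmem_free(arg, sizeof (my_arg_t));
	} else if (err == EBUSY) {
		/* Task is running; by convention it frees arg itself. */
	} else {
		/* ENOENT: task already completed and cleaned up. */
	}
	return (err);
}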
|
|||
|
|
@ -120,7 +120,6 @@ typedef struct {
|
|||
spa_t *se_spa; /* pool spa */
|
||||
uint64_t se_objsetid; /* snapshot objset id */
|
||||
struct dentry *se_root_dentry; /* snapshot root dentry */
|
||||
krwlock_t se_taskqid_lock; /* scheduled unmount taskqid lock */
|
||||
taskqid_t se_taskqid; /* scheduled unmount taskqid */
|
||||
avl_node_t se_node_name; /* zfs_snapshots_by_name link */
|
||||
avl_node_t se_node_objsetid; /* zfs_snapshots_by_objsetid link */
|
||||
|
|
@ -147,7 +146,6 @@ zfsctl_snapshot_alloc(const char *full_name, const char *full_path, spa_t *spa,
|
|||
se->se_objsetid = objsetid;
|
||||
se->se_root_dentry = root_dentry;
|
||||
se->se_taskqid = TASKQID_INVALID;
|
||||
rw_init(&se->se_taskqid_lock, NULL, RW_DEFAULT, NULL);
|
||||
|
||||
zfs_refcount_create(&se->se_refcount);
|
||||
|
||||
|
|
@ -164,7 +162,6 @@ zfsctl_snapshot_free(zfs_snapentry_t *se)
|
|||
zfs_refcount_destroy(&se->se_refcount);
|
||||
kmem_strfree(se->se_name);
|
||||
kmem_strfree(se->se_path);
|
||||
rw_destroy(&se->se_taskqid_lock);
|
||||
|
||||
kmem_free(se, sizeof (zfs_snapentry_t));
|
||||
}
|
||||
|
|
@ -340,17 +337,15 @@ snapentry_expire(void *data)
|
|||
return;
|
||||
}
|
||||
|
||||
rw_enter(&se->se_taskqid_lock, RW_WRITER);
|
||||
se->se_taskqid = TASKQID_INVALID;
|
||||
rw_exit(&se->se_taskqid_lock);
|
||||
(void) zfsctl_snapshot_unmount(se->se_name, MNT_EXPIRE);
|
||||
zfsctl_snapshot_rele(se);
|
||||
|
||||
/*
|
||||
* Reschedule the unmount if the zfs_snapentry_t wasn't removed.
|
||||
* Clear taskqid and reschedule if the snapshot wasn't removed.
|
||||
* This can occur when the snapshot is busy.
|
||||
*/
|
||||
rw_enter(&zfs_snapshot_lock, RW_READER);
|
||||
rw_enter(&zfs_snapshot_lock, RW_WRITER);
|
||||
se->se_taskqid = TASKQID_INVALID;
|
||||
zfsctl_snapshot_rele(se);
|
||||
if ((se = zfsctl_snapshot_find_by_objsetid(spa, objsetid)) != NULL) {
|
||||
zfsctl_snapshot_unmount_delay_impl(se, zfs_expire_snapshot);
|
||||
zfsctl_snapshot_rele(se);
|
||||
|
|
@ -367,17 +362,17 @@ static void
|
|||
zfsctl_snapshot_unmount_cancel(zfs_snapentry_t *se)
|
||||
{
|
||||
int err = 0;
|
||||
rw_enter(&se->se_taskqid_lock, RW_WRITER);
|
||||
err = taskq_cancel_id(system_delay_taskq, se->se_taskqid);
|
||||
|
||||
ASSERT(RW_WRITE_HELD(&zfs_snapshot_lock));
|
||||
|
||||
err = taskq_cancel_id(system_delay_taskq, se->se_taskqid, B_FALSE);
|
||||
/*
|
||||
* if we get ENOENT, the taskq couldn't be found to be
|
||||
* canceled, so we can just mark it as invalid because
|
||||
* it's already gone. If we got EBUSY, then we already
|
||||
* blocked until it was gone _anyway_, so we don't care.
|
||||
* Clear taskqid only if we successfully cancelled before execution.
|
||||
* For ENOENT, task already cleared it. For EBUSY, task will clear
|
||||
* it when done.
|
||||
*/
|
||||
se->se_taskqid = TASKQID_INVALID;
|
||||
rw_exit(&se->se_taskqid_lock);
|
||||
if (err == 0) {
|
||||
se->se_taskqid = TASKQID_INVALID;
|
||||
zfsctl_snapshot_rele(se);
|
||||
}
|
||||
}
|
||||
|
|
@ -388,12 +383,11 @@ zfsctl_snapshot_unmount_cancel(zfs_snapentry_t *se)
|
|||
static void
|
||||
zfsctl_snapshot_unmount_delay_impl(zfs_snapentry_t *se, int delay)
|
||||
{
|
||||
ASSERT(RW_LOCK_HELD(&zfs_snapshot_lock));
|
||||
|
||||
if (delay <= 0)
|
||||
return;
|
||||
|
||||
zfsctl_snapshot_hold(se);
|
||||
rw_enter(&se->se_taskqid_lock, RW_WRITER);
|
||||
/*
|
||||
* If this condition happens, we managed to:
|
||||
* - dispatch once
|
||||
|
|
@ -404,13 +398,12 @@ zfsctl_snapshot_unmount_delay_impl(zfs_snapentry_t *se, int delay)
|
|||
* no problem.
|
||||
*/
|
||||
if (se->se_taskqid != TASKQID_INVALID) {
|
||||
rw_exit(&se->se_taskqid_lock);
|
||||
zfsctl_snapshot_rele(se);
|
||||
return;
|
||||
}
|
||||
|
||||
zfsctl_snapshot_hold(se);
|
||||
se->se_taskqid = taskq_dispatch_delay(system_delay_taskq,
|
||||
snapentry_expire, se, TQ_SLEEP, ddi_get_lbolt() + delay * HZ);
|
||||
rw_exit(&se->se_taskqid_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -425,7 +418,7 @@ zfsctl_snapshot_unmount_delay(spa_t *spa, uint64_t objsetid, int delay)
|
|||
zfs_snapentry_t *se;
|
||||
int error = ENOENT;
|
||||
|
||||
rw_enter(&zfs_snapshot_lock, RW_READER);
|
||||
rw_enter(&zfs_snapshot_lock, RW_WRITER);
|
||||
if ((se = zfsctl_snapshot_find_by_objsetid(spa, objsetid)) != NULL) {
|
||||
zfsctl_snapshot_unmount_cancel(se);
|
||||
zfsctl_snapshot_unmount_delay_impl(se, delay);
|
||||
|
|
@ -614,13 +607,18 @@ zfsctl_destroy(zfsvfs_t *zfsvfs)
|
|||
|
||||
rw_enter(&zfs_snapshot_lock, RW_WRITER);
|
||||
se = zfsctl_snapshot_find_by_objsetid(spa, objsetid);
|
||||
if (se != NULL)
|
||||
zfsctl_snapshot_remove(se);
|
||||
rw_exit(&zfs_snapshot_lock);
|
||||
if (se != NULL) {
|
||||
zfsctl_snapshot_remove(se);
|
||||
/*
|
||||
* Don't wait if snapentry_expire task is calling
|
||||
* umount, which may have resulted in this destroy
|
||||
* call. Waiting would deadlock: snapentry_expire
|
||||
* waits for umount while umount waits for task.
|
||||
*/
|
||||
zfsctl_snapshot_unmount_cancel(se);
|
||||
zfsctl_snapshot_rele(se);
|
||||
}
|
||||
rw_exit(&zfs_snapshot_lock);
|
||||
} else if (zfsvfs->z_ctldir) {
|
||||
iput(zfsvfs->z_ctldir);
|
||||
zfsvfs->z_ctldir = NULL;
|
||||
|
|
|
|||
|
|
@@ -573,7 +573,8 @@ zfs_unlinked_drain_stop_wait(zfsvfs_t *zfsvfs)
 	if (zfsvfs->z_draining) {
 		zfsvfs->z_drain_cancel = B_TRUE;
 		taskq_cancel_id(dsl_pool_unlinked_drain_taskq(
-		    dmu_objset_pool(zfsvfs->z_os)), zfsvfs->z_drain_task);
+		    dmu_objset_pool(zfsvfs->z_os)), zfsvfs->z_drain_task,
+		    B_TRUE);
 		zfsvfs->z_drain_task = TASKQID_INVALID;
 		zfsvfs->z_draining = B_FALSE;
 	}
|
|
|||
|
|
@ -362,20 +362,26 @@ static const ddt_kstats_t ddt_kstats_template = {
|
|||
};
|
||||
|
||||
#ifdef _KERNEL
|
||||
/*
|
||||
* Hot-path lookup counters use wmsums to avoid cache line bouncing.
|
||||
* DDT_KSTAT_BUMP: Increment a wmsum counter (lookup stats).
|
||||
*
|
||||
* Sync-only counters use direct kstat assignment (no atomics needed).
|
||||
* DDT_KSTAT_SET: Set a value (log entry counts, rates).
|
||||
* DDT_KSTAT_SUB: Subtract from a value (decrement log entry counts).
|
||||
* DDT_KSTAT_ZERO: Zero a value (clear log entry counts).
|
||||
*/
|
||||
#define _DDT_KSTAT_STAT(ddt, stat) \
|
||||
&((ddt_kstats_t *)(ddt)->ddt_ksp->ks_data)->stat.value.ui64
|
||||
#define DDT_KSTAT_BUMP(ddt, stat) \
|
||||
do { atomic_inc_64(_DDT_KSTAT_STAT(ddt, stat)); } while (0)
|
||||
#define DDT_KSTAT_ADD(ddt, stat, val) \
|
||||
do { atomic_add_64(_DDT_KSTAT_STAT(ddt, stat), val); } while (0)
|
||||
wmsum_add(&(ddt)->ddt_kstat_##stat, 1)
|
||||
#define DDT_KSTAT_SUB(ddt, stat, val) \
|
||||
do { atomic_sub_64(_DDT_KSTAT_STAT(ddt, stat), val); } while (0)
|
||||
do { *_DDT_KSTAT_STAT(ddt, stat) -= (val); } while (0)
|
||||
#define DDT_KSTAT_SET(ddt, stat, val) \
|
||||
do { atomic_store_64(_DDT_KSTAT_STAT(ddt, stat), val); } while (0)
|
||||
do { *_DDT_KSTAT_STAT(ddt, stat) = (val); } while (0)
|
||||
#define DDT_KSTAT_ZERO(ddt, stat) DDT_KSTAT_SET(ddt, stat, 0)
|
||||
#else
|
||||
#define DDT_KSTAT_BUMP(ddt, stat) do {} while (0)
|
||||
#define DDT_KSTAT_ADD(ddt, stat, val) do {} while (0)
|
||||
#define DDT_KSTAT_SUB(ddt, stat, val) do {} while (0)
|
||||
#define DDT_KSTAT_SET(ddt, stat, val) do {} while (0)
|
||||
#define DDT_KSTAT_ZERO(ddt, stat) do {} while (0)
|
||||
|
|
@ -783,7 +789,7 @@ ddt_class_start(void)
|
|||
{
|
||||
uint64_t start = gethrestime_sec();
|
||||
|
||||
if (ddt_prune_artificial_age) {
|
||||
if (unlikely(ddt_prune_artificial_age)) {
|
||||
/*
|
||||
* debug aide -- simulate a wider distribution
|
||||
* so we don't have to wait for an aged DDT
|
||||
|
|
@ -1004,6 +1010,7 @@ ddt_alloc_entry_io(ddt_entry_t *dde)
|
|||
return;
|
||||
|
||||
dde->dde_io = kmem_zalloc(sizeof (ddt_entry_io_t), KM_SLEEP);
|
||||
mutex_init(&dde->dde_io->dde_io_lock, NULL, MUTEX_DEFAULT, NULL);
|
||||
}
|
||||
|
||||
static void
|
||||
|
|
@ -1016,6 +1023,7 @@ ddt_free(const ddt_t *ddt, ddt_entry_t *dde)
|
|||
if (dde->dde_io->dde_repair_abd != NULL)
|
||||
abd_free(dde->dde_io->dde_repair_abd);
|
||||
|
||||
mutex_destroy(&dde->dde_io->dde_io_lock);
|
||||
kmem_free(dde->dde_io, sizeof (ddt_entry_io_t));
|
||||
}
|
||||
|
||||
|
|
@ -1171,7 +1179,7 @@ ddt_lookup(ddt_t *ddt, const blkptr_t *bp, boolean_t verify)
|
|||
|
||||
ASSERT(MUTEX_HELD(&ddt->ddt_lock));
|
||||
|
||||
if (ddt->ddt_version == DDT_VERSION_UNCONFIGURED) {
|
||||
if (unlikely(ddt->ddt_version == DDT_VERSION_UNCONFIGURED)) {
|
||||
/*
|
||||
* This is the first use of this DDT since the pool was
|
||||
* created; finish getting it ready for use.
|
||||
|
|
@ -1594,6 +1602,46 @@ not_found:
|
|||
return (0);
|
||||
}
|
||||
|
||||
static int
|
||||
ddt_kstat_update(kstat_t *ksp, int rw)
|
||||
{
|
||||
ddt_t *ddt = ksp->ks_private;
|
||||
ddt_kstats_t *dds = ksp->ks_data;
|
||||
|
||||
if (rw == KSTAT_WRITE)
|
||||
return (SET_ERROR(EACCES));
|
||||
|
||||
/* Aggregate wmsum counters for lookup stats */
|
||||
dds->dds_lookup.value.ui64 =
|
||||
wmsum_value(&ddt->ddt_kstat_dds_lookup);
|
||||
dds->dds_lookup_live_hit.value.ui64 =
|
||||
wmsum_value(&ddt->ddt_kstat_dds_lookup_live_hit);
|
||||
dds->dds_lookup_live_wait.value.ui64 =
|
||||
wmsum_value(&ddt->ddt_kstat_dds_lookup_live_wait);
|
||||
dds->dds_lookup_live_miss.value.ui64 =
|
||||
wmsum_value(&ddt->ddt_kstat_dds_lookup_live_miss);
|
||||
dds->dds_lookup_existing.value.ui64 =
|
||||
wmsum_value(&ddt->ddt_kstat_dds_lookup_existing);
|
||||
dds->dds_lookup_new.value.ui64 =
|
||||
wmsum_value(&ddt->ddt_kstat_dds_lookup_new);
|
||||
dds->dds_lookup_log_hit.value.ui64 =
|
||||
wmsum_value(&ddt->ddt_kstat_dds_lookup_log_hit);
|
||||
dds->dds_lookup_log_active_hit.value.ui64 =
|
||||
wmsum_value(&ddt->ddt_kstat_dds_lookup_log_active_hit);
|
||||
dds->dds_lookup_log_flushing_hit.value.ui64 =
|
||||
wmsum_value(&ddt->ddt_kstat_dds_lookup_log_flushing_hit);
|
||||
dds->dds_lookup_log_miss.value.ui64 =
|
||||
wmsum_value(&ddt->ddt_kstat_dds_lookup_log_miss);
|
||||
dds->dds_lookup_stored_hit.value.ui64 =
|
||||
wmsum_value(&ddt->ddt_kstat_dds_lookup_stored_hit);
|
||||
dds->dds_lookup_stored_miss.value.ui64 =
|
||||
wmsum_value(&ddt->ddt_kstat_dds_lookup_stored_miss);
|
||||
|
||||
/* Sync-only counters are already set directly in kstats */
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
||||
static void
|
||||
ddt_table_alloc_kstats(ddt_t *ddt)
|
||||
{
|
||||
|
|
@ -1601,12 +1649,28 @@ ddt_table_alloc_kstats(ddt_t *ddt)
|
|||
char *name = kmem_asprintf("ddt_stats_%s",
|
||||
zio_checksum_table[ddt->ddt_checksum].ci_name);
|
||||
|
||||
/* Initialize wmsums for lookup counters */
|
||||
wmsum_init(&ddt->ddt_kstat_dds_lookup, 0);
|
||||
wmsum_init(&ddt->ddt_kstat_dds_lookup_live_hit, 0);
|
||||
wmsum_init(&ddt->ddt_kstat_dds_lookup_live_wait, 0);
|
||||
wmsum_init(&ddt->ddt_kstat_dds_lookup_live_miss, 0);
|
||||
wmsum_init(&ddt->ddt_kstat_dds_lookup_existing, 0);
|
||||
wmsum_init(&ddt->ddt_kstat_dds_lookup_new, 0);
|
||||
wmsum_init(&ddt->ddt_kstat_dds_lookup_log_hit, 0);
|
||||
wmsum_init(&ddt->ddt_kstat_dds_lookup_log_active_hit, 0);
|
||||
wmsum_init(&ddt->ddt_kstat_dds_lookup_log_flushing_hit, 0);
|
||||
wmsum_init(&ddt->ddt_kstat_dds_lookup_log_miss, 0);
|
||||
wmsum_init(&ddt->ddt_kstat_dds_lookup_stored_hit, 0);
|
||||
wmsum_init(&ddt->ddt_kstat_dds_lookup_stored_miss, 0);
|
||||
|
||||
ddt->ddt_ksp = kstat_create(mod, 0, name, "misc", KSTAT_TYPE_NAMED,
|
||||
sizeof (ddt_kstats_t) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
|
||||
if (ddt->ddt_ksp != NULL) {
|
||||
ddt_kstats_t *dds = kmem_alloc(sizeof (ddt_kstats_t), KM_SLEEP);
|
||||
memcpy(dds, &ddt_kstats_template, sizeof (ddt_kstats_t));
|
||||
ddt->ddt_ksp->ks_data = dds;
|
||||
ddt->ddt_ksp->ks_update = ddt_kstat_update;
|
||||
ddt->ddt_ksp->ks_private = ddt;
|
||||
kstat_install(ddt->ddt_ksp);
|
||||
}
|
||||
|
||||
|
|
@ -1648,6 +1712,20 @@ ddt_table_free(ddt_t *ddt)
|
|||
kstat_delete(ddt->ddt_ksp);
|
||||
}
|
||||
|
||||
/* Cleanup wmsums for lookup counters */
|
||||
wmsum_fini(&ddt->ddt_kstat_dds_lookup);
|
||||
wmsum_fini(&ddt->ddt_kstat_dds_lookup_live_hit);
|
||||
wmsum_fini(&ddt->ddt_kstat_dds_lookup_live_wait);
|
||||
wmsum_fini(&ddt->ddt_kstat_dds_lookup_live_miss);
|
||||
wmsum_fini(&ddt->ddt_kstat_dds_lookup_existing);
|
||||
wmsum_fini(&ddt->ddt_kstat_dds_lookup_new);
|
||||
wmsum_fini(&ddt->ddt_kstat_dds_lookup_log_hit);
|
||||
wmsum_fini(&ddt->ddt_kstat_dds_lookup_log_active_hit);
|
||||
wmsum_fini(&ddt->ddt_kstat_dds_lookup_log_flushing_hit);
|
||||
wmsum_fini(&ddt->ddt_kstat_dds_lookup_log_miss);
|
||||
wmsum_fini(&ddt->ddt_kstat_dds_lookup_stored_hit);
|
||||
wmsum_fini(&ddt->ddt_kstat_dds_lookup_stored_miss);
|
||||
|
||||
ddt_log_free(ddt);
|
||||
ASSERT0(avl_numnodes(&ddt->ddt_tree));
|
||||
ASSERT0(avl_numnodes(&ddt->ddt_repair_tree));
|
||||
@@ -1452,7 +1452,8 @@ dmu_objset_upgrade_stop(objset_t *os)
 	os->os_upgrade_id = 0;
 	mutex_exit(&os->os_upgrade_lock);
 
-	if ((taskq_cancel_id(os->os_spa->spa_upgrade_taskq, id)) == 0) {
+	if ((taskq_cancel_id(os->os_spa->spa_upgrade_taskq, id,
+	    B_TRUE)) == 0) {
 		dsl_dataset_long_rele(dmu_objset_ds(os), upgrade_tag);
 	}
 	txg_wait_synced(os->os_spa->spa_dsl_pool, 0);
@@ -1934,7 +1934,7 @@ spa_deactivate(spa_t *spa)
 	list_destroy(&spa->spa_evicting_os_list);
 	list_destroy(&spa->spa_state_dirty_list);
 
-	taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
+	taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid, B_TRUE);
 
 	for (int t = 0; t < ZIO_TYPES; t++) {
 		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
@@ -10451,7 +10451,7 @@ spa_sync(spa_t *spa, uint64_t txg)
 
 	spa->spa_sync_starttime = gethrtime();
 
-	taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
+	taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid, B_TRUE);
 	spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
 	    spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
 	    NSEC_TO_TICK(spa->spa_deadman_synctime));
@@ -10508,7 +10508,7 @@ spa_sync(spa_t *spa, uint64_t txg)
 	spa_sync_rewrite_vdev_config(spa, tx);
 	dmu_tx_commit(tx);
 
-	taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
+	taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid, B_TRUE);
 	spa->spa_deadman_tqid = 0;
 
 	/*
|
|||
|
|
@@ -155,11 +155,11 @@ chksum_run(chksum_stat_t *cs, abd_t *abd, void *ctx, int round,
switch (round) {
case 1: /* 1k */
size = 1<<10; loops = 128; break;
case 2: /* 2k */
case 2: /* 4k */
size = 1<<12; loops = 64; break;
case 3: /* 4k */
case 3: /* 16k */
size = 1<<14; loops = 32; break;
case 4: /* 16k */
case 4: /* 64k */
size = 1<<16; loops = 16; break;
case 5: /* 256k */
size = 1<<18; loops = 8; break;

@@ -212,6 +212,7 @@ chksum_benchit(chksum_stat_t *cs)
chksum_run(cs, abd, ctx, 2, &cs->bs4k);
chksum_run(cs, abd, ctx, 3, &cs->bs16k);
chksum_run(cs, abd, ctx, 4, &cs->bs64k);
chksum_run(cs, abd, ctx, 5, &cs->bs256k);
chksum_run(cs, abd, ctx, 6, &cs->bs1m);
abd_free(abd);

@@ -249,15 +250,16 @@ chksum_benchmark(void)
if (chksum_stat_limit == AT_DONE)
return;

/* count implementations */
chksum_stat_cnt = 1; /* edonr */
chksum_stat_cnt += 1; /* skein */
chksum_stat_cnt += sha256->getcnt();
chksum_stat_cnt += sha512->getcnt();
chksum_stat_cnt += blake3->getcnt();
chksum_stat_data = kmem_zalloc(
sizeof (chksum_stat_t) * chksum_stat_cnt, KM_SLEEP);
if (chksum_stat_limit == AT_STARTUP) {
chksum_stat_cnt = 1; /* edonr */
chksum_stat_cnt += 1; /* skein */
chksum_stat_cnt += sha256->getcnt();
chksum_stat_cnt += sha512->getcnt();
chksum_stat_cnt += blake3->getcnt();
chksum_stat_data = kmem_zalloc(
sizeof (chksum_stat_t) * chksum_stat_cnt, KM_SLEEP);
}

/* edonr - needs to be the first one here (slow CPU check) */
cs = &chksum_stat_data[cbid++];

@@ -1531,7 +1531,8 @@ zfs_ereport_taskq_fini(void)
{
mutex_enter(&recent_events_lock);
if (recent_events_cleaner_tqid != 0) {
taskq_cancel_id(system_delay_taskq, recent_events_cleaner_tqid);
taskq_cancel_id(system_delay_taskq, recent_events_cleaner_tqid,
B_TRUE);
recent_events_cleaner_tqid = 0;
}
mutex_exit(&recent_events_lock);

@@ -3088,7 +3088,7 @@ zio_gang_issue(zio_t *zio)
}

static void
zio_gang_inherit_allocator(zio_t *pio, zio_t *cio)
zio_inherit_allocator(zio_t *pio, zio_t *cio)
{
cio->io_allocator = pio->io_allocator;
}

@@ -3223,7 +3223,7 @@ zio_write_gang_block(zio_t *pio, metaslab_class_t *mc)
zio_write_gang_done, NULL, pio->io_priority,
ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);

zio_gang_inherit_allocator(pio, zio);
zio_inherit_allocator(pio, zio);
if (pio->io_flags & ZIO_FLAG_ALLOC_THROTTLED) {
boolean_t more;
VERIFY(metaslab_class_throttle_reserve(mc, zio->io_allocator,

@@ -3285,7 +3285,7 @@ zio_write_gang_block(zio_t *pio, metaslab_class_t *mc)
(allocated ? ZIO_FLAG_PREALLOCATED : 0), &pio->io_bookmark);

resid -= psize;
zio_gang_inherit_allocator(zio, cio);
zio_inherit_allocator(zio, cio);
if (allocated) {
metaslab_trace_move(&cio_list, &cio->io_alloc_list);
metaslab_group_alloc_increment_all(spa,

@@ -3683,7 +3683,7 @@ zio_ddt_child_write_done(zio_t *zio)
ddt_phys_variant_t v = DDT_PHYS_VARIANT(ddt, p);
ddt_univ_phys_t *ddp = dde->dde_phys;

ddt_enter(ddt);
mutex_enter(&dde->dde_io->dde_io_lock);

/* we're the lead, so once we're done there's no one else outstanding */
if (dde->dde_io->dde_lead_zio[p] == zio)

@@ -3700,21 +3700,24 @@ zio_ddt_child_write_done(zio_t *zio)
ddt_phys_unextend(ddp, orig, v);
ddt_phys_clear(orig, v);

mutex_exit(&dde->dde_io->dde_io_lock);

/*
* Undo the optimistic refcount increments that were done in
* zio_ddt_write() for all non-DDT-child parents. Since errors
* are rare, taking the global lock here is acceptable.
*/
ddt_enter(ddt);
zio_t *pio;
zl = NULL;
while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
if (!(pio->io_flags & ZIO_FLAG_DDT_CHILD))
ddt_phys_decref(ddp, v);
}
ddt_exit(ddt);
return;
}

/*
* Add references for all dedup writes that were waiting on the
* physical one, skipping any other physical writes that are waiting.
*/
zio_t *pio;
zl = NULL;
while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
if (!(pio->io_flags & ZIO_FLAG_DDT_CHILD))
ddt_phys_addref(ddp, v);
}

/*
* We've successfully added new DVAs to the entry. Clear the saved
* state or, if there's still outstanding IO, remember it so we can

@@ -3725,7 +3728,7 @@ zio_ddt_child_write_done(zio_t *zio)
else
ddt_phys_copy(orig, ddp, v);

ddt_exit(ddt);
mutex_exit(&dde->dde_io->dde_io_lock);
}

static void

@@ -3753,7 +3756,7 @@ zio_ddt_child_write_ready(zio_t *zio)
if (zio->io_error != 0)
return;

ddt_enter(ddt);
mutex_enter(&dde->dde_io->dde_io_lock);

ddt_phys_extend(dde->dde_phys, v, zio->io_bp);

@@ -3764,7 +3767,7 @@ zio_ddt_child_write_ready(zio_t *zio)
ddt_bp_fill(dde->dde_phys, v, pio->io_bp, zio->io_txg);
}

ddt_exit(ddt);
mutex_exit(&dde->dde_io->dde_io_lock);
}

static zio_t *

@@ -3799,11 +3802,11 @@ zio_ddt_write(zio_t *zio)
dde = ddt_lookup(ddt, bp, B_FALSE);
if (dde == NULL) {
/* DDT size is over its quota so no new entries */
ddt_exit(ddt);
zp->zp_dedup = B_FALSE;
BP_SET_DEDUP(bp, B_FALSE);
if (zio->io_bp_override == NULL)
zio->io_pipeline = ZIO_WRITE_PIPELINE;
ddt_exit(ddt);
return (zio);
}

@@ -3814,6 +3817,7 @@ zio_ddt_write(zio_t *zio)
* we can't resolve it, so just convert to an ordinary write.
* (And automatically e-mail a paper to Nature?)
*/
ddt_exit(ddt);
if (!(zio_checksum_table[zp->zp_checksum].ci_flags &
ZCHECKSUM_FLAG_DEDUP)) {
zp->zp_checksum = spa_dedup_checksum(spa);

@@ -3826,7 +3830,6 @@ zio_ddt_write(zio_t *zio)
}
ASSERT(!BP_GET_DEDUP(bp));
zio->io_pipeline = ZIO_WRITE_PIPELINE;
ddt_exit(ddt);
return (zio);
}

@@ -3877,10 +3880,15 @@ zio_ddt_write(zio_t *zio)
uint8_t parent_dvas = 0;

/*
* What we do next depends on whether or not there's IO outstanding that
* will update this entry.
* What we do next depends on whether or not there's IO outstanding
* that will update this entry. If dde_io exists, we need to hold
* its lock to safely check and use dde_lead_zio.
*/
if (dde->dde_io == NULL || dde->dde_io->dde_lead_zio[p] == NULL) {
ddt_entry_io_t *dde_io = dde->dde_io;
if (dde_io != NULL)
mutex_enter(&dde_io->dde_io_lock);

if (dde_io == NULL || dde_io->dde_lead_zio[p] == NULL) {
/*
* No IO outstanding, so we only need to worry about ourselves.
*/

@@ -3895,6 +3903,8 @@ zio_ddt_write(zio_t *zio)
* block and leave.
*/
if (have_dvas == 0) {
if (dde_io != NULL)
mutex_exit(&dde_io->dde_io_lock);
ASSERT(BP_GET_BIRTH(bp) == txg);
ASSERT(BP_EQUAL(bp, zio->io_bp_override));
ddt_phys_extend(ddp, v, bp);

@@ -3923,6 +3933,9 @@ zio_ddt_write(zio_t *zio)
* then we can just use them as-is.
*/
if (have_dvas >= need_dvas) {
if (dde_io != NULL)
mutex_exit(&dde_io->dde_io_lock);

/*
* For rewrite operations, try preserving the original
* logical birth time. If the result matches the

@@ -3934,8 +3947,8 @@ zio_ddt_write(zio_t *zio)
ddt_bp_fill(ddp, v, bp, orig_logical_birth);
if (BP_EQUAL(bp, &zio->io_bp_orig)) {
/* We can skip accounting. */
zio->io_flags |= ZIO_FLAG_NOPWRITE;
ddt_exit(ddt);
zio->io_flags |= ZIO_FLAG_NOPWRITE;
return (zio);
}
}

@@ -3997,7 +4010,16 @@ zio_ddt_write(zio_t *zio)
* missed out.
*/
ddt_bp_fill(ddp, v, bp, txg);
zio_add_child(zio, dde->dde_io->dde_lead_zio[p]);
piggyback:
zio_add_child(zio, dde_io->dde_lead_zio[p]);

/*
* Optimistically increment refcount for this parent.
* If the write fails, zio_ddt_child_write_done() will
* decrement for all non-DDT-child parents.
*/
ddt_phys_addref(ddp, v);
mutex_exit(&dde_io->dde_io_lock);
ddt_exit(ddt);
return (zio);
}

@@ -4023,11 +4045,8 @@ zio_ddt_write(zio_t *zio)
ASSERT(pio);
parent_dvas = pio->io_prop.zp_copies;

if (parent_dvas >= need_dvas) {
zio_add_child(zio, dde->dde_io->dde_lead_zio[p]);
ddt_exit(ddt);
return (zio);
}
if (parent_dvas >= need_dvas)
goto piggyback;

/*
* Still not enough, so we will need to issue to get the

@@ -4037,10 +4056,12 @@ zio_ddt_write(zio_t *zio)
}

if (is_ganged) {
if (dde_io != NULL)
mutex_exit(&dde_io->dde_io_lock);
ddt_exit(ddt);
zp->zp_dedup = B_FALSE;
BP_SET_DEDUP(bp, B_FALSE);
zio->io_pipeline = ZIO_WRITE_PIPELINE;
ddt_exit(ddt);
return (zio);
}

@@ -4062,6 +4083,7 @@ zio_ddt_write(zio_t *zio)
zio_ddt_child_write_ready, NULL,
zio_ddt_child_write_done, dde, zio->io_priority,
ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);
zio_inherit_allocator(zio, cio);

zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL);

@@ -4069,21 +4091,45 @@ zio_ddt_write(zio_t *zio)
* We are the new lead zio, because our parent has the highest
* zp_copies that has been requested for this entry so far.
*/
ddt_alloc_entry_io(dde);
if (dde->dde_io->dde_lead_zio[p] == NULL) {
if (dde_io == NULL) {
/*
* New dde_io. No lock needed since no other thread can have
* a reference yet.
*/
ddt_alloc_entry_io(dde);
dde_io = dde->dde_io;
/*
* First time out, take a copy of the stable entry to revert
* to if there's an error (see zio_ddt_child_write_done())
*/
ddt_phys_copy(&dde->dde_io->dde_orig_phys, dde->dde_phys, v);
ddt_phys_copy(&dde_io->dde_orig_phys, dde->dde_phys, v);
dde_io->dde_lead_zio[p] = cio;
} else {
/*
* Make the existing chain our child, because it cannot
* complete until we have.
*/
zio_add_child(cio, dde->dde_io->dde_lead_zio[p]);
if (dde_io->dde_lead_zio[p] == NULL) {
/*
* First time out, take a copy of the stable entry
* to revert to if there's an error (see
* zio_ddt_child_write_done())
*/
ddt_phys_copy(&dde_io->dde_orig_phys, dde->dde_phys,
v);
} else {
/*
* Make the existing chain our child, because it
* cannot complete until we have.
*/
zio_add_child(cio, dde_io->dde_lead_zio[p]);
}
dde_io->dde_lead_zio[p] = cio;
mutex_exit(&dde_io->dde_io_lock);
}
dde->dde_io->dde_lead_zio[p] = cio;

/*
* Optimistically increment the refcount for this dedup write.
* If the write fails, zio_ddt_child_write_done() will decrement
* for all non-DDT-child parents.
*/
ddt_phys_addref(ddp, v);

ddt_exit(ddt);

@@ -113,7 +113,6 @@ Source0: %{name}-%{version}.tar.gz
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
Requires: libzpool7%{?_isa} = %{version}-%{release}
Requires: libnvpair3%{?_isa} = %{version}-%{release}
Requires: libuutil3%{?_isa} = %{version}-%{release}
Requires: libzfs7%{?_isa} = %{version}-%{release}
Requires: %{name}-kmod = %{version}
Provides: %{name}-kmod-common = %{version}-%{release}

@@ -199,29 +198,6 @@ to write self describing data structures on disk.
%postun -n libnvpair3 -p /sbin/ldconfig
%endif

%package -n libuutil3
Summary: Solaris userland utility library for Linux
Group: System Environment/Kernel
Obsoletes: libuutil1 <= %{version}

%description -n libuutil3
This library provides a variety of compatibility functions for OpenZFS:
* libspl: The Solaris Porting Layer userland library, which provides APIs
that make it possible to run Solaris user code in a Linux environment
with relatively minimal modification.
* libavl: The Adelson-Velskii Landis balanced binary tree manipulation
library.
* libefi: The Extensible Firmware Interface library for GUID disk
partitioning.
* libshare: NFS, SMB, and iSCSI service integration for ZFS.

%if %{defined ldconfig_scriptlets}
%ldconfig_scriptlets -n libuutil3
%else
%post -n libuutil3 -p /sbin/ldconfig
%postun -n libuutil3 -p /sbin/ldconfig
%endif

# The library version is encoded in the package name. When updating the
# version information it is important to add an obsoletes line below for
# the previous version of the package.

@@ -249,10 +225,8 @@ Group: System Environment/Kernel
Requires: libzfs7%{?_isa} = %{version}-%{release}
Requires: libzpool7%{?_isa} = %{version}-%{release}
Requires: libnvpair3%{?_isa} = %{version}-%{release}
Requires: libuutil3%{?_isa} = %{version}-%{release}
Provides: libzpool7-devel = %{version}-%{release}
Provides: libnvpair3-devel = %{version}-%{release}
Provides: libuutil3-devel = %{version}-%{release}
Obsoletes: zfs-devel <= %{version}
Obsoletes: libzfs2-devel <= %{version}
Obsoletes: libzfs4-devel <= %{version}

@@ -556,9 +530,6 @@ systemctl --system daemon-reload >/dev/null || true
%files -n libnvpair3
%{_libdir}/libnvpair.so.*

%files -n libuutil3
%{_libdir}/libuutil.so.*

%files -n libzfs7
%{_libdir}/libzfs*.so.*

@@ -60,7 +60,12 @@ set_tunable64 SIT_OUT_CHECK_INTERVAL 20

log_must truncate -s 150M $TEST_BASE_DIR/vdev.$$.{0..9}

for raidtype in raidz2 raidz3 draid2 draid3 ; do
raidtypes=(raidz2 raidz3 draid2 draid3)
retry=0

for (( t=0; t<4; t++ )); do
raidtype="${raidtypes[$t]}"

log_must zpool create $TESTPOOL2 $raidtype $TEST_BASE_DIR/vdev.$$.{0..9}
log_must zpool set autosit=on $TESTPOOL2 "${raidtype}-0"
log_must dd if=/dev/urandom of=/$TESTPOOL2/bigfile bs=1M count=400

@@ -90,13 +95,24 @@ for raidtype in raidz2 raidz3 draid2 draid3 ; do
fi
done

log_must test "$(get_vdev_prop sit_out $TESTPOOL2 $SLOW_VDEV)" == "on"

# Clear fault injection
log_must zinject -c all

# Wait for us to exit our sit out period
log_must wait_sit_out $TESTPOOL2 $SLOW_VDEV 10
if test "$(get_vdev_prop sit_out $TESTPOOL2 $SLOW_VDEV)" == "on"; then
# Wait for us to exit our sit out period
log_must wait_sit_out $TESTPOOL2 $SLOW_VDEV 10
else
# Depending on exactly how the blocks are laid out and the
# I/O is issued we may not always trigger a sitout. Allow
# up to 3 retries to avoid false positives.
if test $retry -lt 3; then
retry=$((retry + 1))
t=$(($t - 1))
log_note "Retrying $retry/3 $raidtype vdev type"
else
log_fail "Exceeded total allowed retries"
fi
fi

log_must test "$(get_vdev_prop sit_out $TESTPOOL2 $SLOW_VDEV)" == "off"
destroy_pool $TESTPOOL2

@@ -34,6 +34,6 @@
# This option should make raidz_test to return non 0.
#

log_mustnot raidz_test -T
log_mustnot raidz_test -Tv

log_pass "raidz_test detects errors as expected."

@@ -67,7 +67,7 @@ main(int argc, const char *const *argv)
return (1);
}

char zvol_name[MAXNAMELEN + strlen("-part") + 10];
char zvol_name[MAXNAMELEN+15];
if (ioctl(fd, BLKZNAME, zvol_name) == -1) {
fprintf(stderr, "%s: BLKZNAME: %s\n",
dev_name, strerror(errno));