ref: 2064a55284f4735920c790a54e94d357095e79a6
parent: 9190df910a2ba3d6d1c27989a5b1ab525da02a01
parent: dc8cbeb1b0b44d8070630b71a5cd0385fd94c17d
author: gkostka <kostka.grzegorz@gmail.com>
date: Thu Oct 8 14:21:11 EDT 2015
Merge pull request #8 from ngkaho1234/master

Experimental Extended Attribute support.
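This adds an in-memory extended-attribute layer on top of ext4_fs. A minimal usage sketch of the new API declared in lwext4/ext4_xattr.h follows; the helper name, the attribute name/value, and the way fs and inode_ref are obtained are illustrative assumptions, not part of the patch:

    #include <string.h>
    #include "ext4_errno.h"
    #include "ext4_xattr.h"

    /* Illustrative helper: attach "user.comment" = "hello" to an inode that
     * the caller has already referenced through the usual lwext4 helpers. */
    static int set_user_comment(struct ext4_fs *fs,
                                struct ext4_inode_ref *inode_ref)
    {
            struct ext4_xattr_ref xattr_ref;
            uint8_t name_index;
            size_t name_len;
            char *name;
            int rc;

            /* Split the full name into a name index plus the bare name. */
            name = ext4_extract_xattr_name("user.comment",
                                           strlen("user.comment"),
                                           &name_index, &name_len);
            if (!name)
                    return ENOTSUP;

            rc = ext4_fs_get_xattr_ref(fs, inode_ref, &xattr_ref);
            if (rc != EOK)
                    return rc;

            /* replace == false: fail with EEXIST if the attribute exists. */
            rc = ext4_fs_set_xattr(&xattr_ref, name_index, name, name_len,
                                   "hello", 5, false);

            /* Flushes dirty entries to the inode body and/or xattr block. */
            ext4_fs_put_xattr_ref(&xattr_ref);
            return rc;
    }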
--- a/lwext4/ext4.c
+++ b/lwext4/ext4.c
@@ -44,6 +44,7 @@
#include "ext4_inode.h"
#include "ext4_super.h"
#include "ext4_dir_idx.h"
+#include "ext4_xattr.h"
#include "ext4.h"
#include <stdlib.h>
@@ -730,6 +731,7 @@
if (f->flags & O_APPEND)
f->fpos = f->fsize;
+
}
r = ext4_fs_put_inode_ref(&ref);
--- a/lwext4/ext4_errno.h
+++ b/lwext4/ext4_errno.h
@@ -76,6 +76,8 @@
#define EDOM 33 /* Math argument out of domain of func */
#define ERANGE 34 /* Math result not representable */
#define ENOTEMPTY 39 /* Directory not empty */
+#define ENODATA 61 /* No data available */
+#define ENOATTR ENODATA /* No attribute available */
#define ENOTSUP 95 /* Not supported */
#endif
--- a/lwext4/ext4_types.h
+++ b/lwext4/ext4_types.h
@@ -44,6 +44,7 @@
#include "ext4_config.h"
#include "ext4_blockdev.h"
+#include "tree.h"
#include <stdint.h>
@@ -627,6 +628,111 @@
uint32_t hash_version;
const uint32_t *seed;
};
+
+/* Extended Attribute (EA) */
+
+/* Magic value in attribute blocks */
+#define EXT4_XATTR_MAGIC 0xEA020000
+
+/* Maximum number of references to one attribute block */
+#define EXT4_XATTR_REFCOUNT_MAX 1024
+
+/* Name indexes */
+#define EXT4_XATTR_INDEX_USER 1
+#define EXT4_XATTR_INDEX_POSIX_ACL_ACCESS 2
+#define EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT 3
+#define EXT4_XATTR_INDEX_TRUSTED 4
+#define EXT4_XATTR_INDEX_LUSTRE 5
+#define EXT4_XATTR_INDEX_SECURITY 6
+#define EXT4_XATTR_INDEX_SYSTEM 7
+#define EXT4_XATTR_INDEX_RICHACL 8
+#define EXT4_XATTR_INDEX_ENCRYPTION 9
+
+struct ext4_xattr_header {
+ uint32_t h_magic; /* magic number for identification */
+ uint32_t h_refcount; /* reference count */
+ uint32_t h_blocks; /* number of disk blocks used */
+ uint32_t h_hash; /* hash value of all attributes */
+ uint32_t h_checksum; /* crc32c(uuid+id+xattrblock) */
+ /* id = inum if refcount=1, blknum otherwise */
+ uint32_t h_reserved[3]; /* zero right now */
+} __attribute__((packed));
+
+struct ext4_xattr_ibody_header {
+ uint32_t h_magic; /* magic number for identification */
+} __attribute__((packed));
+
+struct ext4_xattr_entry {
+ uint8_t e_name_len; /* length of name */
+ uint8_t e_name_index; /* attribute name index */
+ uint16_t e_value_offs; /* offset in disk block of value */
+ uint32_t e_value_block; /* disk block attribute is stored on (n/i) */
+ uint32_t e_value_size; /* size of attribute value */
+ uint32_t e_hash; /* hash value of name and value */
+} __attribute__((packed));
+
+struct ext4_xattr_item {
+ uint8_t name_index;
+ char *name;
+ size_t name_len;
+ void *data;
+ size_t data_size;
+
+ RB_ENTRY(ext4_xattr_item) node;
+};
+
+struct ext4_xattr_ref {
+ bool block_loaded;
+ struct ext4_block block;
+ struct ext4_inode_ref *inode_ref;
+ bool dirty;
+ size_t ea_size;
+ struct ext4_fs *fs;
+
+ struct ext4_xattr_item *iter_from;
+
+ RB_HEAD(ext4_xattr_tree,
+ ext4_xattr_item) root;
+};
+
+#define EXT4_XATTR_ITERATE_CONT 0
+#define EXT4_XATTR_ITERATE_STOP 1
+#define EXT4_XATTR_ITERATE_PAUSE 2
+
+#define EXT4_GOOD_OLD_INODE_SIZE 128
+
+#define EXT4_XATTR_PAD_BITS 2
+#define EXT4_XATTR_PAD (1<<EXT4_XATTR_PAD_BITS)
+#define EXT4_XATTR_ROUND (EXT4_XATTR_PAD-1)
+#define EXT4_XATTR_LEN(name_len) \
+ (((name_len) + EXT4_XATTR_ROUND + \
+ sizeof(struct ext4_xattr_entry)) & ~EXT4_XATTR_ROUND)
+#define EXT4_XATTR_NEXT(entry) \
+ ((struct ext4_xattr_entry *)( \
+ (char *)(entry) + EXT4_XATTR_LEN((entry)->e_name_len)))
+#define EXT4_XATTR_SIZE(size) \
+ (((size) + EXT4_XATTR_ROUND) & ~EXT4_XATTR_ROUND)
+#define EXT4_XATTR_NAME(entry) \
+ ((char *)((entry) + 1))
+
+#define EXT4_XATTR_IHDR(raw_inode) \
+ ((struct ext4_xattr_ibody_header *) \
+ ((char *)raw_inode + \
+ EXT4_GOOD_OLD_INODE_SIZE + \
+ (raw_inode)->extra_isize))
+#define EXT4_XATTR_IFIRST(hdr) \
+ ((struct ext4_xattr_entry *)((hdr)+1))
+
+#define EXT4_XATTR_BHDR(block) \
+ ((struct ext4_xattr_header *)((block)->data))
+#define EXT4_XATTR_ENTRY(ptr) \
+ ((struct ext4_xattr_entry *)(ptr))
+#define EXT4_XATTR_BFIRST(block) \
+ EXT4_XATTR_ENTRY(EXT4_XATTR_BHDR(block)+1)
+#define EXT4_XATTR_IS_LAST_ENTRY(entry) \
+ (*(uint32_t *)(entry) == 0)
+
+#define EXT4_ZERO_XATTR_VALUE ((void *)-1)
/*****************************************************************************/
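For reference, the 4-byte alignment these macros enforce works out as follows for a short attribute (the name length and value size are illustrative):

    /* For an entry whose stored name is "comment" (e_name_len = 7; the
     * "user." part lives in e_name_index) with a 5-byte value:
     *
     *   sizeof(struct ext4_xattr_entry) = 16 (packed)
     *   EXT4_XATTR_LEN(7)  = (7 + 3 + 16) & ~3 = 24  bytes of entry space
     *   EXT4_XATTR_SIZE(5) = (5 + 3) & ~3      = 8   bytes of value space
     */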
--- /dev/null
+++ b/lwext4/ext4_xattr.c
@@ -1,0 +1,852 @@
+/*
+ * Copyright (c) 2015 Grzegorz Kostka (kostka.grzegorz@gmail.com)
+ * Copyright (c) 2015 Kaho Ng (ngkaho1234@gmail.com)
+ *
+ *
+ * HelenOS:
+ * Copyright (c) 2012 Martin Sucha
+ * Copyright (c) 2012 Frantisek Princ
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * - The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/** @addtogroup lwext4
+ * @{
+ */
+/**
+ * @file ext4_xattr.c
+ * @brief Extended Attribute manipulation.
+ */
+
+#include "ext4_config.h"
+#include "ext4_types.h"
+#include "ext4_fs.h"
+#include "ext4_errno.h"
+#include "ext4_blockdev.h"
+#include "ext4_super.h"
+#include "ext4_debug.h"
+#include "ext4_block_group.h"
+#include "ext4_balloc.h"
+#include "ext4_inode.h"
+#include "ext4_extent.h"
+
+#include <string.h>
+#include <stdlib.h>
+
+#define NAME_HASH_SHIFT 5
+#define VALUE_HASH_SHIFT 16
+
+static inline void ext4_xattr_compute_hash(struct ext4_xattr_header *header,
+ struct ext4_xattr_entry *entry)
+{
+ uint32_t hash = 0;
+ char *name = EXT4_XATTR_NAME(entry);
+ int n;
+
+ for (n = 0; n < entry->e_name_len; n++) {
+ hash = (hash << NAME_HASH_SHIFT) ^
+ (hash >> (8 * sizeof(hash) - NAME_HASH_SHIFT)) ^ *name++;
+ }
+
+ if (entry->e_value_block == 0 && entry->e_value_size != 0) {
+ uint32_t *value =
+ (uint32_t *)((char *)header + to_le16(entry->e_value_offs));
+ for (n = (to_le32(entry->e_value_size) + EXT4_XATTR_ROUND) >>
+ EXT4_XATTR_PAD_BITS;
+ n; n--) {
+ hash = (hash << VALUE_HASH_SHIFT) ^
+ (hash >> (8 * sizeof(hash) - VALUE_HASH_SHIFT)) ^
+ to_le32(*value++);
+ }
+ }
+ entry->e_hash = to_le32(hash);
+}
+
+#define BLOCK_HASH_SHIFT 16
+
+/*
+ * ext4_xattr_rehash()
+ *
+ * Re-compute the extended attribute hash value after an entry has changed.
+ */
+static void ext4_xattr_rehash(struct ext4_xattr_header *header,
+ struct ext4_xattr_entry *entry)
+{
+ struct ext4_xattr_entry *here;
+ uint32_t hash = 0;
+
+ ext4_xattr_compute_hash(header, entry);
+ here = EXT4_XATTR_ENTRY(header + 1);
+ while (!EXT4_XATTR_IS_LAST_ENTRY(here)) {
+ if (!here->e_hash) {
+ /* Block is not shared if an entry's hash value == 0 */
+ hash = 0;
+ break;
+ }
+ hash = (hash << BLOCK_HASH_SHIFT) ^
+ (hash >> (8 * sizeof(hash) - BLOCK_HASH_SHIFT)) ^
+ to_le32(here->e_hash);
+ here = EXT4_XATTR_NEXT(here);
+ }
+ header->h_hash = to_le32(hash);
+}
+
+static int ext4_xattr_item_cmp(struct ext4_xattr_item *a,
+ struct ext4_xattr_item *b)
+{
+ int result;
+ result = a->name_index - b->name_index;
+ if (result)
+ return result;
+
+ result = a->name_len - b->name_len;
+ if (result)
+ return result;
+
+ return memcmp(a->name, b->name, a->name_len);
+}
+
+RB_GENERATE_INTERNAL(ext4_xattr_tree, ext4_xattr_item, node,
+ ext4_xattr_item_cmp, static inline)
+
+static struct ext4_xattr_item *
+ext4_xattr_item_alloc(uint8_t name_index, char *name, size_t name_len)
+{
+ struct ext4_xattr_item *item;
+ item = malloc(sizeof(struct ext4_xattr_item) + name_len);
+ if (!item)
+ return NULL;
+
+ item->name_index = name_index;
+ item->name = (char *)(item + 1);
+ item->name_len = name_len;
+ item->data = NULL;
+ item->data_size = 0;
+
+ memset(&item->node, 0, sizeof(item->node));
+ memcpy(item->name, name, name_len);
+
+ return item;
+}
+
+static int ext4_xattr_item_alloc_data(struct ext4_xattr_item *item,
+ void *orig_data, size_t data_size)
+{
+ void *data = NULL;
+ ext4_assert(!item->data);
+ data = malloc(data_size);
+ if (!data)
+ return ENOMEM;
+
+ if (orig_data)
+ memcpy(data, orig_data, data_size);
+
+ item->data = data;
+ item->data_size = data_size;
+ return EOK;
+}
+
+static void ext4_xattr_item_free_data(struct ext4_xattr_item *item)
+{
+ ext4_assert(item->data);
+ free(item->data);
+ item->data = NULL;
+ item->data_size = 0;
+}
+
+static int ext4_xattr_item_resize_data(struct ext4_xattr_item *item,
+ size_t new_data_size)
+{
+ if (new_data_size != item->data_size) {
+ void *new_data;
+ new_data = realloc(item->data, new_data_size);
+ if (!new_data)
+ return ENOMEM;
+
+ item->data = new_data;
+ item->data_size = new_data_size;
+ }
+ return EOK;
+}
+
+static void ext4_xattr_item_free(struct ext4_xattr_item *item)
+{
+ if (item->data)
+ ext4_xattr_item_free_data(item);
+
+ free(item);
+}
+
+static void *ext4_xattr_entry_data(struct ext4_xattr_ref *xattr_ref,
+ struct ext4_xattr_entry *entry,
+ bool in_inode)
+{
+ void *ret;
+ if (in_inode) {
+ struct ext4_xattr_ibody_header *header;
+ struct ext4_xattr_entry *first_entry;
+ uint16_t inode_size =
+ ext4_get16(&xattr_ref->fs->sb, inode_size);
+ header = EXT4_XATTR_IHDR(xattr_ref->inode_ref->inode);
+ first_entry = EXT4_XATTR_IFIRST(header);
+
+ ret = (void *)((char *)first_entry +
+ to_le16(entry->e_value_offs));
+ if ((char *)ret +
+ EXT4_XATTR_SIZE(to_le32(entry->e_value_size)) -
+ (char *)xattr_ref->inode_ref->inode >
+ inode_size)
+ ret = NULL;
+
+ } else {
+ int32_t block_size = ext4_sb_get_block_size(&xattr_ref->fs->sb);
+ ret = (void *)((char *)xattr_ref->block.data +
+ to_le16(entry->e_value_offs));
+ if ((char *)ret +
+ EXT4_XATTR_SIZE(to_le32(entry->e_value_size)) -
+ (char *)xattr_ref->block.data >
+ block_size)
+ ret = NULL;
+ }
+ return ret;
+}
+
+static int ext4_xattr_block_fetch(struct ext4_xattr_ref *xattr_ref)
+{
+ int ret = EOK;
+ size_t size_rem;
+ void *data;
+ struct ext4_xattr_entry *entry = NULL;
+
+ ext4_assert(xattr_ref->block.data);
+ entry = EXT4_XATTR_BFIRST(&xattr_ref->block);
+
+ size_rem = ext4_sb_get_block_size(&xattr_ref->fs->sb);
+ for (; size_rem > 0 && !EXT4_XATTR_IS_LAST_ENTRY(entry);
+ entry = EXT4_XATTR_NEXT(entry),
+ size_rem -= EXT4_XATTR_LEN(entry->e_name_len)) {
+ struct ext4_xattr_item *item;
+ char *e_name = EXT4_XATTR_NAME(entry);
+
+ data = ext4_xattr_entry_data(xattr_ref, entry, false);
+ if (!data) {
+ ret = EIO;
+ goto Finish;
+ }
+
+ item = ext4_xattr_item_alloc(entry->e_name_index, e_name,
+ (size_t)entry->e_name_len);
+ if (!item) {
+ ret = ENOMEM;
+ goto Finish;
+ }
+ if (ext4_xattr_item_alloc_data(
+ item, data, to_le32(entry->e_value_size)) != EOK) {
+ ext4_xattr_item_free(item);
+ ret = ENOMEM;
+ goto Finish;
+ }
+ RB_INSERT(ext4_xattr_tree, &xattr_ref->root, item);
+ xattr_ref->ea_size += EXT4_XATTR_SIZE(item->data_size) +
+ EXT4_XATTR_LEN(item->name_len);
+ }
+
+Finish:
+ return ret;
+}
+
+static int ext4_xattr_inode_fetch(struct ext4_xattr_ref *xattr_ref)
+{
+ void *data;
+ size_t size_rem;
+ int ret = EOK;
+ struct ext4_xattr_ibody_header *header = NULL;
+ struct ext4_xattr_entry *entry = NULL;
+ uint16_t inode_size = ext4_get16(&xattr_ref->fs->sb, inode_size);
+
+ header = EXT4_XATTR_IHDR(xattr_ref->inode_ref->inode);
+ entry = EXT4_XATTR_IFIRST(header);
+
+ size_rem = inode_size - EXT4_GOOD_OLD_INODE_SIZE -
+ xattr_ref->inode_ref->inode->extra_isize;
+ for (; size_rem > 0 && !EXT4_XATTR_IS_LAST_ENTRY(entry);
+ entry = EXT4_XATTR_NEXT(entry),
+ size_rem -= EXT4_XATTR_LEN(entry->e_name_len)) {
+ struct ext4_xattr_item *item;
+ char *e_name = EXT4_XATTR_NAME(entry);
+
+ data = ext4_xattr_entry_data(xattr_ref, entry, true);
+ if (!data) {
+ ret = EIO;
+ goto Finish;
+ }
+
+ item = ext4_xattr_item_alloc(entry->e_name_index, e_name,
+ (size_t)entry->e_name_len);
+ if (!item) {
+ ret = ENOMEM;
+ goto Finish;
+ }
+ if (ext4_xattr_item_alloc_data(
+ item, data, to_le32(entry->e_value_size)) != EOK) {
+ ext4_xattr_item_free(item);
+ ret = ENOMEM;
+ goto Finish;
+ }
+ RB_INSERT(ext4_xattr_tree, &xattr_ref->root, item);
+ xattr_ref->ea_size += EXT4_XATTR_SIZE(item->data_size) +
+ EXT4_XATTR_LEN(item->name_len);
+ }
+
+Finish:
+ return ret;
+}
+
+static size_t ext4_xattr_inode_space(struct ext4_xattr_ref *xattr_ref)
+{
+ uint16_t inode_size = ext4_get16(&xattr_ref->fs->sb, inode_size);
+ uint16_t size_rem = inode_size - EXT4_GOOD_OLD_INODE_SIZE -
+ xattr_ref->inode_ref->inode->extra_isize;
+ return size_rem;
+}
+
+static size_t ext4_xattr_block_space(struct ext4_xattr_ref *xattr_ref)
+{
+ return ext4_sb_get_block_size(&xattr_ref->fs->sb);
+}
+
+static int ext4_xattr_fetch(struct ext4_xattr_ref *xattr_ref)
+{
+ int ret = EOK;
+ uint16_t inode_size = ext4_get16(&xattr_ref->fs->sb, inode_size);
+ if (inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
+ ret = ext4_xattr_inode_fetch(xattr_ref);
+ if (ret != EOK)
+ return ret;
+ }
+
+ if (xattr_ref->block_loaded)
+ ret = ext4_xattr_block_fetch(xattr_ref);
+
+ xattr_ref->dirty = false;
+ return ret;
+}
+
+static struct ext4_xattr_item *
+ext4_xattr_lookup_item(struct ext4_xattr_ref *xattr_ref, uint8_t name_index,
+ char *name, size_t name_len)
+{
+ struct ext4_xattr_item tmp, *ret;
+ tmp.name_index = name_index;
+ tmp.name = name;
+ tmp.name_len = name_len;
+ ret = RB_FIND(ext4_xattr_tree, &xattr_ref->root, &tmp);
+ return ret;
+}
+
+static struct ext4_xattr_item *
+ext4_xattr_insert_item(struct ext4_xattr_ref *xattr_ref, uint8_t name_index,
+ char *name, size_t name_len, void *data,
+ size_t data_size)
+{
+ struct ext4_xattr_item *item;
+ item = ext4_xattr_item_alloc(name_index, name, name_len);
+ if (!item)
+ return NULL;
+
+ /* item->data is not allocated yet, so size the check with data_size. */
+ if (xattr_ref->ea_size + EXT4_XATTR_SIZE(data_size) +
+ EXT4_XATTR_LEN(item->name_len) >
+ ext4_xattr_inode_space(xattr_ref) +
+ ext4_xattr_block_space(xattr_ref)) {
+ ext4_xattr_item_free(item);
+ return NULL;
+ }
+ if (ext4_xattr_item_alloc_data(item, data, data_size) != EOK) {
+ ext4_xattr_item_free(item);
+ return NULL;
+ }
+ RB_INSERT(ext4_xattr_tree, &xattr_ref->root, item);
+ xattr_ref->ea_size +=
+ EXT4_XATTR_SIZE(item->data_size) + EXT4_XATTR_LEN(item->name_len);
+ xattr_ref->dirty = true;
+ return item;
+}
+
+static int ext4_xattr_remove_item(struct ext4_xattr_ref *xattr_ref,
+ uint8_t name_index, char *name,
+ size_t name_len)
+{
+ int ret = ENOENT;
+ struct ext4_xattr_item *item =
+ ext4_xattr_lookup_item(xattr_ref, name_index, name, name_len);
+ if (item) {
+ if (item == xattr_ref->iter_from)
+ xattr_ref->iter_from =
+ RB_NEXT(ext4_xattr_tree, &xattr_ref->root, item);
+
+ RB_REMOVE(ext4_xattr_tree, &xattr_ref->root, item);
+ /* Account for the removed entry before the item is freed. */
+ xattr_ref->ea_size -= EXT4_XATTR_SIZE(item->data_size) +
+ EXT4_XATTR_LEN(item->name_len);
+ ext4_xattr_item_free(item);
+ xattr_ref->dirty = true;
+ ret = EOK;
+ }
+ return ret;
+}
+
+static int ext4_xattr_resize_item(struct ext4_xattr_ref *xattr_ref,
+ struct ext4_xattr_item *item,
+ size_t new_data_size)
+{
+ int ret = EOK;
+ size_t old_data_size = item->data_size;
+ if (xattr_ref->ea_size - EXT4_XATTR_SIZE(old_data_size) +
+ EXT4_XATTR_SIZE(new_data_size) >
+ ext4_xattr_inode_space(xattr_ref) +
+ ext4_xattr_block_space(xattr_ref)) {
+
+ return ENOSPC;
+ }
+ ret = ext4_xattr_item_resize_data(item, new_data_size);
+ if (ret != EOK) {
+ return ret;
+ }
+ /* item->data_size already equals new_data_size here; use the saved size. */
+ xattr_ref->ea_size +=
+ EXT4_XATTR_SIZE(new_data_size) - EXT4_XATTR_SIZE(old_data_size);
+ xattr_ref->dirty = true;
+ return ret;
+}
+
+static void ext4_xattr_purge_items(struct ext4_xattr_ref *xattr_ref)
+{
+ struct ext4_xattr_item *item, *save_item;
+ uint64_t xattr_block = ext4_inode_get_file_acl(
+ xattr_ref->inode_ref->inode, &xattr_ref->fs->sb);
+ RB_FOREACH_SAFE(item, ext4_xattr_tree, &xattr_ref->root, save_item)
+ {
+ RB_REMOVE(ext4_xattr_tree, &xattr_ref->root, item);
+ ext4_xattr_item_free(item);
+ }
+ xattr_ref->ea_size = 0;
+ if (xattr_block)
+ xattr_ref->ea_size += sizeof(struct ext4_xattr_header);
+
+ if (ext4_xattr_inode_space(xattr_ref) >
+ sizeof(struct ext4_xattr_ibody_header))
+ xattr_ref->ea_size += sizeof(struct ext4_xattr_ibody_header);
+}
+
+static int ext4_xattr_try_alloc_block(struct ext4_xattr_ref *xattr_ref)
+{
+ int ret = EOK;
+
+ uint64_t xattr_block = 0;
+ xattr_block = ext4_inode_get_file_acl(xattr_ref->inode_ref->inode,
+ &xattr_ref->fs->sb);
+ if (!xattr_block) {
+ ret = ext4_balloc_alloc_block(xattr_ref->inode_ref,
+ (uint32_t *)&xattr_block);
+ if (ret != EOK)
+ goto Finish;
+
+ ret = ext4_block_get(xattr_ref->fs->bdev, &xattr_ref->block,
+ xattr_block);
+ if (ret != EOK) {
+ ext4_balloc_free_block(xattr_ref->inode_ref,
+ xattr_block);
+ goto Finish;
+ }
+
+ ext4_inode_set_file_acl(xattr_ref->inode_ref->inode,
+ &xattr_ref->fs->sb, xattr_block);
+ xattr_ref->inode_ref->dirty = true;
+ xattr_ref->block_loaded = true;
+ xattr_ref->ea_size += sizeof(struct ext4_xattr_header);
+ }
+
+Finish:
+ return ret;
+}
+
+static void ext4_xattr_try_free_block(struct ext4_xattr_ref *xattr_ref)
+{
+ uint64_t xattr_block;
+ xattr_block = ext4_inode_get_file_acl(xattr_ref->inode_ref->inode,
+ &xattr_ref->fs->sb);
+ ext4_inode_set_file_acl(xattr_ref->inode_ref->inode, &xattr_ref->fs->sb,
+ 0);
+ ext4_block_set(xattr_ref->fs->bdev, &xattr_ref->block);
+ ext4_balloc_free_block(xattr_ref->inode_ref, xattr_block);
+ xattr_ref->inode_ref->dirty = true;
+ xattr_ref->block_loaded = false;
+ xattr_ref->ea_size -= sizeof(struct ext4_xattr_header);
+}
+
+static void ext4_xattr_set_block_header(struct ext4_xattr_ref *xattr_ref)
+{
+ struct ext4_xattr_header *block_header = NULL;
+ block_header = EXT4_XATTR_BHDR(&xattr_ref->block);
+
+ memset(block_header, 0, sizeof(struct ext4_xattr_header));
+ block_header->h_magic = EXT4_XATTR_MAGIC;
+ block_header->h_refcount = to_le32(1);
+ block_header->h_blocks = to_le32(1);
+}
+
+static void
+ext4_xattr_set_inode_entry(struct ext4_xattr_item *item,
+ struct ext4_xattr_ibody_header *ibody_header,
+ struct ext4_xattr_entry *entry, void *ibody_data_ptr)
+{
+ entry->e_name_len = (uint8_t)item->name_len;
+ entry->e_name_index = item->name_index;
+ entry->e_value_offs = to_le16(
+ (char *)ibody_data_ptr - (char *)EXT4_XATTR_IFIRST(ibody_header));
+ entry->e_value_block = 0;
+ entry->e_value_size = to_le32(item->data_size);
+}
+
+static void ext4_xattr_set_block_entry(struct ext4_xattr_item *item,
+ struct ext4_xattr_header *block_header,
+ struct ext4_xattr_entry *block_entry,
+ void *block_data_ptr)
+{
+ block_entry->e_name_len = (uint8_t)item->name_len;
+ block_entry->e_name_index = item->name_index;
+ block_entry->e_value_offs = to_le16(
+ (char *)block_data_ptr - (char *)block_header);
+ block_entry->e_value_block = 0;
+ block_entry->e_value_size = to_le32(item->data_size);
+}
+
+static int ext4_xattr_write_to_disk(struct ext4_xattr_ref *xattr_ref)
+{
+ int ret = EOK;
+ bool block_modified = false;
+ void *ibody_data, *block_data;
+ struct ext4_xattr_item *item, *save_item;
+ size_t inode_size_rem, block_size_rem;
+ struct ext4_xattr_ibody_header *ibody_header = NULL;
+ struct ext4_xattr_header *block_header = NULL;
+ struct ext4_xattr_entry *entry = NULL;
+ struct ext4_xattr_entry *block_entry = NULL;
+
+ inode_size_rem = ext4_xattr_inode_space(xattr_ref);
+ block_size_rem = ext4_xattr_block_space(xattr_ref);
+ if (inode_size_rem > sizeof(struct ext4_xattr_ibody_header)) {
+ ibody_header = EXT4_XATTR_IHDR(xattr_ref->inode_ref->inode);
+ entry = EXT4_XATTR_IFIRST(ibody_header);
+ }
+
+ if (xattr_ref->dirty) {
+ /* If there is enough space in the ibody EA table. */
+ if (inode_size_rem > sizeof(struct ext4_xattr_ibody_header)) {
+ memset(ibody_header, 0, inode_size_rem);
+ ibody_header->h_magic = EXT4_XATTR_MAGIC;
+ ibody_data = (char *)ibody_header + inode_size_rem;
+ inode_size_rem -=
+ sizeof(struct ext4_xattr_ibody_header);
+
+ xattr_ref->inode_ref->dirty = true;
+ }
+ /* If we need an extra block to hold the EA entries. */
+ if (xattr_ref->ea_size > inode_size_rem) {
+ if (!xattr_ref->block_loaded) {
+ ret = ext4_xattr_try_alloc_block(xattr_ref);
+ if (ret != EOK)
+ goto Finish;
+ }
+ block_header = EXT4_XATTR_BHDR(&xattr_ref->block);
+ block_entry = EXT4_XATTR_BFIRST(&xattr_ref->block);
+ ext4_xattr_set_block_header(xattr_ref);
+ block_data = (char *)block_header + block_size_rem;
+ block_size_rem -= sizeof(struct ext4_xattr_header);
+
+ xattr_ref->block.dirty = true;
+ } else {
+ /* We don't need an extra block.*/
+ if (xattr_ref->block_loaded) {
+ block_header =
+ EXT4_XATTR_BHDR(&xattr_ref->block);
+ block_header->h_refcount = to_le32(
+ to_le32(block_header->h_refcount) - 1);
+ if (!block_header->h_refcount) {
+ ext4_xattr_try_free_block(xattr_ref);
+ block_header = NULL;
+ } else {
+ block_entry = EXT4_XATTR_BFIRST(
+ &xattr_ref->block);
+ block_data = (char *)block_header +
+ block_size_rem;
+ block_size_rem -=
+ sizeof(struct ext4_xattr_header);
+ ext4_inode_set_file_acl(
+ xattr_ref->inode_ref->inode,
+ &xattr_ref->fs->sb, 0);
+
+ xattr_ref->inode_ref->dirty = true;
+ xattr_ref->block.dirty = true;
+ }
+ }
+ }
+ RB_FOREACH_SAFE(item, ext4_xattr_tree, &xattr_ref->root,
+ save_item)
+ {
+ if (EXT4_XATTR_SIZE(item->data_size) +
+ EXT4_XATTR_LEN(item->name_len) <=
+ inode_size_rem) {
+ ibody_data = (char *)ibody_data -
+ EXT4_XATTR_SIZE(item->data_size);
+ ext4_xattr_set_inode_entry(item, ibody_header,
+ entry, ibody_data);
+ memcpy(EXT4_XATTR_NAME(entry), item->name,
+ item->name_len);
+ memcpy(ibody_data, item->data, item->data_size);
+ entry = EXT4_XATTR_NEXT(entry);
+ inode_size_rem -=
+ EXT4_XATTR_SIZE(item->data_size) +
+ EXT4_XATTR_LEN(item->name_len);
+
+ xattr_ref->inode_ref->dirty = true;
+ continue;
+ }
+ if (EXT4_XATTR_SIZE(item->data_size) +
+ EXT4_XATTR_LEN(item->name_len) >
+ block_size_rem) {
+ ret = ENOSPC;
+ goto Finish;
+ }
+ block_data = (char *)block_data -
+ EXT4_XATTR_SIZE(item->data_size);
+ ext4_xattr_set_block_entry(item, block_header,
+ block_entry, block_data);
+ memcpy(EXT4_XATTR_NAME(block_entry), item->name,
+ item->name_len);
+ memcpy(block_data, item->data, item->data_size);
+ block_entry = EXT4_XATTR_NEXT(block_entry);
+ block_size_rem -= EXT4_XATTR_SIZE(item->data_size) +
+ EXT4_XATTR_LEN(item->name_len);
+
+ block_modified = true;
+ }
+ xattr_ref->dirty = false;
+ if (block_modified) {
+ ext4_xattr_rehash(block_header,
+ EXT4_XATTR_BFIRST(&xattr_ref->block));
+ xattr_ref->block.dirty = true;
+ }
+ }
+
+Finish:
+ return ret;
+}
+
+void ext4_fs_xattr_iterate(struct ext4_xattr_ref *ref,
+ int(iter)(struct ext4_xattr_ref *ref,
+ struct ext4_xattr_item *item))
+{
+ struct ext4_xattr_item *item;
+ if (!ref->iter_from)
+ ref->iter_from = RB_MIN(ext4_xattr_tree, &ref->root);
+
+ RB_FOREACH_FROM(item, ext4_xattr_tree, ref->iter_from)
+ {
+ int ret = EXT4_XATTR_ITERATE_CONT;
+ if (iter)
+ ret = iter(ref, item);
+
+ if (ret != EXT4_XATTR_ITERATE_CONT) {
+ if (ret == EXT4_XATTR_ITERATE_STOP)
+ ref->iter_from = NULL;
+
+ break;
+ }
+ }
+}
+
+void ext4_fs_xattr_iterate_reset(struct ext4_xattr_ref *ref)
+{
+ ref->iter_from = NULL;
+}
+
+int ext4_fs_set_xattr(struct ext4_xattr_ref *ref, uint8_t name_index,
+ char *name, size_t name_len, void *data, size_t data_size,
+ bool replace)
+{
+ int ret = EOK;
+ struct ext4_xattr_item *item =
+ ext4_xattr_lookup_item(ref, name_index, name, name_len);
+ if (replace) {
+ if (!item) {
+ ret = ENOATTR;
+ goto Finish;
+ }
+ if (item->data_size != data_size)
+ ret = ext4_xattr_resize_item(ref, item, data_size);
+
+ if (ret != EOK) {
+ goto Finish;
+ }
+ memcpy(item->data, data, data_size);
+ } else {
+ if (item) {
+ ret = EEXIST;
+ goto Finish;
+ }
+ item = ext4_xattr_insert_item(ref, name_index, name, name_len,
+ data, data_size);
+ if (!item)
+ ret = ENOMEM;
+ }
+Finish:
+ return ret;
+}
+
+int ext4_fs_remove_xattr(struct ext4_xattr_ref *ref, uint8_t name_index,
+ char *name, size_t name_len)
+{
+ return ext4_xattr_remove_item(ref, name_index, name, name_len);
+}
+
+int ext4_fs_get_xattr(struct ext4_xattr_ref *ref, uint8_t name_index,
+ char *name, size_t name_len, void *buf, size_t buf_size,
+ size_t *size_got)
+{
+ int ret = EOK;
+ size_t item_size = 0;
+ struct ext4_xattr_item *item =
+ ext4_xattr_lookup_item(ref, name_index, name, name_len);
+
+ if (!item) {
+ ret = ENOATTR;
+ goto Finish;
+ }
+ item_size = item->data_size;
+ if (buf_size > item_size)
+ buf_size = item_size;
+
+ if (buf)
+ memcpy(buf, item->data, buf_size);
+
+Finish:
+ if (size_got)
+ *size_got = buf_size;
+
+ return ret;
+}
+
+int ext4_fs_get_xattr_ref(struct ext4_fs *fs, struct ext4_inode_ref *inode_ref,
+ struct ext4_xattr_ref *ref)
+{
+ int rc;
+ uint64_t xattr_block;
+ xattr_block = ext4_inode_get_file_acl(inode_ref->inode, &fs->sb);
+ RB_INIT(&ref->root);
+ ref->ea_size = 0;
+ ref->iter_from = NULL;
+ if (xattr_block) {
+ rc = ext4_block_get(fs->bdev, &ref->block, xattr_block);
+ if (rc != EOK)
+ return EIO;
+
+ ref->ea_size += sizeof(struct ext4_xattr_header);
+ ref->block_loaded = true;
+ } else
+ ref->block_loaded = false;
+
+ ref->inode_ref = inode_ref;
+ ref->fs = fs;
+
+ if (ext4_xattr_inode_space(ref) >
+ sizeof(struct ext4_xattr_ibody_header))
+ ref->ea_size += sizeof(struct ext4_xattr_ibody_header);
+
+ rc = ext4_xattr_fetch(ref);
+ if (rc != EOK) {
+ ext4_xattr_purge_items(ref);
+ if (xattr_block)
+ ext4_block_set(fs->bdev, &ref->block);
+
+ ref->block_loaded = false;
+ return rc;
+ }
+ return EOK;
+}
+
+void ext4_fs_put_xattr_ref(struct ext4_xattr_ref *ref)
+{
+ ext4_xattr_write_to_disk(ref);
+ if (ref->block_loaded) {
+ ext4_block_set(ref->fs->bdev, &ref->block);
+ ref->block_loaded = false;
+ }
+ ext4_xattr_purge_items(ref);
+ ref->inode_ref = NULL;
+ ref->fs = NULL;
+}
+
+struct xattr_prefix {
+ char *prefix;
+ uint8_t name_index;
+} prefix_tbl[] = {
+ {"user.", EXT4_XATTR_INDEX_USER},
+ {"system.", EXT4_XATTR_INDEX_SYSTEM},
+ {"system.posix_acl_access", EXT4_XATTR_INDEX_POSIX_ACL_ACCESS},
+ {"system.posix_acl_default", EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT},
+ {NULL, 0},
+};
+
+char *ext4_extract_xattr_name(char *full_name, size_t full_name_len,
+ uint8_t *name_index, size_t *name_len)
+{
+ int i;
+ ext4_assert(name_index);
+ if (!full_name_len)
+ return NULL;
+
+ for (i = 0; prefix_tbl[i].prefix; i++) {
+ size_t prefix_len = strlen(prefix_tbl[i].prefix);
+ if (full_name_len >= prefix_len &&
+ !memcmp(full_name, prefix_tbl[i].prefix, prefix_len)) {
+ *name_index = prefix_tbl[i].name_index;
+ if (name_len)
+ *name_len = full_name_len - prefix_len;
+
+ return full_name + prefix_len;
+ }
+ }
+ if (name_len)
+ *name_len = 0;
+
+ return NULL;
+}
+
+/**
+ * @}
+ */
--- /dev/null
+++ b/lwext4/ext4_xattr.h
@@ -1,0 +1,32 @@
+#ifndef EXT4_XATTR_H_
+#define EXT4_XATTR_H_
+
+#include "ext4_config.h"
+#include "ext4_types.h"
+
+int ext4_fs_get_xattr_ref(struct ext4_fs *fs, struct ext4_inode_ref *inode_ref,
+ struct ext4_xattr_ref *ref);
+
+void ext4_fs_put_xattr_ref(struct ext4_xattr_ref *ref);
+
+int ext4_fs_set_xattr(struct ext4_xattr_ref *ref, uint8_t name_index,
+ char *name, size_t name_len, void *data, size_t data_size,
+ bool replace);
+
+int ext4_fs_remove_xattr(struct ext4_xattr_ref *ref, uint8_t name_index,
+ char *name, size_t name_len);
+
+int ext4_fs_get_xattr(struct ext4_xattr_ref *ref, uint8_t name_index,
+ char *name, size_t name_len, void *buf, size_t buf_size,
+ size_t *size_got);
+
+void ext4_fs_xattr_iterate(struct ext4_xattr_ref *ref,
+ int(iter)(struct ext4_xattr_ref *ref,
+ struct ext4_xattr_item *item));
+
+void ext4_fs_xattr_iterate_reset(struct ext4_xattr_ref *ref);
+
+char *ext4_extract_xattr_name(char *full_name, size_t full_name_len,
+ uint8_t *name_index, size_t *name_len);
+
+#endif
--- /dev/null
+++ b/lwext4/tree.h
@@ -1,0 +1,801 @@
+/* $NetBSD: tree.h,v 1.8 2004/03/28 19:38:30 provos Exp $ */
+/* $OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $ */
+/* $FreeBSD$ */
+
+/*-
+ * Copyright 2002 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SYS_TREE_H_
+#define _SYS_TREE_H_
+
+#include <sys/cdefs.h>
+
+/*
+ * This file defines data structures for different types of trees:
+ * splay trees and red-black trees.
+ *
+ * A splay tree is a self-organizing data structure. Every operation
+ * on the tree causes a splay to happen. The splay moves the requested
+ * node to the root of the tree and partly rebalances it.
+ *
+ * This has the benefit that request locality causes faster lookups as
+ * the requested nodes move to the top of the tree. On the other hand,
+ * every lookup causes memory writes.
+ *
+ * The Balance Theorem bounds the total access time for m operations
+ * and n inserts on an initially empty tree as O((m + n)lg n). The
+ * amortized cost for a sequence of m accesses to a splay tree is O(lg n);
+ *
+ * A red-black tree is a binary search tree with the node color as an
+ * extra attribute. It fulfills a set of conditions:
+ * - every search path from the root to a leaf consists of the
+ * same number of black nodes,
+ * - each red node (except for the root) has a black parent,
+ * - each leaf node is black.
+ *
+ * Every operation on a red-black tree is bounded as O(lg n).
+ * The maximum height of a red-black tree is 2lg (n+1).
+ */
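A minimal sketch of how the red-black tree macros below fit together, mirroring the ext4_xattr_tree declared earlier in this patch; the demo_* names are illustrative:

    struct demo_node {
            int key;
            RB_ENTRY(demo_node) link;   /* embeds left/right/parent/color */
    };
    RB_HEAD(demo_tree, demo_node);

    static int demo_cmp(struct demo_node *a, struct demo_node *b)
    {
            return a->key - b->key;
    }
    /* Emits demo_tree_RB_INSERT(), demo_tree_RB_FIND(), ... as static inline. */
    RB_GENERATE_INTERNAL(demo_tree, demo_node, link, demo_cmp, static inline)

    /* Usage:
     *   struct demo_tree head = RB_INITIALIZER(&head);
     *   RB_INSERT(demo_tree, &head, &node);
     *   struct demo_node *hit = RB_FIND(demo_tree, &head, &query);
     */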
+
+#define SPLAY_HEAD(name, type) \
+struct name { \
+ struct type *sph_root; /* root of the tree */ \
+}
+
+#define SPLAY_INITIALIZER(root) \
+ { NULL }
+
+#define SPLAY_INIT(root) do { \
+ (root)->sph_root = NULL; \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_ENTRY(type) \
+struct { \
+ struct type *spe_left; /* left element */ \
+ struct type *spe_right; /* right element */ \
+}
+
+#define SPLAY_LEFT(elm, field) (elm)->field.spe_left
+#define SPLAY_RIGHT(elm, field) (elm)->field.spe_right
+#define SPLAY_ROOT(head) (head)->sph_root
+#define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL)
+
+/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */
+#define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \
+ SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \
+ SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
+ (head)->sph_root = tmp; \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_ROTATE_LEFT(head, tmp, field) do { \
+ SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \
+ SPLAY_LEFT(tmp, field) = (head)->sph_root; \
+ (head)->sph_root = tmp; \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_LINKLEFT(head, tmp, field) do { \
+ SPLAY_LEFT(tmp, field) = (head)->sph_root; \
+ tmp = (head)->sph_root; \
+ (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_LINKRIGHT(head, tmp, field) do { \
+ SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
+ tmp = (head)->sph_root; \
+ (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_ASSEMBLE(head, node, left, right, field) do { \
+ SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \
+ SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field);\
+ SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \
+ SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \
+} while (/*CONSTCOND*/ 0)
+
+/* Generates prototypes and inline functions */
+
+#define SPLAY_PROTOTYPE(name, type, field, cmp) \
+void name##_SPLAY(struct name *, struct type *); \
+void name##_SPLAY_MINMAX(struct name *, int); \
+struct type *name##_SPLAY_INSERT(struct name *, struct type *); \
+struct type *name##_SPLAY_REMOVE(struct name *, struct type *); \
+ \
+/* Finds the node with the same key as elm */ \
+static __inline struct type * \
+name##_SPLAY_FIND(struct name *head, struct type *elm) \
+{ \
+ if (SPLAY_EMPTY(head)) \
+ return(NULL); \
+ name##_SPLAY(head, elm); \
+ if ((cmp)(elm, (head)->sph_root) == 0) \
+ return (head->sph_root); \
+ return (NULL); \
+} \
+ \
+static __inline struct type * \
+name##_SPLAY_NEXT(struct name *head, struct type *elm) \
+{ \
+ name##_SPLAY(head, elm); \
+ if (SPLAY_RIGHT(elm, field) != NULL) { \
+ elm = SPLAY_RIGHT(elm, field); \
+ while (SPLAY_LEFT(elm, field) != NULL) { \
+ elm = SPLAY_LEFT(elm, field); \
+ } \
+ } else \
+ elm = NULL; \
+ return (elm); \
+} \
+ \
+static __inline struct type * \
+name##_SPLAY_MIN_MAX(struct name *head, int val) \
+{ \
+ name##_SPLAY_MINMAX(head, val); \
+ return (SPLAY_ROOT(head)); \
+}
+
+/* Main splay operation.
+ * Moves node close to the key of elm to top
+ */
+#define SPLAY_GENERATE(name, type, field, cmp) \
+struct type * \
+name##_SPLAY_INSERT(struct name *head, struct type *elm) \
+{ \
+ if (SPLAY_EMPTY(head)) { \
+ SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \
+ } else { \
+ int __comp; \
+ name##_SPLAY(head, elm); \
+ __comp = (cmp)(elm, (head)->sph_root); \
+ if(__comp < 0) { \
+ SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field);\
+ SPLAY_RIGHT(elm, field) = (head)->sph_root; \
+ SPLAY_LEFT((head)->sph_root, field) = NULL; \
+ } else if (__comp > 0) { \
+ SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field);\
+ SPLAY_LEFT(elm, field) = (head)->sph_root; \
+ SPLAY_RIGHT((head)->sph_root, field) = NULL; \
+ } else \
+ return ((head)->sph_root); \
+ } \
+ (head)->sph_root = (elm); \
+ return (NULL); \
+} \
+ \
+struct type * \
+name##_SPLAY_REMOVE(struct name *head, struct type *elm) \
+{ \
+ struct type *__tmp; \
+ if (SPLAY_EMPTY(head)) \
+ return (NULL); \
+ name##_SPLAY(head, elm); \
+ if ((cmp)(elm, (head)->sph_root) == 0) { \
+ if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \
+ (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);\
+ } else { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ (head)->sph_root = SPLAY_LEFT((head)->sph_root, field);\
+ name##_SPLAY(head, elm); \
+ SPLAY_RIGHT((head)->sph_root, field) = __tmp; \
+ } \
+ return (elm); \
+ } \
+ return (NULL); \
+} \
+ \
+void \
+name##_SPLAY(struct name *head, struct type *elm) \
+{ \
+ struct type __node, *__left, *__right, *__tmp; \
+ int __comp; \
+\
+ SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
+ __left = __right = &__node; \
+\
+ while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) { \
+ if (__comp < 0) { \
+ __tmp = SPLAY_LEFT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if ((cmp)(elm, __tmp) < 0){ \
+ SPLAY_ROTATE_RIGHT(head, __tmp, field); \
+ if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKLEFT(head, __right, field); \
+ } else if (__comp > 0) { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if ((cmp)(elm, __tmp) > 0){ \
+ SPLAY_ROTATE_LEFT(head, __tmp, field); \
+ if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKRIGHT(head, __left, field); \
+ } \
+ } \
+ SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
+} \
+ \
+/* Splay with either the minimum or the maximum element \
+ * Used to find minimum or maximum element in tree. \
+ */ \
+void name##_SPLAY_MINMAX(struct name *head, int __comp) \
+{ \
+ struct type __node, *__left, *__right, *__tmp; \
+\
+ SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
+ __left = __right = &__node; \
+\
+ while (1) { \
+ if (__comp < 0) { \
+ __tmp = SPLAY_LEFT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if (__comp < 0){ \
+ SPLAY_ROTATE_RIGHT(head, __tmp, field); \
+ if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKLEFT(head, __right, field); \
+ } else if (__comp > 0) { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if (__comp > 0) { \
+ SPLAY_ROTATE_LEFT(head, __tmp, field); \
+ if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKRIGHT(head, __left, field); \
+ } \
+ } \
+ SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
+}
+
+#define SPLAY_NEGINF -1
+#define SPLAY_INF 1
+
+#define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y)
+#define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y)
+#define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y)
+#define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y)
+#define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL \
+ : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF))
+#define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL \
+ : name##_SPLAY_MIN_MAX(x, SPLAY_INF))
+
+#define SPLAY_FOREACH(x, name, head) \
+ for ((x) = SPLAY_MIN(name, head); \
+ (x) != NULL; \
+ (x) = SPLAY_NEXT(name, head, x))
+
+/* Macros that define a red-black tree */
+#define RB_HEAD(name, type) \
+struct name { \
+ struct type *rbh_root; /* root of the tree */ \
+}
+
+#define RB_INITIALIZER(root) \
+ { NULL }
+
+#define RB_INIT(root) do { \
+ (root)->rbh_root = NULL; \
+} while (/*CONSTCOND*/ 0)
+
+#define RB_BLACK 0
+#define RB_RED 1
+#define RB_ENTRY(type) \
+struct { \
+ struct type *rbe_left; /* left element */ \
+ struct type *rbe_right; /* right element */ \
+ struct type *rbe_parent; /* parent element */ \
+ int rbe_color; /* node color */ \
+}
+
+#define RB_LEFT(elm, field) (elm)->field.rbe_left
+#define RB_RIGHT(elm, field) (elm)->field.rbe_right
+#define RB_PARENT(elm, field) (elm)->field.rbe_parent
+#define RB_COLOR(elm, field) (elm)->field.rbe_color
+#define RB_ROOT(head) (head)->rbh_root
+#define RB_EMPTY(head) (RB_ROOT(head) == NULL)
+
+#define RB_SET(elm, parent, field) do { \
+ RB_PARENT(elm, field) = parent; \
+ RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \
+ RB_COLOR(elm, field) = RB_RED; \
+} while (/*CONSTCOND*/ 0)
+
+#define RB_SET_BLACKRED(black, red, field) do { \
+ RB_COLOR(black, field) = RB_BLACK; \
+ RB_COLOR(red, field) = RB_RED; \
+} while (/*CONSTCOND*/ 0)
+
+#ifndef RB_AUGMENT
+#define RB_AUGMENT(x) do {} while (0)
+#endif
+
+#define RB_ROTATE_LEFT(head, elm, tmp, field) do { \
+ (tmp) = RB_RIGHT(elm, field); \
+ if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field)) != NULL) { \
+ RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \
+ } \
+ RB_AUGMENT(elm); \
+ if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
+ if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
+ RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
+ else \
+ RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
+ } else \
+ (head)->rbh_root = (tmp); \
+ RB_LEFT(tmp, field) = (elm); \
+ RB_PARENT(elm, field) = (tmp); \
+ RB_AUGMENT(tmp); \
+ if ((RB_PARENT(tmp, field))) \
+ RB_AUGMENT(RB_PARENT(tmp, field)); \
+} while (/*CONSTCOND*/ 0)
+
+#define RB_ROTATE_RIGHT(head, elm, tmp, field) do { \
+ (tmp) = RB_LEFT(elm, field); \
+ if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field)) != NULL) { \
+ RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \
+ } \
+ RB_AUGMENT(elm); \
+ if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
+ if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
+ RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
+ else \
+ RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
+ } else \
+ (head)->rbh_root = (tmp); \
+ RB_RIGHT(tmp, field) = (elm); \
+ RB_PARENT(elm, field) = (tmp); \
+ RB_AUGMENT(tmp); \
+ if ((RB_PARENT(tmp, field))) \
+ RB_AUGMENT(RB_PARENT(tmp, field)); \
+} while (/*CONSTCOND*/ 0)
+
+/* Generates prototypes and inline functions */
+#define RB_PROTOTYPE(name, type, field, cmp) \
+ RB_PROTOTYPE_INTERNAL(name, type, field, cmp,)
+#define RB_PROTOTYPE_STATIC(name, type, field, cmp) \
+ RB_PROTOTYPE_INTERNAL(name, type, field, cmp, __unused static)
+#define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \
+ RB_PROTOTYPE_INSERT_COLOR(name, type, attr); \
+ RB_PROTOTYPE_REMOVE_COLOR(name, type, attr); \
+ RB_PROTOTYPE_INSERT(name, type, attr); \
+ RB_PROTOTYPE_REMOVE(name, type, attr); \
+ RB_PROTOTYPE_FIND(name, type, attr); \
+ RB_PROTOTYPE_NFIND(name, type, attr); \
+ RB_PROTOTYPE_NEXT(name, type, attr); \
+ RB_PROTOTYPE_PREV(name, type, attr); \
+ RB_PROTOTYPE_MINMAX(name, type, attr);
+#define RB_PROTOTYPE_INSERT_COLOR(name, type, attr) \
+ attr void name##_RB_INSERT_COLOR(struct name *, struct type *)
+#define RB_PROTOTYPE_REMOVE_COLOR(name, type, attr) \
+ attr void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *)
+#define RB_PROTOTYPE_REMOVE(name, type, attr) \
+ attr struct type *name##_RB_REMOVE(struct name *, struct type *)
+#define RB_PROTOTYPE_INSERT(name, type, attr) \
+ attr struct type *name##_RB_INSERT(struct name *, struct type *)
+#define RB_PROTOTYPE_FIND(name, type, attr) \
+ attr struct type *name##_RB_FIND(struct name *, struct type *)
+#define RB_PROTOTYPE_NFIND(name, type, attr) \
+ attr struct type *name##_RB_NFIND(struct name *, struct type *)
+#define RB_PROTOTYPE_NEXT(name, type, attr) \
+ attr struct type *name##_RB_NEXT(struct type *)
+#define RB_PROTOTYPE_PREV(name, type, attr) \
+ attr struct type *name##_RB_PREV(struct type *)
+#define RB_PROTOTYPE_MINMAX(name, type, attr) \
+ attr struct type *name##_RB_MINMAX(struct name *, int)
+
+/* Main rb operation.
+ * Moves node close to the key of elm to top
+ */
+#define RB_GENERATE(name, type, field, cmp) \
+ RB_GENERATE_INTERNAL(name, type, field, cmp,)
+#define RB_GENERATE_STATIC(name, type, field, cmp) \
+ RB_GENERATE_INTERNAL(name, type, field, cmp, __unused static)
+#define RB_GENERATE_INTERNAL(name, type, field, cmp, attr) \
+ RB_GENERATE_INSERT_COLOR(name, type, field, attr) \
+ RB_GENERATE_REMOVE_COLOR(name, type, field, attr) \
+ RB_GENERATE_INSERT(name, type, field, cmp, attr) \
+ RB_GENERATE_REMOVE(name, type, field, attr) \
+ RB_GENERATE_FIND(name, type, field, cmp, attr) \
+ RB_GENERATE_NFIND(name, type, field, cmp, attr) \
+ RB_GENERATE_NEXT(name, type, field, attr) \
+ RB_GENERATE_PREV(name, type, field, attr) \
+ RB_GENERATE_MINMAX(name, type, field, attr)
+
+#define RB_GENERATE_INSERT_COLOR(name, type, field, attr) \
+attr void \
+name##_RB_INSERT_COLOR(struct name *head, struct type *elm) \
+{ \
+ struct type *parent, *gparent, *tmp; \
+ while ((parent = RB_PARENT(elm, field)) != NULL && \
+ RB_COLOR(parent, field) == RB_RED) { \
+ gparent = RB_PARENT(parent, field); \
+ if (parent == RB_LEFT(gparent, field)) { \
+ tmp = RB_RIGHT(gparent, field); \
+ if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
+ RB_COLOR(tmp, field) = RB_BLACK; \
+ RB_SET_BLACKRED(parent, gparent, field);\
+ elm = gparent; \
+ continue; \
+ } \
+ if (RB_RIGHT(parent, field) == elm) { \
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ tmp = parent; \
+ parent = elm; \
+ elm = tmp; \
+ } \
+ RB_SET_BLACKRED(parent, gparent, field); \
+ RB_ROTATE_RIGHT(head, gparent, tmp, field); \
+ } else { \
+ tmp = RB_LEFT(gparent, field); \
+ if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
+ RB_COLOR(tmp, field) = RB_BLACK; \
+ RB_SET_BLACKRED(parent, gparent, field);\
+ elm = gparent; \
+ continue; \
+ } \
+ if (RB_LEFT(parent, field) == elm) { \
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ tmp = parent; \
+ parent = elm; \
+ elm = tmp; \
+ } \
+ RB_SET_BLACKRED(parent, gparent, field); \
+ RB_ROTATE_LEFT(head, gparent, tmp, field); \
+ } \
+ } \
+ RB_COLOR(head->rbh_root, field) = RB_BLACK; \
+}
+
+#define RB_GENERATE_REMOVE_COLOR(name, type, field, attr) \
+attr void \
+name##_RB_REMOVE_COLOR(struct name *head, struct type *parent, struct type *elm) \
+{ \
+ struct type *tmp; \
+ while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && \
+ elm != RB_ROOT(head)) { \
+ if (RB_LEFT(parent, field) == elm) { \
+ tmp = RB_RIGHT(parent, field); \
+ if (RB_COLOR(tmp, field) == RB_RED) { \
+ RB_SET_BLACKRED(tmp, parent, field); \
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ tmp = RB_RIGHT(parent, field); \
+ } \
+ if ((RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
+ (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
+ RB_COLOR(tmp, field) = RB_RED; \
+ elm = parent; \
+ parent = RB_PARENT(elm, field); \
+ } else { \
+ if (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) {\
+ struct type *oleft; \
+ if ((oleft = RB_LEFT(tmp, field)) \
+ != NULL) \
+ RB_COLOR(oleft, field) = RB_BLACK;\
+ RB_COLOR(tmp, field) = RB_RED; \
+ RB_ROTATE_RIGHT(head, tmp, oleft, field);\
+ tmp = RB_RIGHT(parent, field); \
+ } \
+ RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
+ RB_COLOR(parent, field) = RB_BLACK; \
+ if (RB_RIGHT(tmp, field)) \
+ RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK;\
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ elm = RB_ROOT(head); \
+ break; \
+ } \
+ } else { \
+ tmp = RB_LEFT(parent, field); \
+ if (RB_COLOR(tmp, field) == RB_RED) { \
+ RB_SET_BLACKRED(tmp, parent, field); \
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ tmp = RB_LEFT(parent, field); \
+ } \
+ if ((RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
+ (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
+ RB_COLOR(tmp, field) = RB_RED; \
+ elm = parent; \
+ parent = RB_PARENT(elm, field); \
+ } else { \
+ if (RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) {\
+ struct type *oright; \
+ if ((oright = RB_RIGHT(tmp, field)) \
+ != NULL) \
+ RB_COLOR(oright, field) = RB_BLACK;\
+ RB_COLOR(tmp, field) = RB_RED; \
+ RB_ROTATE_LEFT(head, tmp, oright, field);\
+ tmp = RB_LEFT(parent, field); \
+ } \
+ RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
+ RB_COLOR(parent, field) = RB_BLACK; \
+ if (RB_LEFT(tmp, field)) \
+ RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK;\
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ elm = RB_ROOT(head); \
+ break; \
+ } \
+ } \
+ } \
+ if (elm) \
+ RB_COLOR(elm, field) = RB_BLACK; \
+}
+
+#define RB_GENERATE_REMOVE(name, type, field, attr) \
+attr struct type * \
+name##_RB_REMOVE(struct name *head, struct type *elm) \
+{ \
+ struct type *child, *parent, *old = elm; \
+ int color; \
+ if (RB_LEFT(elm, field) == NULL) \
+ child = RB_RIGHT(elm, field); \
+ else if (RB_RIGHT(elm, field) == NULL) \
+ child = RB_LEFT(elm, field); \
+ else { \
+ struct type *left; \
+ elm = RB_RIGHT(elm, field); \
+ while ((left = RB_LEFT(elm, field)) != NULL) \
+ elm = left; \
+ child = RB_RIGHT(elm, field); \
+ parent = RB_PARENT(elm, field); \
+ color = RB_COLOR(elm, field); \
+ if (child) \
+ RB_PARENT(child, field) = parent; \
+ if (parent) { \
+ if (RB_LEFT(parent, field) == elm) \
+ RB_LEFT(parent, field) = child; \
+ else \
+ RB_RIGHT(parent, field) = child; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = child; \
+ if (RB_PARENT(elm, field) == old) \
+ parent = elm; \
+ (elm)->field = (old)->field; \
+ if (RB_PARENT(old, field)) { \
+ if (RB_LEFT(RB_PARENT(old, field), field) == old)\
+ RB_LEFT(RB_PARENT(old, field), field) = elm;\
+ else \
+ RB_RIGHT(RB_PARENT(old, field), field) = elm;\
+ RB_AUGMENT(RB_PARENT(old, field)); \
+ } else \
+ RB_ROOT(head) = elm; \
+ RB_PARENT(RB_LEFT(old, field), field) = elm; \
+ if (RB_RIGHT(old, field)) \
+ RB_PARENT(RB_RIGHT(old, field), field) = elm; \
+ if (parent) { \
+ left = parent; \
+ do { \
+ RB_AUGMENT(left); \
+ } while ((left = RB_PARENT(left, field)) != NULL); \
+ } \
+ goto color; \
+ } \
+ parent = RB_PARENT(elm, field); \
+ color = RB_COLOR(elm, field); \
+ if (child) \
+ RB_PARENT(child, field) = parent; \
+ if (parent) { \
+ if (RB_LEFT(parent, field) == elm) \
+ RB_LEFT(parent, field) = child; \
+ else \
+ RB_RIGHT(parent, field) = child; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = child; \
+color: \
+ if (color == RB_BLACK) \
+ name##_RB_REMOVE_COLOR(head, parent, child); \
+ return (old); \
+} \
+
+#define RB_GENERATE_INSERT(name, type, field, cmp, attr) \
+/* Inserts a node into the RB tree */ \
+attr struct type * \
+name##_RB_INSERT(struct name *head, struct type *elm) \
+{ \
+ struct type *tmp; \
+ struct type *parent = NULL; \
+ int comp = 0; \
+ tmp = RB_ROOT(head); \
+ while (tmp) { \
+ parent = tmp; \
+ comp = (cmp)(elm, parent); \
+ if (comp < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else if (comp > 0) \
+ tmp = RB_RIGHT(tmp, field); \
+ else \
+ return (tmp); \
+ } \
+ RB_SET(elm, parent, field); \
+ if (parent != NULL) { \
+ if (comp < 0) \
+ RB_LEFT(parent, field) = elm; \
+ else \
+ RB_RIGHT(parent, field) = elm; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = elm; \
+ name##_RB_INSERT_COLOR(head, elm); \
+ return (NULL); \
+}
+
+#define RB_GENERATE_FIND(name, type, field, cmp, attr) \
+/* Finds the node with the same key as elm */ \
+attr struct type * \
+name##_RB_FIND(struct name *head, struct type *elm) \
+{ \
+ struct type *tmp = RB_ROOT(head); \
+ int comp; \
+ while (tmp) { \
+ comp = cmp(elm, tmp); \
+ if (comp < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else if (comp > 0) \
+ tmp = RB_RIGHT(tmp, field); \
+ else \
+ return (tmp); \
+ } \
+ return (NULL); \
+}
+
+#define RB_GENERATE_NFIND(name, type, field, cmp, attr) \
+/* Finds the first node greater than or equal to the search key */ \
+attr struct type * \
+name##_RB_NFIND(struct name *head, struct type *elm) \
+{ \
+ struct type *tmp = RB_ROOT(head); \
+ struct type *res = NULL; \
+ int comp; \
+ while (tmp) { \
+ comp = cmp(elm, tmp); \
+ if (comp < 0) { \
+ res = tmp; \
+ tmp = RB_LEFT(tmp, field); \
+ } \
+ else if (comp > 0) \
+ tmp = RB_RIGHT(tmp, field); \
+ else \
+ return (tmp); \
+ } \
+ return (res); \
+}
+
+#define RB_GENERATE_NEXT(name, type, field, attr) \
+/* ARGSUSED */ \
+attr struct type * \
+name##_RB_NEXT(struct type *elm) \
+{ \
+ if (RB_RIGHT(elm, field)) { \
+ elm = RB_RIGHT(elm, field); \
+ while (RB_LEFT(elm, field)) \
+ elm = RB_LEFT(elm, field); \
+ } else { \
+ if (RB_PARENT(elm, field) && \
+ (elm == RB_LEFT(RB_PARENT(elm, field), field))) \
+ elm = RB_PARENT(elm, field); \
+ else { \
+ while (RB_PARENT(elm, field) && \
+ (elm == RB_RIGHT(RB_PARENT(elm, field), field)))\
+ elm = RB_PARENT(elm, field); \
+ elm = RB_PARENT(elm, field); \
+ } \
+ } \
+ return (elm); \
+}
+
+#define RB_GENERATE_PREV(name, type, field, attr) \
+/* ARGSUSED */ \
+attr struct type * \
+name##_RB_PREV(struct type *elm) \
+{ \
+ if (RB_LEFT(elm, field)) { \
+ elm = RB_LEFT(elm, field); \
+ while (RB_RIGHT(elm, field)) \
+ elm = RB_RIGHT(elm, field); \
+ } else { \
+ if (RB_PARENT(elm, field) && \
+ (elm == RB_RIGHT(RB_PARENT(elm, field), field))) \
+ elm = RB_PARENT(elm, field); \
+ else { \
+ while (RB_PARENT(elm, field) && \
+ (elm == RB_LEFT(RB_PARENT(elm, field), field)))\
+ elm = RB_PARENT(elm, field); \
+ elm = RB_PARENT(elm, field); \
+ } \
+ } \
+ return (elm); \
+}
+
+#define RB_GENERATE_MINMAX(name, type, field, attr) \
+attr struct type * \
+name##_RB_MINMAX(struct name *head, int val) \
+{ \
+ struct type *tmp = RB_ROOT(head); \
+ struct type *parent = NULL; \
+ while (tmp) { \
+ parent = tmp; \
+ if (val < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else \
+ tmp = RB_RIGHT(tmp, field); \
+ } \
+ return (parent); \
+}
+
+#define RB_NEGINF -1
+#define RB_INF 1
+
+#define RB_INSERT(name, x, y) name##_RB_INSERT(x, y)
+#define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y)
+#define RB_FIND(name, x, y) name##_RB_FIND(x, y)
+#define RB_NFIND(name, x, y) name##_RB_NFIND(x, y)
+#define RB_NEXT(name, x, y) name##_RB_NEXT(y)
+#define RB_PREV(name, x, y) name##_RB_PREV(y)
+#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF)
+#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF)
+
+#define RB_FOREACH(x, name, head) \
+ for ((x) = RB_MIN(name, head); \
+ (x) != NULL; \
+ (x) = name##_RB_NEXT(x))
+
+#define RB_FOREACH_FROM(x, name, y) \
+ for ((x) = (y); \
+ ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \
+ (x) = (y))
+
+#define RB_FOREACH_SAFE(x, name, head, y) \
+ for ((x) = RB_MIN(name, head); \
+ ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \
+ (x) = (y))
+
+#define RB_FOREACH_REVERSE(x, name, head) \
+ for ((x) = RB_MAX(name, head); \
+ (x) != NULL; \
+ (x) = name##_RB_PREV(x))
+
+#define RB_FOREACH_REVERSE_FROM(x, name, y) \
+ for ((x) = (y); \
+ ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \
+ (x) = (y))
+
+#define RB_FOREACH_REVERSE_SAFE(x, name, head, y) \
+ for ((x) = RB_MAX(name, head); \
+ ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \
+ (x) = (y))
+
+#endif /* _SYS_TREE_H_ */