Adding upstream version 0.6.0.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-02-09 07:35:45 +01:00
parent c49a9029dc
commit 7f70a05c55
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
465 changed files with 60158 additions and 0 deletions

64
src/Makefile.in Normal file

@@ -0,0 +1,64 @@
.PHONY: clean distribution
include @BUILD_DIR@/build/ck.build
TARGET_DIR=$(BUILD_DIR)/src
SDIR=$(SRC_DIR)/src
INCLUDE_DIR=$(SRC_DIR)/include
OBJECTS=ck_barrier_centralized.o \
ck_barrier_combining.o \
ck_barrier_dissemination.o \
ck_barrier_tournament.o \
ck_barrier_mcs.o \
ck_epoch.o \
ck_ht.o \
ck_hp.o \
ck_hs.o \
ck_rhs.o \
ck_array.o
all: $(ALL_LIBS)
libck.so: $(OBJECTS)
$(LD) $(LDFLAGS) -o $(TARGET_DIR)/libck.so $(OBJECTS)
libck.a: $(OBJECTS)
ar rcs $(TARGET_DIR)/libck.a $(OBJECTS)
ck_array.o: $(INCLUDE_DIR)/ck_array.h $(SDIR)/ck_array.c
$(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_array.o $(SDIR)/ck_array.c
ck_epoch.o: $(INCLUDE_DIR)/ck_epoch.h $(SDIR)/ck_epoch.c $(INCLUDE_DIR)/ck_stack.h
$(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_epoch.o $(SDIR)/ck_epoch.c
ck_hs.o: $(INCLUDE_DIR)/ck_hs.h $(SDIR)/ck_hs.c
$(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_hs.o $(SDIR)/ck_hs.c
ck_rhs.o: $(INCLUDE_DIR)/ck_rhs.h $(SDIR)/ck_rhs.c
$(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_rhs.o $(SDIR)/ck_rhs.c
ck_ht.o: $(INCLUDE_DIR)/ck_ht.h $(SDIR)/ck_ht.c
$(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_ht.o $(SDIR)/ck_ht.c
ck_hp.o: $(SDIR)/ck_hp.c $(INCLUDE_DIR)/ck_hp.h $(INCLUDE_DIR)/ck_stack.h
$(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_hp.o $(SDIR)/ck_hp.c
ck_barrier_centralized.o: $(SDIR)/ck_barrier_centralized.c
$(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_barrier_centralized.o $(SDIR)/ck_barrier_centralized.c
ck_barrier_combining.o: $(SDIR)/ck_barrier_combining.c
$(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_barrier_combining.o $(SDIR)/ck_barrier_combining.c
ck_barrier_dissemination.o: $(SDIR)/ck_barrier_dissemination.c
$(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_barrier_dissemination.o $(SDIR)/ck_barrier_dissemination.c
ck_barrier_tournament.o: $(SDIR)/ck_barrier_tournament.c
$(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_barrier_tournament.o $(SDIR)/ck_barrier_tournament.c
ck_barrier_mcs.o: $(SDIR)/ck_barrier_mcs.c
$(CC) $(CFLAGS) -c -o $(TARGET_DIR)/ck_barrier_mcs.o $(SDIR)/ck_barrier_mcs.c
clean:
rm -rf $(TARGET_DIR)/*.dSYM $(TARGET_DIR)/*~ $(TARGET_DIR)/*.o \
$(OBJECTS) $(TARGET_DIR)/libck.a $(TARGET_DIR)/libck.so

240
src/ck_array.c Normal file

@@ -0,0 +1,240 @@
/*
* Copyright 2013-2015 Samy Al Bahra
* Copyright 2013-2014 AppNexus, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <ck_array.h>
#include <ck_cc.h>
#include <ck_pr.h>
#include <ck_stdbool.h>
#include <ck_string.h>
static struct _ck_array *
ck_array_create(struct ck_malloc *allocator, unsigned int length)
{
struct _ck_array *active;
active = allocator->malloc(sizeof(struct _ck_array) + sizeof(void *) * length);
if (active == NULL)
return NULL;
active->n_committed = 0;
active->length = length;
return active;
}
bool
ck_array_init(struct ck_array *array, unsigned int mode, struct ck_malloc *allocator, unsigned int length)
{
struct _ck_array *active;
(void)mode;
if (allocator->realloc == NULL ||
allocator->malloc == NULL ||
allocator->free == NULL ||
length == 0)
return false;
active = ck_array_create(allocator, length);
if (active == NULL)
return false;
array->n_entries = 0;
array->allocator = allocator;
array->active = active;
array->transaction = NULL;
return true;
}
bool
ck_array_put(struct ck_array *array, void *value)
{
struct _ck_array *target;
unsigned int size;
/*
* If no transaction copy has been necessary, attempt to do in-place
* modification of the array.
*/
if (array->transaction == NULL) {
target = array->active;
if (array->n_entries == target->length) {
size = target->length << 1;
target = array->allocator->realloc(target,
sizeof(struct _ck_array) + sizeof(void *) * array->n_entries,
sizeof(struct _ck_array) + sizeof(void *) * size,
true);
if (target == NULL)
return false;
ck_pr_store_uint(&target->length, size);
/* Serialize with respect to contents. */
ck_pr_fence_store();
ck_pr_store_ptr(&array->active, target);
}
target->values[array->n_entries++] = value;
return true;
}
target = array->transaction;
if (array->n_entries == target->length) {
size = target->length << 1;
target = array->allocator->realloc(target,
sizeof(struct _ck_array) + sizeof(void *) * array->n_entries,
sizeof(struct _ck_array) + sizeof(void *) * size,
true);
if (target == NULL)
return false;
target->length = size;
array->transaction = target;
}
target->values[array->n_entries++] = value;
return false;
}
int
ck_array_put_unique(struct ck_array *array, void *value)
{
unsigned int i, limit;
void **v;
limit = array->n_entries;
if (array->transaction != NULL) {
v = array->transaction->values;
} else {
v = array->active->values;
}
for (i = 0; i < limit; i++) {
if (v[i] == value)
return 1;
}
return -!ck_array_put(array, value);
}
bool
ck_array_remove(struct ck_array *array, void *value)
{
struct _ck_array *target;
unsigned int i;
if (array->transaction != NULL) {
target = array->transaction;
for (i = 0; i < array->n_entries; i++) {
if (target->values[i] == value) {
target->values[i] = target->values[--array->n_entries];
return true;
}
}
return false;
}
target = array->active;
for (i = 0; i < array->n_entries; i++) {
if (target->values[i] == value)
break;
}
if (i == array->n_entries)
return false;
/* If there are pending additions, immediately eliminate the operation. */
if (target->n_committed != array->n_entries) {
ck_pr_store_ptr(&target->values[i], target->values[--array->n_entries]);
return true;
}
/*
* The assumption is that these allocations are small to begin with.
* If there is no immediate opportunity for transaction, allocate a
* transactional array which will be applied upon commit time.
*/
target = ck_array_create(array->allocator, array->n_entries);
if (target == NULL)
return false;
memcpy(target->values, array->active->values, sizeof(void *) * array->n_entries);
target->length = array->n_entries;
target->n_committed = array->n_entries;
target->values[i] = target->values[--array->n_entries];
array->transaction = target;
return true;
}
bool
ck_array_commit(ck_array_t *array)
{
struct _ck_array *m = array->transaction;
if (m != NULL) {
struct _ck_array *p;
m->n_committed = array->n_entries;
ck_pr_fence_store();
p = array->active;
ck_pr_store_ptr(&array->active, m);
array->allocator->free(p, sizeof(struct _ck_array) +
p->length * sizeof(void *), true);
array->transaction = NULL;
return true;
}
ck_pr_fence_store();
ck_pr_store_uint(&array->active->n_committed, array->n_entries);
return true;
}
void
ck_array_deinit(struct ck_array *array, bool defer)
{
array->allocator->free(array->active,
sizeof(struct _ck_array) + sizeof(void *) * array->active->length, defer);
if (array->transaction != NULL) {
array->allocator->free(array->transaction,
sizeof(struct _ck_array) + sizeof(void *) * array->transaction->length, defer);
}
array->transaction = array->active = NULL;
return;
}
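
A minimal usage sketch for this API (not part of the upstream file). The allocator callbacks and function names below are hypothetical; the CK_ARRAY_MODE_SPMC constant and ck_array_t typedef are assumed to come from ck_array.h.

#include <ck_array.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

/* Hypothetical ck_malloc hooks wrapping the standard allocator. */
static void *
my_malloc(size_t size)
{
	return malloc(size);
}

static void *
my_realloc(void *p, size_t old_size, size_t new_size, bool defer)
{
	(void)old_size; (void)defer;
	return realloc(p, new_size);
}

static void
my_free(void *p, size_t size, bool defer)
{
	(void)size; (void)defer;
	free(p);
}

static struct ck_malloc my_allocator = {
	.malloc = my_malloc,
	.realloc = my_realloc,
	.free = my_free
};

static ck_array_t array;

/* Single writer: stage additions/removals, then publish them to readers. */
void
writer_example(void *object)
{
	ck_array_init(&array, CK_ARRAY_MODE_SPMC, &my_allocator, 8);
	ck_array_put(&array, object);
	ck_array_commit(&array);	/* Readers now observe the addition. */
	ck_array_remove(&array, object);
	ck_array_commit(&array);	/* Publish the removal as well. */
}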

59
src/ck_barrier_centralized.c Normal file

@@ -0,0 +1,59 @@
/*
* Copyright 2011-2015 Samy Al Bahra.
* Copyright 2011 David Joseph.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <ck_barrier.h>
#include <ck_pr.h>
void
ck_barrier_centralized(struct ck_barrier_centralized *barrier,
struct ck_barrier_centralized_state *state,
unsigned int n_threads)
{
unsigned int sense, value;
/*
* Every execution context has a sense associated with it.
* This sense is reversed when the barrier is entered. Every
* thread will spin on the global sense until the last thread
* reverses it.
*/
sense = state->sense = ~state->sense;
value = ck_pr_faa_uint(&barrier->value, 1);
if (value == n_threads - 1) {
ck_pr_store_uint(&barrier->value, 0);
ck_pr_fence_memory();
ck_pr_store_uint(&barrier->sense, sense);
return;
}
ck_pr_fence_atomic_load();
while (sense != ck_pr_load_uint(&barrier->sense))
ck_pr_stall();
ck_pr_fence_acquire();
return;
}
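
A usage sketch (not part of the upstream file), assuming the CK_BARRIER_CENTRALIZED_INITIALIZER and CK_BARRIER_CENTRALIZED_STATE_INITIALIZER macros and the _t typedefs from ck_barrier.h: all participants share one barrier object, each keeps a private sense state, and the total thread count is passed on every call.

#include <ck_barrier.h>

#define N_THREADS 8

static ck_barrier_centralized_t barrier = CK_BARRIER_CENTRALIZED_INITIALIZER;

/* Run by each of the N_THREADS participants. */
void
worker(void)
{
	ck_barrier_centralized_state_t state =
	    CK_BARRIER_CENTRALIZED_STATE_INITIALIZER;
	int round;

	for (round = 0; round < 100; round++) {
		/* ... phase of work ... */
		ck_barrier_centralized(&barrier, &state, N_THREADS);
	}
}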

207
src/ck_barrier_combining.c Normal file

@@ -0,0 +1,207 @@
/*
* Copyright 2011-2015 Samy Al Bahra.
* Copyright 2011 David Joseph.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <ck_barrier.h>
#include <ck_cc.h>
#include <ck_pr.h>
#include <ck_spinlock.h>
struct ck_barrier_combining_queue {
struct ck_barrier_combining_group *head;
struct ck_barrier_combining_group *tail;
};
CK_CC_INLINE static struct ck_barrier_combining_group *
ck_barrier_combining_queue_dequeue(struct ck_barrier_combining_queue *queue)
{
struct ck_barrier_combining_group *front = NULL;
if (queue->head != NULL) {
front = queue->head;
queue->head = queue->head->next;
}
return front;
}
CK_CC_INLINE static void
ck_barrier_combining_insert(struct ck_barrier_combining_group *parent,
struct ck_barrier_combining_group *tnode,
struct ck_barrier_combining_group **child)
{
*child = tnode;
tnode->parent = parent;
/*
* After inserting, we must increment the parent group's count for
* number of threads expected to reach it; otherwise, the
* barrier may end prematurely.
*/
parent->k++;
return;
}
/*
* This implementation of software combining tree barriers
* uses level order traversal to insert new thread groups
* into the barrier's tree. We use a queue to implement this
* traversal.
*/
CK_CC_INLINE static void
ck_barrier_combining_queue_enqueue(struct ck_barrier_combining_queue *queue,
struct ck_barrier_combining_group *node_value)
{
node_value->next = NULL;
if (queue->head == NULL) {
queue->head = queue->tail = node_value;
return;
}
queue->tail->next = node_value;
queue->tail = node_value;
return;
}
void
ck_barrier_combining_group_init(struct ck_barrier_combining *root,
struct ck_barrier_combining_group *tnode,
unsigned int nthr)
{
struct ck_barrier_combining_group *node;
struct ck_barrier_combining_queue queue;
queue.head = queue.tail = NULL;
tnode->k = nthr;
tnode->count = 0;
tnode->sense = 0;
tnode->left = tnode->right = NULL;
/*
* Finds the first available node for linkage into the combining
* tree. The use of a spinlock is excusable as this is a one-time
* initialization cost.
*/
ck_spinlock_fas_lock(&root->mutex);
ck_barrier_combining_queue_enqueue(&queue, root->root);
while (queue.head != NULL) {
node = ck_barrier_combining_queue_dequeue(&queue);
/* If the left child is free, link the group there. */
if (node->left == NULL) {
ck_barrier_combining_insert(node, tnode, &node->left);
goto leave;
}
/* If the right child is free, link the group there. */
if (node->right == NULL) {
ck_barrier_combining_insert(node, tnode, &node->right);
goto leave;
}
/*
* If unsuccessful, try inserting as a child of the children of the
* current node.
*/
ck_barrier_combining_queue_enqueue(&queue, node->left);
ck_barrier_combining_queue_enqueue(&queue, node->right);
}
leave:
ck_spinlock_fas_unlock(&root->mutex);
return;
}
void
ck_barrier_combining_init(struct ck_barrier_combining *root,
struct ck_barrier_combining_group *init_root)
{
init_root->k = 0;
init_root->count = 0;
init_root->sense = 0;
init_root->parent = init_root->left = init_root->right = NULL;
ck_spinlock_fas_init(&root->mutex);
root->root = init_root;
return;
}
static void
ck_barrier_combining_aux(struct ck_barrier_combining *barrier,
struct ck_barrier_combining_group *tnode,
unsigned int sense)
{
/*
* If this is the last thread in the group, it moves on to the parent group.
* Otherwise, it spins on this group's sense.
*/
if (ck_pr_faa_uint(&tnode->count, 1) == tnode->k - 1) {
/*
* If we are and will be the last thread entering the barrier for the
* current group then signal the parent group if one exists.
*/
if (tnode->parent != NULL)
ck_barrier_combining_aux(barrier, tnode->parent, sense);
/*
* Once the thread returns from its parent(s), it reinitializes the group's
* arrival count and signals other threads to continue by flipping the group
* sense. Order of these operations is not important since we assume a static
* number of threads are members of a barrier for the lifetime of the barrier.
* Since count is explicitly reinitialized, it is guaranteed that at any point
* tnode->count is equivalent to tnode->k if and only if that many threads
* are at the barrier.
*/
ck_pr_store_uint(&tnode->count, 0);
ck_pr_fence_store();
ck_pr_store_uint(&tnode->sense, ~tnode->sense);
} else {
ck_pr_fence_memory();
while (sense != ck_pr_load_uint(&tnode->sense))
ck_pr_stall();
}
return;
}
void
ck_barrier_combining(struct ck_barrier_combining *barrier,
struct ck_barrier_combining_group *tnode,
struct ck_barrier_combining_state *state)
{
ck_barrier_combining_aux(barrier, tnode, state->sense);
/* Reverse the execution context's sense for the next barrier. */
state->sense = ~state->sense;
return;
}
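
A usage sketch (not part of the upstream file). The _t typedefs and CK_BARRIER_COMBINING_STATE_INITIALIZER are assumed to come from ck_barrier.h; a group node must be linked into the tree before any thread of that group enters the barrier.

#include <ck_barrier.h>

static ck_barrier_combining_t barrier;
static ck_barrier_combining_group_t root_group;

/* One-time setup by a single thread. */
void
setup(void)
{
	ck_barrier_combining_init(&barrier, &root_group);
}

/* Link one group node into the tree; threads_in_group threads will arrive at it. */
void
group_setup(ck_barrier_combining_group_t *group, unsigned int threads_in_group)
{
	ck_barrier_combining_group_init(&barrier, group, threads_in_group);
}

/* Run by every thread belonging to the given group. */
void
worker(ck_barrier_combining_group_t *group)
{
	ck_barrier_combining_state_t state =
	    CK_BARRIER_COMBINING_STATE_INITIALIZER;

	for (;;) {
		/* ... phase of work ... */
		ck_barrier_combining(&barrier, group, &state);
	}
}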

130
src/ck_barrier_dissemination.c Normal file

@@ -0,0 +1,130 @@
/*
* Copyright 2011-2015 Samy Al Bahra.
* Copyright 2011 David Joseph.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <ck_barrier.h>
#include <ck_cc.h>
#include <ck_pr.h>
#include <ck_spinlock.h>
#include "ck_internal.h"
void
ck_barrier_dissemination_init(struct ck_barrier_dissemination *barrier,
struct ck_barrier_dissemination_flag **barrier_internal,
unsigned int nthr)
{
unsigned int i, j, k, size, offset;
bool p = nthr & (nthr - 1);
barrier->nthr = nthr;
barrier->size = size = ck_internal_log(ck_internal_power_2(nthr));
ck_pr_store_uint(&barrier->tid, 0);
for (i = 0; i < nthr; ++i) {
barrier[i].flags[0] = barrier_internal[i];
barrier[i].flags[1] = barrier_internal[i] + size;
}
for (i = 0; i < nthr; ++i) {
for (k = 0, offset = 1; k < size; ++k, offset <<= 1) {
/*
* Determine the thread's partner, j, for the current round, k.
* Partners are chosen such that by the completion of the barrier,
* every thread has been directly (having one of its flags set) or
* indirectly (having one of its partners' flags set) signaled
* by every other thread in the barrier.
*/
if (p == false)
j = (i + offset) & (nthr - 1);
else
j = (i + offset) % nthr;
/* Set the thread's partner for round k. */
barrier[i].flags[0][k].pflag = &barrier[j].flags[0][k].tflag;
barrier[i].flags[1][k].pflag = &barrier[j].flags[1][k].tflag;
/* Set the thread's flags to false. */
barrier[i].flags[0][k].tflag = barrier[i].flags[1][k].tflag = 0;
}
}
return;
}
void
ck_barrier_dissemination_subscribe(struct ck_barrier_dissemination *barrier,
struct ck_barrier_dissemination_state *state)
{
state->parity = 0;
state->sense = ~0;
state->tid = ck_pr_faa_uint(&barrier->tid, 1);
return;
}
unsigned int
ck_barrier_dissemination_size(unsigned int nthr)
{
return (ck_internal_log(ck_internal_power_2(nthr)) << 1);
}
void
ck_barrier_dissemination(struct ck_barrier_dissemination *barrier,
struct ck_barrier_dissemination_state *state)
{
unsigned int i;
unsigned int size = barrier->size;
for (i = 0; i < size; ++i) {
unsigned int *pflag, *tflag;
pflag = barrier[state->tid].flags[state->parity][i].pflag;
tflag = &barrier[state->tid].flags[state->parity][i].tflag;
/* Unblock current partner. */
ck_pr_store_uint(pflag, state->sense);
/* Wait until some other thread unblocks this one. */
while (ck_pr_load_uint(tflag) != state->sense)
ck_pr_stall();
}
/*
* Dissemination barriers use two sets of flags to prevent race conditions
* between successive calls to the barrier. Parity indicates which set will
* be used for the next barrier. They also use a sense reversal technique
* to avoid re-initialization of the flags for every two calls to the barrier.
*/
if (state->parity == 1)
state->sense = ~state->sense;
state->parity = 1 - state->parity;
ck_pr_fence_acquire();
return;
}
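
A usage sketch (not part of the upstream file), assuming the _t typedefs from ck_barrier.h. Each of the nthr barrier slots needs a flag array of ck_barrier_dissemination_size(nthr) entries, and every thread subscribes once before its first barrier call.

#include <ck_barrier.h>
#include <stdlib.h>

#define N_THREADS 16

static ck_barrier_dissemination_t barrier[N_THREADS];
static ck_barrier_dissemination_flag_t *flags[N_THREADS];

void
setup(void)
{
	unsigned int i, size = ck_barrier_dissemination_size(N_THREADS);

	/* Two parity sets of log2(n) rounds per thread. */
	for (i = 0; i < N_THREADS; i++)
		flags[i] = calloc(size, sizeof(ck_barrier_dissemination_flag_t));

	ck_barrier_dissemination_init(barrier, flags, N_THREADS);
}

void
worker(void)
{
	ck_barrier_dissemination_state_t state;

	ck_barrier_dissemination_subscribe(barrier, &state);
	for (;;) {
		/* ... phase of work ... */
		ck_barrier_dissemination(barrier, &state);
	}
}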

141
src/ck_barrier_mcs.c Normal file

@@ -0,0 +1,141 @@
/*
* Copyright 2011-2015 Samy Al Bahra.
* Copyright 2011 David Joseph.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <ck_barrier.h>
#include <ck_cc.h>
#include <ck_pr.h>
#include <ck_stdbool.h>
void
ck_barrier_mcs_init(struct ck_barrier_mcs *barrier, unsigned int nthr)
{
unsigned int i, j;
ck_pr_store_uint(&barrier->tid, 0);
for (i = 0; i < nthr; ++i) {
for (j = 0; j < 4; ++j) {
/*
* If there are still threads without a parent, mark this
* slot as having a child.
*/
barrier[i].havechild[j] = ((i << 2) + j < nthr - 1) ? ~0 : 0;
/*
* childnotready is initialized to havechild to ensure
* a thread does not wait for a child that does not exist.
*/
barrier[i].childnotready[j] = barrier[i].havechild[j];
}
/* The root thread does not have a parent. */
barrier[i].parent = (i == 0) ?
&barrier[i].dummy :
&barrier[(i - 1) >> 2].childnotready[(i - 1) & 3];
/* Leaf threads do not have any children. */
barrier[i].children[0] = ((i << 1) + 1 >= nthr) ?
&barrier[i].dummy :
&barrier[(i << 1) + 1].parentsense;
barrier[i].children[1] = ((i << 1) + 2 >= nthr) ?
&barrier[i].dummy :
&barrier[(i << 1) + 2].parentsense;
barrier[i].parentsense = 0;
}
return;
}
void
ck_barrier_mcs_subscribe(struct ck_barrier_mcs *barrier, struct ck_barrier_mcs_state *state)
{
state->sense = ~0;
state->vpid = ck_pr_faa_uint(&barrier->tid, 1);
return;
}
CK_CC_INLINE static bool
ck_barrier_mcs_check_children(unsigned int *childnotready)
{
if (ck_pr_load_uint(&childnotready[0]) != 0)
return false;
if (ck_pr_load_uint(&childnotready[1]) != 0)
return false;
if (ck_pr_load_uint(&childnotready[2]) != 0)
return false;
if (ck_pr_load_uint(&childnotready[3]) != 0)
return false;
return true;
}
CK_CC_INLINE static void
ck_barrier_mcs_reinitialize_children(struct ck_barrier_mcs *node)
{
ck_pr_store_uint(&node->childnotready[0], node->havechild[0]);
ck_pr_store_uint(&node->childnotready[1], node->havechild[1]);
ck_pr_store_uint(&node->childnotready[2], node->havechild[2]);
ck_pr_store_uint(&node->childnotready[3], node->havechild[3]);
return;
}
void
ck_barrier_mcs(struct ck_barrier_mcs *barrier,
struct ck_barrier_mcs_state *state)
{
/*
* Wait until all children have reached the barrier and are done waiting
* for their children.
*/
while (ck_barrier_mcs_check_children(barrier[state->vpid].childnotready) == false)
ck_pr_stall();
/* Reinitialize for next barrier. */
ck_barrier_mcs_reinitialize_children(&barrier[state->vpid]);
/* Inform the parent that this thread and its children have arrived at the barrier. */
ck_pr_store_uint(barrier[state->vpid].parent, 0);
/* Wait until parent indicates all threads have arrived at the barrier. */
if (state->vpid != 0) {
while (ck_pr_load_uint(&barrier[state->vpid].parentsense) != state->sense)
ck_pr_stall();
}
/* Inform children of successful barrier. */
ck_pr_store_uint(barrier[state->vpid].children[0], state->sense);
ck_pr_store_uint(barrier[state->vpid].children[1], state->sense);
state->sense = ~state->sense;
ck_pr_fence_memory();
return;
}
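
A usage sketch (not part of the upstream file), assuming the _t typedefs from ck_barrier.h: the barrier is an array with one tree node per participating thread, and each thread subscribes once to obtain its virtual processor id.

#include <ck_barrier.h>

#define N_THREADS 16

/* One tree node per participating thread. */
static ck_barrier_mcs_t barrier[N_THREADS];

void
setup(void)
{
	ck_barrier_mcs_init(barrier, N_THREADS);
}

void
worker(void)
{
	ck_barrier_mcs_state_t state;

	ck_barrier_mcs_subscribe(barrier, &state);
	for (;;) {
		/* ... phase of work ... */
		ck_barrier_mcs(barrier, &state);
	}
}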

184
src/ck_barrier_tournament.c Normal file

@@ -0,0 +1,184 @@
/*
* Copyright 2011-2015 Samy Al Bahra.
* Copyright 2011 David Joseph.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <ck_barrier.h>
#include <ck_pr.h>
#include "ck_internal.h"
/*
* This is a tournament barrier implementation. Threads are statically
* assigned roles to perform for each round of the barrier. Winners
* move on to the next round, while losers spin in their current rounds
* on their own flags. During the last round, the champion of the tournament
* sets the last flag that begins the wakeup process.
*/
enum {
CK_BARRIER_TOURNAMENT_BYE,
CK_BARRIER_TOURNAMENT_CHAMPION,
CK_BARRIER_TOURNAMENT_DROPOUT,
CK_BARRIER_TOURNAMENT_LOSER,
CK_BARRIER_TOURNAMENT_WINNER
};
void
ck_barrier_tournament_subscribe(struct ck_barrier_tournament *barrier,
struct ck_barrier_tournament_state *state)
{
state->sense = ~0;
state->vpid = ck_pr_faa_uint(&barrier->tid, 1);
return;
}
void
ck_barrier_tournament_init(struct ck_barrier_tournament *barrier,
struct ck_barrier_tournament_round **rounds,
unsigned int nthr)
{
unsigned int i, k, size, twok, twokm1, imod2k;
ck_pr_store_uint(&barrier->tid, 0);
barrier->size = size = ck_barrier_tournament_size(nthr);
for (i = 0; i < nthr; ++i) {
/* The first role is always CK_BARRIER_TOURNAMENT_DROPOUT. */
rounds[i][0].flag = 0;
rounds[i][0].role = CK_BARRIER_TOURNAMENT_DROPOUT;
for (k = 1, twok = 2, twokm1 = 1; k < size; ++k, twokm1 = twok, twok <<= 1) {
rounds[i][k].flag = 0;
imod2k = i & (twok - 1);
if (imod2k == 0) {
if ((i + twokm1 < nthr) && (twok < nthr))
rounds[i][k].role = CK_BARRIER_TOURNAMENT_WINNER;
else if (i + twokm1 >= nthr)
rounds[i][k].role = CK_BARRIER_TOURNAMENT_BYE;
}
if (imod2k == twokm1)
rounds[i][k].role = CK_BARRIER_TOURNAMENT_LOSER;
else if ((i == 0) && (twok >= nthr))
rounds[i][k].role = CK_BARRIER_TOURNAMENT_CHAMPION;
if (rounds[i][k].role == CK_BARRIER_TOURNAMENT_LOSER)
rounds[i][k].opponent = &rounds[i - twokm1][k].flag;
else if (rounds[i][k].role == CK_BARRIER_TOURNAMENT_WINNER ||
rounds[i][k].role == CK_BARRIER_TOURNAMENT_CHAMPION)
rounds[i][k].opponent = &rounds[i + twokm1][k].flag;
}
}
ck_pr_store_ptr(&barrier->rounds, rounds);
return;
}
unsigned int
ck_barrier_tournament_size(unsigned int nthr)
{
return (ck_internal_log(ck_internal_power_2(nthr)) + 1);
}
void
ck_barrier_tournament(struct ck_barrier_tournament *barrier,
struct ck_barrier_tournament_state *state)
{
struct ck_barrier_tournament_round **rounds = ck_pr_load_ptr(&barrier->rounds);
int round = 1;
if (barrier->size == 1)
return;
for (;; ++round) {
switch (rounds[state->vpid][round].role) {
case CK_BARRIER_TOURNAMENT_BYE:
break;
case CK_BARRIER_TOURNAMENT_CHAMPION:
/*
* The CK_BARRIER_TOURNAMENT_CHAMPION waits until it wins the tournament; it then
* sets the final flag before the wakeup phase of the barrier.
*/
while (ck_pr_load_uint(&rounds[state->vpid][round].flag) != state->sense)
ck_pr_stall();
ck_pr_store_uint(rounds[state->vpid][round].opponent, state->sense);
goto wakeup;
case CK_BARRIER_TOURNAMENT_DROPOUT:
/* NOTREACHED */
break;
case CK_BARRIER_TOURNAMENT_LOSER:
/*
* CK_BARRIER_TOURNAMENT_LOSERs set the flags of their opponents and wait until
* their opponents release them after the tournament is over.
*/
ck_pr_store_uint(rounds[state->vpid][round].opponent, state->sense);
while (ck_pr_load_uint(&rounds[state->vpid][round].flag) != state->sense)
ck_pr_stall();
goto wakeup;
case CK_BARRIER_TOURNAMENT_WINNER:
/*
* CK_BARRIER_TOURNAMENT_WINNERs wait until their current opponent sets their flag; they then
* continue to the next round of the tournament.
*/
while (ck_pr_load_uint(&rounds[state->vpid][round].flag) != state->sense)
ck_pr_stall();
break;
}
}
wakeup:
for (round -= 1 ;; --round) {
switch (rounds[state->vpid][round].role) {
case CK_BARRIER_TOURNAMENT_BYE:
break;
case CK_BARRIER_TOURNAMENT_CHAMPION:
/* NOTREACHED */
break;
case CK_BARRIER_TOURNAMENT_DROPOUT:
goto leave;
break;
case CK_BARRIER_TOURNAMENT_LOSER:
/* NOTREACHED */
break;
case CK_BARRIER_TOURNAMENT_WINNER:
/*
* Winners inform their old opponents the tournament is over
* by setting their flags.
*/
ck_pr_store_uint(rounds[state->vpid][round].opponent, state->sense);
break;
}
}
leave:
ck_pr_fence_memory();
state->sense = ~state->sense;
return;
}
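
A usage sketch (not part of the upstream file), assuming the _t typedefs from ck_barrier.h. Every thread owns one row of ck_barrier_tournament_size(nthr) round slots, handed to the initializer as an array of row pointers.

#include <ck_barrier.h>
#include <stdlib.h>

#define N_THREADS 16

static ck_barrier_tournament_t barrier;
static ck_barrier_tournament_round_t *rounds[N_THREADS];

void
setup(void)
{
	unsigned int i, size = ck_barrier_tournament_size(N_THREADS);

	for (i = 0; i < N_THREADS; i++)
		rounds[i] = calloc(size, sizeof(ck_barrier_tournament_round_t));

	ck_barrier_tournament_init(&barrier, rounds, N_THREADS);
}

void
worker(void)
{
	ck_barrier_tournament_state_t state;

	ck_barrier_tournament_subscribe(&barrier, &state);
	for (;;) {
		/* ... phase of work ... */
		ck_barrier_tournament(&barrier, &state);
	}
}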

545
src/ck_epoch.c Normal file

@@ -0,0 +1,545 @@
/*
* Copyright 2011-2015 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* The implementation here is inspired from the work described in:
* Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
* of Cambridge Computing Laboratory.
*/
#include <ck_backoff.h>
#include <ck_cc.h>
#include <ck_epoch.h>
#include <ck_pr.h>
#include <ck_stack.h>
#include <ck_stdbool.h>
#include <ck_string.h>
/*
* Only three distinct values are used for reclamation, but reclamation occurs
* at e+2 rather than e+1. Any thread in a "critical section" would have
* acquired some snapshot (e) of the global epoch value (e_g) and set an active
* flag. Any hazardous references will only occur after a full memory barrier.
* For example, assume an initial e_g value of 1, e value of 0 and active value
* of 0.
*
* ck_epoch_begin(...)
* e = e_g
* active = 1
* memory_barrier();
*
* Any serialized reads may observe e = 0 or e = 1 with active = 0, or e = 0 or
* e = 1 with active = 1. The e_g value can only go from 1 to 2 if every thread
* has already observed the value of "1" (or the value we are incrementing
* from). This guarantees us that for any given value e_g, any threads within
* critical sections (referred to as "active" threads from here on) would have
* an e value of e_g-1 or e_g. This also means that hazardous references may be
* shared in both e_g-1 and e_g even if they are logically deleted in e_g.
*
* For example, assume all threads have an e value of e_g. Another thread may
* increment e_g to e_g+1. Older threads may have a reference to an object
* which is only deleted in e_g+1. It could be that reader threads are
* executing some hash table look-ups, while some other writer thread (which
* causes epoch counter tick) actually deletes the same items that reader
* threads are looking up (this writer thread having an e value of e_g+1).
* This is possible if the writer thread re-observes the epoch after the
* counter tick.
*
* Pseudo-code for writer:
* ck_epoch_begin()
* ht_delete(x)
* ck_epoch_end()
* ck_epoch_begin()
* ht_delete(x)
* ck_epoch_end()
*
* Pseudo-code for reader:
* for (;;) {
* x = ht_lookup(x)
* ck_pr_inc(&x->value);
* }
*
* Of course, it is also possible for references logically deleted at e_g-1 to
* still be accessed at e_g as threads are "active" at the same time
* (real-world time) mutating shared objects.
*
* Now, if the epoch counter is ticked to e_g+1, then no new hazardous
* references could exist to objects logically deleted at e_g-1. The reason for
* this is that at e_g+1, all epoch read-side critical sections started at
* e_g-1 must have been completed. If any epoch read-side critical sections at
* e_g-1 were still active, then we would never increment to e_g+1 (active != 0
* ^ e != e_g). Additionally, e_g may still have hazardous references to
* objects logically deleted at e_g-1 which means objects logically deleted at
* e_g-1 cannot be deleted at e_g+1 unless all threads have observed e_g+1
* (since it is valid for active threads to be at e_g and threads at e_g still
* require safe memory accesses).
*
* However, at e_g+2, all active threads must be either at e_g+1 or e_g+2.
* Though e_g+2 may share hazardous references with e_g+1, and e_g+1 shares
* hazardous references to e_g, no active threads are at e_g or e_g-1. This
* means no hazardous references could exist to objects deleted at e_g-1 (at
* e_g+2).
*
* To summarize these important points,
* 1) Active threads will always have a value of e_g or e_g-1.
* 2) Items that are logically deleted e_g or e_g-1 cannot be physically
* deleted.
* 3) Objects logically deleted at e_g-1 can be physically destroyed at e_g+2
* or at e_g+1 if no threads are at e_g.
*
* Last but not least, if we are at e_g+2, then no active thread is at e_g
* which means it is safe to apply modulo-3 arithmetic to e_g value in order to
* re-use e_g to represent the e_g+3 state. This means it is sufficient to
* represent e_g using only the values 0, 1 or 2. Every time a thread re-visits
* a e_g (which can be determined with a non-empty deferral list) it can assume
* objects in the e_g deferral list involved at least three e_g transitions and
* are thus, safe, for physical deletion.
*
* Blocking semantics for epoch reclamation have additional restrictions.
* Though we only require three deferral lists, reasonable blocking semantics
* must be able to more gracefully handle bursty write work-loads which could
* easily cause e_g wrap-around if modulo-3 arithmetic is used. This allows for
* easy-to-trigger live-lock situations. The work-around to this is to not
* apply modulo arithmetic to e_g but only to deferral list indexing.
*/
#define CK_EPOCH_GRACE 3U
enum {
CK_EPOCH_STATE_USED = 0,
CK_EPOCH_STATE_FREE = 1
};
CK_STACK_CONTAINER(struct ck_epoch_record, record_next,
ck_epoch_record_container)
CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
ck_epoch_entry_container)
#define CK_EPOCH_SENSE_MASK (CK_EPOCH_SENSE - 1)
void
_ck_epoch_delref(struct ck_epoch_record *record,
struct ck_epoch_section *section)
{
struct ck_epoch_ref *current, *other;
unsigned int i = section->bucket;
current = &record->local.bucket[i];
current->count--;
if (current->count > 0)
return;
/*
* If the current bucket no longer has any references, then
* determine whether we have already transitioned into a newer
* epoch. If so, then make sure to update our shared snapshot
* to allow for forward progress.
*
* If no other active bucket exists, then the record will go
* inactive in order to allow for forward progress.
*/
other = &record->local.bucket[(i + 1) &
CK_EPOCH_SENSE_MASK];
if (other->count > 0 &&
((int)(current->epoch - other->epoch) < 0)) {
/*
* The other epoch value is actually the newest,
* transition to it.
*/
ck_pr_store_uint(&record->epoch, other->epoch);
}
return;
}
void
_ck_epoch_addref(struct ck_epoch_record *record,
struct ck_epoch_section *section)
{
struct ck_epoch *global = record->global;
struct ck_epoch_ref *ref;
unsigned int epoch, i;
epoch = ck_pr_load_uint(&global->epoch);
i = epoch & CK_EPOCH_SENSE_MASK;
ref = &record->local.bucket[i];
if (ref->count++ == 0) {
#ifndef CK_MD_TSO
struct ck_epoch_ref *previous;
/*
* The system has already ticked. If another non-zero bucket
* exists, make sure to order our observations with respect
* to it. Otherwise, it is possible to acquire a reference
* from the previous epoch generation.
*
* On TSO architectures, the monotonicity of the global counter
* and load-{store, load} ordering are sufficient to guarantee
* this ordering.
*/
previous = &record->local.bucket[(i + 1) &
CK_EPOCH_SENSE_MASK];
if (previous->count > 0)
ck_pr_fence_acqrel();
#endif /* !CK_MD_TSO */
/*
* If this is a new reference into the current
* bucket then cache the associated epoch value.
*/
ref->epoch = epoch;
}
section->bucket = i;
return;
}
void
ck_epoch_init(struct ck_epoch *global)
{
ck_stack_init(&global->records);
global->epoch = 1;
global->n_free = 0;
ck_pr_fence_store();
return;
}
struct ck_epoch_record *
ck_epoch_recycle(struct ck_epoch *global)
{
struct ck_epoch_record *record;
ck_stack_entry_t *cursor;
unsigned int state;
if (ck_pr_load_uint(&global->n_free) == 0)
return NULL;
CK_STACK_FOREACH(&global->records, cursor) {
record = ck_epoch_record_container(cursor);
if (ck_pr_load_uint(&record->state) == CK_EPOCH_STATE_FREE) {
/* Serialize with respect to deferral list clean-up. */
ck_pr_fence_load();
state = ck_pr_fas_uint(&record->state,
CK_EPOCH_STATE_USED);
if (state == CK_EPOCH_STATE_FREE) {
ck_pr_dec_uint(&global->n_free);
return record;
}
}
}
return NULL;
}
void
ck_epoch_register(struct ck_epoch *global, struct ck_epoch_record *record)
{
size_t i;
record->global = global;
record->state = CK_EPOCH_STATE_USED;
record->active = 0;
record->epoch = 0;
record->n_dispatch = 0;
record->n_peak = 0;
record->n_pending = 0;
memset(&record->local, 0, sizeof record->local);
for (i = 0; i < CK_EPOCH_LENGTH; i++)
ck_stack_init(&record->pending[i]);
ck_pr_fence_store();
ck_stack_push_upmc(&global->records, &record->record_next);
return;
}
void
ck_epoch_unregister(struct ck_epoch_record *record)
{
struct ck_epoch *global = record->global;
size_t i;
record->active = 0;
record->epoch = 0;
record->n_dispatch = 0;
record->n_peak = 0;
record->n_pending = 0;
memset(&record->local, 0, sizeof record->local);
for (i = 0; i < CK_EPOCH_LENGTH; i++)
ck_stack_init(&record->pending[i]);
ck_pr_fence_store();
ck_pr_store_uint(&record->state, CK_EPOCH_STATE_FREE);
ck_pr_inc_uint(&global->n_free);
return;
}
static struct ck_epoch_record *
ck_epoch_scan(struct ck_epoch *global,
struct ck_epoch_record *cr,
unsigned int epoch,
bool *af)
{
ck_stack_entry_t *cursor;
if (cr == NULL) {
cursor = CK_STACK_FIRST(&global->records);
*af = false;
} else {
cursor = &cr->record_next;
*af = true;
}
while (cursor != NULL) {
unsigned int state, active;
cr = ck_epoch_record_container(cursor);
state = ck_pr_load_uint(&cr->state);
if (state & CK_EPOCH_STATE_FREE) {
cursor = CK_STACK_NEXT(cursor);
continue;
}
active = ck_pr_load_uint(&cr->active);
*af |= active;
if (active != 0 && ck_pr_load_uint(&cr->epoch) != epoch)
return cr;
cursor = CK_STACK_NEXT(cursor);
}
return NULL;
}
static void
ck_epoch_dispatch(struct ck_epoch_record *record, unsigned int e)
{
unsigned int epoch = e & (CK_EPOCH_LENGTH - 1);
ck_stack_entry_t *head, *next, *cursor;
unsigned int i = 0;
head = CK_STACK_FIRST(&record->pending[epoch]);
ck_stack_init(&record->pending[epoch]);
for (cursor = head; cursor != NULL; cursor = next) {
struct ck_epoch_entry *entry =
ck_epoch_entry_container(cursor);
next = CK_STACK_NEXT(cursor);
entry->function(entry);
i++;
}
if (record->n_pending > record->n_peak)
record->n_peak = record->n_pending;
record->n_dispatch += i;
record->n_pending -= i;
return;
}
/*
* Reclaim all objects associated with a record.
*/
void
ck_epoch_reclaim(struct ck_epoch_record *record)
{
unsigned int epoch;
for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++)
ck_epoch_dispatch(record, epoch);
return;
}
/*
* This function must not be called within a read-side section.
*/
void
ck_epoch_synchronize(struct ck_epoch_record *record)
{
struct ck_epoch *global = record->global;
struct ck_epoch_record *cr;
unsigned int delta, epoch, goal, i;
bool active;
ck_pr_fence_memory();
/*
* The observation of the global epoch must be ordered with respect to
* all prior operations. The re-ordering of loads is permitted given
* monotonicity of the global epoch counter.
*
* If UINT_MAX concurrent mutations were to occur then it is possible
* to encounter an ABA-issue. If this is a concern, consider tuning
* write-side concurrency.
*/
delta = epoch = ck_pr_load_uint(&global->epoch);
goal = epoch + CK_EPOCH_GRACE;
for (i = 0, cr = NULL; i < CK_EPOCH_GRACE - 1; cr = NULL, i++) {
bool r;
/*
* Determine whether all threads have observed the current
* epoch with respect to the updates on invocation.
*/
while (cr = ck_epoch_scan(global, cr, delta, &active),
cr != NULL) {
unsigned int e_d;
ck_pr_stall();
/*
* Another writer may have already observed a grace
* period.
*/
e_d = ck_pr_load_uint(&global->epoch);
if (e_d != delta) {
delta = e_d;
goto reload;
}
}
/*
* If we have observed all threads as inactive, then we assume
* we are at a grace period.
*/
if (active == false)
break;
/*
* Increment current epoch. CAS semantics are used to eliminate
* increment operations for synchronization that occurs for the
* same global epoch value snapshot.
*
* If we can guarantee there will only be one active barrier or
* epoch tick at a given time, then it is sufficient to use an
* increment operation. In a multi-barrier workload, however,
* it is possible to overflow the epoch value if we apply
* modulo-3 arithmetic.
*/
r = ck_pr_cas_uint_value(&global->epoch, delta, delta + 1,
&delta);
/* Order subsequent thread active checks. */
ck_pr_fence_atomic_load();
/*
* If CAS has succeeded, then set delta to latest snapshot.
* Otherwise, we have just acquired latest snapshot.
*/
delta = delta + r;
continue;
reload:
if ((goal > epoch) & (delta >= goal)) {
/*
* Right now, epoch overflow is handled as an edge
* case. If we have already observed an epoch
* generation, then we can be sure no hazardous
* references exist to objects from this generation. We
* can actually avoid an additional scan step at this
* point.
*/
break;
}
}
/*
* A majority of use-cases will not require full barrier semantics.
* However, if non-temporal instructions are used, full barrier
* semantics are necessary.
*/
ck_pr_fence_memory();
record->epoch = delta;
return;
}
void
ck_epoch_barrier(struct ck_epoch_record *record)
{
ck_epoch_synchronize(record);
ck_epoch_reclaim(record);
return;
}
/*
* It may be worth it to actually apply these deferral semantics to an epoch
* that was observed at ck_epoch_call time. The problem is that the latter
* would require a full fence.
*
* ck_epoch_call will dispatch to the latest epoch snapshot that was observed.
* There are cases where it will fail to reclaim as early as it could. If this
* becomes a problem, we could actually use a heap for epoch buckets but that
* is far from ideal too.
*/
bool
ck_epoch_poll(struct ck_epoch_record *record)
{
bool active;
unsigned int epoch;
unsigned int snapshot;
struct ck_epoch_record *cr = NULL;
struct ck_epoch *global = record->global;
epoch = ck_pr_load_uint(&global->epoch);
/* Serialize epoch snapshots with respect to global epoch. */
ck_pr_fence_memory();
cr = ck_epoch_scan(global, cr, epoch, &active);
if (cr != NULL) {
record->epoch = epoch;
return false;
}
/* We are at a grace period if all threads are inactive. */
if (active == false) {
record->epoch = epoch;
for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++)
ck_epoch_dispatch(record, epoch);
return true;
}
/* If an active thread exists, rely on epoch observation. */
if (ck_pr_cas_uint_value(&global->epoch, epoch, epoch + 1,
&snapshot) == false) {
record->epoch = snapshot;
} else {
record->epoch = epoch + 1;
}
ck_epoch_dispatch(record, epoch + 1);
return true;
}
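
A usage sketch of the read/reclaim cycle (not part of the upstream file). The record/section-based ck_epoch_begin/ck_epoch_end and the ck_epoch_call signature are assumed from ck_epoch.h of this release; struct node and its helpers are hypothetical.

#include <ck_epoch.h>
#include <stdlib.h>

struct node {
	ck_epoch_entry_t epoch_entry;	/* first member: entry address == node address */
	int value;
};

static ck_epoch_t epoch;

static void
node_destroy(ck_epoch_entry_t *e)
{
	free(e);	/* safe because epoch_entry is the first member */
}

void
setup(void)
{
	ck_epoch_init(&epoch);
}

/* Each thread registers a record once (or recycles a free one). */
void
thread_setup(ck_epoch_record_t *record)
{
	ck_epoch_register(&epoch, record);
}

/* Reader-side critical section. */
void
reader(ck_epoch_record_t *record)
{
	ck_epoch_begin(record, NULL);
	/* ... dereference shared objects safely here ... */
	ck_epoch_end(record, NULL);
}

/* Writer: after unlinking n from the shared structure, defer destruction. */
void
writer(ck_epoch_record_t *record, struct node *n)
{
	ck_epoch_call(record, &n->epoch_entry, node_destroy);
	ck_epoch_barrier(record);	/* wait for a grace period, then reclaim */
}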

323
src/ck_hp.c Normal file

@@ -0,0 +1,323 @@
/*
* Copyright 2010-2015 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* (c) Copyright 2008, IBM Corporation.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* This is an implementation of hazard pointers as detailed in:
* http://www.research.ibm.com/people/m/michael/ieeetpds-2004.pdf
*
* This API provides a publishing mechanism that defers destruction of
* hazard pointers until it is safe to do so. Preventing arbitrary re-use
* protects against the ABA problem and provides safe memory reclamation.
* The implementation was derived from the Hazard Pointers implementation
* from the Amino CBBS project. It has been heavily modified for Concurrency
* Kit.
*/
#include <ck_backoff.h>
#include <ck_cc.h>
#include <ck_hp.h>
#include <ck_pr.h>
#include <ck_stack.h>
#include <ck_stdbool.h>
#include <ck_stddef.h>
#include <ck_stdlib.h>
#include <ck_string.h>
CK_STACK_CONTAINER(struct ck_hp_record, global_entry, ck_hp_record_container)
CK_STACK_CONTAINER(struct ck_hp_hazard, pending_entry, ck_hp_hazard_container)
void
ck_hp_init(struct ck_hp *state,
unsigned int degree,
unsigned int threshold,
ck_hp_destructor_t destroy)
{
state->threshold = threshold;
state->degree = degree;
state->destroy = destroy;
state->n_subscribers = 0;
state->n_free = 0;
ck_stack_init(&state->subscribers);
ck_pr_fence_store();
return;
}
void
ck_hp_set_threshold(struct ck_hp *state, unsigned int threshold)
{
ck_pr_store_uint(&state->threshold, threshold);
return;
}
struct ck_hp_record *
ck_hp_recycle(struct ck_hp *global)
{
struct ck_hp_record *record;
ck_stack_entry_t *entry;
int state;
if (ck_pr_load_uint(&global->n_free) == 0)
return NULL;
CK_STACK_FOREACH(&global->subscribers, entry) {
record = ck_hp_record_container(entry);
if (ck_pr_load_int(&record->state) == CK_HP_FREE) {
ck_pr_fence_load();
state = ck_pr_fas_int(&record->state, CK_HP_USED);
if (state == CK_HP_FREE) {
ck_pr_dec_uint(&global->n_free);
return record;
}
}
}
return NULL;
}
void
ck_hp_unregister(struct ck_hp_record *entry)
{
entry->n_pending = 0;
entry->n_peak = 0;
entry->n_reclamations = 0;
ck_stack_init(&entry->pending);
ck_pr_fence_store();
ck_pr_store_int(&entry->state, CK_HP_FREE);
ck_pr_inc_uint(&entry->global->n_free);
return;
}
void
ck_hp_register(struct ck_hp *state,
struct ck_hp_record *entry,
void **pointers)
{
entry->state = CK_HP_USED;
entry->global = state;
entry->pointers = pointers;
entry->n_pending = 0;
entry->n_peak = 0;
entry->n_reclamations = 0;
memset(pointers, 0, state->degree * sizeof(void *));
ck_stack_init(&entry->pending);
ck_pr_fence_store();
ck_stack_push_upmc(&state->subscribers, &entry->global_entry);
ck_pr_inc_uint(&state->n_subscribers);
return;
}
static int
hazard_compare(const void *a, const void *b)
{
void * const *x;
void * const *y;
x = a;
y = b;
return ((*x > *y) - (*x < *y));
}
CK_CC_INLINE static bool
ck_hp_member_scan(ck_stack_entry_t *entry, unsigned int degree, void *pointer)
{
struct ck_hp_record *record;
unsigned int i;
void *hazard;
do {
record = ck_hp_record_container(entry);
if (ck_pr_load_int(&record->state) == CK_HP_FREE)
continue;
if (ck_pr_load_ptr(&record->pointers) == NULL)
continue;
for (i = 0; i < degree; i++) {
hazard = ck_pr_load_ptr(&record->pointers[i]);
if (hazard == pointer)
return (true);
}
} while ((entry = CK_STACK_NEXT(entry)) != NULL);
return (false);
}
CK_CC_INLINE static void *
ck_hp_member_cache(struct ck_hp *global, void **cache, unsigned int *n_hazards)
{
struct ck_hp_record *record;
ck_stack_entry_t *entry;
unsigned int hazards = 0;
unsigned int i;
void *pointer;
CK_STACK_FOREACH(&global->subscribers, entry) {
record = ck_hp_record_container(entry);
if (ck_pr_load_int(&record->state) == CK_HP_FREE)
continue;
if (ck_pr_load_ptr(&record->pointers) == NULL)
continue;
for (i = 0; i < global->degree; i++) {
if (hazards > CK_HP_CACHE)
break;
pointer = ck_pr_load_ptr(&record->pointers[i]);
if (pointer != NULL)
cache[hazards++] = pointer;
}
}
*n_hazards = hazards;
return (entry);
}
void
ck_hp_reclaim(struct ck_hp_record *thread)
{
struct ck_hp_hazard *hazard;
struct ck_hp *global = thread->global;
unsigned int n_hazards;
void **cache, *marker, *match;
ck_stack_entry_t *previous, *entry, *next;
/* Store as many entries as possible in local array. */
cache = thread->cache;
marker = ck_hp_member_cache(global, cache, &n_hazards);
/*
* In theory, there is an n such that (n * (log n) ** 2) < np.
*/
qsort(cache, n_hazards, sizeof(void *), hazard_compare);
previous = NULL;
CK_STACK_FOREACH_SAFE(&thread->pending, entry, next) {
hazard = ck_hp_hazard_container(entry);
match = bsearch(&hazard->pointer, cache, n_hazards,
sizeof(void *), hazard_compare);
if (match != NULL) {
previous = entry;
continue;
}
if (marker != NULL &&
ck_hp_member_scan(marker, global->degree, hazard->pointer)) {
previous = entry;
continue;
}
thread->n_pending -= 1;
/* Remove from the pending stack. */
if (previous)
CK_STACK_NEXT(previous) = CK_STACK_NEXT(entry);
else
CK_STACK_FIRST(&thread->pending) = CK_STACK_NEXT(entry);
/* The entry is now safe to destroy. */
global->destroy(hazard->data);
thread->n_reclamations++;
}
return;
}
void
ck_hp_retire(struct ck_hp_record *thread,
struct ck_hp_hazard *hazard,
void *data,
void *pointer)
{
ck_pr_store_ptr(&hazard->pointer, pointer);
ck_pr_store_ptr(&hazard->data, data);
ck_stack_push_spnc(&thread->pending, &hazard->pending_entry);
thread->n_pending += 1;
if (thread->n_pending > thread->n_peak)
thread->n_peak = thread->n_pending;
return;
}
void
ck_hp_free(struct ck_hp_record *thread,
struct ck_hp_hazard *hazard,
void *data,
void *pointer)
{
struct ck_hp *global;
global = ck_pr_load_ptr(&thread->global);
ck_pr_store_ptr(&hazard->data, data);
ck_pr_store_ptr(&hazard->pointer, pointer);
ck_stack_push_spnc(&thread->pending, &hazard->pending_entry);
thread->n_pending += 1;
if (thread->n_pending > thread->n_peak)
thread->n_peak = thread->n_pending;
if (thread->n_pending >= global->threshold)
ck_hp_reclaim(thread);
return;
}
void
ck_hp_purge(struct ck_hp_record *thread)
{
ck_backoff_t backoff = CK_BACKOFF_INITIALIZER;
while (thread->n_pending > 0) {
ck_hp_reclaim(thread);
if (thread->n_pending > 0)
ck_backoff_eb(&backoff);
}
return;
}
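
A usage sketch of the acquire/retire cycle (not part of the upstream file), assuming the _t typedefs and the ck_hp_set() helper from ck_hp.h; struct node, the slot layout, and the helper names are hypothetical.

#include <ck_hp.h>
#include <ck_pr.h>
#include <stdlib.h>

#define DEGREE	  2	/* hazard-pointer slots per thread */
#define THRESHOLD 64	/* pending retirements before a reclamation scan */

struct node {
	int value;
	ck_hp_hazard_t hazard;	/* per-retirement bookkeeping lives in the object */
};

static ck_hp_t hp;

static void
node_destroy(void *p)
{
	free(p);
}

void
setup(void)
{
	ck_hp_init(&hp, DEGREE, THRESHOLD, node_destroy);
}

/* Each thread owns a record and a DEGREE-sized pointer array. */
void
thread_setup(ck_hp_record_t *record, void **slots)
{
	ck_hp_register(&hp, record, slots);
}

/* Reader: publish a hazard pointer, then re-check the source before use. */
struct node *
protect(ck_hp_record_t *record, struct node **shared)
{
	struct node *n;

	do {
		n = ck_pr_load_ptr(shared);
		ck_hp_set(record, 0, n);
		ck_pr_fence_memory();	/* order the publication against the re-check */
	} while (n != ck_pr_load_ptr(shared));

	return n;
}

/* Writer: retire an already-unlinked node; ck_hp_free defers destruction
 * until no published hazard pointer references it. */
void
retire(ck_hp_record_t *record, struct node *n)
{
	ck_hp_free(record, &n->hazard, n, n);
}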

941
src/ck_hs.c Normal file

@@ -0,0 +1,941 @@
/*
* Copyright 2012-2015 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <ck_cc.h>
#include <ck_hs.h>
#include <ck_limits.h>
#include <ck_md.h>
#include <ck_pr.h>
#include <ck_stdint.h>
#include <ck_stdbool.h>
#include <ck_string.h>
#include "ck_internal.h"
#ifndef CK_HS_PROBE_L1_SHIFT
#define CK_HS_PROBE_L1_SHIFT 3ULL
#endif /* CK_HS_PROBE_L1_SHIFT */
#define CK_HS_PROBE_L1 (1 << CK_HS_PROBE_L1_SHIFT)
#define CK_HS_PROBE_L1_MASK (CK_HS_PROBE_L1 - 1)
#ifndef CK_HS_PROBE_L1_DEFAULT
#define CK_HS_PROBE_L1_DEFAULT CK_MD_CACHELINE
#endif
#define CK_HS_VMA_MASK ((uintptr_t)((1ULL << CK_MD_VMA_BITS) - 1))
#define CK_HS_VMA(x) \
((void *)((uintptr_t)(x) & CK_HS_VMA_MASK))
#define CK_HS_EMPTY NULL
#define CK_HS_TOMBSTONE ((void *)~(uintptr_t)0)
#define CK_HS_G (2)
#define CK_HS_G_MASK (CK_HS_G - 1)
#if defined(CK_F_PR_LOAD_8) && defined(CK_F_PR_STORE_8)
#define CK_HS_WORD uint8_t
#define CK_HS_WORD_MAX UINT8_MAX
#define CK_HS_STORE(x, y) ck_pr_store_8(x, y)
#define CK_HS_LOAD(x) ck_pr_load_8(x)
#elif defined(CK_F_PR_LOAD_16) && defined(CK_F_PR_STORE_16)
#define CK_HS_WORD uint16_t
#define CK_HS_WORD_MAX UINT16_MAX
#define CK_HS_STORE(x, y) ck_pr_store_16(x, y)
#define CK_HS_LOAD(x) ck_pr_load_16(x)
#elif defined(CK_F_PR_LOAD_32) && defined(CK_F_PR_STORE_32)
#define CK_HS_WORD uint32_t
#define CK_HS_WORD_MAX UINT32_MAX
#define CK_HS_STORE(x, y) ck_pr_store_32(x, y)
#define CK_HS_LOAD(x) ck_pr_load_32(x)
#else
#error "ck_hs is not supported on your platform."
#endif
enum ck_hs_probe_behavior {
CK_HS_PROBE = 0, /* Default behavior. */
CK_HS_PROBE_TOMBSTONE, /* Short-circuit on tombstone. */
CK_HS_PROBE_INSERT /* Short-circuit on probe bound if tombstone found. */
};
struct ck_hs_map {
unsigned int generation[CK_HS_G];
unsigned int probe_maximum;
unsigned long mask;
unsigned long step;
unsigned int probe_limit;
unsigned int tombstones;
unsigned long n_entries;
unsigned long capacity;
unsigned long size;
CK_HS_WORD *probe_bound;
const void **entries;
};
static inline void
ck_hs_map_signal(struct ck_hs_map *map, unsigned long h)
{
h &= CK_HS_G_MASK;
ck_pr_store_uint(&map->generation[h],
map->generation[h] + 1);
ck_pr_fence_store();
return;
}
void
ck_hs_iterator_init(struct ck_hs_iterator *iterator)
{
iterator->cursor = NULL;
iterator->offset = 0;
return;
}
bool
ck_hs_next(struct ck_hs *hs, struct ck_hs_iterator *i, void **key)
{
struct ck_hs_map *map = hs->map;
void *value;
if (i->offset >= map->capacity)
return false;
do {
value = CK_CC_DECONST_PTR(map->entries[i->offset]);
if (value != CK_HS_EMPTY && value != CK_HS_TOMBSTONE) {
#ifdef CK_HS_PP
if (hs->mode & CK_HS_MODE_OBJECT)
value = CK_HS_VMA(value);
#endif
i->offset++;
*key = value;
return true;
}
} while (++i->offset < map->capacity);
return false;
}
void
ck_hs_stat(struct ck_hs *hs, struct ck_hs_stat *st)
{
struct ck_hs_map *map = hs->map;
st->n_entries = map->n_entries;
st->tombstones = map->tombstones;
st->probe_maximum = map->probe_maximum;
return;
}
unsigned long
ck_hs_count(struct ck_hs *hs)
{
return hs->map->n_entries;
}
static void
ck_hs_map_destroy(struct ck_malloc *m, struct ck_hs_map *map, bool defer)
{
m->free(map, map->size, defer);
return;
}
void
ck_hs_destroy(struct ck_hs *hs)
{
ck_hs_map_destroy(hs->m, hs->map, false);
return;
}
static struct ck_hs_map *
ck_hs_map_create(struct ck_hs *hs, unsigned long entries)
{
struct ck_hs_map *map;
unsigned long size, n_entries, prefix, limit;
n_entries = ck_internal_power_2(entries);
if (n_entries < CK_HS_PROBE_L1)
n_entries = CK_HS_PROBE_L1;
size = sizeof(struct ck_hs_map) + (sizeof(void *) * n_entries + CK_MD_CACHELINE - 1);
if (hs->mode & CK_HS_MODE_DELETE) {
prefix = sizeof(CK_HS_WORD) * n_entries;
size += prefix;
} else {
prefix = 0;
}
map = hs->m->malloc(size);
if (map == NULL)
return NULL;
map->size = size;
/* We should probably use a more intelligent heuristic for default probe length. */
limit = ck_internal_max(n_entries >> (CK_HS_PROBE_L1_SHIFT + 2), CK_HS_PROBE_L1_DEFAULT);
if (limit > UINT_MAX)
limit = UINT_MAX;
map->probe_limit = (unsigned int)limit;
map->probe_maximum = 0;
map->capacity = n_entries;
map->step = ck_internal_bsf(n_entries);
map->mask = n_entries - 1;
map->n_entries = 0;
/* Align map allocation to cache line. */
map->entries = (void *)(((uintptr_t)&map[1] + prefix +
CK_MD_CACHELINE - 1) & ~(CK_MD_CACHELINE - 1));
memset(map->entries, 0, sizeof(void *) * n_entries);
memset(map->generation, 0, sizeof map->generation);
if (hs->mode & CK_HS_MODE_DELETE) {
map->probe_bound = (CK_HS_WORD *)&map[1];
memset(map->probe_bound, 0, prefix);
} else {
map->probe_bound = NULL;
}
/* Ensure the zeroed entries are visible before the map is published. */
ck_pr_fence_store();
return map;
}
bool
ck_hs_reset_size(struct ck_hs *hs, unsigned long capacity)
{
struct ck_hs_map *map, *previous;
previous = hs->map;
map = ck_hs_map_create(hs, capacity);
if (map == NULL)
return false;
ck_pr_store_ptr(&hs->map, map);
ck_hs_map_destroy(hs->m, previous, true);
return true;
}
bool
ck_hs_reset(struct ck_hs *hs)
{
struct ck_hs_map *previous;
previous = hs->map;
return ck_hs_reset_size(hs, previous->capacity);
}
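/*
 * Compute the offset of the next probe group. Probing examines groups of
 * CK_HS_PROBE_L1 slots starting at a cache-line-aligned bucket; the stride
 * between groups is derived from the hash bits not consumed by the initial
 * table index, spreading colliding keys across the table.
 */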
static inline unsigned long
ck_hs_map_probe_next(struct ck_hs_map *map,
unsigned long offset,
unsigned long h,
unsigned long level,
unsigned long probes)
{
unsigned long r, stride;
r = (h >> map->step) >> level;
stride = (r & ~CK_HS_PROBE_L1_MASK) << 1 | (r & CK_HS_PROBE_L1_MASK);
return (offset + (probes >> CK_HS_PROBE_L1_SHIFT) +
(stride | CK_HS_PROBE_L1)) & map->mask;
}
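/*
 * Record an upper bound on the number of probes needed to find an entry
 * hashing to h. The global probe_maximum always tracks the worst case; when
 * the set is initialized with CK_HS_MODE_DELETE a per-slot bound is kept as
 * well, allowing unsuccessful probes to terminate earlier.
 */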
static inline void
ck_hs_map_bound_set(struct ck_hs_map *m,
unsigned long h,
unsigned long n_probes)
{
unsigned long offset = h & m->mask;
if (n_probes > m->probe_maximum)
ck_pr_store_uint(&m->probe_maximum, n_probes);
if (m->probe_bound != NULL && m->probe_bound[offset] < n_probes) {
if (n_probes > CK_HS_WORD_MAX)
n_probes = CK_HS_WORD_MAX;
CK_HS_STORE(&m->probe_bound[offset], n_probes);
ck_pr_fence_store();
}
return;
}
static inline unsigned int
ck_hs_map_bound_get(struct ck_hs_map *m, unsigned long h)
{
unsigned long offset = h & m->mask;
unsigned int r = CK_HS_WORD_MAX;
if (m->probe_bound != NULL) {
r = CK_HS_LOAD(&m->probe_bound[offset]);
if (r == CK_HS_WORD_MAX)
r = ck_pr_load_uint(&m->probe_maximum);
} else {
r = ck_pr_load_uint(&m->probe_maximum);
}
return r;
}
bool
ck_hs_grow(struct ck_hs *hs,
unsigned long capacity)
{
struct ck_hs_map *map, *update;
unsigned long k, i, j, offset, probes;
const void *previous, **bucket;
restart:
map = hs->map;
if (map->capacity > capacity)
return false;
update = ck_hs_map_create(hs, capacity);
if (update == NULL)
return false;
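/* Rehash every live entry of the current map into the larger map. */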
for (k = 0; k < map->capacity; k++) {
unsigned long h;
previous = map->entries[k];
if (previous == CK_HS_EMPTY || previous == CK_HS_TOMBSTONE)
continue;
#ifdef CK_HS_PP
if (hs->mode & CK_HS_MODE_OBJECT)
previous = CK_HS_VMA(previous);
#endif
h = hs->hf(previous, hs->seed);
offset = h & update->mask;
i = probes = 0;
for (;;) {
bucket = (const void **)((uintptr_t)&update->entries[offset] & ~(CK_MD_CACHELINE - 1));
for (j = 0; j < CK_HS_PROBE_L1; j++) {
const void **cursor = bucket + ((j + offset) & (CK_HS_PROBE_L1 - 1));
if (probes++ == update->probe_limit)
break;
if (CK_CC_LIKELY(*cursor == CK_HS_EMPTY)) {
*cursor = map->entries[k];
update->n_entries++;
ck_hs_map_bound_set(update, h, probes);
break;
}
}
if (j < CK_HS_PROBE_L1)
break;
offset = ck_hs_map_probe_next(update, offset, h, i++, probes);
}
if (probes > update->probe_limit) {
/*
* We have hit the probe limit; the map needs to be even larger.
*/
ck_hs_map_destroy(hs->m, update, false);
capacity <<= 1;
goto restart;
}
}
ck_pr_fence_store();
ck_pr_store_ptr(&hs->map, update);
ck_hs_map_destroy(hs->m, map, true);
return true;
}
static void
ck_hs_map_postinsert(struct ck_hs *hs, struct ck_hs_map *map)
{
map->n_entries++;
if ((map->n_entries << 1) > map->capacity)
ck_hs_grow(hs, map->capacity << 1);
return;
}
bool
ck_hs_rebuild(struct ck_hs *hs)
{
return ck_hs_grow(hs, hs->map->capacity);
}
static const void **
ck_hs_map_probe(struct ck_hs *hs,
struct ck_hs_map *map,
unsigned long *n_probes,
const void ***priority,
unsigned long h,
const void *key,
const void **object,
unsigned long probe_limit,
enum ck_hs_probe_behavior behavior)
{
const void **bucket, **cursor, *k, *compare;
const void **pr = NULL;
unsigned long offset, j, i, probes, opl;
#ifdef CK_HS_PP
/* If we are storing object pointers, then we may leverage pointer packing. */
unsigned long hv = 0;
if (hs->mode & CK_HS_MODE_OBJECT) {
hv = (h >> 25) & CK_HS_KEY_MASK;
compare = CK_HS_VMA(key);
} else {
compare = key;
}
#else
compare = key;
#endif
offset = h & map->mask;
*object = NULL;
i = probes = 0;
opl = probe_limit;
if (behavior == CK_HS_PROBE_INSERT)
probe_limit = ck_hs_map_bound_get(map, h);
for (;;) {
bucket = (const void **)((uintptr_t)&map->entries[offset] & ~(CK_MD_CACHELINE - 1));
for (j = 0; j < CK_HS_PROBE_L1; j++) {
cursor = bucket + ((j + offset) & (CK_HS_PROBE_L1 - 1));
if (probes++ == probe_limit) {
if (probe_limit == opl || pr != NULL) {
k = CK_HS_EMPTY;
goto leave;
}
/*
* If no eligible slot has been found yet, continue probe
* sequence with original probe limit.
*/
probe_limit = opl;
}
k = ck_pr_load_ptr(cursor);
if (k == CK_HS_EMPTY)
goto leave;
if (k == CK_HS_TOMBSTONE) {
if (pr == NULL) {
pr = cursor;
*n_probes = probes;
if (behavior == CK_HS_PROBE_TOMBSTONE) {
k = CK_HS_EMPTY;
goto leave;
}
}
continue;
}
#ifdef CK_HS_PP
if (hs->mode & CK_HS_MODE_OBJECT) {
if (((uintptr_t)k >> CK_MD_VMA_BITS) != hv)
continue;
k = CK_HS_VMA(k);
}
#endif
if (k == compare)
goto leave;
if (hs->compare == NULL)
continue;
if (hs->compare(k, key) == true)
goto leave;
}
offset = ck_hs_map_probe_next(map, offset, h, i++, probes);
}
leave:
if (probes > probe_limit) {
cursor = NULL;
} else {
*object = k;
}
if (pr == NULL)
*n_probes = probes;
*priority = pr;
return cursor;
}
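/*
 * With pointer packing (CK_HS_PP and CK_HS_MODE_OBJECT), fold the upper hash
 * bits into the otherwise unused upper bits of the object pointer so that
 * probes can reject mismatching entries without invoking the comparator.
 */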
static inline const void *
ck_hs_marshal(unsigned int mode, const void *key, unsigned long h)
{
#ifdef CK_HS_PP
const void *insert;
if (mode & CK_HS_MODE_OBJECT) {
insert = (void *)((uintptr_t)CK_HS_VMA(key) |
((h >> 25) << CK_MD_VMA_BITS));
} else {
insert = key;
}
return insert;
#else
(void)mode;
(void)h;
return key;
#endif
}
bool
ck_hs_gc(struct ck_hs *hs, unsigned long cycles, unsigned long seed)
{
unsigned long size = 0;
unsigned long i;
struct ck_hs_map *map = hs->map;
unsigned int maximum;
CK_HS_WORD *bounds = NULL;
if (map->n_entries == 0) {
ck_pr_store_uint(&map->probe_maximum, 0);
if (map->probe_bound != NULL)
memset(map->probe_bound, 0, sizeof(CK_HS_WORD) * map->capacity);
return true;
}
if (cycles == 0) {
maximum = 0;
if (map->probe_bound != NULL) {
size = sizeof(CK_HS_WORD) * map->capacity;
bounds = hs->m->malloc(size);
if (bounds == NULL)
return false;
memset(bounds, 0, size);
}
} else {
maximum = map->probe_maximum;
}
for (i = 0; i < map->capacity; i++) {
const void **first, *object, **slot, *entry;
unsigned long n_probes, offset, h;
entry = map->entries[(i + seed) & map->mask];
if (entry == CK_HS_EMPTY || entry == CK_HS_TOMBSTONE)
continue;
#ifdef CK_HS_PP
if (hs->mode & CK_HS_MODE_OBJECT)
entry = CK_HS_VMA(entry);
#endif
h = hs->hf(entry, hs->seed);
offset = h & map->mask;
slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, entry, &object,
ck_hs_map_bound_get(map, h), CK_HS_PROBE);
if (first != NULL) {
const void *insert = ck_hs_marshal(hs->mode, entry, h);
ck_pr_store_ptr(first, insert);
ck_hs_map_signal(map, h);
ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
}
if (cycles == 0) {
if (n_probes > maximum)
maximum = n_probes;
if (n_probes > CK_HS_WORD_MAX)
n_probes = CK_HS_WORD_MAX;
if (bounds != NULL && n_probes > bounds[offset])
bounds[offset] = n_probes;
} else if (--cycles == 0)
break;
}
/*
* The following adjustments only apply to a garbage collection pass
* that scanned all entries (cycles == 0).
*/
if (maximum != map->probe_maximum)
ck_pr_store_uint(&map->probe_maximum, maximum);
if (bounds != NULL) {
for (i = 0; i < map->capacity; i++)
CK_HS_STORE(&map->probe_bound[i], bounds[i]);
hs->m->free(bounds, size, false);
}
return true;
}
bool
ck_hs_fas(struct ck_hs *hs,
unsigned long h,
const void *key,
void **previous)
{
const void **slot, **first, *object, *insert;
struct ck_hs_map *map = hs->map;
unsigned long n_probes;
*previous = NULL;
slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object,
ck_hs_map_bound_get(map, h), CK_HS_PROBE);
/* Replacement semantics presume existence. */
if (object == NULL)
return false;
insert = ck_hs_marshal(hs->mode, key, h);
if (first != NULL) {
ck_pr_store_ptr(first, insert);
ck_hs_map_signal(map, h);
ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
} else {
ck_pr_store_ptr(slot, insert);
}
*previous = CK_CC_DECONST_PTR(object);
return true;
}
/*
* An apply function takes two arguments. The first argument is a pointer to
* the pre-existing object, or NULL if no object is currently associated with
* the key. The second argument is a pointer to the fifth argument passed to
* ck_hs_apply. If the first argument is non-NULL and the apply function
* returns NULL, the pre-existing value is deleted. If the returned pointer is
* the same as the one passed to the apply function, no changes are made to
* the hash set. If the first argument is non-NULL and the returned pointer
* differs from it, the pre-existing value is replaced; for replacement, the
* new value must represent the same key as the previous value (it must hash
* and compare as equal). If the first argument is NULL and a non-NULL pointer
* is returned, that pointer is inserted. A usage sketch follows the function
* body below.
*/
bool
ck_hs_apply(struct ck_hs *hs,
unsigned long h,
const void *key,
ck_hs_apply_fn_t *fn,
void *cl)
{
const void **slot, **first, *object, *delta, *insert;
unsigned long n_probes;
struct ck_hs_map *map;
restart:
map = hs->map;
slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, map->probe_limit, CK_HS_PROBE_INSERT);
if (slot == NULL && first == NULL) {
if (ck_hs_grow(hs, map->capacity << 1) == false)
return false;
goto restart;
}
delta = fn(CK_CC_DECONST_PTR(object), cl);
if (delta == NULL) {
/*
* The apply function has requested deletion. If the object doesn't exist,
* then exit early.
*/
if (CK_CC_UNLIKELY(object == NULL))
return true;
/* Otherwise, mark slot as deleted. */
ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
map->n_entries--;
map->tombstones++;
return true;
}
/* The apply function has not requested hash set modification so exit early. */
if (delta == object)
return true;
/* A modification or insertion has been requested. */
ck_hs_map_bound_set(map, h, n_probes);
insert = ck_hs_marshal(hs->mode, delta, h);
if (first != NULL) {
/*
* This follows the same semantics as ck_hs_set; please refer to that
* function for documentation.
*/
ck_pr_store_ptr(first, insert);
if (object != NULL) {
ck_hs_map_signal(map, h);
ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
}
} else {
/*
* If we are storing into the same slot, then an atomic store is
* sufficient for replacement.
*/
ck_pr_store_ptr(slot, insert);
}
if (object == NULL)
ck_hs_map_postinsert(hs, map);
return true;
}
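/*
 * Illustrative sketch only, not part of the original source: a minimal apply
 * callback following the protocol described above. The identifiers
 * example_apply, set, h and object are hypothetical, and the callback
 * signature mirrors the way fn is invoked in ck_hs_apply.
 */
#if 0
static void *
example_apply(void *existing, void *closure)
{

	/* A NULL closure requests deletion of whatever is currently stored. */
	if (closure == NULL)
		return NULL;

	/* Leave a pre-existing object untouched. */
	if (existing != NULL)
		return existing;

	/* Nothing was present: insert the object passed through the closure. */
	return closure;
}

/* Caller side: ck_hs_apply(&set, h, object, example_apply, object); */
#endif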
bool
ck_hs_set(struct ck_hs *hs,
unsigned long h,
const void *key,
void **previous)
{
const void **slot, **first, *object, *insert;
unsigned long n_probes;
struct ck_hs_map *map;
*previous = NULL;
restart:
map = hs->map;
slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, map->probe_limit, CK_HS_PROBE_INSERT);
if (slot == NULL && first == NULL) {
if (ck_hs_grow(hs, map->capacity << 1) == false)
return false;
goto restart;
}
ck_hs_map_bound_set(map, h, n_probes);
insert = ck_hs_marshal(hs->mode, key, h);
if (first != NULL) {
/* If an earlier bucket was found, then store entry there. */
ck_pr_store_ptr(first, insert);
/*
* If a duplicate key was found, then delete it after
* signaling concurrent probes to restart. Optionally, it
* is possible to install the tombstone only after a grace
* period, provided the duplicate is known to occupy an
* earlier position in the probe sequence.
*/
if (object != NULL) {
ck_hs_map_signal(map, h);
ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
}
} else {
/*
* If we are storing into the same slot, then an atomic store is
* sufficient for replacement.
*/
ck_pr_store_ptr(slot, insert);
}
if (object == NULL)
ck_hs_map_postinsert(hs, map);
*previous = CK_CC_DECONST_PTR(object);
return true;
}
CK_CC_INLINE static bool
ck_hs_put_internal(struct ck_hs *hs,
unsigned long h,
const void *key,
enum ck_hs_probe_behavior behavior)
{
const void **slot, **first, *object, *insert;
unsigned long n_probes;
struct ck_hs_map *map;
restart:
map = hs->map;
slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object,
map->probe_limit, behavior);
if (slot == NULL && first == NULL) {
if (ck_hs_grow(hs, map->capacity << 1) == false)
return false;
goto restart;
}
/* Fail operation if a match was found. */
if (object != NULL)
return false;
ck_hs_map_bound_set(map, h, n_probes);
insert = ck_hs_marshal(hs->mode, key, h);
if (first != NULL) {
/* Insert key into first bucket in probe sequence. */
ck_pr_store_ptr(first, insert);
} else {
/* An empty slot was found. */
ck_pr_store_ptr(slot, insert);
}
ck_hs_map_postinsert(hs, map);
return true;
}
bool
ck_hs_put(struct ck_hs *hs,
unsigned long h,
const void *key)
{
return ck_hs_put_internal(hs, h, key, CK_HS_PROBE_INSERT);
}
bool
ck_hs_put_unique(struct ck_hs *hs,
unsigned long h,
const void *key)
{
return ck_hs_put_internal(hs, h, key, CK_HS_PROBE_TOMBSTONE);
}
void *
ck_hs_get(struct ck_hs *hs,
unsigned long h,
const void *key)
{
const void **first, *object;
struct ck_hs_map *map;
unsigned long n_probes;
unsigned int g, g_p, probe;
unsigned int *generation;
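/*
 * Snapshot the generation counter for this hash before probing and re-read
 * it afterwards. If a writer signalled a relocation or deletion in the
 * meantime (ck_hs_map_signal), the values differ and the probe is retried.
 */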
do {
map = ck_pr_load_ptr(&hs->map);
generation = &map->generation[h & CK_HS_G_MASK];
g = ck_pr_load_uint(generation);
probe = ck_hs_map_bound_get(map, h);
ck_pr_fence_load();
ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object, probe, CK_HS_PROBE);
ck_pr_fence_load();
g_p = ck_pr_load_uint(generation);
} while (g != g_p);
return CK_CC_DECONST_PTR(object);
}
void *
ck_hs_remove(struct ck_hs *hs,
unsigned long h,
const void *key)
{
const void **slot, **first, *object;
struct ck_hs_map *map = hs->map;
unsigned long n_probes;
slot = ck_hs_map_probe(hs, map, &n_probes, &first, h, key, &object,
ck_hs_map_bound_get(map, h), CK_HS_PROBE);
if (object == NULL)
return NULL;
ck_pr_store_ptr(slot, CK_HS_TOMBSTONE);
map->n_entries--;
map->tombstones++;
return CK_CC_DECONST_PTR(object);
}
bool
ck_hs_move(struct ck_hs *hs,
struct ck_hs *source,
ck_hs_hash_cb_t *hf,
ck_hs_compare_cb_t *compare,
struct ck_malloc *m)
{
if (m == NULL || m->malloc == NULL || m->free == NULL || hf == NULL)
return false;
hs->mode = source->mode;
hs->seed = source->seed;
hs->map = source->map;
hs->m = m;
hs->hf = hf;
hs->compare = compare;
return true;
}
bool
ck_hs_init(struct ck_hs *hs,
unsigned int mode,
ck_hs_hash_cb_t *hf,
ck_hs_compare_cb_t *compare,
struct ck_malloc *m,
unsigned long n_entries,
unsigned long seed)
{
if (m == NULL || m->malloc == NULL || m->free == NULL || hf == NULL)
return false;
hs->m = m;
hs->mode = mode;
hs->seed = seed;
hs->hf = hf;
hs->compare = compare;
hs->map = ck_hs_map_create(hs, n_entries);
return hs->map != NULL;
}
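/*
 * Illustrative sketch only, not part of the original source: initializing a
 * set and performing a put followed by a get. The allocator and hash wrappers
 * and all identifiers below are hypothetical; CK_HS_MODE_SPMC and the public
 * definition of struct ck_hs are assumed from ck_hs.h, while the callback
 * signatures and the struct ck_malloc fields mirror their use in this file.
 */
#if 0
#include <stdlib.h>
#include <string.h>

static void *
hs_malloc(size_t r)
{

	return malloc(r);
}

static void
hs_free(void *p, size_t b, bool r)
{

	(void)b;
	(void)r;
	free(p);
	return;
}

static struct ck_malloc my_allocator = {
	.malloc = hs_malloc,
	.free = hs_free
};

/* Toy string hash for illustration only. */
static unsigned long
hs_hash(const void *object, unsigned long seed)
{
	const unsigned char *c = object;
	unsigned long h = seed;

	while (*c != '\0')
		h = h * 131 + *c++;

	return h;
}

static bool
hs_compare(const void *previous, const void *compare)
{

	return strcmp(previous, compare) == 0;
}

static void
example_usage(void)
{
	struct ck_hs set;
	const char *key = "example";
	unsigned long h;

	if (ck_hs_init(&set, CK_HS_MODE_OBJECT | CK_HS_MODE_SPMC, hs_hash,
	    hs_compare, &my_allocator, 1024, 6602834) == false)
		return;

	/* Hash exactly as the set does internally: hf(key, seed). */
	h = hs_hash(key, set.seed);

	ck_hs_put(&set, h, key);
	if (ck_hs_get(&set, h, key) != NULL) {
		/* The key is present. */
	}

	ck_hs_destroy(&set);
	return;
}
#endif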

1036
src/ck_ht.c Normal file

File diff suppressed because it is too large

269
src/ck_ht_hash.h Normal file
View file

@ -0,0 +1,269 @@
/*
* Copyright 2012-2015 Samy Al Bahra
* Copyright 2011-2014 AppNexus, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef CK_HT_HASH_H
#define CK_HT_HASH_H
/*
* This is the Murmur hash written by Austin Appleby.
*/
#include <ck_stdint.h>
#include <ck_string.h>
//-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
// Note - The x86 and x64 versions do _not_ produce the same results, as the
// algorithms are optimized for their respective platforms. You can still
// compile and run any of them on any platform, but your performance with the
// non-native version will be less than optimal.
//-----------------------------------------------------------------------------
// Platform-specific functions and macros
// Microsoft Visual Studio
#if defined(_MSC_VER)
#define FORCE_INLINE __forceinline
#include <stdlib.h>
#define ROTL32(x,y) _rotl(x,y)
#define ROTL64(x,y) _rotl64(x,y)
#define BIG_CONSTANT(x) (x)
// Other compilers
#else // defined(_MSC_VER)
#define FORCE_INLINE inline __attribute__((always_inline))
static inline uint32_t rotl32 ( uint32_t x, int8_t r )
{
return (x << r) | (x >> (32 - r));
}
static inline uint64_t rotl64 ( uint64_t x, int8_t r )
{
return (x << r) | (x >> (64 - r));
}
#define ROTL32(x,y) rotl32(x,y)
#define ROTL64(x,y) rotl64(x,y)
#define BIG_CONSTANT(x) (x##LLU)
#endif // !defined(_MSC_VER)
//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here
FORCE_INLINE static uint32_t getblock ( const uint32_t * p, int i )
{
return p[i];
}
//-----------------------------------------------------------------------------
// Finalization mix - force all bits of a hash block to avalanche
FORCE_INLINE static uint32_t fmix ( uint32_t h )
{
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return h;
}
//-----------------------------------------------------------------------------
static inline void MurmurHash3_x86_32 ( const void * key, int len,
uint32_t seed, uint32_t * out )
{
const uint8_t * data = (const uint8_t*)key;
const int nblocks = len / 4;
int i;
uint32_t h1 = seed;
uint32_t c1 = 0xcc9e2d51;
uint32_t c2 = 0x1b873593;
//----------
// body
const uint32_t * blocks = (const uint32_t *)(const void *)(data + nblocks*4);
for(i = -nblocks; i; i++)
{
uint32_t k1 = getblock(blocks,i);
k1 *= c1;
k1 = ROTL32(k1,15);
k1 *= c2;
h1 ^= k1;
h1 = ROTL32(h1,13);
h1 = h1*5+0xe6546b64;
}
//----------
// tail
const uint8_t * tail = (const uint8_t*)(data + nblocks*4);
uint32_t k1 = 0;
switch(len & 3)
{
case 3: k1 ^= tail[2] << 16;
case 2: k1 ^= tail[1] << 8;
case 1: k1 ^= tail[0];
k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
};
//----------
// finalization
h1 ^= len;
h1 = fmix(h1);
*(uint32_t *)out = h1;
}
static inline uint64_t MurmurHash64A ( const void * key, int len, uint64_t seed )
{
const uint64_t m = BIG_CONSTANT(0xc6a4a7935bd1e995);
const int r = 47;
uint64_t h = seed ^ (len * m);
const uint64_t * data = (const uint64_t *)key;
const uint64_t * end = data + (len/8);
while(data != end)
{
uint64_t k;
if (!((uintptr_t)data & 0x7))
k = *data++;
else {
memcpy(&k, data, sizeof(k));
data++;
}
k *= m;
k ^= k >> r;
k *= m;
h ^= k;
h *= m;
}
const unsigned char * data2 = (const unsigned char*)data;
switch(len & 7)
{
case 7: h ^= (uint64_t)(data2[6]) << 48;
case 6: h ^= (uint64_t)(data2[5]) << 40;
case 5: h ^= (uint64_t)(data2[4]) << 32;
case 4: h ^= (uint64_t)(data2[3]) << 24;
case 3: h ^= (uint64_t)(data2[2]) << 16;
case 2: h ^= (uint64_t)(data2[1]) << 8;
case 1: h ^= (uint64_t)(data2[0]);
h *= m;
};
h ^= h >> r;
h *= m;
h ^= h >> r;
return h;
}
// 64-bit hash for 32-bit platforms
static inline uint64_t MurmurHash64B ( const void * key, int len, uint64_t seed )
{
const uint32_t m = 0x5bd1e995;
const int r = 24;
uint32_t h1 = (uint32_t)(seed) ^ len;
uint32_t h2 = (uint32_t)(seed >> 32);
const uint32_t * data = (const uint32_t *)key;
while(len >= 8)
{
uint32_t k1 = *data++;
k1 *= m; k1 ^= k1 >> r; k1 *= m;
h1 *= m; h1 ^= k1;
len -= 4;
uint32_t k2 = *data++;
k2 *= m; k2 ^= k2 >> r; k2 *= m;
h2 *= m; h2 ^= k2;
len -= 4;
}
if(len >= 4)
{
uint32_t k1 = *data++;
k1 *= m; k1 ^= k1 >> r; k1 *= m;
h1 *= m; h1 ^= k1;
len -= 4;
}
switch(len)
{
case 3: h2 ^= ((const unsigned char*)data)[2] << 16;
case 2: h2 ^= ((const unsigned char*)data)[1] << 8;
case 1: h2 ^= ((const unsigned char*)data)[0];
h2 *= m;
};
h1 ^= h2 >> 18; h1 *= m;
h2 ^= h1 >> 22; h2 *= m;
h1 ^= h2 >> 17; h1 *= m;
h2 ^= h1 >> 19; h2 *= m;
uint64_t h = h1;
h = (h << 32) | h2;
return h;
}
#endif /* CK_HT_HASH_H */

119
src/ck_internal.h Normal file
View file

@ -0,0 +1,119 @@
/*
* Copyright 2011-2015 Samy Al Bahra.
* Copyright 2011 David Joseph.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Several of these are from: http://graphics.stanford.edu/~seander/bithacks.html
*/
#define CK_INTERNAL_LOG_0 (0xAAAAAAAA)
#define CK_INTERNAL_LOG_1 (0xCCCCCCCC)
#define CK_INTERNAL_LOG_2 (0xF0F0F0F0)
#define CK_INTERNAL_LOG_3 (0xFF00FF00)
#define CK_INTERNAL_LOG_4 (0xFFFF0000)
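/*
 * Compute the base-2 logarithm of v, assuming v is a power of two
 * (bit-twiddling approach from the page referenced above).
 */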
CK_CC_INLINE static uint32_t
ck_internal_log(uint32_t v)
{
uint32_t r = (v & CK_INTERNAL_LOG_0) != 0;
r |= ((v & CK_INTERNAL_LOG_4) != 0) << 4;
r |= ((v & CK_INTERNAL_LOG_3) != 0) << 3;
r |= ((v & CK_INTERNAL_LOG_2) != 0) << 2;
r |= ((v & CK_INTERNAL_LOG_1) != 0) << 1;
return (r);
}
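/*
 * Round v up to the nearest power of two. A value that is already a power of
 * two is returned unchanged; 0 is returned for v == 0 or on overflow (v > 2^31).
 */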
CK_CC_INLINE static uint32_t
ck_internal_power_2(uint32_t v)
{
--v;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
return (++v);
}
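/*
 * Branchless maximum: the mask -(x < y) is all ones when y is larger,
 * selecting y, and zero otherwise, selecting x.
 */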
CK_CC_INLINE static unsigned long
ck_internal_max(unsigned long x, unsigned long y)
{
return x ^ ((x ^ y) & -(x < y));
}
CK_CC_INLINE static uint64_t
ck_internal_max_64(uint64_t x, uint64_t y)
{
return x ^ ((x ^ y) & -(x < y));
}
CK_CC_INLINE static uint32_t
ck_internal_max_32(uint32_t x, uint32_t y)
{
return x ^ ((x ^ y) & -(x < y));
}
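/*
 * Return the 1-based index of a set bit in v (the least significant set bit
 * with the GCC builtin, the most significant one in the portable fallback).
 * Callers pass power-of-two table capacities, for which both agree.
 */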
CK_CC_INLINE static unsigned long
ck_internal_bsf(unsigned long v)
{
#if defined(__GNUC__)
return __builtin_ffsl(v);
#else
unsigned int i;
const unsigned int s = sizeof(unsigned long) * 8 - 1;
for (i = 0; i < s; i++) {
if (v & (1UL << (s - i)))
return sizeof(unsigned long) * 8 - i;
}
return 1;
#endif /* !__GNUC__ */
}
CK_CC_INLINE static uint64_t
ck_internal_bsf_64(uint64_t v)
{
#if defined(__GNUC__)
return __builtin_ffsll(v);
#else
unsigned int i;
const unsigned int s = sizeof(uint64_t) * 8 - 1;
for (i = 0; i < s; i++) {
if (v & (1ULL << (s - i)))
return sizeof(uint64_t) * 8 - i;
}
#endif /* !__GNUC__ */
return 1;
}

1480
src/ck_rhs.c Normal file

File diff suppressed because it is too large