Adding upstream version 0.6.0.

Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
Daniel Baumann 2025-02-09 07:35:45 +01:00
parent c49a9029dc
commit 7f70a05c55
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
465 changed files with 60158 additions and 0 deletions

View file

@ -0,0 +1,42 @@
# Build/run harness for the ck_epoch regression binaries.
.PHONY: check clean distribution

OBJECTS=ck_stack ck_epoch_synchronize ck_epoch_poll ck_epoch_call \
	ck_epoch_section ck_epoch_section_2 torture

# Half the configured core count; the backticks are expanded by the
# shell when a `check' recipe runs, not by make itself.
HALF=`expr $(CORES) / 2`

all: $(OBJECTS)

# Run every regression with CORES (or CORES/2 readers+writers) threads.
check: all
	./ck_stack $(CORES) 1
	./ck_epoch_synchronize $(HALF) $(HALF) 1
	./ck_epoch_poll $(CORES) 1 1
	./ck_epoch_section
	./ck_epoch_section_2 $(HALF) $(HALF) 1
	./torture $(HALF) $(HALF) 1

# Each binary compiles its test source together with the epoch
# implementation directly (no intermediate library).
ck_epoch_synchronize: ck_epoch_synchronize.c ../../../include/ck_stack.h ../../../include/ck_epoch.h ../../../src/ck_epoch.c
	$(CC) $(CFLAGS) -o ck_epoch_synchronize ck_epoch_synchronize.c ../../../src/ck_epoch.c

ck_epoch_poll: ck_epoch_poll.c ../../../include/ck_stack.h ../../../include/ck_epoch.h ../../../src/ck_epoch.c
	$(CC) $(CFLAGS) -o ck_epoch_poll ck_epoch_poll.c ../../../src/ck_epoch.c

torture: torture.c ../../../include/ck_epoch.h ../../../src/ck_epoch.c
	$(CC) $(CFLAGS) -o torture torture.c ../../../src/ck_epoch.c

ck_epoch_section: ck_epoch_section.c ../../../include/ck_epoch.h ../../../src/ck_epoch.c
	$(CC) $(CFLAGS) -o ck_epoch_section ck_epoch_section.c ../../../src/ck_epoch.c

ck_epoch_section_2: ck_epoch_section_2.c ../../../include/ck_epoch.h ../../../src/ck_epoch.c
	$(CC) $(CFLAGS) -o ck_epoch_section_2 ck_epoch_section_2.c ../../../src/ck_epoch.c

ck_epoch_call: ck_epoch_call.c ../../../include/ck_stack.h ../../../include/ck_epoch.h ../../../src/ck_epoch.c
	$(CC) $(CFLAGS) -o ck_epoch_call ck_epoch_call.c ../../../src/ck_epoch.c

ck_stack: ck_stack.c ../../../include/ck_stack.h ../../../include/ck_epoch.h ../../../src/ck_epoch.c
	$(CC) $(CFLAGS) -o ck_stack ck_stack.c ../../../src/ck_epoch.c

clean:
	rm -rf *~ *.o $(OBJECTS) *.dSYM *.exe

# regressions.build supplies CC/CFLAGS/CORES/PTHREAD_CFLAGS; make
# evaluates variables lazily, so appending to CFLAGS here still
# affects the recipes above.
include ../../../build/regressions.build
CFLAGS+=$(PTHREAD_CFLAGS) -D_GNU_SOURCE

View file

@ -0,0 +1,64 @@
/*
* Copyright 2014 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <stdio.h>
#include <ck_epoch.h>
#include "../../common.h"
/* NOTE(review): epoch is never ck_epoch_init()'d here -- this relies on
 * zeroed static storage being a valid initial state; confirm against
 * ck_epoch internals. */
static ck_epoch_t epoch;
static unsigned int counter;		/* number of times cb() has run */
static ck_epoch_record_t record[2];	/* two records registered with epoch */
/*
 * Deferred callback under test: on its first invocation (counter == 0)
 * it re-arms itself on record[1], so a second dispatch round must run
 * it once more before the test can pass.
 */
static void
cb(ck_epoch_entry_t *p)
{

	if (counter == 0)
		ck_epoch_call(&record[1], p, cb);

	printf("Counter value: %u -> %u\n",
	    counter, counter + 1);
	counter++;
	return;
}
int
main(void)
{
	ck_epoch_entry_t entry;

	ck_epoch_register(&epoch, &record[0]);
	ck_epoch_register(&epoch, &record[1]);

	/* The first barrier dispatches cb() once (which re-arms itself on
	 * the same entry); the second barrier dispatches the re-armed
	 * call.  counter must therefore end at exactly 2. */
	ck_epoch_call(&record[1], &entry, cb);
	ck_epoch_barrier(&record[1]);
	ck_epoch_barrier(&record[1]);

	if (counter != 2)
		ck_error("Expected counter value 2, read %u.\n", counter);

	return 0;
}

View file

@ -0,0 +1,236 @@
/*
* Copyright 2010-2015 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <sys/time.h>
#include <ck_backoff.h>
#include <ck_cc.h>
#include <ck_pr.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include <ck_epoch.h>
#include <ck_stack.h>
#include "../../common.h"
static unsigned int n_rd;	/* reader thread count (argv[1]) */
static unsigned int n_wr;	/* writer thread count (argv[2]) */
static unsigned int n_threads;	/* n_rd + n_wr */
static unsigned int barrier;	/* start-up rendezvous counter */
static unsigned int e_barrier;	/* shutdown rendezvous counter */
static unsigned int readers;	/* set to 1 once a reader observed entries */
static unsigned int writers;	/* writer ticket allocator (tid source) */

#ifndef PAIRS_S
#define PAIRS_S 100000		/* push/pop pairs per writer iteration */
#endif

#ifndef ITERATE_S
#define ITERATE_S 20		/* iterations per writer */
#endif

/* Stack node reclaimed through the epoch machinery. */
struct node {
	unsigned int value;
	ck_stack_entry_t stack_entry;
	ck_epoch_entry_t epoch_entry;
};
static ck_stack_t stack = CK_STACK_INITIALIZER;
static ck_epoch_t stack_epoch;

/* container-of helpers for the two intrusive entries above. */
CK_STACK_CONTAINER(struct node, stack_entry, stack_container)
CK_EPOCH_CONTAINER(struct node, epoch_entry, epoch_container)
static struct affinity a;
static const char animate[] = "-/|\\";
/*
 * Epoch destructor: recover the enclosing node from its embedded epoch
 * entry and release it.
 */
static void
destructor(ck_epoch_entry_t *p)
{

	free(epoch_container(p));
}
/*
 * Reader: repeatedly walks the shared stack inside an epoch section,
 * counting observed entries, until the stack drains and a writer has
 * reached the exit barrier.  Returns NULL (pthread signature).
 */
static void *
read_thread(void *unused CK_CC_UNUSED)
{
	unsigned int j;
	ck_epoch_record_t record CK_CC_CACHELINE;
	ck_stack_entry_t *cursor, *n;

	ck_epoch_register(&stack_epoch, &record);

	if (aff_iterate(&a)) {
		perror("ERROR: failed to affine thread");
		exit(EXIT_FAILURE);
	}

	/* Rendezvous: wait until every thread has started. */
	ck_pr_inc_uint(&barrier);
	while (ck_pr_load_uint(&barrier) < n_threads);

	/* Wait for a writer to publish at least one entry (or for another
	 * reader to have already observed some). */
	while (CK_STACK_ISEMPTY(&stack) == true) {
		if (ck_pr_load_uint(&readers) != 0)
			break;
		ck_pr_stall();
	}

	j = 0;
	for (;;) {
		/* Traversal is only safe inside an epoch section; nodes may
		 * otherwise be reclaimed under us. */
		ck_epoch_begin(&record, NULL);
		CK_STACK_FOREACH(&stack, cursor) {
			if (cursor == NULL)
				continue;

			n = CK_STACK_NEXT(cursor);
			/* NOTE(review): loads from the address of the local
			 * `n` -- presumably just a way to defeat compiler
			 * optimization of the count; confirm intent. */
			j += ck_pr_load_ptr(&n) != NULL;
		}
		ck_epoch_end(&record, NULL);

		if (j != 0 && ck_pr_load_uint(&readers) == 0)
			ck_pr_store_uint(&readers, 1);

		/* Exit once the stack is drained and writers are done. */
		if (CK_STACK_ISEMPTY(&stack) == true &&
		    ck_pr_load_uint(&e_barrier) != 0)
			break;
	}

	ck_pr_inc_uint(&e_barrier);
	while (ck_pr_load_uint(&e_barrier) < n_threads);

	fprintf(stderr, "[R] Observed entries: %u\n", j);
	return (NULL);
}
/*
 * Writer: for ITERATE_S rounds, pushes PAIRS_S nodes onto the shared
 * stack, then pops each one and defers its destruction via
 * ck_epoch_call(), polling the epoch for reclamation.  Thread 0 also
 * prints progress and final peak/reclamation statistics.
 */
static void *
write_thread(void *unused CK_CC_UNUSED)
{
	struct node **entry, *e;
	unsigned int i, j, tid;
	ck_epoch_record_t record;
	ck_stack_entry_t *s;

	ck_epoch_register(&stack_epoch, &record);

	if (aff_iterate(&a)) {
		perror("ERROR: failed to affine thread");
		exit(EXIT_FAILURE);
	}

	tid = ck_pr_faa_uint(&writers, 1);

	/* Rendezvous with all other threads before generating load. */
	ck_pr_inc_uint(&barrier);
	while (ck_pr_load_uint(&barrier) < n_threads);

	entry = malloc(sizeof(struct node *) * PAIRS_S);
	if (entry == NULL) {
		ck_error("Failed allocation.\n");
	}

	for (j = 0; j < ITERATE_S; j++) {
		for (i = 0; i < PAIRS_S; i++) {
			entry[i] = malloc(sizeof(struct node));
			/* BUG FIX: original tested `entry == NULL`, which
			 * never catches a failed per-node allocation. */
			if (entry[i] == NULL) {
				ck_error("Failed individual allocation\n");
			}
		}

		for (i = 0; i < PAIRS_S; i++) {
			ck_stack_push_upmc(&stack, &entry[i]->stack_entry);
		}

		while (ck_pr_load_uint(&readers) == 0)
			ck_pr_stall();

		if (tid == 0) {
			fprintf(stderr, "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b[W] %2.2f: %c",
			    (double)j / ITERATE_S, animate[i % strlen(animate)]);
		}

		for (i = 0; i < PAIRS_S; i++) {
			/* Pop inside a section, then hand the node to the
			 * epoch machinery for deferred destruction. */
			ck_epoch_begin(&record, NULL);
			s = ck_stack_pop_upmc(&stack);
			e = stack_container(s);
			ck_epoch_end(&record, NULL);

			ck_epoch_call(&record, &e->epoch_entry, destructor);
			ck_epoch_poll(&record);
		}
	}

	/* Force dispatch of any remaining deferrals before reporting. */
	ck_epoch_barrier(&record);

	if (tid == 0) {
		fprintf(stderr, "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b[W] Peak: %u (%2.2f%%)\n	Reclamations: %lu\n\n",
		    record.n_peak,
		    (double)record.n_peak / ((double)PAIRS_S * ITERATE_S) * 100,
		    record.n_dispatch);
	}

	ck_pr_inc_uint(&e_barrier);
	while (ck_pr_load_uint(&e_barrier) < n_threads);

	/* Nodes themselves are freed by destructor(); release the
	 * pointer array (previously leaked). */
	free(entry);
	return (NULL);
}
/*
 * Usage: stack <#readers> <#writers> <affinity delta>
 * Spawns n_rd readers then n_wr writers and joins them all.
 */
int
main(int argc, char *argv[])
{
	unsigned int i;
	pthread_t *threads;

	if (argc != 4) {
		ck_error("Usage: stack <#readers> <#writers> <affinity delta>\n");
	}

	n_rd = atoi(argv[1]);
	n_wr = atoi(argv[2]);
	n_threads = n_wr + n_rd;

	a.delta = atoi(argv[3]);
	a.request = 0;

	threads = malloc(sizeof(pthread_t) * n_threads);
	if (threads == NULL) {
		ck_error("Failed to allocate thread handles.\n");
	}

	ck_epoch_init(&stack_epoch);

	for (i = 0; i < n_rd; i++)
		pthread_create(threads + i, NULL, read_thread, NULL);

	/* BUG FIX: the original do-while spawned one writer even when
	 * n_wr == 0, writing one slot past the end of threads[]. */
	for (; i < n_threads; i++)
		pthread_create(threads + i, NULL, write_thread, NULL);

	for (i = 0; i < n_threads; i++)
		pthread_join(threads[i], NULL);

	free(threads);
	return (0);
}

View file

@ -0,0 +1,311 @@
/*
* Copyright 2015 John Esmet.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <ck_epoch.h>
#include "../../common.h"
static ck_epoch_t epc;				/* epoch instance re-initialized per test */
static ck_epoch_record_t record, record2;	/* test-thread and helper-thread records */
static unsigned int cleanup_calls;		/* times cleanup() has been dispatched */
/* Reset the shared epoch state and register both records for a test. */
static void
setup_test(void)
{

	cleanup_calls = 0;
	ck_epoch_init(&epc);
	ck_epoch_register(&epc, &record);
	ck_epoch_register(&epc, &record2);
}
/*
 * Wipe all shared test state so the next setup_test() starts clean.
 * NOTE(review): the epoch object is zeroed *before* record is
 * unregistered from it, and record2 is never unregistered at all --
 * presumably safe only because setup_test() re-initializes everything;
 * confirm against ck_epoch_unregister()'s requirements.
 */
static void
teardown_test(void)
{

	memset(&epc, 0, sizeof(ck_epoch_t));
	ck_epoch_unregister(&record);
	memset(&record, 0, sizeof(ck_epoch_record_t));
	memset(&record2, 0, sizeof(ck_epoch_record_t));
	cleanup_calls = 0;
	return;
}
/* Deferred-destruction callback: just count the dispatch. */
static void
cleanup(ck_epoch_entry_t *e)
{

	(void)e;
	cleanup_calls++;
}
/*
 * A call deferred inside an open section must not be dispatched until
 * the section closes and a barrier runs.
 */
static void
test_simple_read_section(void)
{
	ck_epoch_entry_t entry;
	ck_epoch_section_t section;

	memset(&entry, 0, sizeof(ck_epoch_entry_t));
	setup_test();

	ck_epoch_begin(&record, &section);
	ck_epoch_call(&record, &entry, cleanup);
	assert(cleanup_calls == 0);		/* still inside the section */
	ck_epoch_end(&record, &section);

	ck_epoch_barrier(&record);
	assert(cleanup_calls == 1);		/* barrier dispatched it */

	teardown_test();
	return;
}
/*
 * Same as the simple test but with two nested sections: no deferral may
 * be dispatched until the outermost section has ended and a barrier
 * runs, at which point both must fire.
 */
static void
test_nested_read_section(void)
{
	ck_epoch_entry_t entry1, entry2;
	ck_epoch_section_t section1, section2;

	memset(&entry1, 0, sizeof(ck_epoch_entry_t));
	memset(&entry2, 0, sizeof(ck_epoch_entry_t));
	setup_test();

	ck_epoch_begin(&record, &section1);
	ck_epoch_call(&record, &entry1, cleanup);
	assert(cleanup_calls == 0);

	ck_epoch_begin(&record, &section2);
	ck_epoch_call(&record, &entry2, cleanup);
	assert(cleanup_calls == 0);

	ck_epoch_end(&record, &section2);
	assert(cleanup_calls == 0);		/* outer section still open */

	ck_epoch_end(&record, &section1);
	assert(cleanup_calls == 0);		/* nothing until a barrier */

	ck_epoch_barrier(&record);
	assert(cleanup_calls == 2);

	teardown_test();
	return;
}
/* Object whose destruction is deferred through the epoch machinery;
 * obj_destroy() sets destroyed to 1. */
struct obj {
	ck_epoch_entry_t entry;
	unsigned int destroyed;
};
/*
 * Helper thread: repeatedly drives ck_epoch_barrier() while *arg is
 * nonzero, forcing global-epoch progress underneath the test thread.
 * Uses record2 because record belongs to the test thread itself.
 */
static void *
barrier_work(void *arg)
{
	unsigned int *active = (unsigned int *)arg;

	while (ck_pr_load_uint(active) != 0) {
		ck_epoch_barrier(&record2);
		usleep(5 * 1000);
	}

	return NULL;
}
/*
 * Reader thread: holds its own epoch section for a random interval and
 * asserts the shared object (arg) has not been destroyed, which must
 * hold while the spawning thread's section remains open.
 */
static void *
reader_work(void *arg)
{
	ck_epoch_record_t local_record;
	ck_epoch_section_t section;
	struct obj *o;

	ck_epoch_register(&epc, &local_record);

	o = (struct obj *)arg;

	/*
	 * Begin a read section. The calling thread has an open read section,
	 * so the object should not be destroyed for the lifetime of this
	 * thread.
	 */
	ck_epoch_begin(&local_record, &section);
	usleep((common_rand() % 100) * 1000);	/* up to ~99ms in-section */
	assert(ck_pr_load_uint(&o->destroyed) == 0);
	ck_epoch_end(&local_record, &section);

	ck_epoch_unregister(&local_record);
	return NULL;
}
/* Epoch destructor for struct obj: mark it destroyed (atomically). */
static void
obj_destroy(ck_epoch_entry_t *e)
{

	ck_pr_fas_uint(&((struct obj *)e)->destroyed, 1);
}
/*
 * One thread opens many overlapping sections while a helper thread
 * hammers ck_epoch_barrier().  The deferred object must survive until
 * the *last* section closes, regardless of the (shuffled) order the
 * sections are ended in.
 */
static void
test_single_reader_with_barrier_thread(void)
{
	const int num_sections = 10;
	struct obj o;
	unsigned int run;
	pthread_t thread;
	ck_epoch_section_t sections[num_sections];
	int shuffled[num_sections];

	run = 1;
	memset(&o, 0, sizeof(struct obj));
	common_srand(time(NULL));
	setup_test();

	if (pthread_create(&thread, NULL, barrier_work, &run) != 0) {
		abort();
	}

	/* Start a bunch of sections. */
	for (int i = 0; i < num_sections; i++) {
		ck_epoch_begin(&record, &sections[i]);
		shuffled[i] = i;
		if (i == num_sections / 2) {
			/* Mid-way pause lets the barrier thread interleave. */
			usleep(1 * 1000);
		}
	}

	/* Generate a shuffle (Fisher-Yates). */
	for (int i = num_sections - 1; i >= 0; i--) {
		int k = common_rand() % (i + 1);
		int tmp = shuffled[k];
		shuffled[k] = shuffled[i];
		shuffled[i] = tmp;
	}

	ck_epoch_call(&record, &o.entry, obj_destroy);

	/* Close the sections in shuffle-order. */
	for (int i = 0; i < num_sections; i++) {
		ck_epoch_end(&record, &sections[shuffled[i]]);
		if (i != num_sections - 1) {
			/* At least one section still open: object alive. */
			assert(ck_pr_load_uint(&o.destroyed) == 0);
			usleep(3 * 1000);
		}
	}

	ck_pr_store_uint(&run, 0);
	if (pthread_join(thread, NULL) != 0) {
		abort();
	}

	ck_epoch_barrier(&record);
	assert(ck_pr_load_uint(&o.destroyed) == 1);

	teardown_test();
	return;
}
/*
 * Spawns reader threads while this thread holds a section with a
 * pending deferral, plus a helper thread driving barriers.  Readers
 * assert the object survives while their own sections are open; the
 * final joins ensure the object is eventually destroyed.
 */
static void
test_multiple_readers_with_barrier_thread(void)
{
	const int num_readers = 10;
	struct obj o;
	unsigned int run;
	ck_epoch_section_t section;
	pthread_t threads[num_readers + 1];

	run = 1;
	memset(&o, 0, sizeof(struct obj));
	memset(&section, 0, sizeof(ck_epoch_section_t));
	common_srand(time(NULL));
	setup_test();

	/* Create a thread to call barrier() while we create reader threads.
	 * Each barrier will attempt to move the global epoch forward so
	 * it will make the read section code coverage more interesting. */
	if (pthread_create(&threads[num_readers], NULL,
	    barrier_work, &run) != 0) {
		abort();
	}

	ck_epoch_begin(&record, &section);
	ck_epoch_call(&record, &o.entry, obj_destroy);

	for (int i = 0; i < num_readers; i++) {
		if (pthread_create(&threads[i], NULL, reader_work, &o) != 0) {
			abort();
		}
	}

	ck_epoch_end(&record, &section);

	ck_pr_store_uint(&run, 0);
	if (pthread_join(threads[num_readers], NULL) != 0) {
		abort();
	}

	/* After the barrier, the object should be destroyed and readers
	 * should return. */
	for (int i = 0; i < num_readers; i++) {
		if (pthread_join(threads[i], NULL) != 0) {
			abort();
		}
	}

	teardown_test();
	return;
}
int
main(void)
{

	/* Run each epoch-section regression in turn. */
	test_simple_read_section();
	test_nested_read_section();
	test_single_reader_with_barrier_thread();
	test_multiple_readers_with_barrier_thread();

	return (0);
}

View file

@ -0,0 +1,195 @@
/*
* Copyright 2010-2015 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <ck_cc.h>
#include <ck_pr.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include <ck_epoch.h>
#include <ck_stack.h>
#include "../../common.h"
static unsigned int n_rd;	/* reader thread count (argv[1]) */
static unsigned int n_wr;	/* writer thread count (argv[2]) */
static unsigned int n_threads;	/* n_rd + n_wr */
static unsigned int barrier;	/* start-up rendezvous counter */
static unsigned int leave;	/* set to 1 to tell all threads to exit */

#ifndef PAIRS_S
#define PAIRS_S 10000
#endif

#ifndef CK_EPOCH_T_DEPTH
#define CK_EPOCH_T_DEPTH 8	/* depth of nested junk sections per round */
#endif

static ck_epoch_t epoch;
static struct affinity a;
/*
 * Reader: keeps two overlapping sections alive such that consecutive
 * sections land in different epoch buckets, nesting and unwinding
 * CK_EPOCH_T_DEPTH extra sections each round, while a writer drives
 * ck_epoch_synchronize().  Asserts bucket alternation and a nonzero
 * active count.  Order of begin/end calls here is load-bearing.
 */
static void *
read_thread(void *unused CK_CC_UNUSED)
{
	ck_epoch_record_t *record;
	unsigned long long i = 0;

	record = malloc(sizeof *record);
	assert(record != NULL);
	ck_epoch_register(&epoch, record);

	if (aff_iterate(&a)) {
		perror("ERROR: failed to affine thread");
		exit(EXIT_FAILURE);
	}

	ck_pr_inc_uint(&barrier);
	while (ck_pr_load_uint(&barrier) < n_threads);

	for (;;) {
		ck_epoch_section_t section[2];
		ck_epoch_section_t junk[CK_EPOCH_T_DEPTH];
		unsigned int j;

		ck_epoch_begin(record, &section[0]);

		/* Exercise deep nesting within the same bucket. */
		for (j = 0; j < CK_EPOCH_T_DEPTH; j++)
			ck_epoch_begin(record, &junk[j]);
		for (j = 0; j < CK_EPOCH_T_DEPTH; j++)
			ck_epoch_end(record, &junk[j]);

		/* Close the previous round's overlapping section. */
		if (i > 0)
			ck_epoch_end(record, &section[1]);

		/* Wait for the next synchronize operation. */
		while ((ck_pr_load_uint(&epoch.epoch) & 1) ==
		    section[0].bucket) {
			i++;

			if (!(i % 10000000)) {
				fprintf(stderr, "%u %u %u\n",
				    ck_pr_load_uint(&epoch.epoch),
				    section[0].bucket, record->epoch);
			}

			while ((ck_pr_load_uint(&epoch.epoch) & 1) ==
			    section[0].bucket) {
				if (ck_pr_load_uint(&leave) == 1)
					break;

				ck_pr_stall();
			}
		}

		/* Epoch parity flipped: the new section must land in the
		 * other bucket. */
		ck_epoch_begin(record, &section[1]);

		assert(section[0].bucket != section[1].bucket);
		ck_epoch_end(record, &section[0]);

		assert(ck_pr_load_uint(&record->active) > 0);

		if (ck_pr_load_uint(&leave) == 1) {
			ck_epoch_end(record, &section[1]);
			break;
		}

		i++;
	}

	return NULL;
}
/*
 * Writer: drives ck_epoch_synchronize() in a tight loop until told to
 * leave, printing a dot every 1048575 iterations and a final count.
 */
static void *
write_thread(void *unused CK_CC_UNUSED)
{
	ck_epoch_record_t rec;
	unsigned long n_sync = 0;

	ck_epoch_register(&epoch, &rec);

	if (aff_iterate(&a)) {
		perror("ERROR: failed to affine thread");
		exit(EXIT_FAILURE);
	}

	ck_pr_inc_uint(&barrier);
	while (ck_pr_load_uint(&barrier) < n_threads);

	do {
		if (!(n_sync % 1048575))
			fprintf(stderr, ".");

		ck_epoch_synchronize(&rec);
		n_sync++;
	} while (ck_pr_load_uint(&leave) != 1);

	fprintf(stderr, "%lu iterations\n", n_sync);
	return NULL;
}
/*
 * Usage: stack <#readers> <#writers> <affinity delta>
 * Runs readers and writers for 10 seconds, then signals shutdown.
 */
int
main(int argc, char *argv[])
{
	unsigned int i;
	pthread_t *threads;

	if (argc != 4) {
		ck_error("Usage: stack <#readers> <#writers> <affinity delta>\n");
	}

	n_rd = atoi(argv[1]);
	n_wr = atoi(argv[2]);
	n_threads = n_wr + n_rd;

	a.delta = atoi(argv[3]);
	a.request = 0;

	threads = malloc(sizeof(pthread_t) * n_threads);
	if (threads == NULL) {
		ck_error("Failed to allocate thread handles.\n");
	}

	ck_epoch_init(&epoch);

	for (i = 0; i < n_rd; i++)
		pthread_create(threads + i, NULL, read_thread, NULL);

	/* BUG FIX: the original do-while spawned one writer even when
	 * n_wr == 0, writing one slot past the end of threads[]. */
	for (; i < n_threads; i++)
		pthread_create(threads + i, NULL, write_thread, NULL);

	common_sleep(10);
	ck_pr_store_uint(&leave, 1);

	for (i = 0; i < n_threads; i++)
		pthread_join(threads[i], NULL);

	free(threads);
	return (0);
}

View file

@ -0,0 +1,249 @@
/*
* Copyright 2010-2015 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <sys/time.h>
#include <ck_backoff.h>
#include <ck_cc.h>
#include <ck_pr.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include <ck_epoch.h>
#include <ck_stack.h>
#include "../../common.h"
static unsigned int n_rd;	/* reader thread count (argv[1]) */
static unsigned int n_wr;	/* writer thread count (argv[2]) */
static unsigned int n_threads;	/* n_rd + n_wr */
static unsigned int barrier;	/* start-up rendezvous counter */
static unsigned int e_barrier;	/* shutdown rendezvous counter */
static unsigned int readers;	/* set to 1 once a reader observed entries */
static unsigned int writers;	/* writer ticket allocator (tid source) */

#ifndef PAIRS_S
#define PAIRS_S 10000		/* push/pop pairs per writer iteration */
#endif

#ifndef ITERATE_S
#define ITERATE_S 20		/* iterations per writer */
#endif

/* Stack node reclaimed through the epoch machinery. */
struct node {
	unsigned int value;
	ck_stack_entry_t stack_entry;
	ck_epoch_entry_t epoch_entry;
};
static ck_stack_t stack = CK_STACK_INITIALIZER;
static ck_epoch_t stack_epoch;

/* container-of helpers for the two intrusive entries above. */
CK_STACK_CONTAINER(struct node, stack_entry, stack_container)
CK_EPOCH_CONTAINER(struct node, epoch_entry, epoch_container)
static struct affinity a;
static const char animate[] = "-/|\\";
/*
 * Epoch destructor: recover the enclosing node from its embedded epoch
 * entry and release it.
 */
static void
destructor(ck_epoch_entry_t *p)
{

	free(epoch_container(p));
}
/*
 * Reader: repeatedly walks up to ~4098 entries of the shared stack
 * inside an epoch section (bounded so a section never stays open too
 * long against ck_epoch_synchronize()), until the stack drains and a
 * writer has reached the exit barrier.
 */
static void *
read_thread(void *unused CK_CC_UNUSED)
{
	unsigned int j;
	ck_epoch_record_t record CK_CC_CACHELINE;
	ck_stack_entry_t *cursor;
	ck_stack_entry_t *n;
	unsigned int i;

	ck_epoch_register(&stack_epoch, &record);

	if (aff_iterate(&a)) {
		perror("ERROR: failed to affine thread");
		exit(EXIT_FAILURE);
	}

	/* Rendezvous: wait until every thread has started. */
	ck_pr_inc_uint(&barrier);
	while (ck_pr_load_uint(&barrier) < n_threads);

	/* Wait for a writer to publish at least one entry. */
	while (CK_STACK_ISEMPTY(&stack) == true) {
		if (ck_pr_load_uint(&readers) != 0)
			break;
		ck_pr_stall();
	}

	j = 0;
	for (;;) {
		i = 0;

		ck_epoch_begin(&record, NULL);
		CK_STACK_FOREACH(&stack, cursor) {
			if (cursor == NULL)
				continue;

			n = CK_STACK_NEXT(cursor);
			/* NOTE(review): loads from the address of the local
			 * `n` -- presumably to defeat optimization; confirm. */
			j += ck_pr_load_ptr(&n) != NULL;

			/* Bound section length. */
			if (i++ > 4098)
				break;
		}
		ck_epoch_end(&record, NULL);

		if (j != 0 && ck_pr_load_uint(&readers) == 0)
			ck_pr_store_uint(&readers, 1);

		if (CK_STACK_ISEMPTY(&stack) == true &&
		    ck_pr_load_uint(&e_barrier) != 0)
			break;
	}

	ck_pr_inc_uint(&e_barrier);
	while (ck_pr_load_uint(&e_barrier) < n_threads);

	fprintf(stderr, "[R] Observed entries: %u\n", j);
	return (NULL);
}
/*
 * Writer: pushes PAIRS_S nodes, then pops them, alternating between
 * (synchronize + reclaim + deferred call) and (barrier + immediate
 * destruction) to exercise both reclamation paths.  Thread 0 prints
 * progress and final statistics.
 */
static void *
write_thread(void *unused CK_CC_UNUSED)
{
	struct node **entry, *e;
	unsigned int i, j, tid;
	ck_epoch_record_t record;
	ck_stack_entry_t *s;

	ck_epoch_register(&stack_epoch, &record);

	if (aff_iterate(&a)) {
		perror("ERROR: failed to affine thread");
		exit(EXIT_FAILURE);
	}

	tid = ck_pr_faa_uint(&writers, 1);

	/* Rendezvous with all other threads before generating load. */
	ck_pr_inc_uint(&barrier);
	while (ck_pr_load_uint(&barrier) < n_threads);

	entry = malloc(sizeof(struct node *) * PAIRS_S);
	if (entry == NULL) {
		ck_error("Failed allocation.\n");
	}

	for (j = 0; j < ITERATE_S; j++) {
		for (i = 0; i < PAIRS_S; i++) {
			entry[i] = malloc(sizeof(struct node));
			/* BUG FIX: original tested `entry == NULL`, which
			 * never catches a failed per-node allocation. */
			if (entry[i] == NULL) {
				ck_error("Failed individual allocation\n");
			}
		}

		for (i = 0; i < PAIRS_S; i++) {
			ck_stack_push_upmc(&stack, &entry[i]->stack_entry);
		}

		while (ck_pr_load_uint(&readers) == 0)
			ck_pr_stall();

		for (i = 0; i < PAIRS_S; i++) {
			ck_epoch_begin(&record, NULL);
			s = ck_stack_pop_upmc(&stack);
			e = stack_container(s);
			ck_epoch_end(&record, NULL);

			if (i & 1) {
				/* Deferred path: synchronize, reclaim, then
				 * queue the node for later destruction. */
				ck_epoch_synchronize(&record);
				ck_epoch_reclaim(&record);
				ck_epoch_call(&record, &e->epoch_entry, destructor);
			} else {
				/* Immediate path: barrier guarantees no
				 * reader still holds the node. */
				ck_epoch_barrier(&record);
				destructor(&e->epoch_entry);
			}

			if (tid == 0 && (i % 16384) == 0) {
				fprintf(stderr, "[W] %2.2f: %c\n",
				    (double)j / ITERATE_S, animate[i % strlen(animate)]);
			}
		}
	}

	ck_epoch_synchronize(&record);

	if (tid == 0) {
		fprintf(stderr, "[W] Peak: %u (%2.2f%%)\n	Reclamations: %lu\n\n",
		    record.n_peak,
		    (double)record.n_peak / ((double)PAIRS_S * ITERATE_S) * 100,
		    record.n_dispatch);
	}

	ck_pr_inc_uint(&e_barrier);
	while (ck_pr_load_uint(&e_barrier) < n_threads);

	/* Nodes are freed via destructor(); release the pointer array
	 * (previously leaked). */
	free(entry);
	return (NULL);
}
/*
 * Usage: stack <#readers> <#writers> <affinity delta>
 * Spawns n_rd readers then n_wr writers and joins them all.
 */
int
main(int argc, char *argv[])
{
	unsigned int i;
	pthread_t *threads;

	if (argc != 4) {
		ck_error("Usage: stack <#readers> <#writers> <affinity delta>\n");
	}

	n_rd = atoi(argv[1]);
	n_wr = atoi(argv[2]);
	n_threads = n_wr + n_rd;

	a.delta = atoi(argv[3]);
	a.request = 0;

	threads = malloc(sizeof(pthread_t) * n_threads);
	if (threads == NULL) {
		ck_error("Failed to allocate thread handles.\n");
	}

	ck_epoch_init(&stack_epoch);

	for (i = 0; i < n_rd; i++)
		pthread_create(threads + i, NULL, read_thread, NULL);

	/* BUG FIX: the original do-while spawned one writer even when
	 * n_wr == 0, writing one slot past the end of threads[]. */
	for (; i < n_threads; i++)
		pthread_create(threads + i, NULL, write_thread, NULL);

	for (i = 0; i < n_threads; i++)
		pthread_join(threads[i], NULL);

	free(threads);
	return (0);
}

View file

@ -0,0 +1,164 @@
/*
* Copyright 2010-2015 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <sys/time.h>
#include <ck_backoff.h>
#include <ck_cc.h>
#include <ck_pr.h>
#include <stdbool.h>
#include <stddef.h>
#include <ck_epoch.h>
#include <ck_stack.h>
#include "../../common.h"
static unsigned int n_threads;	/* worker count (argv[1]) */
static unsigned int barrier;	/* start-up rendezvous counter */
static unsigned int e_barrier;	/* two-phase shutdown rendezvous counter */

#ifndef PAIRS
#define PAIRS 5000000		/* push/pop pairs per worker */
#endif

/* Stack node reclaimed through the epoch machinery. */
struct node {
	unsigned int value;
	ck_epoch_entry_t epoch_entry;
	ck_stack_entry_t stack_entry;
};
static ck_stack_t stack = {NULL, NULL};
static ck_epoch_t stack_epoch;

/* container-of helpers for the two intrusive entries above. */
CK_STACK_CONTAINER(struct node, stack_entry, stack_container)
CK_EPOCH_CONTAINER(struct node, epoch_entry, epoch_container)
static struct affinity a;
/*
 * Epoch destructor: recover the enclosing node from its embedded epoch
 * entry and release it.
 */
static void
destructor(ck_epoch_entry_t *p)
{

	free(epoch_container(p));
}
/*
 * Worker: performs PAIRS push/pop pairs, deferring each popped node's
 * destruction and counting how often ck_epoch_poll() fails to make
 * progress (deferrals).  A final barrier must leave zero pending
 * deferrals on this record.
 */
static void *
thread(void *unused CK_CC_UNUSED)
{
	struct node **entry, *e;
	ck_epoch_record_t record;
	ck_stack_entry_t *s;
	unsigned long smr = 0;
	unsigned int i;

	ck_epoch_register(&stack_epoch, &record);

	if (aff_iterate(&a)) {
		perror("ERROR: failed to affine thread");
		exit(EXIT_FAILURE);
	}

	entry = malloc(sizeof(struct node *) * PAIRS);
	if (entry == NULL) {
		ck_error("Failed allocation.\n");
	}

	for (i = 0; i < PAIRS; i++) {
		entry[i] = malloc(sizeof(struct node));
		/* BUG FIX: original tested `entry == NULL`, which never
		 * catches a failed per-node allocation. */
		if (entry[i] == NULL) {
			ck_error("Failed individual allocation\n");
		}
	}

	/* Rendezvous: wait until every worker has started. */
	ck_pr_inc_uint(&barrier);
	while (ck_pr_load_uint(&barrier) < n_threads);

	for (i = 0; i < PAIRS; i++) {
		ck_epoch_begin(&record, NULL);
		ck_stack_push_upmc(&stack, &entry[i]->stack_entry);
		s = ck_stack_pop_upmc(&stack);
		ck_epoch_end(&record, NULL);

		e = stack_container(s);
		ck_epoch_call(&record, &e->epoch_entry, destructor);
		smr += ck_epoch_poll(&record) == false;
	}

	ck_pr_inc_uint(&e_barrier);
	while (ck_pr_load_uint(&e_barrier) < n_threads);

	fprintf(stderr, "Deferrals: %lu (%2.2f)\n", smr, (double)smr / PAIRS);
	fprintf(stderr, "Peak: %u (%2.2f%%), %u pending\nReclamations: %lu\n\n",
	    record.n_peak,
	    (double)record.n_peak / PAIRS * 100,
	    record.n_pending,
	    record.n_dispatch);

	/* Flush all remaining deferrals, then rendezvous again so every
	 * worker has barriered before checking pending counts. */
	ck_epoch_barrier(&record);
	ck_pr_inc_uint(&e_barrier);
	while (ck_pr_load_uint(&e_barrier) < (n_threads << 1));

	if (record.n_pending != 0) {
		ck_error("ERROR: %u pending, expecting none.\n",
		    record.n_pending);
	}

	/* Nodes are freed via destructor(); release the pointer array
	 * (previously leaked). */
	free(entry);
	return (NULL);
}
/*
 * Usage: stack <threads> <affinity delta>
 * Spawns n_threads workers and joins them.
 */
int
main(int argc, char *argv[])
{
	unsigned int i;
	pthread_t *threads;

	if (argc != 3) {
		ck_error("Usage: stack <threads> <affinity delta>\n");
	}

	n_threads = atoi(argv[1]);
	a.delta = atoi(argv[2]);
	a.request = 0;

	threads = malloc(sizeof(pthread_t) * n_threads);
	if (threads == NULL) {
		ck_error("Failed to allocate thread handles.\n");
	}

	ck_epoch_init(&stack_epoch);

	for (i = 0; i < n_threads; i++)
		pthread_create(threads + i, NULL, thread, NULL);

	for (i = 0; i < n_threads; i++)
		pthread_join(threads[i], NULL);

	free(threads);
	return (0);
}

View file

@ -0,0 +1,234 @@
/*
* Copyright 2010-2015 Samy Al Bahra.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <ck_cc.h>
#include <ck_pr.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include <ck_epoch.h>
#include <ck_stack.h>
#include "../../common.h"
static unsigned int n_rd;	/* reader thread count (argv[1], decremented on exit) */
static unsigned int n_wr;	/* writer thread count (argv[2]) */
static unsigned int n_threads;	/* n_rd + n_wr */
static unsigned int barrier;	/* start-up rendezvous counter */
static unsigned int leave;	/* set to 1 to tell all threads to exit */
static unsigned int first;	/* writer election: only the first writer stores */

/* Invariant under test: valid.value is always published before
 * invalid.value, so a reader inside a section must never observe
 * invalid.value > valid.value.  Separate cache lines avoid false
 * sharing between the two counters. */
struct {
	unsigned int value;
} valid CK_CC_CACHELINE = { 1 };

struct {
	unsigned int value;
} invalid CK_CC_CACHELINE;

#ifndef PAIRS_S
#define PAIRS_S 10000
#endif

#ifndef CK_EPOCH_T_DEPTH
#define CK_EPOCH_T_DEPTH 8
#endif

static ck_epoch_t epoch;
static struct affinity a;
/*
 * One torture round: check the valid/invalid ordering invariant inside
 * short sections, then hold a section open and verify the global epoch
 * advances at most once past the epoch observed at section entry.
 * Fence placement here is load-bearing.
 */
static void
test(struct ck_epoch_record *record)
{
	unsigned int j[3];
	unsigned int b, c;
	const unsigned int r = 100;
	size_t i;

	/* Inside a section, invalid must never lead valid. */
	for (i = 0; i < 8; i++) {
		ck_epoch_begin(record, NULL);
		c = ck_pr_load_uint(&invalid.value);
		ck_pr_fence_load();
		b = ck_pr_load_uint(&valid.value);
		ck_test(c > b, "Invalid value: %u > %u\n", c, b);
		ck_epoch_end(record, NULL);
	}

	ck_epoch_begin(record, NULL);

	/* This implies no early load of epoch occurs. */
	j[0] = record->epoch;

	/* We should observe up to one epoch migration. */
	do {
		ck_pr_fence_load();
		j[1] = ck_pr_load_uint(&epoch.epoch);

		if (ck_pr_load_uint(&leave) == 1) {
			ck_epoch_end(record, NULL);
			return;
		}
	} while (j[1] == j[0]);

	/* No more epoch migrations should occur */
	for (i = 0; i < r; i++) {
		ck_pr_fence_strict_load();
		j[2] = ck_pr_load_uint(&epoch.epoch);

		ck_test(j[2] != j[1], "Inconsistency detected: %u %u %u\n",
		    j[0], j[1], j[2]);
	}

	ck_epoch_end(record, NULL);
	return;
}
/*
 * Reader: runs test() in batches of four until shutdown, then drops
 * itself from the live reader count so writers can wind down.
 */
static void *
read_thread(void *unused CK_CC_UNUSED)
{
	ck_epoch_record_t *rec;

	rec = malloc(sizeof *rec);
	assert(rec != NULL);
	ck_epoch_register(&epoch, rec);

	if (aff_iterate(&a)) {
		perror("ERROR: failed to affine thread");
		exit(EXIT_FAILURE);
	}

	ck_pr_inc_uint(&barrier);
	while (ck_pr_load_uint(&barrier) < n_threads);

	do {
		int t;

		for (t = 0; t < 4; t++)
			test(rec);
	} while (ck_pr_load_uint(&leave) == 0);

	ck_pr_dec_uint(&n_rd);
	return NULL;
}
/*
 * Writer: the elected first writer (c == false) advances valid.value
 * before invalid.value, with ck_epoch_synchronize() between the pair of
 * stores, so readers in a section can never see invalid ahead of valid.
 * Other writers just drive synchronize() for contention.
 */
static void *
write_thread(void *unused CK_CC_UNUSED)
{
	ck_epoch_record_t *record;
	unsigned long iterations = 0;
	/* Writer election: zero only for the first writer to arrive. */
	bool c = ck_pr_faa_uint(&first, 1);

	record = malloc(sizeof *record);
	assert(record != NULL);
	ck_epoch_register(&epoch, record);

	if (aff_iterate(&a)) {
		perror("ERROR: failed to affine thread");
		exit(EXIT_FAILURE);
	}

	ck_pr_inc_uint(&barrier);
	while (ck_pr_load_uint(&barrier) < n_threads);

	do {
		/*
		 * A thread should never observe invalid.value > valid.value.
		 * inside a protected section. Only
		 * invalid.value <= valid.value is valid.
		 */
		if (!c) ck_pr_store_uint(&valid.value, 1);
		ck_epoch_synchronize(record);
		if (!c) ck_pr_store_uint(&invalid.value, 1);

		ck_pr_fence_store();
		if (!c) ck_pr_store_uint(&valid.value, 2);
		ck_epoch_synchronize(record);
		if (!c) ck_pr_store_uint(&invalid.value, 2);

		ck_pr_fence_store();
		if (!c) ck_pr_store_uint(&valid.value, 3);
		ck_epoch_synchronize(record);
		if (!c) ck_pr_store_uint(&invalid.value, 3);

		ck_pr_fence_store();
		if (!c) ck_pr_store_uint(&valid.value, 4);
		ck_epoch_synchronize(record);
		if (!c) ck_pr_store_uint(&invalid.value, 4);

		ck_epoch_synchronize(record);
		if (!c) ck_pr_store_uint(&invalid.value, 0);
		ck_epoch_synchronize(record);

		iterations += 4;
	} while (ck_pr_load_uint(&leave) == 0 &&
	    ck_pr_load_uint(&n_rd) > 0);

	fprintf(stderr, "%lu iterations\n", iterations);
	return NULL;
}
/*
 * Usage: stack <#readers> <#writers> <affinity delta>
 * Runs readers and writers for 30 seconds, then signals shutdown.
 */
int
main(int argc, char *argv[])
{
	unsigned int i;
	pthread_t *threads;

	if (argc != 4) {
		ck_error("Usage: stack <#readers> <#writers> <affinity delta>\n");
	}

	n_rd = atoi(argv[1]);
	n_wr = atoi(argv[2]);
	n_threads = n_wr + n_rd;

	a.delta = atoi(argv[3]);
	a.request = 0;

	threads = malloc(sizeof(pthread_t) * n_threads);
	if (threads == NULL) {
		ck_error("Failed to allocate thread handles.\n");
	}

	ck_epoch_init(&epoch);

	for (i = 0; i < n_rd; i++)
		pthread_create(threads + i, NULL, read_thread, NULL);

	/* BUG FIX: the original do-while spawned one writer even when
	 * n_wr == 0, writing one slot past the end of threads[]. */
	for (; i < n_threads; i++)
		pthread_create(threads + i, NULL, write_thread, NULL);

	common_sleep(30);
	ck_pr_store_uint(&leave, 1);

	for (i = 0; i < n_threads; i++)
		pthread_join(threads[i], NULL);

	free(threads);
	return 0;
}