Merging upstream version 0.7.1 (Closes: #991419).
Signed-off-by: Daniel Baumann <daniel@debian.org>
parent 05c588e9d7
commit 9e09e0ef69
99 changed files with 6727 additions and 943 deletions
src/ck_epoch.c (186 changed lines)
@@ -127,6 +127,14 @@
  */
 #define CK_EPOCH_GRACE 3U
 
+/*
+ * CK_EPOCH_LENGTH must be a power-of-2 (because (CK_EPOCH_LENGTH - 1) is used
+ * as a mask, and it must be at least 3 (see comments above).
+ */
+#if (CK_EPOCH_LENGTH < 3 || (CK_EPOCH_LENGTH & (CK_EPOCH_LENGTH - 1)) != 0)
+#error "CK_EPOCH_LENGTH must be a power of 2 and >= 3"
+#endif
+
 enum {
     CK_EPOCH_STATE_USED = 0,
     CK_EPOCH_STATE_FREE = 1
@@ -139,7 +147,7 @@ CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
 
 #define CK_EPOCH_SENSE_MASK (CK_EPOCH_SENSE - 1)
 
-void
+bool
 _ck_epoch_delref(struct ck_epoch_record *record,
     struct ck_epoch_section *section)
 {
@@ -150,7 +158,7 @@ _ck_epoch_delref(struct ck_epoch_record *record,
     current->count--;
 
     if (current->count > 0)
-        return;
+        return false;
 
     /*
      * If the current bucket no longer has any references, then
@@ -161,8 +169,7 @@ _ck_epoch_delref(struct ck_epoch_record *record,
      * If no other active bucket exists, then the record will go
      * inactive in order to allow for forward progress.
      */
-    other = &record->local.bucket[(i + 1) &
-        CK_EPOCH_SENSE_MASK];
+    other = &record->local.bucket[(i + 1) & CK_EPOCH_SENSE_MASK];
     if (other->count > 0 &&
         ((int)(current->epoch - other->epoch) < 0)) {
         /*
@@ -172,7 +179,7 @@ _ck_epoch_delref(struct ck_epoch_record *record,
         ck_pr_store_uint(&record->epoch, other->epoch);
     }
 
-    return;
+    return true;
 }
 
 void
@@ -230,7 +237,7 @@ ck_epoch_init(struct ck_epoch *global)
 }
 
 struct ck_epoch_record *
-ck_epoch_recycle(struct ck_epoch *global)
+ck_epoch_recycle(struct ck_epoch *global, void *ct)
 {
     struct ck_epoch_record *record;
     ck_stack_entry_t *cursor;
@@ -249,6 +256,12 @@ ck_epoch_recycle(struct ck_epoch *global)
             CK_EPOCH_STATE_USED);
         if (state == CK_EPOCH_STATE_FREE) {
             ck_pr_dec_uint(&global->n_free);
+            ck_pr_store_ptr(&record->ct, ct);
+
+            /*
+             * The context pointer is ordered by a
+             * subsequent protected section.
+             */
             return record;
         }
     }
@@ -258,7 +271,8 @@ ck_epoch_recycle(struct ck_epoch *global)
 }
 
 void
-ck_epoch_register(struct ck_epoch *global, struct ck_epoch_record *record)
+ck_epoch_register(struct ck_epoch *global, struct ck_epoch_record *record,
+    void *ct)
 {
     size_t i;
 
@@ -269,6 +283,7 @@ ck_epoch_register(struct ck_epoch *global, struct ck_epoch_record *record)
     record->n_dispatch = 0;
     record->n_peak = 0;
     record->n_pending = 0;
+    record->ct = ct;
     memset(&record->local, 0, sizeof record->local);
 
     for (i = 0; i < CK_EPOCH_LENGTH; i++)
@@ -295,6 +310,7 @@ ck_epoch_unregister(struct ck_epoch_record *record)
     for (i = 0; i < CK_EPOCH_LENGTH; i++)
         ck_stack_init(&record->pending[i]);
 
+    ck_pr_store_ptr(&record->ct, NULL);
     ck_pr_fence_store();
     ck_pr_store_uint(&record->state, CK_EPOCH_STATE_FREE);
     ck_pr_inc_uint(&global->n_free);
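For context, a minimal sketch (not part of this diff) of how a caller might use the context pointer now threaded through ck_epoch_recycle(), ck_epoch_register(), and ck_epoch_unregister(); the struct and variable names below are hypothetical, only the ck_epoch_* calls come from the API as changed above:

/*
 * Hypothetical consumer sketch: attach a per-thread context to an epoch
 * record via the new third argument of ck_epoch_register(). The record
 * keeps the pointer until ck_epoch_unregister() clears it (hunk above).
 */
#include <ck_epoch.h>

struct my_thread {              /* illustration only */
    unsigned int id;
};

static ck_epoch_t epoch_domain; /* assumed to be ck_epoch_init()'d elsewhere */

static void
my_thread_register(ck_epoch_record_t *record, struct my_thread *self)
{

    ck_epoch_register(&epoch_domain, record, self);
    return;
}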
@@ -340,31 +356,41 @@ ck_epoch_scan(struct ck_epoch *global,
     return NULL;
 }
 
-static void
-ck_epoch_dispatch(struct ck_epoch_record *record, unsigned int e)
+static unsigned int
+ck_epoch_dispatch(struct ck_epoch_record *record, unsigned int e, ck_stack_t *deferred)
 {
     unsigned int epoch = e & (CK_EPOCH_LENGTH - 1);
     ck_stack_entry_t *head, *next, *cursor;
+    unsigned int n_pending, n_peak;
     unsigned int i = 0;
 
-    head = CK_STACK_FIRST(&record->pending[epoch]);
-    ck_stack_init(&record->pending[epoch]);
-
+    head = ck_stack_batch_pop_upmc(&record->pending[epoch]);
     for (cursor = head; cursor != NULL; cursor = next) {
         struct ck_epoch_entry *entry =
             ck_epoch_entry_container(cursor);
 
         next = CK_STACK_NEXT(cursor);
-        entry->function(entry);
+        if (deferred != NULL)
+            ck_stack_push_spnc(deferred, &entry->stack_entry);
+        else
+            entry->function(entry);
+
         i++;
     }
 
-    if (record->n_pending > record->n_peak)
-        record->n_peak = record->n_pending;
+    n_peak = ck_pr_load_uint(&record->n_peak);
+    n_pending = ck_pr_load_uint(&record->n_pending);
 
-    record->n_dispatch += i;
-    record->n_pending -= i;
-    return;
+    /* We don't require accuracy around peak calculation. */
+    if (n_pending > n_peak)
+        ck_pr_store_uint(&record->n_peak, n_peak);
+
+    if (i > 0) {
+        ck_pr_add_uint(&record->n_dispatch, i);
+        ck_pr_sub_uint(&record->n_pending, i);
+    }
+
+    return i;
 }
 
 /*
@@ -376,7 +402,18 @@ ck_epoch_reclaim(struct ck_epoch_record *record)
     unsigned int epoch;
 
     for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++)
-        ck_epoch_dispatch(record, epoch);
+        ck_epoch_dispatch(record, epoch, NULL);
 
     return;
 }
 
+CK_CC_FORCE_INLINE static void
+epoch_block(struct ck_epoch *global, struct ck_epoch_record *cr,
+    ck_epoch_wait_cb_t *cb, void *ct)
+{
+
+    if (cb != NULL)
+        cb(global, cr, ct);
+
+    return;
+}
+
@@ -385,9 +422,9 @@ ck_epoch_reclaim(struct ck_epoch_record *record)
  * This function must not be called with-in read section.
  */
 void
-ck_epoch_synchronize(struct ck_epoch_record *record)
+ck_epoch_synchronize_wait(struct ck_epoch *global,
+    ck_epoch_wait_cb_t *cb, void *ct)
 {
-    struct ck_epoch *global = record->global;
     struct ck_epoch_record *cr;
     unsigned int delta, epoch, goal, i;
     bool active;
@@ -424,10 +461,27 @@ ck_epoch_synchronize(struct ck_epoch_record *record)
          * period.
          */
         e_d = ck_pr_load_uint(&global->epoch);
-        if (e_d != delta) {
-            delta = e_d;
-            goto reload;
+        if (e_d == delta) {
+            epoch_block(global, cr, cb, ct);
+            continue;
         }
+
+        /*
+         * If the epoch has been updated, we may have already
+         * met our goal.
+         */
+        delta = e_d;
+        if ((goal > epoch) & (delta >= goal))
+            goto leave;
+
+        epoch_block(global, cr, cb, ct);
+
+        /*
+         * If the epoch has been updated, then a grace period
+         * requires that all threads are observed idle at the
+         * same epoch.
+         */
+        cr = NULL;
     }
 
     /*
@@ -459,20 +513,6 @@ ck_epoch_synchronize(struct ck_epoch_record *record)
          * Otherwise, we have just acquired latest snapshot.
          */
         delta = delta + r;
         continue;
-
-reload:
-        if ((goal > epoch) & (delta >= goal)) {
-            /*
-             * Right now, epoch overflow is handled as an edge
-             * case. If we have already observed an epoch
-             * generation, then we can be sure no hazardous
-             * references exist to objects from this generation. We
-             * can actually avoid an addtional scan step at this
-             * point.
-             */
-            break;
-        }
     }
 
     /*
|
|||
* However, if non-temporal instructions are used, full barrier
|
||||
* semantics are necessary.
|
||||
*/
|
||||
leave:
|
||||
ck_pr_fence_memory();
|
||||
record->epoch = delta;
|
||||
return;
|
||||
}
|
||||
|
||||
void
|
||||
ck_epoch_synchronize(struct ck_epoch_record *record)
|
||||
{
|
||||
|
||||
ck_epoch_synchronize_wait(record->global, NULL, NULL);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@@ -494,6 +542,16 @@ ck_epoch_barrier(struct ck_epoch_record *record)
     return;
 }
 
+void
+ck_epoch_barrier_wait(struct ck_epoch_record *record, ck_epoch_wait_cb_t *cb,
+    void *ct)
+{
+
+    ck_epoch_synchronize_wait(record->global, cb, ct);
+    ck_epoch_reclaim(record);
+    return;
+}
+
 /*
  * It may be worth it to actually apply these deferral semantics to an epoch
  * that was observed at ck_epoch_call time. The problem is that the latter
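For context, a minimal sketch (not part of this diff) of a wait callback passed to the new ck_epoch_barrier_wait()/ck_epoch_synchronize_wait() entry points; the yield_cb and writer_quiesce names are hypothetical, and the callback's parameter order follows the cb(global, cr, ct) call in epoch_block() above:

/*
 * Hypothetical sketch: back off with sched_yield() each time epoch_block()
 * reports that the grace period has not advanced yet.
 */
#include <sched.h>
#include <ck_epoch.h>

static void
yield_cb(ck_epoch_t *global, ck_epoch_record_t *cr, void *ct)
{

    (void)global;
    (void)cr;
    (void)ct;
    sched_yield();
    return;
}

static void
writer_quiesce(ck_epoch_record_t *record)
{

    /* Waits for a grace period, then reclaims this record's callbacks. */
    ck_epoch_barrier_wait(record, yield_cb, NULL);
    return;
}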
@@ -505,41 +563,61 @@ ck_epoch_barrier(struct ck_epoch_record *record)
  * is far from ideal too.
  */
 bool
-ck_epoch_poll(struct ck_epoch_record *record)
+ck_epoch_poll_deferred(struct ck_epoch_record *record, ck_stack_t *deferred)
 {
     bool active;
     unsigned int epoch;
-    unsigned int snapshot;
     struct ck_epoch_record *cr = NULL;
     struct ck_epoch *global = record->global;
+    unsigned int n_dispatch;
 
     epoch = ck_pr_load_uint(&global->epoch);
 
     /* Serialize epoch snapshots with respect to global epoch. */
     ck_pr_fence_memory();
+
+    /*
+     * At this point, epoch is the current global epoch value.
+     * There may or may not be active threads which observed epoch - 1.
+     * (ck_epoch_scan() will tell us that). However, there should be
+     * no active threads which observed epoch - 2.
+     *
+     * Note that checking epoch - 2 is necessary, as race conditions can
+     * allow another thread to increment the global epoch before this
+     * thread runs.
+     */
+    n_dispatch = ck_epoch_dispatch(record, epoch - 2, deferred);
+
     cr = ck_epoch_scan(global, cr, epoch, &active);
-    if (cr != NULL) {
-        record->epoch = epoch;
-        return false;
-    }
+    if (cr != NULL)
+        return (n_dispatch > 0);
 
     /* We are at a grace period if all threads are inactive. */
     if (active == false) {
         record->epoch = epoch;
         for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++)
-            ck_epoch_dispatch(record, epoch);
+            ck_epoch_dispatch(record, epoch, deferred);
 
         return true;
     }
 
-    /* If an active thread exists, rely on epoch observation. */
-    if (ck_pr_cas_uint_value(&global->epoch, epoch, epoch + 1,
-        &snapshot) == false) {
-        record->epoch = snapshot;
-    } else {
-        record->epoch = epoch + 1;
-    }
+    /*
+     * If an active thread exists, rely on epoch observation.
+     *
+     * All the active threads entered the epoch section during
+     * the current epoch. Therefore, we can now run the handlers
+     * for the immediately preceding epoch and attempt to
+     * advance the epoch if it hasn't been already.
+     */
+    (void)ck_pr_cas_uint(&global->epoch, epoch, epoch + 1);
 
-    ck_epoch_dispatch(record, epoch + 1);
+    ck_epoch_dispatch(record, epoch - 1, deferred);
     return true;
 }
+
+bool
+ck_epoch_poll(struct ck_epoch_record *record)
+{
+
+    return ck_epoch_poll_deferred(record, NULL);
+}
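Finally, a minimal sketch (not part of this diff) of how a caller might use the new ck_epoch_poll_deferred() to collect ready callbacks onto a private stack and run them at a convenient time; the my_entry_container helper and function names are hypothetical, built on the public CK_STACK_* macros and the stack_entry member name visible in this file:

/*
 * Hypothetical sketch: non-blocking reclamation with deferred execution.
 * Ready callbacks are pushed onto `ready` instead of running inline.
 */
#include <ck_epoch.h>
#include <ck_stack.h>

CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry, my_entry_container)

static void
poll_and_run_later(ck_epoch_record_t *record)
{
    ck_stack_t ready;
    ck_stack_entry_t *cursor, *next;

    ck_stack_init(&ready);
    (void)ck_epoch_poll_deferred(record, &ready);

    /* ... latency-sensitive work happens here ... */

    /* Drain the deferred callbacks. */
    for (cursor = CK_STACK_FIRST(&ready); cursor != NULL; cursor = next) {
        struct ck_epoch_entry *entry = my_entry_container(cursor);

        next = CK_STACK_NEXT(cursor);
        entry->function(entry);
    }

    return;
}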