ck_spinlock(3) FreeBSD Library Functions Manual ck_spinlock(3)

NAME

ck_spinlock_init, ck_spinlock_lock, ck_spinlock_unlock, ck_spinlock_locked, ck_spinlock_trylock, ck_spinlock_anderson_init, ck_spinlock_anderson_locked, ck_spinlock_anderson_lock, ck_spinlock_anderson_unlock, ck_spinlock_cas_init, ck_spinlock_cas_locked, ck_spinlock_cas_lock, ck_spinlock_cas_lock_eb, ck_spinlock_cas_trylock, ck_spinlock_cas_unlock, ck_spinlock_clh_init, ck_spinlock_clh_locked, ck_spinlock_clh_lock, ck_spinlock_clh_unlock, ck_spinlock_dec_init, ck_spinlock_dec_locked, ck_spinlock_dec_lock, ck_spinlock_dec_lock_eb, ck_spinlock_dec_trylock, ck_spinlock_dec_unlock, ck_spinlock_fas_init, ck_spinlock_fas_lock, ck_spinlock_fas_lock_eb, ck_spinlock_fas_locked, ck_spinlock_fas_trylock, ck_spinlock_fas_unlock, ck_spinlock_hclh_init, ck_spinlock_hclh_locked, ck_spinlock_hclh_lock, ck_spinlock_hclh_unlock, ck_spinlock_mcs_init, ck_spinlock_mcs_locked, ck_spinlock_mcs_lock, ck_spinlock_mcs_trylock, ck_spinlock_mcs_unlock, ck_spinlock_ticket_init, ck_spinlock_ticket_locked, ck_spinlock_ticket_lock, ck_spinlock_ticket_lock_pb, ck_spinlock_ticket_trylock, ck_spinlock_ticket_unlock
- spinlock implementations

LIBRARY
Concurrency Kit (libck, -lck)

SYNOPSIS
#include <ck_spinlock.h>

ck_spinlock_t spinlock = CK_SPINLOCK_INITIALIZER;
void
ck_spinlock_init(ck_spinlock_t *lock);

void
ck_spinlock_lock(ck_spinlock_t *lock);

void
ck_spinlock_unlock(ck_spinlock_t *lock);

bool
ck_spinlock_locked(ck_spinlock_t *lock);

bool
ck_spinlock_trylock(ck_spinlock_t *lock);

void
ck_spinlock_anderson_init(ck_spinlock_anderson_t *lock, ck_spinlock_anderson_thread_t *slots, unsigned int count);

bool
ck_spinlock_anderson_locked(ck_spinlock_anderson_t *lock);

void
ck_spinlock_anderson_lock(ck_spinlock_anderson_t *lock, ck_spinlock_anderson_thread_t **slot);

void
ck_spinlock_anderson_unlock(ck_spinlock_anderson_t *lock, ck_spinlock_anderson_thread_t *slot);

ck_spinlock_cas_t spinlock = CK_SPINLOCK_CAS_INITIALIZER;
void
ck_spinlock_cas_init(ck_spinlock_cas_t *lock);

bool
ck_spinlock_cas_locked(ck_spinlock_cas_t *lock);

void
ck_spinlock_cas_lock(ck_spinlock_cas_t *lock);

void
ck_spinlock_cas_lock_eb(ck_spinlock_cas_t *lock);

bool
ck_spinlock_cas_trylock(ck_spinlock_cas_t *lock);

void
ck_spinlock_cas_unlock(ck_spinlock_cas_t *lock);

void
ck_spinlock_clh_init(ck_spinlock_clh_t **lock, ck_spinlock_clh_t *unowned);

bool
ck_spinlock_clh_locked(ck_spinlock_clh_t **lock);

void
ck_spinlock_clh_lock(ck_spinlock_clh_t **lock, ck_spinlock_clh_t *node);

void
ck_spinlock_clh_unlock(ck_spinlock_clh_t **node);

ck_spinlock_dec_t spinlock = CK_SPINLOCK_DEC_INITIALIZER;
void
ck_spinlock_dec_init(ck_spinlock_dec_t *lock);

bool
ck_spinlock_dec_locked(ck_spinlock_dec_t *lock);

void
ck_spinlock_dec_lock(ck_spinlock_dec_t *lock);

void
ck_spinlock_dec_lock_eb(ck_spinlock_dec_t *lock);

bool
ck_spinlock_dec_trylock(ck_spinlock_dec_t *lock);

void
ck_spinlock_dec_unlock(ck_spinlock_dec_t *lock);

ck_spinlock_fas_t spinlock = CK_SPINLOCK_FAS_INITIALIZER;
void
ck_spinlock_fas_init(ck_spinlock_fas_t *lock);

void
ck_spinlock_fas_lock(ck_spinlock_fas_t *lock);

void
ck_spinlock_fas_lock_eb(ck_spinlock_fas_t *lock);

bool
ck_spinlock_fas_locked(ck_spinlock_fas_t *lock);

bool
ck_spinlock_fas_trylock(ck_spinlock_fas_t *lock);

void
ck_spinlock_fas_unlock(ck_spinlock_fas_t *lock);


void
ck_spinlock_hclh_init(ck_spinlock_hclh_t **lock, ck_spinlock_hclh_t *unowned);

bool
ck_spinlock_hclh_locked(ck_spinlock_hclh_t **lock);

void
ck_spinlock_hclh_lock(ck_spinlock_hclh_t **lock, ck_spinlock_hclh_t *node);

void
ck_spinlock_hclh_unlock(ck_spinlock_hclh_t **node);

ck_spinlock_mcs_t spinlock = CK_SPINLOCK_MCS_INITIALIZER;
void
ck_spinlock_mcs_init(ck_spinlock_mcs_t **lock);

bool
ck_spinlock_mcs_locked(ck_spinlock_mcs_t **lock);

void
ck_spinlock_mcs_lock(ck_spinlock_mcs_t **lock, ck_spinlock_mcs_t *node);

bool
ck_spinlock_mcs_trylock(ck_spinlock_mcs_t **lock, ck_spinlock_mcs_t *node);

void
ck_spinlock_mcs_unlock(ck_spinlock_mcs_t **lock, ck_spinlock_mcs_t *node);

ck_spinlock_ticket_t spinlock = CK_SPINLOCK_TICKET_INITIALIZER;
void
ck_spinlock_ticket_init(ck_spinlock_ticket_t *lock);

bool
ck_spinlock_ticket_locked(ck_spinlock_ticket_t *lock);

void
ck_spinlock_ticket_lock(ck_spinlock_ticket_t *lock);

void
ck_spinlock_ticket_lock_pb(ck_spinlock_ticket_t *lock, unsigned int period);

bool
ck_spinlock_ticket_trylock(ck_spinlock_ticket_t *lock);

void
ck_spinlock_ticket_unlock(ck_spinlock_ticket_t *lock);

DESCRIPTION
A family of busy-wait spinlock implementations. The ck_spinlock_t implementation is simply a wrapper around the fetch-and-store (ck_spinlock_fas_t) implementation. The table below provides a summary of the current implementations.
Namespace             Algorithm                     Type         Restrictions             Fair
--------------------  ----------------------------  -----------  -----------------------  -----
ck_spinlock_anderson  Anderson                      Array        Fixed number of threads  Yes
ck_spinlock_cas       Compare-and-Swap              Centralized  None                     No
ck_spinlock_clh       Craig, Landin and Hagersten   Queue        Lifetime requirements    Yes
ck_spinlock_dec       Decrement (Linux kernel)      Centralized  UINT_MAX concurrency     No
ck_spinlock_fas       Fetch-and-store               Centralized  None                     No
ck_spinlock_hclh      Hierarchical CLH              Queue        Lifetime requirements    Yes *
ck_spinlock_mcs       Mellor-Crummey and Scott      Queue        None                     Yes
ck_spinlock_ticket    Ticket                        Centralized  None                     Yes

* The hierarchical CLH lock only offers weak fairness for threads across cluster nodes.
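
For illustration, a centralized lock such as the compare-and-swap implementation can serve as a simple busy-wait mutex. The following sketch uses only functions from the synopsis above; the lock, counter, and function names (cas_lock, shared_counter, cas_example) are illustrative assumptions.

#include <ck_spinlock.h>

/* Centralized CAS spinlock; no restriction on the number of threads. */
static ck_spinlock_cas_t cas_lock = CK_SPINLOCK_CAS_INITIALIZER;
static unsigned int shared_counter;     /* illustrative shared state */

void
cas_example(void)
{

        ck_spinlock_cas_lock(&cas_lock);
        shared_counter++;               /* critical section */
        ck_spinlock_cas_unlock(&cas_lock);

        /*
         * Under higher contention, the exponential back-off variant
         * may reduce cache-line traffic while the lock is held.
         */
        ck_spinlock_cas_lock_eb(&cas_lock);
        shared_counter++;               /* critical section */
        ck_spinlock_cas_unlock(&cas_lock);
}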

If contention is low and there is no hard requirement for starvation-freedom, then a centralized greedy (unfair) spinlock is recommended. If contention is high and there is no requirement for starvation-freedom, then a centralized greedy spinlock combined with an exponential back-off mechanism is recommended. If contention is generally low and there is a hard requirement for starvation-freedom, then the ticket lock is recommended. If contention is high and there is a hard requirement for starvation-freedom, then the Craig, Landin and Hagersten queue spinlock is recommended, unless stack allocation is necessary or the NUMA factor is high, in which case the Mellor-Crummey and Scott spinlock is recommended. If the O(n) space usage of array or queue spinlocks cannot be afforded but fairness is still required under high contention, then the ticket lock with proportional back-off is recommended. If the NUMA factor is high but a greedy lock is preferred, then see ck_cohort(3).
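
As a sketch of the fair, fixed-space option mentioned above, the ticket lock below is acquired with proportional back-off. The period argument of 4 is an arbitrary assumed tuning value, and the lock and function names are illustrative.

#include <ck_spinlock.h>
#include <stdbool.h>

/* Ticket lock: fair and centralized, with no restriction on thread count. */
static ck_spinlock_ticket_t ticket_lock = CK_SPINLOCK_TICKET_INITIALIZER;

void
ticket_example(void)
{

        /*
         * Acquire with proportional back-off; the period of 4 is an
         * assumed value and should be tuned for the target machine.
         */
        ck_spinlock_ticket_lock_pb(&ticket_lock, 4);
        /*
         * Critical section.
         */
        ck_spinlock_ticket_unlock(&ticket_lock);

        /* Non-blocking acquisition attempt. */
        if (ck_spinlock_ticket_trylock(&ticket_lock) == true) {
                /*
                 * Critical section.
                 */
                ck_spinlock_ticket_unlock(&ticket_lock);
        }
}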

EXAMPLE
#include <ck_spinlock.h>
#include <stdbool.h>

/*
 * Alternatively, the mutex may be initialized at run-time with
 * ck_spinlock_init(&mutex).
 */
ck_spinlock_t mutex = CK_SPINLOCK_INITIALIZER;

void
example(void)
{

        ck_spinlock_lock(&mutex);
        /*
         * Critical section.
         */
        ck_spinlock_unlock(&mutex);

        ck_spinlock_lock_eb(&mutex);
        /*
         * Critical section.
         */
        ck_spinlock_unlock(&mutex);

        if (ck_spinlock_trylock(&mutex) == true) {
                /*
                 * Critical section.
                 */
                ck_spinlock_unlock(&mutex);
        }
}
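
The array-based Anderson lock requires the maximum number of participating threads to be fixed in advance and a slot array sized accordingly. The sketch below uses only the functions from the synopsis; the bound of 8 threads and the names THREAD_COUNT, array_lock, slots, anderson_setup, and anderson_example are illustrative assumptions.

#include <ck_spinlock.h>

#define THREAD_COUNT 8  /* assumed upper bound on participating threads */

static ck_spinlock_anderson_t array_lock;
static ck_spinlock_anderson_thread_t slots[THREAD_COUNT];

void
anderson_setup(void)
{

        /* Must complete before any thread attempts to acquire the lock. */
        ck_spinlock_anderson_init(&array_lock, slots, THREAD_COUNT);
}

void
anderson_example(void)
{
        ck_spinlock_anderson_thread_t *slot;

        /* The lock operation hands back the slot this thread spun on. */
        ck_spinlock_anderson_lock(&array_lock, &slot);
        /*
         * Critical section.
         */
        ck_spinlock_anderson_unlock(&array_lock, slot);
}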

SEE ALSO
ck_cohort(3), ck_elide(3)

Additional information available at http://concurrencykit.org/

July 26, 2013.
