File Manager
Current Path:
/usr/src/sys/contrib/ck/include
usr
/
src
/
sys
/
contrib
/
ck
/
include
/
📁
..
📄
ck_array.h
(3.06 KB)
📄
ck_backoff.h
(1.84 KB)
📄
ck_barrier.h
(5.31 KB)
📄
ck_bitmap.h
(12.25 KB)
📄
ck_brlock.h
(6.47 KB)
📄
ck_bytelock.h
(5.3 KB)
📄
ck_cc.h
(3.68 KB)
📄
ck_cohort.h
(5.64 KB)
📄
ck_elide.h
(8.74 KB)
📄
ck_epoch.h
(7.82 KB)
📄
ck_fifo.h
(12.87 KB)
📄
ck_hp.h
(3.31 KB)
📄
ck_hp_fifo.h
(5.99 KB)
📄
ck_hp_stack.h
(3.08 KB)
📄
ck_hs.h
(4.46 KB)
📄
ck_ht.h
(6.51 KB)
📄
ck_limits.h
(1.77 KB)
📄
ck_malloc.h
(1.57 KB)
📄
ck_md.h
(3.91 KB)
📄
ck_pflock.h
(3.68 KB)
📄
ck_pr.h
(31.64 KB)
📄
ck_queue.h
(15.78 KB)
📄
ck_rhs.h
(4.41 KB)
📄
ck_ring.h
(16.8 KB)
📄
ck_rwcohort.h
(11.17 KB)
📄
ck_rwlock.h
(6.15 KB)
📄
ck_sequence.h
(3.35 KB)
📄
ck_spinlock.h
(2.46 KB)
📄
ck_stack.h
(9.03 KB)
📄
ck_stdbool.h
(1.42 KB)
📄
ck_stddef.h
(1.42 KB)
📄
ck_stdint.h
(1.55 KB)
📄
ck_stdlib.h
(1.42 KB)
📄
ck_string.h
(1.42 KB)
📄
ck_swlock.h
(5.09 KB)
📄
ck_tflock.h
(3.9 KB)
📁
gcc
📁
spinlock
Editing: ck_rwcohort.h
/*
 * Copyright 2013-2015 Samy Al Bahra.
 * Copyright 2013 Brendon Scheinman.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_RWCOHORT_H
#define CK_RWCOHORT_H

/*
 * This is an implementation of NUMA-aware reader-writer locks as described in:
 *     Calciu, I.; Dice, D.; Lev, Y.; Luchangco, V.; Marathe, V.; and Shavit,
 *     N. 2014. NUMA-Aware Reader-Writer Locks
 *
 * Each CK_RWCOHORT_*_PROTOTYPE(N) macro expands to a lock structure plus
 * inline lock/unlock functions layered on top of a cohort lock previously
 * instantiated for the same N (see ck_cohort.h).  Three fairness flavors
 * are provided:
 *   WP      - writers back off behind a reader-raised barrier
 *             (write_barrier) once readers have waited too long;
 *   RP      - readers back off behind a writer-raised barrier
 *             (read_barrier) once writers have waited too long;
 *   NEUTRAL - readers briefly pass through the cohort lock itself, so
 *             fairness is inherited from the underlying cohort lock.
 */

#include <ck_cc.h>
#include <ck_pr.h>
#include <ck_stddef.h>
#include <ck_cohort.h>

/* Convenience wrappers naming the WP ("writer back-off") variant for N. */
#define CK_RWCOHORT_WP_NAME(N) ck_rwcohort_wp_##N
#define CK_RWCOHORT_WP_INSTANCE(N) struct CK_RWCOHORT_WP_NAME(N)
#define CK_RWCOHORT_WP_INIT(N, RW, WL) ck_rwcohort_wp_##N##_init(RW, WL)
#define CK_RWCOHORT_WP_READ_LOCK(N, RW, C, GC, LC) \
	ck_rwcohort_wp_##N##_read_lock(RW, C, GC, LC)
#define CK_RWCOHORT_WP_READ_UNLOCK(N, RW, C, GC, LC) \
	ck_rwcohort_wp_##N##_read_unlock(RW)
#define CK_RWCOHORT_WP_WRITE_LOCK(N, RW, C, GC, LC) \
	ck_rwcohort_wp_##N##_write_lock(RW, C, GC, LC)
#define CK_RWCOHORT_WP_WRITE_UNLOCK(N, RW, C, GC, LC) \
	ck_rwcohort_wp_##N##_write_unlock(RW, C, GC, LC)

/* Default number of reader spins before the anti-starvation barrier is raised. */
#define CK_RWCOHORT_WP_DEFAULT_WAIT_LIMIT 1000

/*
 * WP variant.  Writers acquire the cohort lock and then drain active readers
 * (read_counter).  Readers optimistically bump read_counter and roll it back
 * if a writer already holds the cohort lock; after wait_limit failed spins a
 * reader raises write_barrier, which stalls incoming writers at the top of
 * write_lock until the reader gets through (starvation avoidance).
 */
#define CK_RWCOHORT_WP_PROTOTYPE(N) \
	CK_RWCOHORT_WP_INSTANCE(N) { \
		unsigned int read_counter; \
		unsigned int write_barrier; \
		unsigned int wait_limit; \
	}; \
	CK_CC_INLINE static void \
	ck_rwcohort_wp_##N##_init(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \
	    unsigned int wait_limit) \
	{ \
 \
		rw_cohort->read_counter = 0; \
		rw_cohort->write_barrier = 0; \
		rw_cohort->wait_limit = wait_limit; \
		ck_pr_barrier(); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_wp_##N##_write_lock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
	    void *local_context) \
	{ \
 \
		while (ck_pr_load_uint(&rw_cohort->write_barrier) > 0) \
			ck_pr_stall(); \
 \
		CK_COHORT_LOCK(N, cohort, global_context, local_context); \
 \
		while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) \
			ck_pr_stall(); \
 \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_wp_##N##_write_unlock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
	    void *local_context) \
	{ \
 \
		(void)rw_cohort; \
		CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_wp_##N##_read_lock(CK_RWCOHORT_WP_INSTANCE(N) *rw_cohort, \
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
	    void *local_context) \
	{ \
		unsigned int wait_count = 0; \
		bool raised = false; \
 \
		for (;;) { \
			ck_pr_inc_uint(&rw_cohort->read_counter); \
			ck_pr_fence_atomic_load(); \
			if (CK_COHORT_LOCKED(N, cohort, global_context, \
			    local_context) == false) \
				break; \
 \
			ck_pr_dec_uint(&rw_cohort->read_counter); \
			while (CK_COHORT_LOCKED(N, cohort, global_context, \
			    local_context) == true) { \
				ck_pr_stall(); \
				if (++wait_count > rw_cohort->wait_limit && \
				    raised == false) { \
					ck_pr_inc_uint(&rw_cohort->write_barrier); \
					raised = true; \
				} \
			} \
		} \
 \
		if (raised == true) \
			ck_pr_dec_uint(&rw_cohort->write_barrier); \
 \
		ck_pr_fence_load(); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_wp_##N##_read_unlock(CK_RWCOHORT_WP_INSTANCE(N) *cohort) \
	{ \
 \
		ck_pr_fence_load_atomic(); \
		ck_pr_dec_uint(&cohort->read_counter); \
		return; \
	}

/* Static initializer; wait_limit of 0 means the barrier is raised immediately. */
#define CK_RWCOHORT_WP_INITIALIZER { \
	.read_counter = 0, \
	.write_barrier = 0, \
	.wait_limit = 0 \
}

/* Convenience wrappers naming the RP ("reader back-off") variant for N. */
#define CK_RWCOHORT_RP_NAME(N) ck_rwcohort_rp_##N
#define CK_RWCOHORT_RP_INSTANCE(N) struct CK_RWCOHORT_RP_NAME(N)
#define CK_RWCOHORT_RP_INIT(N, RW, WL) ck_rwcohort_rp_##N##_init(RW, WL)
#define CK_RWCOHORT_RP_READ_LOCK(N, RW, C, GC, LC) \
	ck_rwcohort_rp_##N##_read_lock(RW, C, GC, LC)
#define CK_RWCOHORT_RP_READ_UNLOCK(N, RW, C, GC, LC) \
	ck_rwcohort_rp_##N##_read_unlock(RW)
#define CK_RWCOHORT_RP_WRITE_LOCK(N, RW, C, GC, LC) \
	ck_rwcohort_rp_##N##_write_lock(RW, C, GC, LC)
#define CK_RWCOHORT_RP_WRITE_UNLOCK(N, RW, C, GC, LC) \
	ck_rwcohort_rp_##N##_write_unlock(RW, C, GC, LC)

/* Default number of writer spins before the anti-starvation barrier is raised. */
#define CK_RWCOHORT_RP_DEFAULT_WAIT_LIMIT 1000

/*
 * RP variant, symmetric to WP.  A writer acquires the cohort lock, and if
 * readers are still active it releases the lock and spins; after wait_limit
 * failed spins it raises read_barrier, which stalls new readers at the top
 * of read_lock so the writer can make progress.
 */
#define CK_RWCOHORT_RP_PROTOTYPE(N) \
	CK_RWCOHORT_RP_INSTANCE(N) { \
		unsigned int read_counter; \
		unsigned int read_barrier; \
		unsigned int wait_limit; \
	}; \
	CK_CC_INLINE static void \
	ck_rwcohort_rp_##N##_init(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \
	    unsigned int wait_limit) \
	{ \
 \
		rw_cohort->read_counter = 0; \
		rw_cohort->read_barrier = 0; \
		rw_cohort->wait_limit = wait_limit; \
		ck_pr_barrier(); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_rp_##N##_write_lock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
	    void *local_context) \
	{ \
		unsigned int wait_count = 0; \
		bool raised = false; \
 \
		for (;;) { \
			CK_COHORT_LOCK(N, cohort, global_context, local_context); \
			if (ck_pr_load_uint(&rw_cohort->read_counter) == 0) \
				break; \
 \
			CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \
			while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) { \
				ck_pr_stall(); \
				if (++wait_count > rw_cohort->wait_limit && \
				    raised == false) { \
					ck_pr_inc_uint(&rw_cohort->read_barrier); \
					raised = true; \
				} \
			} \
		} \
 \
		if (raised == true) \
			ck_pr_dec_uint(&rw_cohort->read_barrier); \
 \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_rp_##N##_write_unlock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, void *local_context) \
	{ \
 \
		(void)rw_cohort; \
		CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_rp_##N##_read_lock(CK_RWCOHORT_RP_INSTANCE(N) *rw_cohort, \
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
	    void *local_context) \
	{ \
 \
		while (ck_pr_load_uint(&rw_cohort->read_barrier) > 0) \
			ck_pr_stall(); \
 \
		ck_pr_inc_uint(&rw_cohort->read_counter); \
		ck_pr_fence_atomic_load(); \
 \
		while (CK_COHORT_LOCKED(N, cohort, global_context, \
		    local_context) == true) \
			ck_pr_stall(); \
 \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_rp_##N##_read_unlock(CK_RWCOHORT_RP_INSTANCE(N) *cohort) \
	{ \
 \
		ck_pr_fence_load_atomic(); \
		ck_pr_dec_uint(&cohort->read_counter); \
		return; \
	}

/* Static initializer; wait_limit of 0 means the barrier is raised immediately. */
#define CK_RWCOHORT_RP_INITIALIZER { \
	.read_counter = 0, \
	.read_barrier = 0, \
	.wait_limit = 0 \
}

/* Convenience wrappers naming the NEUTRAL variant for N. */
#define CK_RWCOHORT_NEUTRAL_NAME(N) ck_rwcohort_neutral_##N
#define CK_RWCOHORT_NEUTRAL_INSTANCE(N) struct CK_RWCOHORT_NEUTRAL_NAME(N)
#define CK_RWCOHORT_NEUTRAL_INIT(N, RW) ck_rwcohort_neutral_##N##_init(RW)
#define CK_RWCOHORT_NEUTRAL_READ_LOCK(N, RW, C, GC, LC) \
	ck_rwcohort_neutral_##N##_read_lock(RW, C, GC, LC)
#define CK_RWCOHORT_NEUTRAL_READ_UNLOCK(N, RW, C, GC, LC) \
	ck_rwcohort_neutral_##N##_read_unlock(RW)
#define CK_RWCOHORT_NEUTRAL_WRITE_LOCK(N, RW, C, GC, LC) \
	ck_rwcohort_neutral_##N##_write_lock(RW, C, GC, LC)
#define CK_RWCOHORT_NEUTRAL_WRITE_UNLOCK(N, RW, C, GC, LC) \
	ck_rwcohort_neutral_##N##_write_unlock(RW, C, GC, LC)

/* Defined for interface parity with WP/RP; this variant raises no barrier. */
#define CK_RWCOHORT_NEUTRAL_DEFAULT_WAIT_LIMIT 1000

/*
 * NEUTRAL variant.  Readers pass through the cohort lock just long enough to
 * bump read_counter, then release it; writers hold the cohort lock and drain
 * read_counter.  Relative reader/writer fairness therefore follows directly
 * from the fairness of the underlying cohort lock.
 */
#define CK_RWCOHORT_NEUTRAL_PROTOTYPE(N) \
	CK_RWCOHORT_NEUTRAL_INSTANCE(N) { \
		unsigned int read_counter; \
	}; \
	CK_CC_INLINE static void \
	ck_rwcohort_neutral_##N##_init(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort) \
	{ \
 \
		rw_cohort->read_counter = 0; \
		ck_pr_barrier(); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_neutral_##N##_write_lock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort,\
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
	    void *local_context) \
	{ \
 \
		CK_COHORT_LOCK(N, cohort, global_context, local_context); \
		while (ck_pr_load_uint(&rw_cohort->read_counter) > 0) { \
			ck_pr_stall(); \
		} \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_neutral_##N##_write_unlock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort,\
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, void *local_context) \
	{ \
 \
		(void)rw_cohort; \
		CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_neutral_##N##_read_lock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *rw_cohort, \
	    CK_COHORT_INSTANCE(N) *cohort, void *global_context, \
	    void *local_context) \
	{ \
 \
		CK_COHORT_LOCK(N, cohort, global_context, local_context); \
		ck_pr_inc_uint(&rw_cohort->read_counter); \
		CK_COHORT_UNLOCK(N, cohort, global_context, local_context); \
		return; \
	} \
	CK_CC_INLINE static void \
	ck_rwcohort_neutral_##N##_read_unlock(CK_RWCOHORT_NEUTRAL_INSTANCE(N) *cohort) \
	{ \
 \
		ck_pr_fence_load_atomic(); \
		ck_pr_dec_uint(&cohort->read_counter); \
		return; \
	}

/* Static initializer for the NEUTRAL variant. */
#define CK_RWCOHORT_NEUTRAL_INITIALIZER { \
	.read_counter = 0, \
}

#endif /* CK_RWCOHORT_H */
Upload File
Create Folder