003 File Manager
Current Path:
/usr/src/contrib/ntp/libntp
usr
/
src
/
contrib
/
ntp
/
libntp
/
📁
..
📄
Makefile.am
(3.27 KB)
📄
Makefile.in
(105.83 KB)
📄
README
(272 B)
📄
a_md5encrypt.c
(6.76 KB)
📄
adjtime.c
(10.29 KB)
📄
adjtimex.c
(334 B)
📄
atoint.c
(801 B)
📄
atolfp.c
(1.88 KB)
📄
atouint.c
(885 B)
📄
audio.c
(13.09 KB)
📄
authkeys.c
(19.92 KB)
📄
authreadkeys.c
(9.06 KB)
📄
authusekey.c
(748 B)
📄
bsd_strerror.c
(1.35 KB)
📄
buftvtots.c
(643 B)
📄
caljulian.c
(891 B)
📄
caltontp.c
(2.25 KB)
📄
calyearstart.c
(2.1 KB)
📄
clocktime.c
(4.49 KB)
📄
clocktypes.c
(3.69 KB)
📄
decodenetnum.c
(3.93 KB)
📄
dofptoa.c
(2.19 KB)
📄
dolfptoa.c
(3.14 KB)
📄
emalloc.c
(3.4 KB)
📄
findconfig.c
(1.46 KB)
📄
getopt.c
(2.21 KB)
📄
hextoint.c
(673 B)
📄
hextolfp.c
(1.35 KB)
📄
humandate.c
(1.1 KB)
📄
icom.c
(4.22 KB)
📄
iosignal.c
(12.27 KB)
📄
is_ip_address.c
(2.02 KB)
📄
lib_strbuf.c
(633 B)
📄
libssl_compat.c
(6.13 KB)
📄
machines.c
(13.05 KB)
📄
mktime.c
(8.29 KB)
📄
modetoa.c
(507 B)
📄
mstolfp.c
(2.01 KB)
📄
msyslog.c
(12.67 KB)
📄
netof.c
(1.17 KB)
📄
ntp_calendar.c
(58.49 KB)
📄
ntp_calgps.c
(15.73 KB)
📄
ntp_crypto_rnd.c
(2.11 KB)
📄
ntp_intres.c
(28.64 KB)
📄
ntp_libopts.c
(1.27 KB)
📄
ntp_lineedit.c
(3.88 KB)
📄
ntp_random.c
(17.63 KB)
📄
ntp_rfc2553.c
(14.85 KB)
📄
ntp_worker.c
(7.21 KB)
📄
numtoa.c
(963 B)
📄
numtohost.c
(908 B)
📄
octtoint.c
(578 B)
📄
prettydate.c
(5.53 KB)
📄
recvbuff.c
(7.3 KB)
📄
refidsmear.c
(1.05 KB)
📄
refnumtoa.c
(673 B)
📄
snprintf.c
(52.56 KB)
📄
socket.c
(4.88 KB)
📄
socktoa.c
(2.88 KB)
📄
socktohost.c
(2.54 KB)
📄
ssl_init.c
(5.09 KB)
📄
statestr.c
(11.4 KB)
📄
strdup.c
(854 B)
📄
strl_obsd.c
(3.65 KB)
📄
syssignal.c
(2.69 KB)
📄
systime.c
(17.51 KB)
📄
systime_s.c
(33 B)
📄
timespecops.c
(4.27 KB)
📄
timetoa.c
(2.75 KB)
📄
timevalops.c
(12.97 KB)
📄
timexsup.c
(1.17 KB)
📄
uglydate.c
(958 B)
📄
vint64ops.c
(5.1 KB)
📄
work_fork.c
(12.73 KB)
📄
work_thread.c
(22.88 KB)
📄
xsbprintf.c
(2.06 KB)
📄
ymd2yd.c
(582 B)
Editing: ntp_worker.c
/*
 * ntp_worker.c - parent-side management of blocking worker children
 *
 * Dispatches blocking requests (DNS lookups) to worker children
 * (forked processes or threads, per ntp_workimpl.h) and routes their
 * responses back to the registered callbacks.
 */
#include <config.h>
#include "ntp_workimpl.h"

#ifdef WORKER

#include <stdio.h>
#include <ctype.h>
#include <signal.h>

#include "iosignal.h"
#include "ntp_stdlib.h"
#include "ntp_malloc.h"
#include "ntp_syslog.h"
#include "ntpd.h"
#include "ntp_io.h"
#include "ntp_assert.h"
#include "ntp_unixtime.h"
#include "intreswork.h"


#define CHILD_MAX_IDLE	(3 * 60)	/* seconds, idle worker limit */

/* table of worker children, grown on demand in chunks of 4 slots */
blocking_child **	blocking_children;
size_t			blocking_children_alloc;
int			worker_per_query;	/* boolean: one child per request */
int			intres_req_pending;	/* count of outstanding requests */
/* seen/done counters: a mismatch signals responses awaiting harvest */
volatile u_int		blocking_child_ready_seen;
volatile u_int		blocking_child_ready_done;

#ifndef HAVE_IO_COMPLETION_PORT
/*
 * pipe_socketpair()
 *
 * Provides an AF_UNIX socketpair on systems which have them, otherwise
 * pair of unidirectional pipes.
 *
 * On success returns 0, stores the two descriptors in caller_fds, and,
 * when is_pipe is non-NULL, records whether pipe() was used.  On
 * failure returns -1.
 */
int
pipe_socketpair(
	int	caller_fds[2],
	int *	is_pipe
	)
{
	int	rc;
	int	fds[2];
	int	called_pipe;

#ifdef HAVE_SOCKETPAIR
	rc = socketpair(AF_UNIX, SOCK_STREAM, 0, &fds[0]);
#else
	rc = -1;
#endif

	/* fall back to pipe() if socketpair() is absent or failed */
	if (-1 == rc) {
		rc = pipe(&fds[0]);
		called_pipe = TRUE;
	} else {
		called_pipe = FALSE;
	}

	if (-1 == rc)
		return rc;

	caller_fds[0] = fds[0];
	caller_fds[1] = fds[1];
	if (is_pipe != NULL)
		*is_pipe = called_pipe;

	return 0;
}


/*
 * close_all_except()
 *
 * Close all file descriptors except the given keep_fd.
 */
void
close_all_except(
	int keep_fd
	)
{
	int fd;

	/* close everything below keep_fd, then everything above it */
	for (fd = 0; fd < keep_fd; fd++)
		close(fd);

	close_all_beyond(keep_fd);
}


/*
 * close_all_beyond()
 *
 * Close all file descriptors after the given keep_fd, which is the
 * highest fd to keep open.  Uses closefrom() or fcntl(F_CLOSEM) when
 * available; otherwise closes each fd up to the descriptor-table size.
 */
void
close_all_beyond(
	int keep_fd
	)
{
# ifdef HAVE_CLOSEFROM
	closefrom(keep_fd + 1);
# elif defined(F_CLOSEM)
	/*
	 * From 'Writing Reliable AIX Daemons,' SG24-4946-00,
	 * by Eric Agar (saves us from doing 32767 system
	 * calls)
	 */
	if (fcntl(keep_fd + 1, F_CLOSEM, 0) == -1)
		msyslog(LOG_ERR, "F_CLOSEM(%d): %m", keep_fd + 1);
# else	/* !HAVE_CLOSEFROM && !F_CLOSEM follows */
	int fd;
	int max_fd;

	max_fd = GETDTABLESIZE();
	for (fd = keep_fd + 1; fd < max_fd; fd++)
		close(fd);
# endif	/* !HAVE_CLOSEFROM && !F_CLOSEM */
}
#endif	/* HAVE_IO_COMPLETION_PORT */


/*
 * available_blocking_child_slot()
 *
 * Return the index of a usable entry in blocking_children: the first
 * NULL slot, or the first slot whose child is marked reusable (the
 * mark is consumed).  If none is free, grow the table by 4 slots
 * (zero-filled via erealloc_zero) and return the first new index.
 */
u_int
available_blocking_child_slot(void)
{
	const size_t	each = sizeof(blocking_children[0]);
	u_int		slot;
	size_t		prev_alloc;
	size_t		new_alloc;
	size_t		prev_octets;
	size_t		octets;

	for (slot = 0; slot < blocking_children_alloc; slot++) {
		if (NULL == blocking_children[slot])
			return slot;
		if (blocking_children[slot]->reusable) {
			blocking_children[slot]->reusable = FALSE;
			return slot;
		}
	}

	prev_alloc = blocking_children_alloc;
	prev_octets = prev_alloc * each;
	new_alloc = blocking_children_alloc + 4;
	octets = new_alloc * each;
	blocking_children = erealloc_zero(blocking_children, octets,
					  prev_octets);
	blocking_children_alloc = new_alloc;

	/* assume we'll never have enough workers to overflow u_int */
	return (u_int)prev_alloc;
}


/*
 * queue_blocking_request()
 *
 * Build a blocking_pipe_header describing a request of reqsize octets
 * at req and hand it to a worker child.  done_func and context travel
 * in the header and are invoked by process_blocking_resp() when the
 * response arrives.
 *
 * Unless worker_per_query is set, all requests share the single slot
 * cached in intres_slot; the first request on an idle resolver also
 * arms intres_timeout_req(0).  The blocking_child structure for the
 * chosen slot is allocated lazily, with its pipe descriptors marked
 * unopened (-1).  Returns the result of send_blocking_req_internal().
 */
int
queue_blocking_request(
	blocking_work_req	rtype,
	void *			req,
	size_t			reqsize,
	blocking_work_callback	done_func,
	void *			context
	)
{
	static u_int		intres_slot = UINT_MAX;	/* shared resolver slot */
	u_int			child_slot;
	blocking_child *	c;
	blocking_pipe_header	req_hdr;

	req_hdr.octets = sizeof(req_hdr) + reqsize;
	req_hdr.magic_sig = BLOCKING_REQ_MAGIC;
	req_hdr.rtype = rtype;
	req_hdr.done_func = done_func;
	req_hdr.context = context;

	/* pick a fresh slot unless the cached shared slot is still usable */
	child_slot = UINT_MAX;
	if (worker_per_query || UINT_MAX == intres_slot ||
	    blocking_children[intres_slot]->reusable)
		child_slot = available_blocking_child_slot();
	if (!worker_per_query) {
		if (UINT_MAX == intres_slot)
			intres_slot = child_slot;
		else
			child_slot = intres_slot;
		if (0 == intres_req_pending)
			intres_timeout_req(0);
	}
	intres_req_pending++;
	INSIST(UINT_MAX != child_slot);
	c = blocking_children[child_slot];
	if (NULL == c) {
		/* first use of this slot: allocate and mark pipes unopened */
		c = emalloc_zero(sizeof(*c));
#ifdef WORK_FORK
		c->req_read_pipe = -1;
		c->req_write_pipe = -1;
#endif
#ifdef WORK_PIPE
		c->resp_read_pipe = -1;
		c->resp_write_pipe = -1;
#endif
		blocking_children[child_slot] = c;
	}
	req_hdr.child_idx = child_slot;

	return send_blocking_req_internal(c, &req_hdr, req);
}


/*
 * queue_blocking_response()
 *
 * Called in the worker child: stamp the response header (size, magic,
 * and the rtype/context/done_func copied from the originating request)
 * and send it back to the parent.
 */
int
queue_blocking_response(
	blocking_child *		c,
	blocking_pipe_header *		resp,
	size_t				respsize,
	const blocking_pipe_header *	req
	)
{
	resp->octets = respsize;
	resp->magic_sig = BLOCKING_RESP_MAGIC;
	resp->rtype = req->rtype;
	resp->context = req->context;
	resp->done_func = req->done_func;

	return send_blocking_resp_internal(c, resp);
}


/*
 * process_blocking_resp()
 *
 * Parent side: receive response(s) from child c, invoke each response's
 * done_func with the payload that follows the header, and free the
 * response buffer.  Afterwards, either start the idle-worker timer
 * (shared-resolver mode with nothing pending) or, in worker-per-query
 * mode, ask the child to exit.
 */
void
process_blocking_resp(
	blocking_child *	c
	)
{
	blocking_pipe_header *	resp;
	void *			data;

	/*
	 * On Windows send_blocking_resp_internal() may signal the
	 * blocking_response_ready event multiple times while we're
	 * processing a response, so always consume all available
	 * responses before returning to test the event again.
	 */
#ifdef WORK_THREAD
	do {
#endif
		resp = receive_blocking_resp_internal(c);
		if (NULL != resp) {
			DEBUG_REQUIRE(BLOCKING_RESP_MAGIC ==
				      resp->magic_sig);
			/* payload immediately follows the header */
			data = (char *)resp + sizeof(*resp);
			intres_req_pending--;
			(*resp->done_func)(resp->rtype, resp->context,
					   resp->octets - sizeof(*resp),
					   data);
			free(resp);
		}
#ifdef WORK_THREAD
	} while (NULL != resp);
#endif
	if (!worker_per_query && 0 == intres_req_pending)
		intres_timeout_req(CHILD_MAX_IDLE);
	else if (worker_per_query)
		req_child_exit(c);
}

/*
 * harvest_blocking_responses()
 *
 * Poll for ready responses: if the global ready_seen counter has moved
 * past ready_done, catch done up and scan every child whose per-child
 * resp_ready_seen/resp_ready_done counters likewise differ, processing
 * its responses.
 */
void
harvest_blocking_responses(void)
{
	size_t		idx;
	blocking_child*	cp;
	u_int		scseen, scdone;

	scseen = blocking_child_ready_seen;
	scdone = blocking_child_ready_done;
	if (scdone != scseen) {
		/* acknowledge globally before scanning the children */
		blocking_child_ready_done = scseen;
		for (idx = 0; idx < blocking_children_alloc; idx++) {
			cp = blocking_children[idx];
			if (NULL == cp)
				continue;
			scseen = cp->resp_ready_seen;
			scdone = cp->resp_ready_done;
			if (scdone != scseen) {
				cp->resp_ready_done = scseen;
				process_blocking_resp(cp);
			}
		}
	}
}


/*
 * blocking_child_common runs as a forked child or a thread
 *
 * Request loop: receive requests and dispatch by rtype to the
 * getaddrinfo/getnameinfo handlers until receive fails, a handler
 * signals completion, or an unknown rtype arrives.  Always returns 0.
 */
int
blocking_child_common(
	blocking_child	*c
	)
{
	int say_bye;
	blocking_pipe_header *req;

	say_bye = FALSE;
	while (!say_bye) {
		req = receive_blocking_req_internal(c);
		if (NULL == req) {
			say_bye = TRUE;
			continue;
		}

		DEBUG_REQUIRE(BLOCKING_REQ_MAGIC == req->magic_sig);

		switch (req->rtype) {
		case BLOCKING_GETADDRINFO:
			if (blocking_getaddrinfo(c, req))
				say_bye = TRUE;
			break;

		case BLOCKING_GETNAMEINFO:
			if (blocking_getnameinfo(c, req))
				say_bye = TRUE;
			break;

		default:
			msyslog(LOG_ERR, "unknown req %d to blocking worker", req->rtype);
			say_bye = TRUE;
		}

		free(req);
	}

	return 0;
}


/*
 * worker_idle_timer_fired()
 *
 * The parent starts this timer when the last pending response has been
 * received from the child, making it idle, and clears the timer when a
 * request is dispatched to the child.  Once the timer expires, the
 * child is sent packing.
 *
 * This is called when worker_idle_timer is nonzero and less than or
 * equal to current_time.
 */
void
worker_idle_timer_fired(void)
{
	u_int			idx;
	blocking_child *	c;

	DEBUG_REQUIRE(0 == intres_req_pending);

	/* cancel the timer, then retire every allocated child */
	intres_timeout_req(0);
	for (idx = 0; idx < blocking_children_alloc; idx++) {
		c = blocking_children[idx];
		if (NULL == c)
			continue;
		req_child_exit(c);
	}
}
#else	/* !WORKER follows */
/* keep the translation unit non-empty when WORKER is not defined */
int ntp_worker_nonempty_compilation_unit;
#endif
Upload File
Create Folder