/**********************************************************************/
/*  Howard Eng                                             3/94       */
/*  Unixpros, Inc.                                                    */
/*                                                                    */
/*  This module defines wrappers for the DCE cma library.  The       */
/*  wrappers are required so that as much of the existing I/O code   */
/*  as possible can be reused.                                        */
/**********************************************************************/

/*
 * NOTE: the bracketed header names were lost from this listing; the
 * set below is reconstructed from the symbols this module uses and
 * may not match the original list exactly.
 */
#include <stdio.h>
#include <signal.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <pthread.h>
#include <cma.h>

#include "ada_ptd_exc.h"

#undef malloc
#undef free
#undef realloc
#undef calloc
#undef cfree
#undef cma__int_lock
#undef cma__int_unlock
#undef cma__int_signal

static pthread_t ___null_thread___;

void cma__int_signal ( char *cv )
{
    extern void ada_thd_int_signal( char * );

#ifndef NDEBUG
    if( cv == (char *) NULL )
        fprintf( stderr, "CV is NULL\n" );
    else
#endif
        ada_thd_int_signal( cv );
}

char *cma__get_cv ( char *attr )
{
    char *ptr;
    extern char *ada_thd_create_int_cv();

    /* "attr" is accepted for interface compatibility and ignored. */
    ptr = ada_thd_create_int_cv();
    return ( ptr );
}

char *cma__get_mutex ( char *attr )
{
    char *ptr;
    extern char *ada_thd_create_int_lock();

    ptr = ada_thd_create_int_lock();
    if( ptr == (char *) NULL )
        fprintf( stderr, "Couldn't Create Mutex\n" );
    return ( ptr );
}

char *cma__get_first_mutex ( char *attr )
{
    return ( cma__get_mutex( attr ) );
}

void cma__int_lock ( char *lock )
{
    extern void ada_thd_lock_int_lock( char * );

    if( lock == (char *) NULL )
        fprintf( stderr, "Lock is NULL: %s %d\n", __FILE__, __LINE__ );
    else
        ada_thd_lock_int_lock( lock );
}

void cma__int_unlock ( char *lock )
{
    extern void ada_thd_unlock_int_lock( char * );

    if( lock == (char *) NULL )
        fprintf( stderr, "Lock is NULL: %s %d\n", __FILE__, __LINE__ );
    else
        ada_thd_unlock_int_lock( lock );
}

void cma__int_wait ( char *cv, char *mutex )
{
    extern void ada_thd_int_wait( char *, char * );

    ada_thd_int_wait( cv, mutex );
}

cma_t_status cma__int_timed_wait ( char *cv, char *mutex,
                                   struct timespec *abs_time )
{
    extern void ada_thd_int_timedwait( char *, char *, struct timespec * );

    /*
     * The Ada runtime call returns no status, so cma_s_timed_out is
     * always reported; callers must recheck their own predicate.
     */
    ada_thd_int_timedwait( cv, mutex, abs_time );
    return ( cma_s_timed_out );
}
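/**********************************************************************/
/* The wrappers above follow the classic mutex/condition-variable     */
/* pattern.  A minimal usage sketch (hypothetical caller with its own */
/* "ready" flag; not part of the original I/O code):                  */
/*                                                                    */
/*     char *lock = cma__get_mutex( (char *) NULL );                  */
/*     char *cv   = cma__get_cv( (char *) NULL );                     */
/*                                                                    */
/*     cma__int_lock( lock );                                         */
/*     while ( !ready )                  -- recheck after each wakeup */
/*         cma__int_wait( cv, lock );    -- unlocks, waits, relocks   */
/*     cma__int_unlock( lock );                                       */
/**********************************************************************/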
pthread_mutex_t __ada__g_global_lock__;     /* initialized in cma_init */

void cma__init_static ()
{
}

char *cma__g_global_lock = (char *) NULL;

static cma__t_file_mask  ___ada_nonasync_io_fds___;
static int               ___ada_nonblock_fd_count___;
static pthread_cond_t    ___ada_nonasync_io_cv___;
static pthread_mutex_t   ___ada_nonasync_io_mutex___;

static void ___null_thread_routine___();    /* defined below */

void cma_init ()
{
    pthread_mutexattr_t attr;
    static int flag = 0;
    extern int cma__g_mx_file;

    if( !flag )
    {
        flag = 1;

        cma__fdm_zero( &___ada_nonasync_io_fds___ );

        /* DCE threads passes mutex attribute objects by value. */
        pthread_mutexattr_create( &attr );
        pthread_mutexattr_setkind_np( &attr, MUTEX_RECURSIVE_NP );
        pthread_mutex_init( &__ada__g_global_lock__, attr );
        pthread_mutex_init( &___ada_nonasync_io_mutex___, attr );
        pthread_mutexattr_delete( &attr );
        pthread_cond_init( &___ada_nonasync_io_cv___,
                           pthread_condattr_default );

        cma__g_global_lock = cma__get_mutex( (char *) NULL );

#if _CMA_UNIX_TYPE == _CMA__SVR4
        {
            struct rlimit rip;

            if ( getrlimit( RLIMIT_NOFILE, &rip ) == -1 )
            {
                /*
                 * We should never get an error here, but...
                 *
                 * Since everything is not initialized at this point, I'm
                 * not sure that it would be wise to call cma__bugcheck(),
                 * so we'll just exit the old fashioned way.
                 */
                cma__bugcheck( "cma__init_static: Inconsistent return from getrlimit" );
            }
            cma__g_mx_file = rip.rlim_max;
        }
#elif _CMA_OSIMPL_ == _CMA__OS_OSF
        /*
         * On OSF/1 1.1.1 the number of file descriptors is dynamic, with
         * a default of 1024.  The select mask width is still fixed at
         * 256, so clamp to FD_SETSIZE.
         */
        cma__g_mx_file = getdtablesize();
        if( cma__g_mx_file > FD_SETSIZE )
            cma__g_mx_file = FD_SETSIZE;
#else
        cma__g_mx_file = getdtablesize();
#endif

        /* Number of select-mask slots: fd count divided by bits per
           mask, rounded up. */
        cma__g_nspm = ((cma__g_mx_file + cma__c_nbpm - 1) / cma__c_nbpm);

        cma__init_atfork();
        cma__init_memory();
        cma__init_mem_locks();
        cma__init_defer();

        pthread_create( &___null_thread___, pthread_attr_default,
                        (pthread_startroutine_t) ___null_thread_routine___,
                        (pthread_addr_t) NULL );

        cma__init_thread_io();
        cma__init_signal();
        ada_thd_init_signal();
    }
}

#include <stdarg.h>

void cma__bugcheck ( char *text, ... )
{
#ifdef CMA_BUGCHECK
    va_list args;

    va_start( args, text );
    vfprintf( stderr, text, args );
    va_end( args );
#endif
}

/**********************************************************************/
/* Map a CMA status code to the corresponding Ada exception and       */
/* raise it.                                                          */
/**********************************************************************/
void cma__error ( int code )
{
    ada_pthread_exc_t exc;

    switch ( code )
    {
        case cma_s_alerted:     exc = ADA_PTHREAD_CANCEL_E;  break;
        case cma_s_exit_thread: exc = ADA_EXIT_THREAD_E;     break;
        case cma_s_existence:   exc = ADA_INVALID_OBJ_E;     break;
        case cma_s_in_use:
        case cma_s_use_error:   exc = ADA_USE_ERROR_E;       break;
        case cma_s_timed_out:   exc = ADA_TIMED_OUT_E;       break;
        case exc_s_SIGPIPE:     exc = ADA_SIGPIPE_E;         break;
        case exc_s_SIGSYS:      exc = ADA_SIGSYS_E;          break;
        case exc_s_SIGTRAP:     exc = ADA_SIGTRAP_E;         break;
        case exc_s_SIGIOT:      exc = ADA_SIGIOT_E;          break;
        case exc_s_SIGEMT:      exc = ADA_SIGEMT_E;          break;
        default:                exc = ADA_MAX_EXCEPTIONS;    break;
    }

    ada_thd_raise_exception( exc );
}
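/**********************************************************************/
/* A hypothetical caller, showing how a CMA status surfaces as an Ada */
/* exception through the mapping above (sketch only, not original     */
/* code):                                                             */
/*                                                                    */
/*     status = cma__int_timed_wait( cv, mutex, &expiration );        */
/*     if ( status == cma_s_timed_out )                               */
/*         cma__error( cma_s_timed_out );  -- raises ADA_TIMED_OUT_E  */
/**********************************************************************/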
cma__t_int_tcb     cma__g_def_tcb;
cma__t_int_tcb     *cma__g_current_thread  = &cma__g_def_tcb;
cma__t_atomic_bit  cma__g_init_done        = cma__c_tac_static_clear;
cma__t_atomic_bit  cma__g_init_started     = cma__c_tac_static_clear;
cma__t_atomic_bit  cma__g_kernel_critical  = cma__c_tac_static_clear;
cma__t_int_attr    cma__g_def_attr;
cma__t_env         cma__g_env[ cma__c_env_count ];

/* Mutexes and CVs are never reclaimed in this port; these are stubs. */
void cma__free_mutex ()
{
}

void cma__free_cv ()
{
}

extern void
cma__attempt_delivery
#ifdef _CMA_PROTO_
        (
        cma__t_int_tcb  *tcb)           /* TCB to check */
#else   /* no prototypes */
        (tcb)
        cma__t_int_tcb  *tcb;           /* TCB to check */
#endif  /* prototype */
{
    cma__int_lock (tcb->mutex);

    /*
     * Test whether an alert is pending and we're allowed to raise it.
     *
     * NOTE: asynchronous alert delivery is disabled during alert delivery.
     *
     * NOTE: the mutex is released before raising the alert, since access
     * to the TCB might be required by a handler and we don't want a
     * deadlock... the "else" clause of this "if" releases the mutex if
     * alert delivery is disabled or unnecessary.  Unstructured, but
     * necessary.
     */
    if (tcb->alert.pending && (tcb->alert.g_enable)) {
        tcb->alert.pending  = cma_c_false;
        tcb->alert.a_enable = cma_c_false;
        cma__int_unlock (tcb->mutex);
        cma__error (cma_s_alerted);
    }
    else
        cma__int_unlock (tcb->mutex);
}

#include <sys/resource.h>

int getdtablesize ()
{
    struct rlimit rl;

    getrlimit( RLIMIT_NOFILE, &rl );
    return rl.rlim_cur;
}

extern void
cma__get_time
#ifdef _CMA_PROTO_
        (
        cma_t_date_time *time)          /* Get the current date and time */
#else   /* no prototypes */
        (time)
        cma_t_date_time *time;          /* Get the current date and time */
#endif  /* prototype */
{
#if _CMA_OS_ == _CMA__VMS
    sys$gettim ((long int *) time);
#endif

#if _CMA_OS_ == _CMA__UNIX
# if (_CMA_UNIX_TYPE != _CMA__SVR4)
    struct timezone tmptz;
# endif

    /*
     * We don't bother with local time, so just put the timezone info in
     * a temporary and forget it.
     */
# if (_CMA_UNIX_TYPE != _CMA__SVR4)
    gettimeofday (time, &tmptz);
# else
    gettimeofday (time);
# endif
#endif
}

extern void
cma__add_time
#ifdef _CMA_PROTO_
        (
        cma_t_date_time *result,
        cma_t_date_time *time1,
        cma_t_date_time *time2)
#else   /* no prototypes */
        (result, time1, time2)
        cma_t_date_time *result;
        cma_t_date_time *time1;
        cma_t_date_time *time2;
#endif  /* prototype */
{
#if _CMA_OS_ == _CMA__VMS
    lib$add_times (time1, time2, result);
#else
    result->tv_usec = time1->tv_usec + time2->tv_usec;
    result->tv_sec  = time1->tv_sec  + time2->tv_sec;

    if (result->tv_usec >= 1000000) {           /* check for carry */
        result->tv_usec -= 1000000;
        result->tv_sec  += 1;
    }
#endif
}
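/*
 * Worked example of the carry in cma__add_time (illustrative values
 * only): adding 1.700000s and 0.600000s first gives tv_sec = 1,
 * tv_usec = 1300000; the carry step normalizes this to tv_sec = 2,
 * tv_usec = 300000.  A single subtraction suffices because each
 * operand is assumed to be already normalized (tv_usec < 1000000).
 */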
void cma_lock_global ()
{
    pthread_lock_global_np();
}

void cma_unlock_global ()
{
    pthread_unlock_global_np();
}

void ada_thd_runtime_cleanup ( void )
{
#ifndef NDEBUG
    fprintf( stderr, "Cancelling Null Thread!\n" );
#endif
    pthread_cancel( ___null_thread___ );
    pthread_cond_signal( &___ada_nonasync_io_cv___ );
}

/**********************************************************************/
/* Keep track of descriptors that have been opened but are designated */
/* as unable to support async I/O.  The "null" thread is used to poll */
/* for I/O on these.                                                  */
/**********************************************************************/
void ___ada_open_nonasync_io_fd___ ( fd )
int fd;
{
    /* Never track stdin, stdout, or stderr. */
    if ( fd > 2 )
    {
        pthread_mutex_lock( &___ada_nonasync_io_mutex___ );
        cma__fdm_set_bit( fd, &___ada_nonasync_io_fds___ );
        pthread_mutex_unlock( &___ada_nonasync_io_mutex___ );
    }
}

/**********************************************************************/
/* Remove a descriptor from the non-async descriptor list.            */
/**********************************************************************/
void ___ada_close_nonasync_io_fd___ ( fd )
int fd;
{
    pthread_mutex_lock( &___ada_nonasync_io_mutex___ );
    if( cma__fdm_is_set( fd, &___ada_nonasync_io_fds___ ) )
    {
        cma__fdm_clr_bit( fd, &___ada_nonasync_io_fds___ );
    }
    pthread_mutex_unlock( &___ada_nonasync_io_mutex___ );
}

/**********************************************************************/
/* Enable the null thread to start polling for this descriptor.       */
/**********************************************************************/
void ___ada_enable_nonasync_io_fd___ ( fd )
int fd;
{
    pthread_mutex_lock( &___ada_nonasync_io_mutex___ );
    if( cma__fdm_is_set( fd, &___ada_nonasync_io_fds___ ) )
    {
        ___ada_nonblock_fd_count___++;
        pthread_mutex_unlock( &___ada_nonasync_io_mutex___ );

        /* Wake the null thread in case it is blocked on the CV. */
        pthread_cond_signal( &___ada_nonasync_io_cv___ );
    }
    else
        pthread_mutex_unlock( &___ada_nonasync_io_mutex___ );
}

/**********************************************************************/
/* Disable this descriptor from being polled for.                     */
/**********************************************************************/
void ___ada_disable_nonasync_io_fd___ ( fd )
int fd;
{
    pthread_mutex_lock( &___ada_nonasync_io_mutex___ );
    if( cma__fdm_is_set( fd, &___ada_nonasync_io_fds___ ) )
        ___ada_nonblock_fd_count___--;
    pthread_mutex_unlock( &___ada_nonasync_io_mutex___ );
}

/**********************************************************************/
/* "Null" thread that is used to poll for I/O when descriptors are    */
/* opened that don't support asynchronous I/O.  As long as such       */
/* descriptors are enabled, the thread polls; otherwise it waits on   */
/* a CV until the application enables a non-async I/O descriptor.     */
/**********************************************************************/
static void ___null_thread_routine___ ()
{
    struct timespec ts        = { 3, 0 };   /* polling interval */
    struct timeval  wait_ts   = { 0, 0 };   /* non-blocking check */
    struct timeval  *wait_ptr = &wait_ts;
    extern cma__t_int_tcb *cma__get_self_tcb();

    (cma__get_self_tcb())->kind = cma__c_thkind_null;

    pthread_setcancel( CANCEL_ON );
    pthread_setasynccancel( CANCEL_ON );

    while ( 1 )
    {
        extern cma_t_integer io_avail_null_thread;

        TRY
            if ( ___ada_nonblock_fd_count___ )
                cma__io_available( cma__c_io_read, 0, wait_ptr );
        CATCH_ALL
            io_avail_null_thread = 0;
        ENDTRY;

        if( !cma__kernel_set( &cma__g_defers_processed ) )
        {
            cma__bugcheck( "Undeferring Requests\n" );
            cma__undefer();
        }

        pthread_mutex_lock( &___ada_nonasync_io_mutex___ );
        if ( ___ada_nonblock_fd_count___ )
        {
            pthread_mutex_unlock( &___ada_nonasync_io_mutex___ );
            TRY
                pthread_delay_np( &ts );
            CATCH_ALL
                pthread_exit( 0 );
            ENDTRY;
        }
        else
        {
            pthread_cond_wait( &___ada_nonasync_io_cv___,
                               &___ada_nonasync_io_mutex___ );
            pthread_mutex_unlock( &___ada_nonasync_io_mutex___ );
        }
    }
}
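/**********************************************************************/
/* Expected lifecycle of a non-async descriptor (illustrative         */
/* ordering only; the real callers are elsewhere in the runtime):     */
/*                                                                    */
/*     ___ada_open_nonasync_io_fd___( fd );     -- fd tracked         */
/*     ___ada_enable_nonasync_io_fd___( fd );   -- null thread polls  */
/*     ... blocking I/O on fd ...                                     */
/*     ___ada_disable_nonasync_io_fd___( fd );  -- polling stops      */
/*     ___ada_close_nonasync_io_fd___( fd );    -- fd untracked       */
/**********************************************************************/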
/**********************************************************************/
/* Main signal delivery mechanism.  This will be called by an Ada     */
/* interrupt entry.  If "ada_tcb" is NULL, then the signal is         */
/* considered to be part of the asynchronous signal set.  All waiters */
/* will then be woken up.  If the signal is for async I/O, then those */
/* waiters are also woken up.                                         */
/*                                                                    */
/* If "ada_tcb" is not NULL, then it points to the current Ada TCB    */
/* that caused the synchronous signal.  The handler that the thread   */
/* installed, if any, is then executed.                               */
/**********************************************************************/
int ada_thd_sig_deliver ( sig, ada_tcb )
int  sig;
char *ada_tcb;          /* TCB to check */
{
#if _CMA_UNIX_TYPE == _CMA__SVR4
    cma__t_siginfo siginfo = 0;
#else
    int code = 0;
#endif
    static struct sigcontext scp;
    struct timeval ts = { 0, 0 };
    cma__t_int_tcb *tcb;

    cma__bugcheck( "Delivering Signal: %d\n", sig );

    if ( ada_tcb == (char *) NULL )
    {
#if _CMA_UNIX_TYPE == _CMA__SVR4
        cma__sig_deliver( sig, siginfo, &scp );
#else
        cma__sig_deliver( sig, code, &scp );
#endif

        /**************************************************************/
        /* Now check if the signal was for async I/O.  If it was,     */
        /* process the request.                                       */
        /**************************************************************/
        if( sig == SIGIO || sig == SIGURG )
        {
            cma__io_available( cma__c_io_read, 0, &ts );
            if( !cma__kernel_set( &cma__g_defers_processed ) )
            {
                cma__bugcheck( "Undeferring Requests\n" );
                cma__undefer();
            }
        }
    }
    else
    {
        /**************************************************************/
        /* For synchronous events, "ada_tcb" will be non-NULL and set */
        /* to the current Ada runtime TCB.  In order to make use of   */
        /* this, it needs to be mapped to the "bindings" TCB.         */
        /**************************************************************/
        char *handle;
        extern char *ada_thd_get_from_task_list();
        cma__t_int_tcb *ada_thd_get_cma_tcb();

        if ( (handle = ada_thd_get_from_task_list( ada_tcb )) == NULL )
            return (int) cma_c_false;

        tcb = ada_thd_get_cma_tcb( handle );

#if _CMA_OS_ != _CMA__VMS
        if (tcb->sigaction_data[sig].sa_handler == SIG_IGN)
        {
            return (int) cma_c_true;    /* Ignore this signal */
        }
        else if ((tcb->sigaction_data[sig].sa_handler != 0) &&
                 (tcb->sigaction_data[sig].sa_handler != SIG_DFL))
        {
# if (_CMA_PLATFORM_ == _CMA__IBMR2_UNIX) || (_CMA_OSIMPL_ == _CMA__OS_OSF)
            cma___t_sighandler handler =
                (cma___t_sighandler) tcb->sigaction_data[sig].sa_handler;

            (*handler)(sig, code, scp);
# else
#  if _CMA_UNIX_TYPE == _CMA__SVR4
            (tcb->sigaction_data[sig].sa_handler)(sig, siginfo, scp);
#  else
            (tcb->sigaction_data[sig].sa_handler)(sig, code, scp);
#  endif /* _CMA__SVR4 */
# endif
            return (int) cma_c_true;
        }
#endif
    }

    return ( (int) cma_c_true );
}

void ada_thd_c_free ( addr )
char *addr;
{
    free( (void *) addr );
}

/**********************************************************************/
/* Converts an expiration (absolute) time to a relative delay.        */
/* Returns -1 if the expiration time has already passed.              */
/**********************************************************************/
int ada_thd_expiration_to_delay ( ts, ds )
struct timespec *ts;
struct timespec *ds;
{
    struct timeval  tmptime;
    struct timezone zone;
    int  status = 0;
    long diff;

    gettimeofday( &tmptime, &zone );

    diff = ts->tv_sec - tmptime.tv_sec;
    ds->tv_sec = diff < 0 ? (status = -1, 0) : diff;

    diff = ts->tv_nsec - ( tmptime.tv_usec * 1000 );
    if ( diff < 0 )
    {
        if ( ds->tv_sec > 0 )
        {
            /* Borrow one second rather than over-reporting the delay. */
            ds->tv_sec -= 1;
            diff += 1000000000;
        }
        else
            diff = 0;
    }
    ds->tv_nsec = diff;

    return ( status );
}

float ada_thd_long_to_float ( val )
long val;
{
    return ( (float) val );
}

/**********************************************************************/
/* Adds "fd" to the list of descriptors tracked by DCE.  This is      */
/* meant to be used for descriptors opened outside of DCE (i.e.,      */
/* opened by "open", not "cma_open").                                 */
/**********************************************************************/
void dup_dce ( fd )
int fd;
{
    void cma__open_general( int );

    cma__open_general( fd );
}
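/*
 * Hypothetical usage of dup_dce (sketch only; "/tmp/data" is an
 * illustrative path): a descriptor obtained from a raw open() is
 * unknown to the DCE I/O layer until it is registered.
 *
 *     int fd = open( "/tmp/data", O_RDONLY );   -- bypasses cma_open
 *     if ( fd >= 0 )
 *         dup_dce( fd );                        -- let DCE track it
 */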