#ifndef __VMS_MACROS_LOADED #define __VMS_MACROS_LOADED 1 /***************************************************************************** * Copyright 2002 Compaq Information Technologies Group, L.P. * * Compaq and the Compaq logo are trademarks of Compaq Information * Technologies Group, L.P. in the U.S. and/or other countries. * * Confidential computer software. Valid license from Compaq required for * possession, use or copying. Consistent with FAR 12.211 and 12.212, * Commercial Computer Software, Computer Software Documentation, and * Technical Data for Commercial Items are licensed to the U.S. Government * under vendor's standard commercial license. *****************************************************************************/ /* *++ * FACILITY: * * VMS Executive (LIB) * * ABSTRACT: * * This header file will provide a basic set of C macros for system programmers * to hide some particularly ugly things or for commonly used functions which * are somewhat performance sensitive. These macros should be very simple (not * that C would let you get away with anything else). If complex, reconsider * creating a new exec routine to perform the function rather than a macro. * * AUTHOR: * * Steve DiPirro * * CREATION DATE: 17-Feb-1993 * * MODIFICATION HISTORY: * * X-46 CMOS Christian Moser 24-FEB-2005 * Rewrite the LOCK_CPU_MUTEX and UNLOCK_CPU_MUTEX to match what * the Macro32 macro does to acquire and release the CPU mutex. * Only use a single atomic update operation to avoid some * fairness issues. * * X-45 Clair Grant 02-Apr-2004 * Add "atomic" macros * * X-44 CMOS Christian Moser 7-SEP-2003 * Add EXC_PRINT for exception tracing * * X-43 Clair Grant 01-Nov-2002 * builtins.h must precede kriprdef.h * * X-42 Burns Fisher 31-Oct-2002 * KRIPRDEF include is IA64-only * * X-41 Burns Fisher 30-Oct-2002 * Use KRs for get_cpu_data and add get_slot_va for IA64. * * X-40 Clair Grant 23-Jul-2002 * We won't be calling a service to bugcheck so replace * sys$pal_bugchk with the __PAL_BUGCHK builtin which uses * a break instruction directly. * * X-39 KLN3080 Karen L. Noel 18-Jul-2002 * o Fix lower case conditional. * o Reset ident numbering to match source control. * * X-37A1A4 KLN3048 Karen L. Noel 28-Mar-2002 * o Avoid assigning a 64-bit pointer to a 32-bit int. * o Conditionalize Alpha specific code. * o Require /pointer_size to use 64-bit pointers. * * X-41 KLN3037 Karen L. Noel 13-Mar-2002 * Inline static routines so compiler doesn't mess up * initialization routines. * * X-40 KLN3035 Karen L. Noel 11-Mar-2002 * Remove arch_defs.h and change conditionals to use * __ALPHA and __ia64. Including arch_defs.h breaks * XFC and perhaps other code that does conditionals * a different way. * * X-39 KLN3025 Karen L. Noel 26-Feb-2002 * o Conditionalize MTPR/MFPR macro definitions. * o Cast new_ipl in sys_unlock macro to get rid * of compiler informational. * o Make two page table spaces per IA64 region. * o Call sys$pal_bugchk for IA64 in bug_check macro. * o Fix comment for sys_lock_nospin to indicate SS$_LOCKINUSE * can be returned. * o Remove inline pragmas. We trust the compiler now. * * X-38 CMOS Christian Moser 10-JAN-2002 * Update comment for TR_PRINT macro to include example * usage based on popular feedback request. * * X-37 CMOS Christian Moser 26-JUN-2001 * Add new TR_PRINT macro, which can be used as a general * purpose debug aid in combination with TR$DEBUG and TR$SDA. 
* * X-36 JRK Jim Kauffman 6-Nov-2000 * Fix CPU mutex deadlock * * X-35 CMOS Christian Moser 16-AUG-1999 * Add new SYS_LOCK and SYS_UNLOCK variants to support * sharelocks and nospin locking. * * X-34 RAB Richard A. Bishop 9-Apr-1999 * Make bug_check macro use some names that aren't so likely * to clash with customer definitions. * * X-33 PAJ0988 Paul A. Jacobi 16-Jun-1998 * Define BUGCHK_POWEROFF for use with BUG_CHECK macro. Update * module IDENT to match VDE. Fold of X-28A1 from BLIZZARD_FT1. * * X-30 KLN2084 Karen L. Noel 5-Jun-1998 * Protect the sys_lock macro so that it can be used * properly within a module compiled for 64-bit pointers. * * X-29 KLN2077 Karen L. Noel 20-May-1998 * Add "int" to declaration in bug_check macro so the C * compiler doesn't complain when using level4 checking. * * X-28 CMOS Christian Moser 27-APR-1998 * Replace bug_check macro to generate inline bugcheck with * correct PALcode instruction, instead of calling routine * EXE$GEN_BUGCHK to generate an inline bugcheck. * * X-27 JRK388 Jim Kauffman 4-Nov-1997 * Fix bug_check usage in lock_cpu_mutex and unlock_cpu_mutex * * X-26 KLN1570 Karen L. Noel 18-Jul-1996 * Fix calls to TBI_DATA* routines so that the call * entry points are used instead of the JSB entries. * * X-25 SDD Steve DiPirro 26-Apr-1996 * Fix exe$gen_bugchk parameter declarations to be more standard * and to avoid the DECC V5.3 bug. * * X-24 NYK521 Nitin Y. Karkhanis 30-Nov-1995 * Add macros to fetch contents of item list entry fields. * * X-23 EMB0381 Ellen M. Batbouta 03-Oct-1995 * Add NO_PCB symbol to TBI_DATA_64 and TBI_SINGLE macros. * * X-22 NYK326 Nitin Y. Karkhanis 30-Mar-1995 * Cast PTE pointers to integers before performing any * arithmetic. (In va_pte_to_svapte and svapte_to_va_pte.) * * X-21 EMB0355 Ellen M. Batbouta 08-Mar-1995 * Add TB invalidate macros, TBI_ALL, TBI_SINGLE, and * TBI_DATA_64. * * X-20 NYK251 Nitin Y. Karkhanis 1-Feb-1995 * Removed 64B_REVISIT for PTE_VA macro since the newest * generation of the C compiler that's coupled with Theta * correctly evaluates 64-bit expressions. * Added const to all extern system data cells declarations. * * X-19 NYK231 Nitin Y. Karkhanis 27-Jan-1995 * Add new routines to convert a VA_PTE to a SVAPTE and vice * versa. * Replaced instances of __unsigned int64 with uint64 for the * PTE macros only. * * X-18 LSS0314 Leonard S. Szubowicz 10-Jan-1995 * Handle multiple inclusions of this header file gracefully by * doing nothing after the first time. * * X-17 NYK102 Nitin Y. Karkhanis 10-Nov-1994 * Temporarily make pte_va an inline routine. This was done * to work around some compiler problems with 64-bit expressions. * * X-16 NYK075 Nitin Y. Karkhanis 17-Oct-1994 * The presence of access_backpointer and establish_backpointer * in this header file resulted in too much pain for the * build. Access_backpointer, decref, establish_backpointer, * incref, is_encumbered, and pfn_to_entry therefore have been * moved to [LIB_H]PFN_MACROS.H. The VA_PTE_TO_VA inline routine * need the static qualifer on its definition. * * X-15 NYK073 Nitin Y. Karkhanis 14-Oct-1994 * MMG$GQ_LEVEL_WIDTH and MMG$GQ_NON_VA_MASK need to be * declared within inline routine establish_backpointer. * * X-14 NYK072 Nitin Y. Karkhanis 13-Oct-1994 * Safe to include access_backpointer routine since the * system data cells it uses have now been defined. The * inclusion of the routine ended up being a separate edit from * X-13 since the symbols were defined (in SYSLNK.OPT, etc.) * after edit X-13 hit the "pack". * * X-13 NYK071 Nitin Y. 
Karkhanis 13-Oct-1994 * Update PTE macros according to code review comments. * Add PFN macros. * * X-12 NYK055 Nitin Y. Karkhanis 19-Sep-1994 * MMG$GL_PAGE_SIZE in va_pte_to_va should really be * MMG$GQ_PAGE_SIZE. * * X-11 NYK046 Nitin Y. Karkhanis 14-Sep-1994 * Adding paging equation macros (l1pte_va, l2pte_va, pte_va, * va_pte_to_va- routine). Also changed pfn_to_entry macro since * the shift is no longer viable. PFN database entry size has * grown for 64-bits and is no longer a power of 2. * * X-10 SDD Steve DiPirro 18-Aug-1994 * Some idiot screwed up all the va/vpn conversion macros for P0 * space using a nonexistent (and unneeded) mask symbol. Oh yeah, * that was me. What a surprise. * * X-9 SDD Steve DiPirro 23-May-1994 * Extern declaration of smp$gl_flags should use same type * as in VMS_DRIVERS.H (type SMP rather than int). * * X-8 SDD Steve DiPirro 27-Jan-1994 * Function prototypes defined here are obsolete and conflict * with the actual definitions now available in other include * files. * * X-7 SDD Steve DiPirro 08-Nov-1993 * Fixed sys_unlock to restore IPL, even when SMP enabled. * * X-6 SDD Steve DiPirro 08-Sep-1993 * Added vms_assert, good_status, and bad_status macros. * Fixed sys_lock problem accessing spinlock vector. * * X-5 SDD Steve DiPirro 26-Aug-1993 * Fixed sys_lock and sys_unlock references to SPL$C_xxx * symbols causing new compilation problems. * * X-4 SDD Steve DiPirro 18-Aug-1993 * Fixed (erroneous) extra level of indirection in the * bug_check macro. * * X-3 SDD Steve DiPirro 09-Jun-1993 * I'm an idiot. Fix the case of constants used by macros. * Fix sys_lock to allow defaulting of saved_ipl parameter. * Make bugcheck code references consistent across the macros. * * X-2 SDD Steve DiPirro 30-Apr-1993 * Added new bug_check macro. 
 *
 *--
 */

/* Include any header files we need to make these macros work */

#ifdef __ALPHA
#include
#include
#endif
#include
#ifdef __ia64
#include
#endif
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/* These are macros which facilitate the creation of other macros */

#define concat_sym(a,b) a ## b
#define concat_defs(a,b) concat_sym(a,b)
#define bld_sym1(x,line) concat_sym(x,line)
#define bld_sym(x) bld_sym1(x,__LINE__)

/* Here are some macros which need no explanation (except for, perhaps, why
   we're bothering to define them in the first place) */

#define mfpr_asten __PAL_MFPR_ASTEN()
#define mfpr_astsr __PAL_MFPR_ASTSR()
#define mfpr_esp __PAL_MFPR_ESP()
#define mfpr_fen __PAL_MFPR_FEN()
#define mfpr_ipl __PAL_MFPR_IPL()
#define mfpr_mces __PAL_MFPR_MCES()
#define mfpr_pcbb __PAL_MFPR_PCBB()
#define mfpr_prbr __PAL_MFPR_PRBR()
#define mfpr_sisr __PAL_MFPR_SISR()
#define mfpr_ssp __PAL_MFPR_SSP()
#define mfpr_tbchk(check_adr) __PAL_MFPR_TBCHK(check_adr)
#define mfpr_usp __PAL_MFPR_USP()
#define mfpr_whami __PAL_MFPR_WHAMI()

#define mtpr_asten(mask) __PAL_MTPR_ASTEN(mask)
#define mtpr_astsr(mask) __PAL_MTPR_ASTSR(mask)
#define mtpr_datfx(enable) __PAL_MTPR_DATFX(enable)
#define mtpr_esp(new_sp) __PAL_MTPR_ESP(new_sp)
#define mtpr_fen(new_fen) __PAL_MTPR_FEN(new_fen)
#define mtpr_ipir(ipir_mask) __PAL_MTPR_IPIR(ipir_mask)
#define mtpr_ipl(newipl) __PAL_MTPR_IPL(newipl)
#define mtpr_mces(mck_sum) __PAL_MTPR_MCES(mck_sum)
#define mtpr_prbr(new_prbr) __PAL_MTPR_PRBR(new_prbr)
#define mtpr_sirr(mask) __PAL_MTPR_SIRR(mask)
#define mtpr_ssp(new_sp) __PAL_MTPR_SSP(new_sp)
#define mtpr_tbia __PAL_MTPR_TBIA()
#define mtpr_tbiap __PAL_MTPR_TBIAP()
#define mtpr_tbis(tb_adr) __PAL_MTPR_TBIS(tb_adr)
#define mtpr_tbisd(tb_adr) __PAL_MTPR_TBISD(tb_adr)
#define mtpr_tbisi(tb_adr) __PAL_MTPR_TBISI(tb_adr)
#define mtpr_usp(new_sp) __PAL_MTPR_USP(new_sp)

/* The following macros are modelled after macros available to BLISS and
   MACRO but are basically just simple PAL calls. */

#define dsbint(newipl,saved_ipl) saved_ipl = __PAL_MTPR_IPL(newipl)
#define enbint(newipl) __PAL_MTPR_IPL(newipl)
#define setipl(newipl) __PAL_MTPR_IPL(newipl)
#define softint(ipl) __PAL_MTPR_SIRR(ipl)

#ifdef __ia64
#define find_cpu_data __getReg(KR$C_CPUDB_VA)
#define get_slot_va __getReg(KR$C_SLOT_VA)
#else
#define find_cpu_data __PAL_MFPR_PRBR()
#endif

/* The following MFPR/MTPR calls have no IA64 builtins. */

#ifdef __ALPHA
#define mfpr_scbb __PAL_MFPR_SCBB()
#define mtpr_scbb(base_adr) __PAL_MTPR_SCBB(base_adr)
#define mfpr_ptbr __PAL_MFPR_PTBR()
#define mfpr_vptb __PAL_MFPR_VPTB()
#define mtpr_vptb(new_vptb) __PAL_MTPR_VPTB(new_vptb)
#endif

/* What follows is a bug_check macro for system C programmers which can be
   used to generate a bugcheck.  Included are some #define's of constants
   which can be used with the macro invocation.  An example of its use is
   (note, all parameters are in uppercase):

       bug_check (INCONSTATE, FATAL, COLD);
*/

#define BUGCHK_QUOTE(s) #s
#define BUGCHK_STR(s) BUGCHK_QUOTE(s)
#define BUGCHK_FATAL 1
#define BUGCHK_NONFATAL 0
#define BUGCHK_POWEROFF 2
#define BUGCHK_COLD 1
#define BUGCHK_WARM 0

#ifdef __ALPHA
#define bug_check(code, severity, reboot) \
{ \
    extern const int BUG$_##code; \
    int bug_code = (int) &BUG$_##code; \
    bug_code |= (BUGCHK_##severity) ? ((BUGCHK_##reboot) ? \
                                       5 : 4) : 0; \
    asm ( "call_pal " BUGCHK_STR(EVX$PAL_BUGCHK) ";", bug_code ); \
}
#endif

#ifdef __ia64
#define bug_check(code, severity, reboot) \
{ \
    extern const int BUG$_##code; \
    int bug_code = (int) &BUG$_##code; \
    bug_code |= (BUGCHK_##severity) ? ((BUGCHK_##reboot) ? 5 : 4) : 0; \
    __PAL_BUGCHK(bug_code); \
}
#endif

/* The following code is only available to modules that compile with the */
/* pointer_size qualifier.  Short, long, 32, or 64 are all okay.         */

#ifdef __INITIAL_POINTER_SIZE
#if __INITIAL_POINTER_SIZE

/* Macro to return the VA of the L1PTE that maps the virtual address passed in.

   L1PTE_VA = MMG$GQ_L1_BASE[VA] + 8*VA
*/
#pragma inline ($$$l1pte_va)
static PTE_PQ $$$l1pte_va (uint64 addr)
{
    extern PTE_PQ const mmg$gq_l1_base[VA$C_VRNX_COUNT];
    extern const uint64 mmg$gq_non_va_mask;    /* referenced below; declared as in $$$l2pte_va */
    extern const uint64 mmg$gq_level_width;
    int vrnx;
    unsigned __int64 va_pte;
#ifdef __NEW_STARLET
    VA v;
    v.va$q_quad = addr;
#else
    va v;
    v.va$q_quad[0] = addr;
    v.va$q_quad[1] = addr>>32;
#endif
    vrnx = v.va$v_vrnx;
    va_pte = (uint64) mmg$gq_l1_base[vrnx] +
             ((addr & ~mmg$gq_non_va_mask) >> 3*mmg$gq_level_width) &
             (uint64) ~(PTE$C_BYTES_PER_PTE-1);
    return ((PTE_PQ) va_pte);
}
#define l1pte_va(addr) $$$l1pte_va((uint64)(addr))

/* Macro to return the VA of the L2PTE that maps the virtual address passed in.

   L2PTE_VA = MMG$GQ_L2_BASE[VA] + 8*VA
*/
#pragma inline ($$$l2pte_va)
static PTE_PQ $$$l2pte_va (uint64 addr)
{
    extern PTE_PQ const mmg$gq_l2_base[VA$C_VRNX_COUNT];
    extern const uint64 mmg$gq_non_va_mask;
    extern const uint64 mmg$gq_level_width;
    int vrnx;
    unsigned __int64 va_pte;
#ifdef __NEW_STARLET
    VA v;
    v.va$q_quad = addr;
#else
    va v;
    v.va$q_quad[0] = addr;
    v.va$q_quad[1] = addr>>32;
#endif
    vrnx = v.va$v_vrnx;
    va_pte = (uint64) mmg$gq_l2_base[vrnx] +
             ((addr & ~mmg$gq_non_va_mask) >> 2*mmg$gq_level_width) &
             (uint64) ~(PTE$C_BYTES_PER_PTE-1);
    return ((PTE_PQ)va_pte);
}
#define l2pte_va(addr) $$$l2pte_va((uint64)(addr))

/* Macro to return the VA of the PTE that maps the virtual address passed in.

   PTE_VA = MMG$GQ_PT_BASE[VA] + 8*VA
*/
#pragma inline ($$$pte_va)
static PTE_PQ $$$pte_va (uint64 addr)
{
    extern PTE_PQ const mmg$gq_pt_base[VA$C_VRNX_COUNT];
    extern const uint64 mmg$gq_non_pt_mask;
    extern const uint64 mmg$gq_level_width;
    int vrnx;
    unsigned __int64 va_pte;
#ifdef __NEW_STARLET
    VA v;
    v.va$q_quad = addr;
#else
    va v;
    v.va$q_quad[0] = addr;
    v.va$q_quad[1] = addr>>32;
#endif
    vrnx = v.va$v_vrnx;
    va_pte = (uint64) mmg$gq_pt_base[vrnx] +
             ((addr & ~mmg$gq_non_pt_mask) >> mmg$gq_level_width);
    return ((PTE_PQ)va_pte);
}
#define pte_va(addr) $$$pte_va((uint64)(addr))

/* Routine to return the VA mapped by the VA_PTE passed in.

   VA = ((VA_PTE - MMG$GQ_PT_BASE[VA])/PTE_SIZE) * PAGE_SIZE
*/
#pragma inline ($$$va_pte_to_va)
static VOID_PQ $$$va_pte_to_va (PTE_PQ va_pte)
{
    extern PTE_PQ const mmg$gq_pt_base[VA$C_VRNX_COUNT];
    extern const uint64 mmg$gq_va_bits;
    extern const uint64 mmg$gq_level_width;
    uint64 temp1;
    int vrnx;
#ifdef __NEW_STARLET
    VA v;
    VA temp2;
    v.va$q_quad = (uint64)va_pte;
#else
    va v;
    va temp2;
    temp1 = (uint64)va_pte;
    v.va$q_quad[0] = (unsigned int)temp1;
    v.va$q_quad[1] = (unsigned int)(temp1 >> 32);
#endif
    vrnx = v.va$v_vrnx;
    temp1 = ((uint64)va_pte - (uint64) mmg$gq_pt_base[vrnx]) << mmg$gq_level_width;

    /* Since the above statement creates an address that does not have the
       bits above the L1 MSB set according to the setting of the L1 MSB,
       the following code handles this contingency.
    */
#ifdef __ALPHA
    if ((int64) (temp1 << (64 - mmg$gq_va_bits)) < 0)
        temp1 = temp1 | ((int64) -1 << mmg$gq_va_bits);
#endif

    /* IA64 we now have to insert the vrnx bits */
#ifdef __ia64
    if (vrnx&1)
        temp1 = temp1 | ((int64) -1 << mmg$gq_va_bits);
# ifdef __NEW_STARLET
    temp2.va$q_quad = temp1;
    temp2.va$v_vrnx = vrnx;
    temp1 = temp2.va$q_quad;
# else
    temp2.va$q_quad[0] = temp1;
    temp2.va$q_quad[1] = temp1>>32;
    temp2.va$v_vrnx = vrnx;
    temp1 = temp2.va$q_quad[0];
    temp1 |= (uint64)temp2.va$q_quad[1]<<32;
# endif
#endif
    return ((VOID_PQ) temp1);
}
#define va_pte_to_va(va_pte) $$$va_pte_to_va((PTE_PQ)(va_pte))

/* Routine to convert a SVAPTE to a VA_PTE. */
#pragma inline (svapte_to_va_pte)
static PTE_PQ svapte_to_va_pte (PTE *svapte)
{
    extern PTE_PQ const mmg$gq_s0s1base_pte_address;
    extern PTE * const mmg$gl_sptbase;

    return ((PTE_PQ) ((uint64) mmg$gq_s0s1base_pte_address +
                      (uint64) ((int) svapte - (int) mmg$gl_sptbase)));
}

/* Routine to convert a VA_PTE to a SVAPTE. */
#pragma inline (va_pte_to_svapte)
static PTE * va_pte_to_svapte (PTE_PQ va_pte)
{
    extern PTE_PQ const mmg$gq_s0s1base_pte_address;
    extern PTE * const mmg$gl_sptbase;

    return ((PTE *) ((int) mmg$gl_sptbase +
                     (int) ((uint64) va_pte - (uint64) mmg$gq_s0s1base_pte_address)));
}

#endif  /* if __INITIAL_POINTER_SIZE */
#endif  /* ifdef __INITIAL_POINTER_SIZE */

/* These macros, MAKE_VA_xx, are similar to $MAKE_VA for MACRO and BLISS but
   not as sophisticated.  They will convert a virtual page number (VPN) to an
   address (the first byte of the page) for the specified address space.
   These macros ALWAYS assume page-size-independent code and that the LIB
   symbols VA$M_xxx are defined and you're linking against SYS$BASE_IMAGE. */

#define make_va_s0(vpn) ((vpn << mmg$gl_vpn_to_va) | VA$M_SYSTEM)

/* These macros, EXTRACT_VA_xx, are similar to $EXTRACT_VA for MACRO and BLISS
   but not as sophisticated.  They will convert a virtual address to a virtual
   page number (VPN) for the specified address space.  These macros ALWAYS
   assume page-size-independent code and that the LIB symbols VA$M_xxx are
   defined and you're linking against SYS$BASE_IMAGE. */

#define extract_va_s0(va) ((va & (~ VA$M_SYSTEM)) >> mmg$gl_vpn_to_va)

/* These macros, EXTRACT_PTE_OFFSET_xx, are similar to $EXTRACT_PTE_OFFSET for
   MACRO and BLISS but not as sophisticated.  They will convert a virtual
   address to a PTE offset for the specified address space.  These macros
   ALWAYS assume page-size-independent code and that the LIB symbols VA$M_xxx
   are defined and you're linking against SYS$BASE_IMAGE.
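
   For example (an illustrative sketch only; the variable names below are not
   part of this header), a system-space address can be turned into a VPN and
   back, or into a PTE offset:

       unsigned int vpn, pte_offset;
       vpn        = extract_va_s0 (va);
       va         = make_va_s0 (vpn);
       pte_offset = extract_pte_offset_s0 (va);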
*/

#define extract_pte_offset_s0(va) \
    ((va & (~ VA$M_SYSTEM)) >> mmg$gl_pte_offset_to_va)

#ifdef __INITIAL_POINTER_SIZE               /* Defined whenever ptr size pragmas supported */
#pragma __required_pointer_size __save      /* Save the previously-defined required ptr size */
#pragma __required_pointer_size __short     /* And set ptr size default to 32-bit pointers */
#endif

/* Guard against anyone using this macro with their module compiled with long pointers */

typedef struct _spl ** SPL_PPL;             /* Short pointer to a short pointer to an SPL structure */

#ifdef __INITIAL_POINTER_SIZE               /* Defined whenever ptr size pragmas supported */
#pragma __required_pointer_size __restore   /* Restore the previously-defined required ptr size */
#endif

/* Define some constants used by the various spinlock acquisition and release macros */

#define NOSAVE_IPL ((int *) 0)  /* don't save original IPL */
#define NOLOWER_IPL -1          /* don't lower IPL on unlock */
#define NORAISE_IPL 0           /* don't raise IPL on lock */
#define RAISE_IPL 1             /* do raise IPL on lock */
#define SMP_RELEASE 0           /* unconditionally release spinlock */
#define SMP_RESTORE 1           /* conditionally release spinlock */

/* These macros are similar to the LOCK macro (in MACRO-32) to acquire a
   spinlock and/or raise IPL.  They do NOT take out a mutex, however.  This
   is a separate function.

       lockname   = Name of the spinlock in uppercase (IOLOCK8, etc.)
       change_ipl = 0 => No, 1 => Yes
       saved_ipl  = Address of variable (int) to receive the previous IPL (or zero)
       status     = Address of variable (int) to receive the status of the
                    lock operation (used by the _NOSPIN variants only)

   For example, to take out IOLOCK8, change IPL, and save the previous IPL:

       int old_ipl;
       sys_lock(IOLOCK8,1,&old_ipl);

   and if you didn't want to save the previous IPL:

       sys_lock(IOLOCK8,1,0);

   The _NOSPIN variants return a status of either SS$_NORMAL or SS$_LOCKINUSE,
   depending on whether the spinlock was free or already locked.
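
   As an illustrative sketch only (the variable names below are not part of
   this header), a _NOSPIN acquisition might be checked and then released
   with sys_unlock:

       int old_ipl, status;
       sys_lock_nospin(IOLOCK8,1,&old_ipl,&status);
       if (status == SS$_NORMAL)
       {
           ...                                /* spinlock held; do the work */
           sys_unlock(IOLOCK8,old_ipl,SMP_RELEASE);
       }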
*/ #define sys_lock(lockname,change_ipl,saved_ipl) \ { \ extern SMP smp$gl_flags; \ extern SPL_PPL smp$ar_spnlkvec; \ \ if (saved_ipl != 0) \ *(int *)saved_ipl = mfpr_ipl; \ \ if (change_ipl == 0) \ smp_std$acqnoipl(smp$ar_spnlkvec[concat_sym(SPL$C_,lockname)]); \ else \ if (smp$gl_flags.smp$v_enabled) \ smp_std$acquire(concat_sym(SPL$C_,lockname)); \ else \ mtpr_ipl(concat_sym(IPL$_,lockname)); \ } #define sys_lock_shr(lockname,change_ipl,saved_ipl) \ { \ extern SMP smp$gl_flags; \ extern SPL_PPL smp$ar_spnlkvec; \ \ if (saved_ipl != 0) \ *(int *)saved_ipl = mfpr_ipl; \ \ if (change_ipl == 0) \ smp_std$acqnoipl_shr(smp$ar_spnlkvec[concat_sym(SPL$C_,lockname)]); \ else \ if (smp$gl_flags.smp$v_enabled) \ smp_std$acquire_shr(concat_sym(SPL$C_,lockname)); \ else \ mtpr_ipl(concat_sym(IPL$_,lockname)); \ } #define sys_lock_nospin(lockname,change_ipl,saved_ipl,status) \ { \ extern SMP smp$gl_flags; \ extern SPL_PPL smp$ar_spnlkvec; \ \ if (saved_ipl != 0) \ *(int *)saved_ipl = mfpr_ipl; \ \ if (change_ipl == 0) \ *(int *)status = smp_std$acqnoipl_nospin(smp$ar_spnlkvec[concat_sym(SPL$C_,lockname)]); \ else \ if (smp$gl_flags.smp$v_enabled) \ *(int *)status = smp_std$acquire_nospin(concat_sym(SPL$C_,lockname)); \ else \ { \ mtpr_ipl(concat_sym(IPL$_,lockname)); \ *(int *)status = SS$_NORMAL; \ } \ } #define sys_lock_shr_nospin(lockname,change_ipl,saved_ipl,status) \ { \ extern SMP smp$gl_flags; \ extern SPL_PPL smp$ar_spnlkvec; \ \ if (saved_ipl != 0) \ *(int *)saved_ipl = mfpr_ipl; \ \ if (change_ipl == 0) \ *(int *)status = smp_std$acqnoipl_shr_nospin(smp$ar_spnlkvec[concat_sym(SPL$C_,lockname)]); \ else \ if (smp$gl_flags.smp$v_enabled) \ *(int *)status = smp_std$acquire_shr_nospin(concat_sym(SPL$C_,lockname)); \ else \ { \ mtpr_ipl(concat_sym(IPL$_,lockname)); \ *(int *)status = SS$_NORMAL; \ } \ } #define sys_lock_cvt_to_shr(lockname) \ { \ extern SMP smp$gl_flags; \ extern SPL_PPL smp$ar_spnlkvec; \ \ smp_std$cvt_to_shared(smp$ar_spnlkvec[concat_sym(SPL$C_,lockname)]); \ } #define sys_lock_cvt_to_ex(lockname,status) \ { \ extern SMP smp$gl_flags; \ extern SPL_PPL smp$ar_spnlkvec; \ \ *(int *)status = smp_std$cvt_to_ex(smp$ar_spnlkvec[concat_sym(SPL$C_,lockname)]); \ } /* These macros are similar to the UNLOCK macro (in MACRO-32) to release a spinlock and/or lower IPL. They do NOT release a mutex, however. This is a separate function. lockname = Name of the spinlock in uppercase (IOLOCK8, etc.) new_ipl = if >= 0, then this is to be the new IPL (< 0 implies no change) restore = if != 0, then use SMP restore function, else use release. */ #define sys_unlock(lockname,new_ipl,restore) \ { \ extern SMP smp$gl_flags; \ \ if (smp$gl_flags.smp$v_enabled) \ if (restore != 0) \ smp_std$restore(concat_sym(SPL$C_,lockname)); \ else \ smp_std$release(concat_sym(SPL$C_,lockname)); \ if ((int)(new_ipl) >= 0) \ mtpr_ipl(new_ipl); \ } #define sys_unlock_shr(lockname,new_ipl,restore) \ { \ extern SMP smp$gl_flags; \ \ if (smp$gl_flags.smp$v_enabled) \ { \ if (restore != 0) \ smp_std$restore_shr(concat_sym(SPL$C_,lockname)); \ else \ smp_std$release_shr(concat_sym(SPL$C_,lockname)); \ } \ if ((int)(new_ipl) >= 0) \ mtpr_ipl(new_ipl); \ } ///////////////////////////////////////////////////////////////////////////////// // // The LOCK_CPU_MUTEX and UNLOCK_CPU_MUTEX macros are similar to the // Macro32 LOCK and UNLOCK macros for acquiring and releasing the // CPU mutex, the only mutex ever acquired or released by those // macros. 
// // The LOCK_CPU_MUTEX and UNLOCK_CPU_MUTEX macros do not take a // mutex parameter and instead deal exclusively with the CPU mutex. // The caller can only specify whether shared or exclusive access is // required. // // Acquiring the CPU mutex in shared mode will restore the IPL, but // if the CPU mutex is acquired in exclusive mode, then IPL remains // at IPL$_POWER and it is the responsibility of the caller to restore // the IPL after releasing the CPU mutex. // // Shared Usage: // lock_cpu_mutex (1); // ... // ... // unlock_cpu_mutex (1); // // Exclusive Usage: // saved_ipl = __PAL_MFPR_IPL(); // lock_cpu_mutex (0); // ... // ... // unlock_cpu_mutex (0); // setipl (saved_ipl); // // #ifdef __alpha #define CMPXCHG8(ptr,old,new) __CMP_STORE_QUAD(ptr,old,new,ptr) #endif #ifdef __ia64 #define CMPXCHG8(ptr,old,new) __CMP_SWAP_QUAD(ptr,old,new) #endif #define lock_cpu_mutex(share) \ { \ int status; \ int saved_ipl; \ int drop_ipl; \ int retry; \ int64 delta_time; \ int64 end_time; \ CPU *cpu; \ MUTEX old_mutex; \ MUTEX new_mutex; \ extern int sgn$gl_smp_spinwait; \ extern SYS_TIME_CONTROL exe$gl_time_control; \ extern volatile struct _mutex smp$gq_cpu_mutex; \ \ \ /* */ \ /* Disable interrupts, initialize drop IPL and */ \ /* retrieve CPU database address */ \ /* */ \ saved_ipl = setipl ( IPL$_POWER ); \ if ( saved_ipl >= IPL$_SCHED ) \ drop_ipl = saved_ipl; \ else \ drop_ipl = IPL$_SCHED; \ cpu = (CPU *) find_cpu_data; \ retry = 0; \ \ /* */ \ /* Compute endtime token */ \ /* */ \ delta_time = sgn$gl_smp_spinwait << 13; \ exe$timedwait_setup ( &delta_time, &end_time ); \ \ /* */ \ /* Loop to increment mutex owner count atomically */ \ /* */ \ do \ { \ /* */ \ /* Issue memory barrier and read current mutex */ \ /* */ \ __MB(); \ old_mutex.mutex$q_quadword = smp$gq_cpu_mutex.mutex$q_quadword; \ new_mutex.mutex$q_quadword = old_mutex.mutex$q_quadword; \ \ /* */ \ /* check if mutex is free or not */ \ /* */ \ if ( old_mutex.mutex$v_interlock ) \ { \ /* */ \ /* Give the IPINT handler a chance */ \ /* */ \ setipl ( drop_ipl ); \ setipl ( IPL$_POWER ); \ \ /* */ \ /* Check for bugcheck requests. */ \ /* */ \ if ( cpu->cpu$v_bugchk ) bug_check ( CPUEXIT, FATAL, COLD ); \ \ /* */ \ /* If SMP timeouts are not disabled check for timeout, */ \ /* then try another wait loop */ \ /* */ \ if ( !exe$gl_time_control.exe$v_nospinwait ) \ { \ status = exe$timedwait_complete ( &end_time ); \ if ( !$VMS_STATUS_SUCCESS(status) ) \ { \ smp$timeout(); \ exe$timedwait_setup ( &delta_time, &end_time ); \ } \ } \ } \ else \ { \ /* */ \ /* Mutex is free (interlock bit clear) */ \ /* For shared access, only increment the owner count. */ \ /* For exclusive access, only set the interlock bit */ \ /* after making sure it is not owned. 
*/ \ /* */ \ if ( share ) \ { \ new_mutex.mutex$l_owncnt++; \ retry = CMPXCHG8 ( &smp$gq_cpu_mutex, \ old_mutex.mutex$q_quadword, \ new_mutex.mutex$q_quadword ); \ } \ else \ { \ if ( (int)new_mutex.mutex$l_owncnt == -1 ) \ { \ new_mutex.mutex$v_interlock = 1; \ retry = CMPXCHG8 ( &smp$gq_cpu_mutex, \ old_mutex.mutex$q_quadword, \ new_mutex.mutex$q_quadword ); \ } \ } \ } \ \ } while ( !retry ); \ \ /* */ \ /* record this mutex held-count and restore IPL */ \ /* shared access */ \ /* */ \ __MB(); \ cpu->cpu$l_cpumtx++; \ if ( share ) setipl ( saved_ipl ); \ } #define unlock_cpu_mutex(share) \ { \ int status; \ int saved_ipl; \ int drop_ipl; \ int retry; \ int64 delta_time; \ int64 end_time; \ CPU *cpu; \ MUTEX old_mutex; \ MUTEX new_mutex; \ extern int sgn$gl_smp_spinwait; \ extern SYS_TIME_CONTROL exe$gl_time_control; \ extern volatile struct _mutex smp$gq_cpu_mutex; \ \ \ if ( share ) \ { \ /* */ \ /* Disable interrupts, initialize drop IPL and */ \ /* retrieve CPU database address */ \ /* */ \ saved_ipl = setipl ( IPL$_POWER ); \ if ( saved_ipl >= IPL$_SCHED ) \ drop_ipl = saved_ipl; \ else \ drop_ipl = IPL$_SCHED; \ cpu = (CPU *) find_cpu_data; \ retry = 0; \ \ /* */ \ /* Compute endtime token */ \ /* */ \ delta_time = sgn$gl_smp_spinwait << 13; \ exe$timedwait_setup ( &delta_time, &end_time ); \ \ /* */ \ /* Loop to decrement CPU mutex owner count atomically */ \ /* */ \ do \ { \ /* */ \ /* Issue memory barrier and read current CPU mutex */ \ /* */ \ __MB(); \ old_mutex.mutex$q_quadword = smp$gq_cpu_mutex.mutex$q_quadword; \ new_mutex.mutex$q_quadword = old_mutex.mutex$q_quadword; \ \ /* */ \ /* check if CPU mutex is free or not */ \ /* */ \ if ( old_mutex.mutex$v_interlock ) \ { \ /* */ \ /* Give the IPINT handler a chance */ \ /* */ \ setipl ( drop_ipl ); \ setipl ( IPL$_POWER ); \ \ /* */ \ /* Check for bugcheck requests. */ \ /* */ \ if ( cpu->cpu$v_bugchk ) bug_check ( CPUEXIT, FATAL, COLD ); \ \ /* */ \ /* If SMP timeouts are not disabled check for timeout, */ \ /* then try another wait loop */ \ /* */ \ if ( !exe$gl_time_control.exe$v_nospinwait ) \ { \ status = exe$timedwait_complete ( &end_time ); \ if ( !$VMS_STATUS_SUCCESS(status) ) \ { \ smp$timeout(); \ exe$timedwait_setup ( &delta_time, &end_time ); \ } \ } \ } \ else \ { \ /* */ \ /* Decrement owner count */ \ /* */ \ new_mutex.mutex$l_owncnt--; \ retry = CMPXCHG8 ( &smp$gq_cpu_mutex, \ old_mutex.mutex$q_quadword, \ new_mutex.mutex$q_quadword ); \ } \ \ } while ( !retry ); \ \ /* */ \ /* rundown this CPU's mutex held-count and restore IPL */ \ /* */ \ __MB(); \ cpu->cpu$l_cpumtx--; \ setipl ( saved_ipl ); \ } \ else \ { \ /* */ \ /* Interrupts are already disabled, so just retrieve */ \ /* CPU database address */ \ /* */ \ cpu = (CPU *) find_cpu_data; \ \ /* */ \ /* decrement this CPU's mutex held-count */ \ /* */ \ cpu->cpu$l_cpumtx--; \ \ /* */ \ /* Clear the interlocked bit atomically */ \ /* */ \ __ATOMIC_DECREMENT_QUAD ( &smp$gq_cpu_mutex ); \ \ /* */ \ /* Synchronize with any TB invalidates that might have */ \ /* occured in the active set prior to this CPU joining */ \ /* the active set. */ \ /* */ \ mtpr_tbia; \ } \ } ///////////////////////////////////////////////////////////////////////////////// /* This vms_assert macro is intended to provide C "assert" behavior in a twisted, perverted, VMS fashion...That is, if the specified expression turns out to be false (evaluated VMS-style) at run-time, fatal bugcheck. Otherwise, do nothing. The macro does nothing in a non-debug mode too. 
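
   For example (an illustrative sketch only; "status" and "some_routine" are
   not names defined by this header), to bugcheck when a routine returns a
   failure status:

       status = some_routine (arg);
       vms_assert (status);

   Because the expression is evaluated VMS-style, any value with the low bit
   clear triggers the ASSERTFAIL bugcheck when NDEBUG is not defined.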
*/

#ifdef NDEBUG
#define vms_assert(ignore)
#else
#define vms_assert(expr) \
    { if (!((expr) & 1)) bug_check(ASSERTFAIL,FATAL,COLD) }
#endif

/* These macros improve the readability of VMS code which checks status return
   values for success/failure based on the low bit of the status value.  In C,
   the test for this is slightly uglier than in MACRO and BLISS and can be
   hidden inside a macro which makes it clear what the code is trying to do.
   Each macro takes a single argument, the return status or function return
   value, and returns true (1) or false (0) based on the low bit of this value. */

#define good_status(status) (((status) & 1) == 1)
#define bad_status(status) (((status) & 1) == 0)

/* The following definitions of constants and system data cells are needed by
   the TB invalidate routines which follow them. */

#define NO_PCB ((PCB *) 0)
#define THIS_CPU_ONLY 1
#define ALL_CPUS 2
#define ASSUME_PRIVATE 3
#define ASSUME_SHARED 4

extern SMP smp$gl_flags;

/* TB Invalidate All Entries (System and Process)

   TBI_ALL ENVIRON

   ENVIRON = "THIS_CPU_ONLY" indicates that this invocation of TBIA is to be
             executed strictly within the context of the local CPU only.
             Thus, no attempt is made whatsoever to extend the TBIA request
             to any CPU or other 'processor' that might exist within the
             system.

           = "ALL_CPUS" forces the TBIA to be extended to all components of
             the system that may have cached PTEs.
*/
#define tbi_all(environ) { \
 \
    if (environ == THIS_CPU_ONLY) \
        mtpr_tbia; \
    else \
        mmg$tbi_all(); \
}

/* TB Invalidate Data Single 64

   TBI_DATA_64 ADDR, ENVIRON, PCBADDR

   ADDR    = 64-bit Virtual Address to be invalidated.

   ENVIRON = "THIS_CPU_ONLY" indicates that this invocation of TBISD is to be
             executed strictly within the context of the local CPU only.
             Thus, no attempt is made whatsoever to extend the TBISD request
             to any CPU or other 'processor' that might exist within the
             system.

           = "ASSUME_PRIVATE" indicates that this is a threads environment and
             that the address should be treated as a private address and not
             checked.  Therefore, in an SMP environment, we need to do the
             invalidate to other CPUs which are running a kernel thread from
             this process.  This argument is used for system space addresses
             which should be treated as private to the process (e.g. for
             L2PTE's which are also mapped in "page table space").

           = "ASSUME_SHARED" indicates that this invocation of TBISD should be
             broadcast to all other CPUs in the system.  ASSUME_SHARED is the
             exact opposite of THIS_CPU_ONLY.

           = "ALL_CPUS" forces the TB invalidate to be extended to all
             components of the system that may have cached PTEs.

   PCBADDR = Address of current process control block.  The NO_PCB symbol can
             be used for this argument if the PCB address is not required (for
             example, when using the qualifier, ENVIRON=THIS_CPU_ONLY or
             ENVIRON=ASSUME_SHARED).
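
   For example (an illustrative sketch only; "va" and "pcb" are not names
   defined by this header), to invalidate a shared system-space address on
   all CPUs, and a process-private address for a possibly multithreaded
   process:

       tbi_data_64 (va, ASSUME_SHARED, NO_PCB);
       tbi_data_64 (va, ASSUME_PRIVATE, pcb);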
*/
#define tbi_data_64(addr,environ,pcbaddr) { \
 \
    switch (environ) \
    { \
        case THIS_CPU_ONLY: \
            mtpr_tbisd(addr); \
            break; \
        case ALL_CPUS: \
            if (smp$gl_flags.smp$v_enabled == 0) \
                mtpr_tbisd(addr); \
            else \
                if ($is_shared_va(addr)) \
                    mmg_std$tbi_data_64(addr); \
                else \
                { \
                    if (pcbaddr->pcb$l_multithread <= 1) \
                        mtpr_tbisd(addr); \
                    else \
                        mmg_std$tbi_data_64_threads(addr); \
                } \
            break; \
        case ASSUME_PRIVATE: \
            if (smp$gl_flags.smp$v_enabled == 0) \
                mtpr_tbisd(addr); \
            else \
                if (pcbaddr->pcb$l_multithread <= 1) \
                    mtpr_tbisd(addr); \
                else \
                    mmg_std$tbi_data_64_threads(addr); \
            break; \
        case ASSUME_SHARED: \
            if (smp$gl_flags.smp$v_enabled == 0) \
                mtpr_tbisd(addr); \
            else \
                mmg_std$tbi_data_64(addr); \
            break; \
    } \
}

/* TB Invalidate Single

   TBI_SINGLE ADDR, ENVIRON, PCBADDR

   ADDR    = Virtual Address to be invalidated.

   ENVIRON = "THIS_CPU_ONLY" indicates that this invocation of TBIS is to be
             executed strictly within the context of the local CPU only.
             Thus, no attempt is made whatsoever to extend the TBIS request
             to any CPU or other 'processor' that might exist within the
             system.

           = "ASSUME_PRIVATE" indicates that this is a threads environment and
             that the address should be treated as a private address and not
             checked.  Therefore, in an SMP environment, we need to do the
             invalidate to other CPUs which are running a kernel thread from
             this process.  This argument is used for system space addresses
             which should be treated as private to the process (e.g. for
             L2PTE's which are also mapped in "page table space").

           = "ASSUME_SHARED" indicates that this invocation of TBIS should be
             broadcast to all other CPUs in the system.  ASSUME_SHARED is the
             exact opposite of THIS_CPU_ONLY.

           = "ALL_CPUS" forces the TB invalidate to be extended to all
             components of the system that may have cached PTEs.

   PCBADDR = Address of current process control block.  The NO_PCB symbol can
             be used for this argument if the PCB address is not required (for
             example, when using the qualifier, ENVIRON=THIS_CPU_ONLY).
*/
#define tbi_single(addr,environ,pcbaddr) { \
 \
    switch (environ) \
    { \
        case THIS_CPU_ONLY: \
            mtpr_tbis(addr); \
            break; \
        case ALL_CPUS: \
            if (smp$gl_flags.smp$v_enabled == 0) \
                mtpr_tbis(addr); \
            else \
                if ($is_shared_va(addr)) \
                    mmg$tbi_single(addr); \
                else \
                { \
                    if (pcbaddr->pcb$l_multithread <= 1) \
                        mtpr_tbis(addr); \
                    else \
                        mmg$tbi_single_threads(addr); \
                } \
            break; \
        case ASSUME_PRIVATE: \
            if (smp$gl_flags.smp$v_enabled == 0) \
                mtpr_tbis(addr); \
            else \
                if (pcbaddr->pcb$l_multithread <= 1) \
                    mtpr_tbis(addr); \
                else \
                    mmg$tbi_single_threads(addr); \
            break; \
        case ASSUME_SHARED: \
            if (smp$gl_flags.smp$v_enabled == 0) \
                mtpr_tbis(addr); \
            else \
                mmg$tbi_single(addr); \
            break; \
    } \
}

/*
 * Convert bitmask to bit number.  I.e., the xxx$V_yyy version of xxx$M_yyy.
 * The name is uppercase to reflect the fact that the input is a compile-time
 * constant, such as IRP$M_ERASE.
 *
 *      Input (mask)    Output (position)
 *          0x0001              0
 *          0x0002              1
 *          0x0004              2
 *           ...
 *          0x4000             14
 *          etc.
 *
 * Currently limited to 32 bit wide single-bit masks.
 */
#define MASK_TO_POSITION(m) \
    (m>>24 ? (m>>31?31:m>>30?30:m>>29?29:m>>28?28: \
              m>>27?27:m>>26?26:m>>25?25:24) : \
     m>>16 ? (m>>23?23:m>>22?22:m>>21?21:m>>20?20: \
              m>>19?19:m>>18?18:m>>17?17:16) : \
     m>>8  ? (m>>15?15:m>>14?14:m>>13?13:m>>12?12: \
              m>>11?11:m>>10?10:m>> 9? 9: 8) : \
             (m>> 7? 7:m>> 6? 6:m>> 5? 5:m>> 4? 4: \
              m>> 3? 3:m>> 2? 2:m>> 1? 1: 0) )

/* $get_item_code

   This macro fetches the contents of the item code field from an item list
   entry.
Note that the item code field is in the same place for 32-bit and 64-bit item list entries. ARGUMENTS: item_list: Specifies the item list entry from which the item code is extracted. USAGE: item_code = $get_item_code (item_list); */ #define $get_item_code(item_list) \ ( ((ILEA_64_PQ)item_list)->ilea_64$w_code ) /* $GET_LENGTH This macro fetches the contents of the length field from an item list entry. ARGUMENTS: flag: A flag denoting the type of item list specified. Low bit set denotes a 64-bit item list, while low bit clear denotes a 32-bit item list. itemlist: Specifies the item list entry from which the length is extracted. USAGE: item_length = $get_length (flag, item_list); */ #define $get_length(flag,item_list) \ ( flag == 1 ? ((ILEA_64_PQ)item_list)->ilea_64$q_length : ((ILE2_PQ)item_list)->ile2$w_length ) /* $GET_BUFADDR This macro fetches the contents of the buffer address field from an item list entry. ARGUMENTS: flag: A flag denoting the type of item list specified. Low bit set denotes a 64-bit item list, while low bit clear denotes a 32-bit item list. item_list: Specifies the item list entry from which the buffer address is extracted. USAGE: item_bufaddr = $get_bufaddr (flag, item_list); */ #define $get_bufaddr(flag,item_list) \ ( flag == 1 ? ((ILEA_64_PQ)item_list)->ilea_64$pq_bufaddr : ((ILE2_PQ)item_list)->ile2$ps_bufaddr ) /* $GET_RETLEN_ADDR This macro fetches the contents of the return length address field from an item list entry. The return length address field only exists for item_list_3 and item_list_64_b item list types. ARGUMENTS: flag: A flag denoting the type of item list specified. Low bit set denotes a 64-bit item list, while low bit clear denotes a 32-bit item list. item_list: Specifies the item list entry from which the return length address is extracted. USAGE: item_retlen_addr = $get_retlen_addr (flag, item_list); */ #define $get_retlen_addr(flag,item_list) \ ( flag == 1 ? ((ILEB_64_PQ)item_list)->ileb_64$pq_retlen_addr : ((ILE3_PQ)item_list)->ile3$ps_retlen_addr ) /* $GET_ILE_FIELDS This macro fetches the contents of the item list entry fields and writes them to the user-supplied registers. ARGUMENTS: flag: A flag denoting the type of item list entry specified in the item list argument. Low bit set denotes a 64-bit item list, while low bit clear denotes a 32-bit item list. item_list: An item list entry from which to fetch the contents of the various fields. item_code: Contents of the item code field are recorded here. length: Contents of the length field are recorded here. bufaddr: Contents of the buffer address field are recorded here. retlen_addr: Contents of the return length address field are recorded here. USAGE: $get_ile_fields (flag, item_list, item_code, length, bufaddr, retlen_addr); */ #define $get_ile_fields(flag, item_list, item_code, length, bufaddr, retlen_addr) \ { \ if (flag == 1 ) \ { \ item_code = ((ILEB_64_PQ)item_list)->ileb_64$w_code; \ length = ((ILEB_64_PQ)item_list)->ileb_64$q_length; \ bufaddr = ((ILEB_64_PQ)item_list)->ileb_64$pq_bufaddr; \ retlen_addr = ((ILEB_64_PQ)item_list)->ileb_64$pq_retlen_addr; \ } \ else \ { \ item_code = ((ILE3_PQ)item_list)->ile3$w_code; \ length = ((ILE3_PQ)item_list)->ile3$w_length; \ bufaddr = ((ILE3_PQ)item_list)->ile3$ps_bufaddr; \ retlen_addr = ((ILE3_PQ)item_list)->ile3$ps_retlen_addr; \ } \ } // TR_PRINT - Debug print // // // This macro adds an informational message to the TR trace buffer. // The ctrstr argument has similar syntax to a "printf" statement. 
//
//  Inputs:
//
//      ctrstr - The text and optional formatting directives to be
//               saved in the trace ring buffer, only the following
//               directives are allowed, no width:
//                   %s - zero-terminated string
//                   %a - ascii string (pointer & length)
//                   %d - decimal value
//                   %X - hexadecimal longword
//                   %L - hexadecimal quadword
//      p1-p5  - The corresponding values to be formatted.  For the %s
//               directive, this is the address of the zero-terminated
//               string.  For the %a directive, this requires 2 arguments,
//               first the address of the string buffer, then the length
//               of the string (by value).  For the other directives, this
//               is passed by value.
//
//  Usage Examples:
//      Macro32:
//          tr_print ctrstr=,p1=r4
//          tr_print ctrstr=,p1=r3,p2=r5
//      C:
//          #include vms_macros
//          tr_print (("this is a C test and needs double-parentheses, index %d", idx ));
//          tr_print (("a hex number %X and a quadword %L", irp->irp$l_func, irp->irp$q_fr3 ));
//      Bliss:
//          tr_print ('this is a Bliss test, index %d', .idx );
//          tr_print ('a hex number %X and a quadword %L', .irp, .ucb );
//
extern uint64 tr$gq_debug;

#define tr_print(_printf_args) \
    if ( tr$gq_debug & 1 ) \
    { \
        int *tr_print_rtn = (int *) (tr$gq_debug & ~1); \
        ((void (*)()) *tr_print_rtn) _printf_args ; \
    }

// EXC_PRINT - Exception trace print
//
//
//  This macro adds an informational message to the EXC trace buffer.
//  The ctrstr argument has similar syntax to a "printf" statement.
//
//  Inputs:
//
//      ctrstr - The text and optional formatting directives to be
//               saved in the trace ring buffer, only the following
//               directives are allowed, no width:
//                   %s - zero-terminated string
//                   %a - ascii string (pointer & length)
//                   %d - decimal value
//                   %X - hexadecimal longword
//                   %L - hexadecimal quadword
//      p1-p5  - The corresponding values to be formatted.  For the %s
//               directive, this is the address of the zero-terminated
//               string.  For the %a directive, this requires 2 arguments,
//               first the address of the string buffer, then the length
//               of the string (by value).  For the other directives, this
//               is passed by value.
//
//  Usage Examples:
//      Macro32:
//          exc_print ctrstr=,p1=r4
//          exc_print ctrstr=,p1=r3,p2=r5
//      C:
//          #include vms_macros
//          exc_print (("this is a C test and needs double-parentheses, index %d", idx ));
//          exc_print (("a hex number %X and a quadword %L", irp->irp$l_func, irp->irp$q_fr3 ));
//      Bliss:
//          exc_print ('this is a Bliss test, index %d', .idx );
//          exc_print ('a hex number %X and a quadword %L', .irp, .ucb );
//
extern uint64 exc$gq_debug;

#define exc_print(_printf_args) \
    if ( exc$gq_debug & 1 ) \
    { \
        int *exc_print_rtn = (int *) (exc$gq_debug & ~1); \
        ((void (*)()) *exc_print_rtn) _printf_args ; \
    }

#endif /* __VMS_MACROS_LOADED */

// __ADD_ATOMIC_LONG, and QUAD, generate memory barriers on Alpha and memory
// fences on IPF around the atomic instruction sequence.  __ATOMIC_ADD_LONG, and
// QUAD, do not.  This is a little-known fact and __ADD_ATOMIC_LONG, and QUAD,
// are frequently used when the memory barriers are not needed.  These macros are
// intended to help code writers and readers with this distinction.  Every bit
// of performance improvement helps.
//
#define $ADD_ATOMIC_LONG_BARRIER(data,count){   \
    __ADD_ATOMIC_LONG(data,count);              \
}
#define $ADD_ATOMIC_LONG_NO_BARRIER(data,count){\
    __ATOMIC_ADD_LONG(data,count);              \
}
#define $ADD_ATOMIC_QUAD_BARRIER(data,count){   \
    __ADD_ATOMIC_QUAD(data,count);              \
}
#define $ADD_ATOMIC_QUAD_NO_BARRIER(data,count){\
    __ATOMIC_ADD_QUAD(data,count);              \
}
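
// As an illustrative sketch only (the counters named below are not data cells
// defined by this header): a private statistics counter that nothing else
// orders against can use the cheaper no-barrier form, while a count that
// publishes work to other CPUs should keep the barriers.
//
//     $ADD_ATOMIC_LONG_NO_BARRIER (&stats_counter, 1);
//     $ADD_ATOMIC_QUAD_BARRIER (&work_posted_count, 1);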