/**/ /***************************************************************************/ /** **/ /** © Copyright 2010, Hewlett-Packard Development Company, L.P. **/ /** **/ /** Confidential computer software. Valid license from HP and/or **/ /** its subsidiaries required for possession, use, or copying. **/ /** **/ /** Consistent with FAR 12.211 and 12.212, Commercial Computer Software, **/ /** Computer Software Documentation, and Technical Data for Commercial **/ /** Items are licensed to the U.S. Government under vendor's standard **/ /** commercial license. **/ /** **/ /** Neither HP nor any of its subsidiaries shall be liable for technical **/ /** or editorial errors or omissions contained herein. The information **/ /** in this document is provided "as is" without warranty of any kind and **/ /** is subject to change without notice. The warranties for HP products **/ /** are set forth in the express limited warranty statements accompanying **/ /** such products. Nothing herein should be construed as constituting an **/ /** additional warranty. 
**/ /** **/ /***************************************************************************/ /********************************************************************************************************************************/ /* Created: 30-Mar-2010 17:37:39 by OpenVMS SDL EV3-3 */ /* Source: 21-JUL-2009 16:30:34 $1$DGA7274:[LIB_H.SRC]CPUDEF.SDL;1 */ /********************************************************************************************************************************/ /*** MODULE $CPUDEF ***/ #ifndef __CPUDEF_LOADED #define __CPUDEF_LOADED 1 #pragma __nostandard /* This file uses non-ANSI-Standard features */ #pragma __member_alignment __save #pragma __nomember_alignment #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __save /* Save the previously-defined required ptr size */ #pragma __required_pointer_size __short /* And set ptr size default to 32-bit pointers */ #endif #ifdef __cplusplus extern "C" { #define __unknown_params ... #define __optional_params ... #else #define __unknown_params #define __optional_params ... #endif #ifndef __struct #if !defined(__VAXC) #define __struct struct #else #define __struct variant_struct #endif #endif #ifndef __union #if !defined(__VAXC) #define __union union #else #define __union variant_union #endif #endif #include /* Define the FKB type; CPU$ contains an embedded FKB */ #include #include /*+ */ /* */ /* Per-CPU Database definitions. One of these structures exists for */ /* each CPU that is participating in symmetric multiprocessing. */ /* */ /* The per-CPU database consists of 2 parts. A fixed portion that exists */ /* for any CPU type is defined first. A variable portion is also defined as */ /* necessary for various CPU types. The contents of the variable portion */ /* are CPU-specific. 
*/ /* */ /* When creating a per-CPU database, one must allocate space to include */ /* both the fixed portion and a variable portion that is specific to the */ /* CPU type for which the database is being created. */ /* */ /*- */ #define CPU$C_RESERVED 0 /* Zero is reserved */ #define CPU$C_INIT 1 /* CPU is being INITialized */ #define CPU$C_RUN 2 /* CPU is RUNning */ #define CPU$C_STOPPING 3 /* CPU is STOPping */ #define CPU$C_STOPPED 4 /* CPU is STOPPED */ #define CPU$C_TIMOUT 5 /* Boot of CPU timed out */ #define CPU$C_BOOT_REJECTED 6 /* CPU refuses to join SMP */ #define CPU$C_BOOTED 7 /* CPU booted - waiting for "go" */ #define CPU$C_NOT_CONFIGURED 8 /* CPU exists, but not in configure set */ #define CPU$C_POWERED_DOWN 9 /* CPU in configure set, but powered down */ #define CPU$C_DEALLOCATED 10 /* CPU has been deallocated */ /* Mask values for the CPU$L_WORK_REQ work-request bitmask; each corresponds to a cpu$v_* bitfield of cpu$r_work_req_overlay below */ #define CPU$M_INV_TBS 0x1 /* Invalidate TB single */ #define CPU$M_INV_TBA 0x2 /* Invalidate TB all */ #define CPU$M_BUGCHK 0x4 /* BUG_CHECK requested */ #define CPU$M_BUGCHKACK 0x8 /* BUG_CHECK acked */ #define CPU$M_RECALSCHD 0x10 /* Recalculate per cpu mask, reschedule */ #define CPU$M_UPDASTSR 0x20 /* Update ASTSR register */ #define CPU$M_UPDATE_HWCLOCK 0x40 /* Update local hardware clocks */ #define CPU$M_WORK_FQP 0x80 /* Process work queue */ #define CPU$M_QLOST 0x100 /* Stall until quorum regained */ #define CPU$M_RESCHED 0x200 /* Issue IPL 3 SOFTINT */ #define CPU$M_VIRTCONS 0x400 /* Enter virtual console mode (primary) */ #define CPU$M_IOPOST 0x800 /* Issue IPL 4 SOFTINT */ #define CPU$M_INV_ISTREAM 0x1000 /* Invalidate cached instruction stream */ #define CPU$M_INV_TBSD 0x2000 /* Invalidate data TB single */ #define CPU$M_INV_TBS_MMG 0x4000 /* Invalidate TB single MMG synchronized */ #define CPU$M_INV_TBSD_MMG 0x8000 /* Invalidate TB single MMG synchronized */ #define CPU$M_IO_INT_AFF 0x10000 /* Fast Path I/O completion event */ #define CPU$M_IO_START_AFF 0x20000 /* Fast Path I/O start event */ #define CPU$M_UPDATE_SYSPTBR 0x40000 /* Update SYSPTBR register */ #define CPU$M_PERFMON 0x80000 /* Performance Monitoring */ #define CPU$M_READ_SCC 0x100000 /* Read SCC */ #define CPU$M_CPUFILL_1 0xFFFFFFF /* Pad bits 0-27 of cpu$l_cpuspec (generic work-request positions) */ #define CPU$M_CPUSPEC1 0x10000000 /* CPU specific work request, bit 28 */ #define CPU$M_CPUSPEC2 0x20000000 /* CPU specific work request, bit 29 */ #define CPU$M_CPUSPEC3 0x40000000 /* CPU specific work request, bit 30 */ #define CPU$M_CPUSPEC4 0x80000000 /* CPU specific work request, bit 31 */ #define CPU$K_NUM_SWIQS 6 /* Number of software interrupt queues */ /* Masks for saved fields in the system-process HWPCB (see cpu$r_ast_overlay / cpu$r_fen_overlay below); NOT for direct IPR access */ #define CPU$M_SYS_ASTEN 0xF /* AST Enable Register (bits 0-3) */ #define CPU$M_SYS_ASTSR 0xF0 /* AST Pending Summary Register (bits 4-7) */ #define CPU$M_FEN 0x1 /* Floating Point Enable */ #define CPU$M_PME 0x4000000000000000 /* Performance Monitor Enable (bit 62) */ #define CPU$M_DATFX 0x8000000000000000 /* Data Alignment Trap Fixup (bit 63) */ #define CPU$C_HWPCBLEN 128 /* Length of HWPCB in 128 bytes */ #define CPU$K_HWPCBLEN 128 /* Length of HWPCB in 128 bytes */ #define
CPU$M_TERM_ASTEN 0xF #define CPU$M_TERM_ASTSR 0xF0 #define CPU$M_TERM_FEN 0x1 #define CPU$M_TERM_PME 0x4000000000000000 #define CPU$M_TERM_DATFX 0x8000000000000000 #define CPU$M_SCHED 0x1 #define CPU$M_FOREVER 0x2 #define CPU$M_NEWPRIM 0x4 #define CPU$M_PSWITCH 0x8 #define CPU$M_BC_STACK 0x10 #define CPU$M_BC_CONTEXT 0x20 #define CPU$M_USER_CAPABILITIES_SET 0x40 #define CPU$M_RESET_LOW_POWER 0x80 #define CPU$M_STOPPING 0x1 #define CPU$M_PCSAMPLE_ACTIVE 0x1 #define CPU$M_IO_AFF_FKB_INUSE 0x1 #define CPU$M_PORT_ASSIGNED 0x2 #define CPU$M_DISTRIBUTED_INTS 0x4 #define CPU$M_LASTPAGE_TESTED 0x20000000 #define CPU$M_MCHECK 0x40000000 #define CPU$M_MEMORY_WRITE 0x80000000 #define CPU$M_AUTO_START 0x1 #define CPU$M_NOBINDINGS 0x2 #ifdef __cplusplus /* Define structure prototypes */ struct _pcb; struct _ktb; struct _irp; struct _pte; #endif /* #ifdef __cplusplus */ #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif typedef struct _cpu { #pragma __nomember_alignment __union { struct _pcb *cpu$l_curpcb; /* Address of CPU's current PCB */ struct _ktb *cpu$l_curktb; /* Address of CPU's current KTB */ } cpu$r_curpcb_overlay; void *cpu$l_slot_va; /* Address of CPU's HWRPB slot */ unsigned short int cpu$w_size; /* Structure size */ unsigned char cpu$b_type; /* Structure type */ unsigned char cpu$b_subtype; /* Structure subtype */ unsigned int cpu$l_state; /* State of this processor */ unsigned int cpu$l_cpumtx; /* Count of CPUMTX acquires */ unsigned int cpu$l_cur_pri; /* Current Process Priority */ /* */ /* CPU type independent work request bits */ /* */ __union { __union { unsigned int cpu$l_work_req; /* Work request bitmask */ __struct { unsigned cpu$v_inv_tbs : 1; /* Invalidate TB single */ unsigned cpu$v_inv_tba : 1; /* Invalidate TB all */ unsigned cpu$v_bugchk : 1; /* BUG_CHECK requested */ unsigned cpu$v_bugchkack : 1; /* BUG_CHECK acked */ 
unsigned cpu$v_recalschd : 1; /* Recalculate per cpu mask,reschedule */ unsigned cpu$v_updastsr : 1; /* Update ASTSR register */ unsigned cpu$v_update_hwclock : 1; /* Update local hardware clocks */ unsigned cpu$v_work_fqp : 1; /* Process work queue */ unsigned cpu$v_qlost : 1; /* Stall until quorum regained */ unsigned cpu$v_resched : 1; /* Issue IPL 3 SOFTINT */ unsigned cpu$v_virtcons : 1; /* Enter virtual console mode (primary) */ unsigned cpu$v_iopost : 1; /* Issue IPL 4 SOFTINT */ unsigned cpu$v_inv_istream : 1; /* Invalidate cached instruction stream */ unsigned cpu$v_inv_tbsd : 1; /* Invalidate data TB single */ unsigned cpu$v_inv_tbs_mmg : 1; /* Invalidate TB single MMG synchronized */ unsigned cpu$v_inv_tbsd_mmg : 1; /* Invalidate TB single MMG synchronized */ unsigned cpu$v_io_int_aff : 1; /* Fast Path I/O completion event */ unsigned cpu$v_io_start_aff : 1; /* Fast Path I/O start event */ unsigned cpu$v_update_sysptbr : 1; /* Update SYSPTBR register */ unsigned cpu$v_perfmon : 1; /* Performance Monitoring */ unsigned cpu$v_read_scc : 1; /* Read SCC */ unsigned cpu$v_fill_16_ : 3; } cpu$r_fill_1_; } cpu$r_fill_0_; /* */ /* Define 4 CPU type specific work request bits as bit #s 28-31. 
*/ /* */ __union { /* CPU specific work requests */ __union { unsigned int cpu$l_cpuspec; /* generic definition */ __struct { unsigned cpu$v_cpufill_1 : 28; /* pad bit definitions into position */ unsigned cpu$v_cpuspec1 : 1; /* CPU specific */ unsigned cpu$v_cpuspec2 : 1; /* CPU specific */ unsigned cpu$v_cpuspec3 : 1; /* CPU specific */ unsigned cpu$v_cpuspec4 : 1; /* CPU specific */ } cpu$r_fill_3_; } cpu$r_fill_2_; } cpu$r_cpuspec_overlay; } cpu$r_work_req_overlay; /* */ unsigned int cpu$l_phy_cpuid; /* CPU ID number */ int cpu$l_cbb_reserved_1; /* $l_cpuid_mask moves to bottom */ unsigned int cpu$l_busywait; /* <>0 = Spinning for lock */ /* */ #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif __int64 cpu$q_swiqfl [6]; /* Software interrupt queues */ #pragma __nomember_alignment struct _irp *cpu$l_psfl; /* POST QUEUE forward link */ struct _irp *cpu$l_psbl; /* POST QUEUE backward link */ __union { /* Work queue overlay */ unsigned __int64 cpu$q_work_fqfl; /* Work packet queue */ unsigned __int64 cpu$q_work_ifq; /* Work packet queue */ } cpu$r_ifq_overlay; /* */ struct _pte *cpu$l_zeroed_page_spte; /* SPTE address */ void *cpu$l_zeroed_page_va; /* VA for zeroed page filling */ __int64 cpu$q_zeroed_page_state; /* State for interrupted filling */ /******************************************************************* */ /* HWPCB for this CPU's dedicated System Process */ /* */ /* This Hardware Privileged Context Block provides the context for when this */ /* CPU has no other process to run. */ /* */ /* NOTE WELL: This HWPCB must be aligned to a 128 byte boundary, the */ /* architected natural alignment of a HWPCB. */ /* */ /* NOTE WELL: There are bit symbols defined here for accessing the saved ASTEN, */ /* ASTSR, FEN and DATFX values in the HWPCB. 
These symbols are NOT to be used when */ /* interfacing to the ASTEN, ASTSR, FEN or DATFX internal processor registers directly. */ /* See the specific internal register definitions for bitmasks and constants */ /* to be used when interfacing to the IPRs directly. */ /* */ #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif unsigned __int64 cpu$q_phy_sys_hwpcb; /* Physical address of HWPCB */ /* Start of aligned section */ #pragma __nomember_alignment __union { unsigned __int64 cpu$q_sys_hwpcb; /* Base of HWPCB */ unsigned __int64 cpu$q_sys_ksp; /* Kernel stack pointer */ } cpu$r_hwpcb_overlay; unsigned __int64 cpu$q_sys_esp; /* Executive stack pointer */ unsigned __int64 cpu$q_sys_ssp; /* Supervisor stack pointer */ unsigned __int64 cpu$q_sys_usp; /* User stack pointer */ unsigned __int64 cpu$q_sys_ptbr; /* Page Table Base Register */ unsigned __int64 cpu$q_sys_asn; /* Address Space Number */ __union { unsigned __int64 cpu$q_sys_astsr_asten; /* ASTSR / ASTEN quadword */ __struct { unsigned cpu$v_sys_asten : 4; /* AST Enable Register */ unsigned cpu$v_sys_astsr : 4; /* AST Pending Summary Register */ } cpu$r_ast_bits0; } cpu$r_ast_overlay; __union { unsigned __int64 cpu$q_fen_datfx; /* Floating Point Enable */ __struct { unsigned cpu$v_fen : 1; /* Floating Point Enable = 1 */ unsigned cpu$v_fill_61_1 : 32; unsigned cpu$v_fill_61_2 : 29; unsigned cpu$v_pme : 1; /* Performance Monitor Enable */ unsigned cpu$v_datfx : 1; /* Data Alignment Trap Fixup */ } cpu$r_fen_datfx_overlay; } cpu$r_fen_overlay; unsigned __int64 cpu$q_sys_cc; /* Cycle Counter */ unsigned __int64 cpu$q_unq; unsigned int cpu$l_sys_cpu_id; /* CPU id */ int cpu$l_sys_reserved_1; __int64 cpu$q_sys_pal_rsvd [5]; /* Reserved for PAL Scratch */ /* */ /* End of Hardware Privileged Context Block (HWPCB) for the system process */ /* */ 
/******************************************************************* */ /******************************************************************* */ /* HWPCB for this CPU's Terminating Process. */ /* */ /* This Hardware Privileged Context Block provides the context for when this */ /* CPU needs a place to run when a powerfail may, unexpectedly, happen. */ /* */ /* Remember, when a process's HWPCB is loaded (active on the CPU) the contents */ /* of the HWPCB are undefined since the processor may use that area as */ /* scratch space. All code paths that execute higher than IPL IPL$_POWER-2 (29) */ /* for an extended period of time may need to execute in the context of this */ /* process. Currently this includes most code surrounding powerfail/restart */ /* and parts of SMP$START_SECONDARY. */ /* */ /* NOTE WELL: This HWPCB must be aligned to a 128 byte boundary, the */ /* architected natural alignment of a HWPCB. */ /* */ /* NOTE WELL: There are bit symbols defined here for accessing the saved ASTEN, */ /* ASTSR, FEN and DATFX values in the HWPCB. These symbols are NOT to be used when */ /* interfacing to the ASTEN, ASTSR, FEN or DATFX internal processor registers directly. */ /* See the specific internal register definitions for bitmasks and constants */ /* to be used when interfacing to the IPRs directly. 
*/ /* */ /* Start of aligned section */ __union { unsigned __int64 cpu$q_term_hwpcb; /* Base of HWPCB */ unsigned __int64 cpu$q_term_ksp; /* Kernel stack pointer */ } cpu$r_term_hwpcb_overlay; unsigned __int64 cpu$q_term_esp; /* Executive stack pointer */ unsigned __int64 cpu$q_term_ssp; /* Supervisor stack pointer */ unsigned __int64 cpu$q_term_usp; /* User stack pointer */ unsigned __int64 cpu$q_term_ptbr; /* Page Table Base Register */ unsigned __int64 cpu$q_term_asn; /* Address Space Number */ __union { unsigned __int64 cpu$q_term_astsr_asten; /* ASTSR / ASTEN quadword */ __struct { unsigned cpu$v_term_asten : 4; /* AST Enable Register */ unsigned cpu$v_term_astsr : 4; /* AST Pending Summary Register */ } cpu$r_term_ast_bits0; } cpu$r_term_ast_overlay; __union { unsigned __int64 cpu$q_term_fen_datfx; /* Floating Point Enable /DATFX */ __struct { unsigned cpu$v_term_fen : 1; /* Floating Point Enable = 1 */ unsigned cpu$v_fill_61_2_1 : 32; unsigned cpu$v_fill_61_2_2 : 29; unsigned cpu$v_term_pme : 1; /* Performance Monitor Enable */ unsigned cpu$v_term_datfx : 1; /* Data Alignment Trap Fixup */ } cpu$r_term_fen_overlay; } cpu$r_term_fen_overlay; unsigned __int64 cpu$q_term_cc; /* Cycle Counter */ unsigned __int64 cpu$q_term_unq; /* Process Unique Value */ unsigned int cpu$l_term_cpu_id; /* CPU id */ int cpu$l_term_reserved_1; __int64 cpu$q_term_pal_rsvd [5]; /* Reserved for PAL Scratch */ /* */ /* End of aligned portion of HWPCB. Next quadword is used so we don't need to */ /* convert a virtual address to a physical address every time we use the terminating */ /* process. */ /* */ unsigned __int64 cpu$q_phy_term_hwpcb; /* Physical address of HWPCB */ /* */ /* End of Hardware Privileged Context Block (HWPCB) for the terminating process */ /* */ /******************************************************************* */ /* */ /* Per-CPU state saved during powerfail interrupt processing. 
The state */ /* that is saved here is process independent, yet specific to this CPU. */ /* */ unsigned __int64 cpu$q_saved_pcbb; /* PCBB from powerdown (non-zero */ /* if state successfully saved) */ unsigned __int64 cpu$q_scbb; /* SCBB from powerdown */ unsigned __int64 cpu$q_sisr; /* SISR from powerdown */ /******************************************************************* */ /* The following storage is used by BUGCHECK code. The order must be */ /* preserved since it is assumed by a table within SDA (see $CRASHDEF */ /* in [SDA]EVAX_SDADEF.SDL). */ /* */ unsigned __int64 cpu$q_bc_ksp; /* Stored KSP */ unsigned __int64 cpu$q_bc_esp; /* Stored ESP */ unsigned __int64 cpu$q_bc_ssp; /* Stored SSP */ unsigned __int64 cpu$q_bc_usp; /* Stored USP */ unsigned __int64 cpu$q_bc_ptbr; /* Stored PTBR */ unsigned __int64 cpu$q_bc_asn; /* Stored ASN */ unsigned __int64 cpu$q_bc_astsr_asten; /* Stored AST SR and EN */ unsigned __int64 cpu$q_bc_fen; /* Stored FEN / PME / DATFX */ unsigned __int64 cpu$q_bc_cc; /* Stored CC */ unsigned __int64 cpu$q_bc_unq; /* Process Unique Value */ unsigned int cpu$l_bc_cpu_id; /* CPU id */ int cpu$l_bc_reserved_1; __int64 cpu$q_bc_pal_rsvd [5]; /* PAL reserved area */ unsigned __int64 cpu$q_bc_r0; /* Stored R0 */ unsigned __int64 cpu$q_bc_r1; /* Stored R1 */ unsigned __int64 cpu$q_bc_r2; /* Stored R2 */ unsigned __int64 cpu$q_bc_r3; /* Stored R3 */ unsigned __int64 cpu$q_bc_r4; /* Stored R4 */ unsigned __int64 cpu$q_bc_r5; /* Stored R5 */ unsigned __int64 cpu$q_bc_r6; /* Stored R6 */ unsigned __int64 cpu$q_bc_r7; /* Stored R7 */ unsigned __int64 cpu$q_bc_r8; /* Stored R8 */ unsigned __int64 cpu$q_bc_r9; /* Stored R9 */ unsigned __int64 cpu$q_bc_r10; /* Stored R10 */ unsigned __int64 cpu$q_bc_r11; /* Stored R11 */ unsigned __int64 cpu$q_bc_r12; /* Stored R12 */ unsigned __int64 cpu$q_bc_r13; /* Stored R13 */ unsigned __int64 cpu$q_bc_r14; /* Stored R14 */ unsigned __int64 cpu$q_bc_r15; /* Stored R15 */ unsigned __int64 cpu$q_bc_r16; /* Stored 
R16 */ unsigned __int64 cpu$q_bc_r17; /* Stored R17 */ unsigned __int64 cpu$q_bc_r18; /* Stored R18 */ unsigned __int64 cpu$q_bc_r19; /* Stored R19 */ unsigned __int64 cpu$q_bc_r20; /* Stored R20 */ unsigned __int64 cpu$q_bc_r21; /* Stored R21 */ unsigned __int64 cpu$q_bc_r22; /* Stored R22 */ unsigned __int64 cpu$q_bc_r23; /* Stored R23 */ unsigned __int64 cpu$q_bc_r24; /* Stored R24 */ unsigned __int64 cpu$q_bc_r25; /* Stored R25 */ unsigned __int64 cpu$q_bc_r26; /* Stored R26 */ unsigned __int64 cpu$q_bc_r27; /* Stored R27 */ unsigned __int64 cpu$q_bc_r28; /* Stored R28 */ unsigned __int64 cpu$q_bc_r29; /* Stored R29 */ unsigned __int64 cpu$q_bc_pc; /* Stored PC */ unsigned __int64 cpu$q_bc_ps; /* Stored PS */ unsigned __int64 cpu$q_bc_f0; /* Stored F0 */ unsigned __int64 cpu$q_bc_f1; /* Stored F1 */ unsigned __int64 cpu$q_bc_f2; /* Stored F2 */ unsigned __int64 cpu$q_bc_f3; /* Stored F3 */ unsigned __int64 cpu$q_bc_f4; /* Stored F4 */ unsigned __int64 cpu$q_bc_f5; /* Stored F5 */ unsigned __int64 cpu$q_bc_f6; /* Stored F6 */ unsigned __int64 cpu$q_bc_f7; /* Stored F7 */ unsigned __int64 cpu$q_bc_f8; /* Stored F8 */ unsigned __int64 cpu$q_bc_f9; /* Stored F9 */ unsigned __int64 cpu$q_bc_f10; /* Stored F10 */ unsigned __int64 cpu$q_bc_f11; /* Stored F11 */ unsigned __int64 cpu$q_bc_f12; /* Stored F12 */ unsigned __int64 cpu$q_bc_f13; /* Stored F13 */ unsigned __int64 cpu$q_bc_f14; /* Stored F14 */ unsigned __int64 cpu$q_bc_f15; /* Stored F15 */ unsigned __int64 cpu$q_bc_f16; /* Stored F16 */ unsigned __int64 cpu$q_bc_f17; /* Stored F17 */ unsigned __int64 cpu$q_bc_f18; /* Stored F18 */ unsigned __int64 cpu$q_bc_f19; /* Stored F19 */ unsigned __int64 cpu$q_bc_f20; /* Stored F20 */ unsigned __int64 cpu$q_bc_f21; /* Stored F21 */ unsigned __int64 cpu$q_bc_f22; /* Stored F22 */ unsigned __int64 cpu$q_bc_f23; /* Stored F23 */ unsigned __int64 cpu$q_bc_f24; /* Stored F24 */ unsigned __int64 cpu$q_bc_f25; /* Stored F25 */ unsigned __int64 cpu$q_bc_f26; /* Stored F26 */ 
unsigned __int64 cpu$q_bc_f27; /* Stored F27 */ unsigned __int64 cpu$q_bc_f28; /* Stored F28 */ unsigned __int64 cpu$q_bc_f29; /* Stored F29 */ unsigned __int64 cpu$q_bc_f30; /* Stored F30 */ unsigned __int64 cpu$q_bc_ipl; /* Stored IPL */ unsigned __int64 cpu$q_bc_mces; /* Stored MCES */ unsigned __int64 cpu$q_bc_pcbb; /* Stored PCBB */ unsigned __int64 cpu$q_bc_prbr; /* Stored PRBR */ unsigned __int64 cpu$q_bc_vptb; /* Stored VPTB */ unsigned __int64 cpu$q_bc_scbb; /* Stored SCBB */ unsigned __int64 cpu$q_bc_sisr; /* Stored SISR */ unsigned __int64 cpu$q_bc_fpcr; /* Stored FPCR */ /**** End of Alpha symbols that match $CRASHDEF in [SDA]EVAX_SDADEF.SDL. **** */ /* */ /* */ /* */ /* End of storage used by BUGCHECK code. */ /******************************************************************* */ unsigned int cpu$l_bugcode; /* BUGCHECK code */ unsigned int cpu$l_capability; /* Bitmask of CPU's capabilities */ unsigned __int64 cpu$q_boot_time; /* System time this cpu booted */ unsigned __int64 cpu$q_asn; /* Last ASN assigned for this CPU */ unsigned __int64 cpu$q_asnseq; /* Current ASN sequence number */ /* */ /* Time counters defined as follows: */ /* (Also applies to UKERNEL and UNULLCPU cells) */ /* */ /* KERNEL mode in process context, no spinlock busywait active */ /* EXECUTIVE mode */ /* SUPERVISOR mode */ /* USER mode */ /* KERNEL mode in system context (PS = 1), no spinlock busywait active */ /* KERNEL mode in process or system context, spinlock busywait is active */ /* */ /* NULL time counter */ /* */ __union { unsigned __int64 cpu$q_kernel [6]; /* Clock ticks in each mode */ __struct { __int64 cpu$q_fill_1 [4]; /* non-busywait counters for 4 process modes */ unsigned __int64 cpu$q_system_context; /* Clock ticks in interrupt mode */ unsigned __int64 cpu$q_mpsynch; /* Clock ticks in MP synchronization */ } cpu$r_fill_5_; } cpu$r_fill_4_; unsigned __int64 cpu$q_nullcpu; /* Clock ticks in per-CPU system process (null) */ unsigned int cpu$l_hardaff; /* Count of 
processes with */ /* hard affinity for this CPU */ /* */ /* Spinlock acquisition/release tracking and verification data */ /* */ unsigned int cpu$l_rank_vec; /* Ranks of spinlocks currently held */ unsigned int cpu$l_ipl_vec; /* IPL vector of held spinlocks */ int cpu$l_ipl_array [32]; /* IPL counts of held spinlocks */ /* */ /* Cells for CPU sanity timer */ /* */ void *cpu$l_tpointer; /* Address of SANITY_TIMER of */ /* CPU being watched */ unsigned int cpu$l_sanity_timer; /* # of sanity cycles before this CPU times out */ unsigned int cpu$l_sanity_ticks; /* # of ticks until next sanity cycle */ /* */ /* CPU flags */ /* */ __union { unsigned int cpu$l_flags; /* Various CPU flags */ __struct { unsigned cpu$v_sched : 1; /* Idle loop vying for SCHED */ unsigned cpu$v_forever : 1; /* STOP/CPU with /FOREVER qualifier */ unsigned cpu$v_newprim : 1; /* Primary-to-be CPU */ unsigned cpu$v_pswitch : 1; /* Live primary switch requested by primary CPU */ unsigned cpu$v_bc_stack : 1; /* Set if we swapped process context to write crash dump */ unsigned cpu$v_bc_context : 1; /* Set if database contains context from bugcheck */ unsigned cpu$v_user_capabilities_set : 1; /* Set if user capabilities already initialized */ unsigned cpu$v_reset_low_power : 1; /* Tell the next clock soft-tick to reset the low power switch */ } cpu$r_fill_7_; } cpu$r_fill_6_; /* */ /* The following field, INTFLAGS, must be longword aligned since */ /* interlocked instructions are used to access the bitfields. */ /* */ __union { unsigned int cpu$l_intflags; /* Interlocked CPU flags */ __struct { unsigned cpu$v_stopping : 1; /* CPU stopping flag */ unsigned cpu$v_fill_17_ : 7; } cpu$r_fill_9_; } cpu$r_fill_8_; /* */ /* System stack base and limit */ /* */ void *cpu$l_sys_stack_base; void *cpu$l_sys_stack_limit; /* */ /* Descriptor used to locate the variable portion of the per-CPU database. */ /* This approach allows the fixed portion of the database to more easily */ /* grow over time. 
The offset represents a byte offset from the start of */ /* the fixed portion of the per-CPU database to a variable portion containing */ /* CPU-specific data. The variable portion is located adjacent to the fixed */ /* portion of the database. */ /* */ unsigned int cpu$l_variable_offset; /* Offset to variable portion of database */ unsigned int cpu$l_variable_length; /* Length in bytes of variable portion */ /* */ /* Define cells for machine check recovery block. These two longwords */ /* are assumed to be adjacent. */ /* */ unsigned int cpu$l_mchk_mask; /* Function mask for current recovery block */ void *cpu$l_mchk_sp; /* Saved SP for return at end of block */ /* 0 (zero) if no current recovery block */ /* */ /* Define a cell to point to a machine check crashes save area. This pointer */ /* is used by SDA to display the machine check information after a crash. */ /* */ __union { #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_mchk_crash_area_va; /* VA of mcheck crash area */ #else unsigned __int64 cpu$pq_mchk_crash_area_va; #endif __struct { #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __short /* And set ptr size default to 32-bit pointers */ #endif void *cpu$pl_mchk_crash_area_va_l; unsigned int cpu$il_mchk_crash_area_va_h; } cpu$r_mchk_crash_area_va_fields; } cpu$r_mchk_crash_area_va_overlay; /* */ /* Define cells for processor_corrected_error_svapte and processor_mchk_abort */ /* _svapte. */ /* */ void *cpu$l_proc_corrected_error_svap; void *cpu$l_proc_mchk_abort_svapte; /* sva of spte allocated during initialization */ /* used to map the logout areas. 
*/ __union { #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_logout_area_va; /* VA of mcheck logout area */ #else unsigned __int64 cpu$pq_logout_area_va; #endif __struct { #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __short /* And set ptr size default to 32-bit pointers */ #endif void *cpu$pl_logout_area_va_l; unsigned int cpu$il_logout_area_va_h; } cpu$r_logout_area_va_fields; } cpu$r_logout_area_va_overlay; /* */ /* Soft tick dynamic timing offsets to determine when a 10ms "soft" tick */ /* occurs for each CPU. */ /* */ unsigned int cpu$l_soft_tick; int cpu$l_time_deviation; /* */ /* The following fields support PC sampling. They must be longword aligned. */ /* */ void *cpu$l_pcsample_buffer; __union { unsigned int cpu$l_pcsample_flags; __struct { unsigned cpu$v_pcsample_active : 1; /* Sample being collected. */ unsigned cpu$v_fill_18_ : 7; } cpu$r_fill_11_; } cpu$r_fill_10_; /* */ /* Performance monitoring cells to replace global roll-up cells in idle loop. */ /* These cells MUST remain on quadword boundaries since they are updated by */ /* system quadword builtins. Any changes above these offsets must take this */ /* into account. */ /* */ unsigned __int64 cpu$q_idle_loop_count; /* Count of idle code loops */ unsigned __int64 cpu$q_zeroed_page_count; /* Count of free pages zeroed */ /* */ /* Rank counter cells for keeping track of the number of acquisitions */ /* in effect for a given ranking. 
This is primarily for portlock support, */ /* but is integrated into all static ranks for simplicity */ /* */ int cpu$l_rank_array [32]; /* Counts of acquisitions by rank */ /* */ /* Inline fork block for port-affinitized I/O activity */ /* */ #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif char cpu$l_io_aff_fkb [48]; /* */ /* Flags field for Fast Path I/O - this field is clumped with the FKB above */ /* and the queue below to get the best cache block behavior */ /* */ #pragma __nomember_alignment __union { unsigned int cpu$l_io_aff_flags; /* Fast Path I/O bits */ __struct { unsigned cpu$v_io_aff_fkb_inuse : 1; /* CPUDB FKB in use */ unsigned cpu$v_port_assigned : 1; /* CPU has port affinity */ unsigned cpu$v_distributed_ints : 1; /* CPU has hw interrupt port(s) assigned. */ unsigned cpu$v_fill_19_ : 5; } cpu$r_fill_13_; } cpu$r_fill_12_; /* */ /* Absolute queue header for port-affinitized Fast Path I/O - must be */ /* quadword aligned */ /* */ char cpu$b_fill_20_ [4]; #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif void *cpu$ps_io_start_aff_qfl; /* UCB listhead */ #pragma __nomember_alignment void *cpu$ps_io_start_aff_qbl; /* */ /* The following space doubles as debugging space as well as providing */ /* 64-byte cache alignment for the following listhead. If the structure */ /* above changes this must reflected in this count. If this space gets */ /* filled in at some point, it is critical that the new cells not be */ /* highly accessed, otherwise we have potential hangs from overlapping */ /* memory lock interactions. 
*/ int cpu$l_fill_6 [12]; /* */ /* Absolute interlocked queue for fastpath hardware interrupt ports */ /* assigned to this cpu */ /* */ #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif void *cpu$ps_io_int_aff_qfl; /* Fastpath HW interrupt */ #pragma __nomember_alignment void *cpu$ps_io_int_aff_qbl; /* : ports UCB listhead */ /* */ /* Holder cell for CPU capabilities. This replaces the old CAPABILITY that */ /* existed further up the structure. The lower longword holds the system and */ /* user capabilities for this CPU. The upper longword is an affinity bitmask */ /* containing a single bit set in the CPUID position of this CPU. */ /* */ __union { unsigned __int64 cpu$q_capabilities; /* Caps and affinity */ unsigned int cpu$l_capabilities; /* Just system and user caps */ } cpu$r_capabilities_overlay; /* Cell to hold a counter for emulated instructions. This counter is incremented */ /* when an instruction that is not available on this CPU (e.g. LDBU, LDWU) is */ /* executed in system context and is emulated. 
*/ int cpu$l_emulate_count; __union { unsigned int cpu$l_untested_page_state; /* State for interrupted memory test */ __struct { unsigned short int cpu$w_untested_chunks; /* Count of 32-byte chunks remaining to be tested in current page */ unsigned cpu$v_fill_7 : 13; unsigned cpu$v_lastpage_tested : 1; /* Last untested page is being tested */ unsigned cpu$v_mcheck : 1; /* Mcheck occurred during memory test */ unsigned cpu$v_memory_write : 1; /* Memory test is in the write process */ } cpu$r_untested_bits; } cpu$r_untested_overlay; unsigned __int64 cpu$q_untested_pattern; struct _pte *cpu$l_untested_page_spte; /* SPTE address */ void *cpu$l_untested_page_va; /* VA for testing memory */ #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif __int64 cpu$q_sched_data [85]; /* scheduling data based on process */ /* priority level (5 quadwords for each */ /* priority level). See SCHED_DS structure */ /* below for more details. 
*/ /* */ __int64 cpu$q_scc_delta; /* Offset from primary SCC value */ #pragma __nomember_alignment __union { unsigned int cpu$l_transition_flags; /* Various CPU transition flags */ __struct { unsigned cpu$v_auto_start : 1; /* CPU is automatically made active */ unsigned cpu$v_nobindings : 1; /* Minimize features that prevent transition */ unsigned cpu$v_fill_21_ : 6; } cpu$r_fill_15_; } cpu$r_fill_14_; char cpu$b_fill_22_ [4]; #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_ctd_listhead; /* Offset to CPU transition block */ #else unsigned __int64 cpu$pq_ctd_listhead; #endif #pragma __nomember_alignment int cpu$l_failover_node; /* Node ID to fail this CPU over to */ char cpu$b_fill_23_ [4]; #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_gmp_listhead; /* Address of listhead for GMPs */ #else unsigned __int64 cpu$pq_gmp_listhead; #endif #pragma __nomember_alignment #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_extension_block; /* Pointer to extension of CPUDB */ #else unsigned __int64 cpu$pq_extension_block; #endif #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_lckcpu; /* pointer to 
per-CPU lckmgr counter structure */ #else unsigned __int64 cpu$pq_lckcpu; #endif #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __short /* And set ptr size default to 32-bit pointers */ #endif void *cpu$l_fp_asgn_ports_fl; /* queue links to fastpath ports */ #pragma __nomember_alignment void *cpu$l_fp_asgn_ports_bl; /* : */ int cpu$l_fp_num_ports; /* number of fastpath ports assigned to this CPU */ int cpu$l_fp_num_user_ports; /* number of user preferred fastpath ports assigned */ int cpu$l_fp_spare1; int cpu$l_fp_spare2; int cpu$l_fp_spare3; int cpu$l_fp_spare4; int cpu$l_rad; /* This cell initialized to the RAD number the CPU belongs to */ int cpu$l_rad_spare1; unsigned __int64 cpu$q_bc_virbnd; /* Virtual boundary recorded by BUGCHECK (if implemented) */ unsigned __int64 cpu$q_bc_sysptbr; /* System-space PTBR recorded by BUGCHECK (if implemented) */ unsigned __int64 cpu$q_bc_scc; /* System Cycle Counter recorded by BUGCHECK (all platforms) */ /* */ /* TIMEDWAIT cells to support mixed-speed CPUs in heterogeneous SMP configurations */ /* */ unsigned __int64 cpu$q_tmwt_scaler; /* Scaler value for SCC conversions */ unsigned __int64 cpu$q_tmwt_divisor; /* Divisor value for SCC conversions */ unsigned __int64 cpu$q_tmwt_shift; /* Divisor shift count for SCC conversions */ /* */ /* Fastpath hardware interrupt ports housekeeping */ /* */ #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif void *cpu$l_fp_asgn_hwint_ports_fl; /* queue links to hwint ports */ #pragma __nomember_alignment void *cpu$l_fp_asgn_hwint_ports_bl; /* : */ int cpu$l_num_hwint_ports; /* number HW int ports */ int 
cpu$l_num_usrprf_hwint_ports; /* number user-assigned fastpath HW int ports */ unsigned __int64 cpu$q_xfc_vab_pointer; /* Link to XFC per RAD structures */ /* Keep track of CPU load */ unsigned int cpu$l_load_factor; /* This is the fixed point fraction of time CPU is usable */ unsigned int cpu$l_bin_5sec; /* Which of 5 bins are we using */ unsigned int cpu$l_usable_ticks [5]; /* 5 bins counting the usable ticks */ unsigned int cpu$l_total_ticks; /* Total number of ticks during a second */ unsigned int cpu$l_counter_10ms; /* Count 10ms intervals to get one second */ int cpu$l_filler_1; /* Make quadwords even */ /* Per-CPU queues */ int cpu$aq_com_queues [128]; /* 64 queue heads for this CPU */ unsigned __int64 cpu$q_com_queue_summary; /* Bits to show which CPU queues are used */ /* Per-RAD database pointer */ #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$pq_rad_database; /* Pointer to the RAD database for RAD this CPU belongs to */ #else unsigned __int64 cpu$pq_rad_database; #endif /* Per-CPU timing cells to be filled in when CPU joins the active set */ int cpu$l_max_deviation; unsigned int cpu$l_minimum_ticks; int cpu$l_over_delta; int cpu$l_under_delta; /* */ /* System register stack base and limit */ /* */ /* */ /* Termination and slot stack bases */ /* */ #ifdef __INITIAL_POINTER_SIZE /* Defined whenever ptr size pragmas supported */ #pragma __required_pointer_size __long /* And set ptr size default to 64-bit pointers */ void *cpu$q_slot_stack_base; #else unsigned __int64 cpu$q_slot_stack_base; #endif /* */ /* VHPT virtual address and setup info */ /* */ /* */ /* More BUGCHECK cells: the contents of the CR.PTA register; data from SWIS */ /* (SWIS$L_GH_PS thru SWIS$Q_DTNVFLT without the fill); the region registers */ /* */ /* */ /* Virtual addresses of the physical buffers that hold the SAL-built error */ /* records for the four 
hardware interrupt types */ /* */ #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif __union { #pragma __nomember_alignment CBB cpu$r_cbb_cpuid_mask; /* Embedded CBB block */ __struct { /* Compatability offset cells */ __int64 cpu$q_cbb_fill_1 [6]; __union { unsigned int cpu$l_cpuid_mask; /* CPU ID in longword bitmask form */ unsigned __int64 cpu$q_cpuid_mask; /* CPU ID in quadword bitmask form */ } cpu$r_cbb_cpumask_data_overlay; } cpu$r_cbb_cpumask_compat_overlay; } cpu$r_cbb_cpumask_overlay; unsigned __int64 cpu$q_bc_orig_intstk; /* Original exception frame address */ /* */ /* Itanium power management data cells. */ /* */ /* */ /* CPU Thread data */ /* */ #if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus) /* If using pre DECC V4.0 or C++ */ #pragma __nomember_alignment __quadword #else #pragma __nomember_alignment #endif __union { #pragma __nomember_alignment CBB cpu$r_cbb_cothread_mask; /* Embedded CBB block */ __struct { /* Compatability offset cells */ __int64 cpu$q_cbbthd_fill_1 [6]; unsigned __int64 cpu$q_cothread_mask; /* CPU cothreads in quadword bitmask form */ } cpu$r_cbb_thdmask_compat_overlay; } cpu$r_cbb_thdmask_overlay; unsigned __int64 cpu$q_cothreadd_db_qfl; /* Quadword queue to CPUDB which is another thread */ unsigned __int64 cpu$q_cothreadd_db_qbl; /* (Back link) */ unsigned int cpu$l_max_cur_cothd_priority; /* The maximum priority of the cothreads on this core */ unsigned int cpu$l_num_cothreads; /* How many threads are in the same core with this CPU? 
*/
/*                                                                          */
/* More processor registers to be saved at system crash                     */
/*                                                                          */
/*                                                                          */
/* BUGcheck LOG buffer area for dump hints/info prior to bugcheck           */
/*                                                                          */
#if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus)    /* If using pre DECC V4.0 or C++ */
#pragma __nomember_alignment __quadword
#else
#pragma __nomember_alignment
#endif
    char cpu$r_buglog [256];            /* BUGCHECK log buffer for dump hints/info */
/*                                                                          */
/* New cells should be added before this comment                            */
/*                                                                          */
#pragma __nomember_alignment
    __int64 cpu$q_bc_expansion [8];     /* Make sure there's space at the end for new registers */
/*                                                                          */
/* End of fixed portion of the per-CPU database. A variable portion may be required */
/* by this CPU type.                                                        */
/*                                                                          */
/*                                                                          */
/* Beginning of quadword aligned, variable portion of the per-CPU database. */
/* Access to this is via the VARIABLE_OFFSET and VARIABLE_LENGTH data cells */
/* in the fixed portion of the database.                                    */
/*                                                                          */
} CPU;
/*                                                                          */
/* Accessor macros.  Many CPU cells are nested inside overlay unions and    */
/* fill structures (cpu$r_*_overlay / cpu$r_fill_n_) so that the same       */
/* storage can be viewed several ways.  The macros below let clients name   */
/* a cell directly (e.g. cpu$v_bugchk) instead of spelling out the full     */
/* member path.  Under VAX C, __struct/__union expand to variant_struct/    */
/* variant_union (see the top of this file), which make nested members      */
/* visible without qualification, so the macros are compiled out there.     */
/*                                                                          */
#if !defined(__VAXC)
#define cpu$l_curpcb cpu$r_curpcb_overlay.cpu$l_curpcb
#define cpu$l_curktb cpu$r_curpcb_overlay.cpu$l_curktb
/* Per-CPU work-request flag bits (cpu$l_work_req and its bitfield view)    */
#define cpu$l_work_req cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$l_work_req
#define cpu$v_inv_tbs cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_inv_tbs
#define cpu$v_inv_tba cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_inv_tba
#define cpu$v_bugchk cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_bugchk
#define cpu$v_bugchkack cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_bugchkack
#define cpu$v_recalschd cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_recalschd
#define cpu$v_updastsr cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_updastsr
#define cpu$v_update_hwclock cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_update_hwclock
#define cpu$v_work_fqp cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_work_fqp
#define cpu$v_qlost cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_qlost
#define cpu$v_resched cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_resched
#define cpu$v_virtcons cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_virtcons
#define cpu$v_iopost cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_iopost
#define cpu$v_inv_istream cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_inv_istream
#define cpu$v_inv_tbsd cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_inv_tbsd
#define cpu$v_inv_tbs_mmg cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_inv_tbs_mmg
#define cpu$v_inv_tbsd_mmg cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_inv_tbsd_mmg
#define cpu$v_io_int_aff cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_io_int_aff
#define cpu$v_io_start_aff cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_io_start_aff
#define cpu$v_update_sysptbr cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_update_sysptbr
#define cpu$v_perfmon cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_perfmon
#define cpu$v_read_scc cpu$r_work_req_overlay.cpu$r_fill_0_.cpu$r_fill_1_.cpu$v_read_scc
/* CPU-specific work-request bits (separate overlay within the same word)   */
#define cpu$v_cpuspec1 cpu$r_work_req_overlay.cpu$r_cpuspec_overlay.cpu$r_fill_2_.cpu$r_fill_3_.cpu$v_cpuspec1
#define cpu$v_cpuspec2 cpu$r_work_req_overlay.cpu$r_cpuspec_overlay.cpu$r_fill_2_.cpu$r_fill_3_.cpu$v_cpuspec2
#define cpu$v_cpuspec3 cpu$r_work_req_overlay.cpu$r_cpuspec_overlay.cpu$r_fill_2_.cpu$r_fill_3_.cpu$v_cpuspec3
#define cpu$v_cpuspec4 cpu$r_work_req_overlay.cpu$r_cpuspec_overlay.cpu$r_fill_2_.cpu$r_fill_3_.cpu$v_cpuspec4
#define cpu$q_work_fqfl cpu$r_ifq_overlay.cpu$q_work_fqfl
#define cpu$q_work_ifq cpu$r_ifq_overlay.cpu$q_work_ifq
/* System-context HWPCB, kernel stack pointer, AST and FEN/DATFX state      */
#define cpu$q_sys_hwpcb cpu$r_hwpcb_overlay.cpu$q_sys_hwpcb
#define cpu$q_sys_ksp cpu$r_hwpcb_overlay.cpu$q_sys_ksp
#define cpu$q_sys_astsr_asten cpu$r_ast_overlay.cpu$q_sys_astsr_asten
#define cpu$v_sys_asten cpu$r_ast_overlay.cpu$r_ast_bits0.cpu$v_sys_asten
#define cpu$v_sys_astsr cpu$r_ast_overlay.cpu$r_ast_bits0.cpu$v_sys_astsr
#define cpu$q_fen_datfx cpu$r_fen_overlay.cpu$q_fen_datfx
#define cpu$v_fen cpu$r_fen_overlay.cpu$r_fen_datfx_overlay.cpu$v_fen
#define cpu$v_pme cpu$r_fen_overlay.cpu$r_fen_datfx_overlay.cpu$v_pme
#define cpu$v_datfx cpu$r_fen_overlay.cpu$r_fen_datfx_overlay.cpu$v_datfx
/* Termination-context equivalents of the cells above                       */
#define cpu$q_term_hwpcb cpu$r_term_hwpcb_overlay.cpu$q_term_hwpcb
#define cpu$q_term_ksp cpu$r_term_hwpcb_overlay.cpu$q_term_ksp
#define cpu$q_term_astsr_asten cpu$r_term_ast_overlay.cpu$q_term_astsr_asten
#define cpu$v_term_asten cpu$r_term_ast_overlay.cpu$r_term_ast_bits0.cpu$v_term_asten
#define cpu$v_term_astsr cpu$r_term_ast_overlay.cpu$r_term_ast_bits0.cpu$v_term_astsr
#define cpu$q_term_fen_datfx cpu$r_term_fen_overlay.cpu$q_term_fen_datfx
/* NOTE(review): the three cpu$v_term_* paths below repeat                  */
/* cpu$r_term_fen_overlay at both levels, whereas the cpu$v_fen family      */
/* uses a distinct inner name (cpu$r_fen_datfx_overlay).  Presumably the    */
/* inner member really is declared with the same name in the (not visible   */
/* here) CPU structure -- confirm against the struct declaration / SDL      */
/* source before relying on these macros.                                   */
#define cpu$v_term_fen cpu$r_term_fen_overlay.cpu$r_term_fen_overlay.cpu$v_term_fen
#define cpu$v_term_pme cpu$r_term_fen_overlay.cpu$r_term_fen_overlay.cpu$v_term_pme
#define cpu$v_term_datfx cpu$r_term_fen_overlay.cpu$r_term_fen_overlay.cpu$v_term_datfx
#define cpu$q_kernel cpu$r_fill_4_.cpu$q_kernel
#define cpu$q_system_context cpu$r_fill_4_.cpu$r_fill_5_.cpu$q_system_context
#define cpu$q_mpsynch cpu$r_fill_4_.cpu$r_fill_5_.cpu$q_mpsynch
/* General per-CPU state flags and their individual bits                    */
#define cpu$l_flags cpu$r_fill_6_.cpu$l_flags
#define cpu$v_sched cpu$r_fill_6_.cpu$r_fill_7_.cpu$v_sched
#define cpu$v_forever cpu$r_fill_6_.cpu$r_fill_7_.cpu$v_forever
#define cpu$v_newprim cpu$r_fill_6_.cpu$r_fill_7_.cpu$v_newprim
#define cpu$v_pswitch cpu$r_fill_6_.cpu$r_fill_7_.cpu$v_pswitch
#define cpu$v_bc_stack cpu$r_fill_6_.cpu$r_fill_7_.cpu$v_bc_stack
#define cpu$v_bc_context cpu$r_fill_6_.cpu$r_fill_7_.cpu$v_bc_context
#define cpu$v_user_capabilities_set cpu$r_fill_6_.cpu$r_fill_7_.cpu$v_user_capabilities_set
#define cpu$v_reset_low_power cpu$r_fill_6_.cpu$r_fill_7_.cpu$v_reset_low_power
#define cpu$l_intflags cpu$r_fill_8_.cpu$l_intflags
#define cpu$v_stopping cpu$r_fill_8_.cpu$r_fill_9_.cpu$v_stopping
/* 64-bit VAs of machine-check crash and logout areas, plus split           */
/* low-longword / high-longword views of each                               */
#define cpu$pq_mchk_crash_area_va cpu$r_mchk_crash_area_va_overlay.cpu$pq_mchk_crash_area_va
#define cpu$pl_mchk_crash_area_va_l cpu$r_mchk_crash_area_va_overlay.cpu$r_mchk_crash_area_va_fields.cpu$pl_mchk_crash_area_va_l
#define cpu$il_mchk_crash_area_va_h cpu$r_mchk_crash_area_va_overlay.cpu$r_mchk_crash_area_va_fields.cpu$il_mchk_crash_area_va_h
#define cpu$pq_logout_area_va cpu$r_logout_area_va_overlay.cpu$pq_logout_area_va
#define cpu$pl_logout_area_va_l cpu$r_logout_area_va_overlay.cpu$r_logout_area_va_fields.cpu$pl_logout_area_va_l
#define cpu$il_logout_area_va_h cpu$r_logout_area_va_overlay.cpu$r_logout_area_va_fields.cpu$il_logout_area_va_h
#define cpu$l_pcsample_flags cpu$r_fill_10_.cpu$l_pcsample_flags
#define cpu$v_pcsample_active cpu$r_fill_10_.cpu$r_fill_11_.cpu$v_pcsample_active
/* Fastpath I/O affinity flags                                              */
#define cpu$l_io_aff_flags cpu$r_fill_12_.cpu$l_io_aff_flags
#define cpu$v_io_aff_fkb_inuse cpu$r_fill_12_.cpu$r_fill_13_.cpu$v_io_aff_fkb_inuse
#define cpu$v_port_assigned cpu$r_fill_12_.cpu$r_fill_13_.cpu$v_port_assigned
#define cpu$v_distributed_ints cpu$r_fill_12_.cpu$r_fill_13_.cpu$v_distributed_ints
#define cpu$q_capabilities cpu$r_capabilities_overlay.cpu$q_capabilities
#define cpu$l_capabilities cpu$r_capabilities_overlay.cpu$l_capabilities
/* Interrupted-memory-test state (see cpu$r_untested_overlay in the struct) */
#define cpu$l_untested_page_state cpu$r_untested_overlay.cpu$l_untested_page_state
#define cpu$w_untested_chunks cpu$r_untested_overlay.cpu$r_untested_bits.cpu$w_untested_chunks
#define cpu$v_lastpage_tested cpu$r_untested_overlay.cpu$r_untested_bits.cpu$v_lastpage_tested
#define cpu$v_mcheck cpu$r_untested_overlay.cpu$r_untested_bits.cpu$v_mcheck
#define cpu$v_memory_write cpu$r_untested_overlay.cpu$r_untested_bits.cpu$v_memory_write
/* CPU transition flags                                                     */
#define cpu$l_transition_flags cpu$r_fill_14_.cpu$l_transition_flags
#define cpu$v_auto_start cpu$r_fill_14_.cpu$r_fill_15_.cpu$v_auto_start
#define cpu$v_nobindings cpu$r_fill_14_.cpu$r_fill_15_.cpu$v_nobindings
/* CPU-ID and cothread bitmask cells embedded in CBB blocks                 */
#define cpu$r_cbb_cpuid_mask cpu$r_cbb_cpumask_overlay.cpu$r_cbb_cpuid_mask
#define cpu$l_cpuid_mask cpu$r_cbb_cpumask_overlay.cpu$r_cbb_cpumask_compat_overlay.cpu$r_cbb_cpumask_data_overlay.cpu$l_cpuid_mask
#define cpu$q_cpuid_mask cpu$r_cbb_cpumask_overlay.cpu$r_cbb_cpumask_compat_overlay.cpu$r_cbb_cpumask_data_overlay.cpu$q_cpuid_mask
#define cpu$r_cbb_cothread_mask cpu$r_cbb_thdmask_overlay.cpu$r_cbb_cothread_mask
#define cpu$q_cothread_mask cpu$r_cbb_thdmask_overlay.cpu$r_cbb_thdmask_compat_overlay.cpu$q_cothread_mask
#endif  /* #if !defined(__VAXC) */
#define CPU$K_LENGTH 3624               /* Total fixed structure size */
#define CPU$C_LENGTH 3624               /* Total fixed structure size */
#define CPU$M_AGE_DATA 0x1              /* Mask for cpu$v_age_data (bit 0 of cpu$q_sched_flags) */
#if !defined(__NOBASEALIGN_SUPPORT) && !defined(__cplusplus)    /* If using pre DECC V4.0 or C++ */
#pragma __nomember_alignment __quadword
#else
#pragma __nomember_alignment
#endif
/*                                                                          */
/* Per-priority scheduling statistics entry.  The CPU structure's           */
/* cpu$q_sched_data array holds one of these (5 quadwords) per process      */
/* priority level; its 40-byte size is asserted by CPU$K_SCHED_LENGTH       */
/* below.                                                                   */
/*                                                                          */
typedef struct _sched_ds {
#pragma __nomember_alignment
    unsigned __int64 cpu$q_acc_run;         /* accumulated runtime */
    unsigned __int64 cpu$q_proc_count;      /* # of process run at this priority level */
    unsigned __int64 cpu$q_acc_interrupt;   /* accumulated interrupt time */
    unsigned __int64 cpu$q_acc_waitime;     /* accumulated wait time */
    __union  {
        unsigned __int64 cpu$q_sched_flags; /* Scheduling flags */
        __struct  {
            unsigned cpu$v_age_data : 1;    /* Indicates data needs to be aged */
            unsigned cpu$v_fill_26_ : 7;
        } cpu$r_fill_25_;
    } cpu$r_fill_24_;
} SCHED_DS;
/* Direct-name accessors for the SCHED_DS flag cells (see note on the CPU   */
/* accessor macros above; only needed when not compiling with VAX C)        */
#if !defined(__VAXC)
#define cpu$q_sched_flags cpu$r_fill_24_.cpu$q_sched_flags
#define cpu$v_age_data cpu$r_fill_24_.cpu$r_fill_25_.cpu$v_age_data
#endif  /* #if !defined(__VAXC) */
#define CPU$K_SCHED_LENGTH 40           /* byte length of each per-priority entry */
                                        /* in the SCHED_DATA data structure */
/* Restore the member alignment and pointer-size state saved at the top of  */
/* this header, and close the extern "C" / include-guard scopes.            */
#pragma __member_alignment __restore
#ifdef __INITIAL_POINTER_SIZE           /* Defined whenever ptr size pragmas supported */
#pragma __required_pointer_size __restore   /* Restore the previously-defined required ptr size */
#endif
#ifdef __cplusplus
}
#endif
#pragma __standard
#endif  /* __CPUDEF_LOADED */