#ifndef __PFN_MACROS_LOADED #define __PFN_MACROS_LOADED 1 /************************************************************************* * * * © Copyright 2004 Hewlett-Packard Development Company, L.P. * * * * Confidential computer software. Valid license from HP and/or * * its subsidiaries required for possession, use, or copying. * * * * Consistent with FAR 12.211 and 12.212, Commercial Computer Software, * * Computer Software Documentation, and Technical Data for Commercial * * Items are licensed to the U.S. Government under vendor's standard * * commercial license. * * * * Neither HP nor any of its subsidiaries shall be liable for technical * * or editorial errors or omissions contained herein. The information * * in this document is provided "as is" without warranty of any kind and * * is subject to change without notice. The warranties for HP products * * are set forth in the express limited warranty statements accompanying * * such products. Nothing herein should be construed as constituting an * * additional warranty. * * * ************************************************************************* */ /* * Version X-12 *++ * FACILITY: * * VMS Executive (LIB_H) * * ABSTRACT: * * This header file provides the PFN macros & routines for C. * * AUTHOR: * * Nitin Karkhanis * * CREATION DATE: 17-Oct-1994 * * MODIFICATION HISTORY: * * X-14 KLN3502 Karen L. Noel 15-Apr-2004 * Fix access_backpointer macro for slot pages that are mapped. * * X-13 KLN3448 Karen L. Noel 5-Mar-2004 * For 50-bit PA project: * o Use PFN$I fields. * o Use mmg$gq_window_pte instead of mmg$gq_window_pte_pfn. * o Use PFN_T typedef. * o Reference PMM64 structure. * * X-12 KLN3410 Karen L. Noel 12-Jan-2004 * Define mmg window cells non-const for the C module that * initializes these cells. These definitions are under the * MMG_WINDOW_INIT symbol. * * X-11 KLN3368 Karen L. Noel 8-Oct-2003 * Avoid informational on decshr(). * * X-9A12 KLN3052 Karen L. 
Noel 5-April-2002 * Only define backpointer functions to be called from * modules that compile with the /pointer_size qualifier. * * X-9A11 KLN3025 Karen L. Noel 26-Feb-2002 * o Change references to mmg$gq_pt_base, mmg$gq_l1_base, * mmg$gq_l2_base and mmg$gl_l1_index for IPF. * o Include VRNX field in PTE_INDEX field of PFN database * entry. * o Remove inline pragmas. We trust the compiler now. * * X-9A10 Andy Kuehnel 25-Jul-2000 * Fix pfn_to_entry macro for high memory. * * X-18 KLN2082 Karen L. Noel 04-Jun-1998 * Surround this file with short pointer pragmas in case someone * wants to compile with long pointers from the command line. * * X-17 Andy Kuehnel 7-Jan-1998 * PMM is only defined with __NEW_STARLET. Compile VALID_PFN * only if __NEW_STARLET is defined. * * X-16 Andy Kuehnel 30-Dec-1997 * Use SYI PFN memory map in VALID_PFN macro. * * X-15 Andy Kuehnel 6-Jan-1996 * Add return statements after bug_check in order to avoid * CC-W-FALLOFFEND messages. * * X-14 NYK574 Nitin Y. Karkhanis 8-Mar-1996 * Add VALID_PFN. * * X-13 KLN1530 Karen L. Noel 13-Oct-1995 * 1. Fix typo, should include mmg_routines.h, not mmg_functions.h * 2. Also should include pfndef.h, ptedef.h and far_pointers.h * * X-12 NYK495 Nitin Y. Karkhanis 4-Oct-1995 * Add ALLOCPFN and ALLOC_ZERO_PFN. * * X-11 NYK475 Nitin Y. Karkhanis 15-Sep-1995 * MMG$GQ_WINDOW_PTE_PFN declared has an uint64 instead * of as an INT_PQ. * * X-10 NYK467 Nitin Y. Karkhanis 1-Sep-1995 * Make sure short pointers to struct _PTE are passed to * svapte_to_va_pte conversion routine. * * X-9 NYK367 Nitin Y. Karkhanis 13-Apr-1995 * Replace instances of PFN$PL_DATABASE with PFN$PQ_DATABASE. * * X-8 NYK343 Nitin Y. Karkhanis 6-Apr-1995 * Use long PFN database entry pointers (i.e. PFN_PQ and PFN_PPQ * instead of PFN * and PFN **). * * X-7 NYK333 Nitin Y. Karkhanis 31-Mar-1995 * Compute backpointers for RESERVED and UNKNOWN page types * as long as PT_PFN for corresponding PFN db entry is * non-zero. * * X-6 NYK327 Nitin Y. 
 *		Karkhanis 28-Mar-1995
 *	Update macros to reflect their Macro-32 counterparts.
 *
 * X-5	NYK254		Nitin Y. Karkhanis	 1-Feb-1995
 *	Modify extern declarations to match VMS_MACROS.H.
 *
 * X-4	NYK173		Nitin Y. Karkhanis	19-Dec-1994
 *	Cast return value from pfn_to_entry to (PFN *).
 *
 * X-3	NYK101		Nitin Y. Karkhanis	10-Nov-1994
 *	Mmg$gq_system_virtual_base was improperly declared.
 *	Cast arguments to pte_va to VOID_PQ since pte_va has
 *	temporarily (until C compiler properly handles 64-bit
 *	expressions) become a routine.
 *
 * X-2	NYK095		Nitin Y. Karkhanis	 2-Nov-1994
 *	Update backpointer macros.
 *	Rename is_encumbered to test_backpointer.
 *	Add __PFN_MACROS_LOADED symbol to determine whether
 *	this header file was already included.
 *
 *--
 */

#ifdef __INITIAL_POINTER_SIZE           /* Defined whenever ptr size pragmas supported */
#pragma __required_pointer_size __save  /* Save the previously-defined required ptr size */
#pragma __required_pointer_size __short /* And set ptr size default to 32-bit pointers */
#endif

/*** MODULE PFN_MACROS ***/

/* Include any header files we need to make these macros work. */

/* NOTE(review): the angle-bracketed header names of the ten #include
   directives below have been lost from this copy of the file (apparently
   stripped by an HTML-like filter that ate "<...>" spans).  Per the X-13
   change-history entry above, at least mmg_routines.h, pfndef.h, ptedef.h
   and far_pointers.h belong here -- the full list must be recovered from
   source control before this header can compile.  The directives are left
   as found so the damage stays visible. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/* The following code is only available to modules that compile with the */
/* pointer_size qualifier. Short, long, 32, or 64 are all okay. */

#ifdef __INITIAL_POINTER_SIZE
#if __INITIAL_POINTER_SIZE

/*
   This routine returns the virtual address of a PTE (VA_PTE) that maps the
   PFN described by the entry passed in.  The VA_PTE is only calculated if
   the PFN is encumbered (i.e. PFN$I_PT_PFN field of the entry is non-zero).

   Assumptions:

	MMG is held.

   Arguments:

	entry:	the PFN database entry for which a VA_PTE should be
		calculated.

	va_pte:	a quadword that is used to return the VA_PTE (page table
		space PTE) that maps the PFN described by entry.

	map:	a flag to indicate that the returned VA_PTE should be
		calculated using the address in MMG$GQ_WINDOW_VA.
*/
#pragma inline (access_backpointer)
static void access_backpointer (PFN_PQ entry, PTE_PPQ va_pte, int map)
{
    /* MMG window cells are written by the init module (MMG_WINDOW_INIT
       defined there), read-only everywhere else -- see X-12 history entry. */
    extern PTE_PQ const mmg$gq_gpt_base, mmg$gq_s0s1base_pte_address;
    extern PTE_PQ const mmg$gq_pt_base[VA$C_VRNX_COUNT];
#if defined(MMG_WINDOW_INIT)
    extern VOID_PQ mmg$gq_window_va;
    extern PTE_PQ mmg$gq_window_pte;
#else
    extern VOID_PQ const mmg$gq_window_va;
    extern PTE_PQ const mmg$gq_window_pte;
#endif
    extern const uint64 mmg$gq_bwp_mask;
    uint64 pte_index_mask = 0xffffffffffffffff;  /* becomes ~0 << index width */
    PTE_PQ pt_base;
    int vrnx;

    /* pte_index_mask now has 1s above the PTE-index field; ~pte_index_mask
       extracts the low PFN$S_INDEX_WIDTH bits of pfn$q_pte_index. */
    pte_index_mask <<= PFN$S_INDEX_WIDTH;

    /* The PTE backpointer can only be calculated if the PFN is encumbered
       (PFN$I_PT_PFN field is not zero). */
    if (entry->pfn$i_pt_pfn == 0)
    {
        *va_pte = (PTE_PQ) 0;
        return;
    }

    /* The VRNX recorded in the entry selects which per-region page table
       base to index. */
    vrnx = entry->pfn$v_vrnx;

    switch (entry->pfn$v_pagtyp)
    {
    /* PTEs for process pages and process page table pages are determined
       based upon whether the owning process is current.  The map flag
       guides the decision on whether to compute a PTE address using the
       window page. */
    case PFN$C_PROCESS :
    case PFN$C_PPGTBL :

        /* Treat slot pages as system space pages. */
        if (entry->pfn$v_slot == 1)
        {
            pt_base = mmg$gq_pt_base[vrnx];
            *va_pte = &(pt_base [(entry->pfn$q_pte_index & ~pte_index_mask)]);
            if (map == 1)
            {
                /* Only VA_PTEs at or above the S0/S1 base have an SPT
                   window equivalent. */
                if (*va_pte >= mmg$gq_s0s1base_pte_address)
                    *va_pte = va_pte_to_svapte ((PTE *) *va_pte);
            }
            return;
        }

        /* Return the PT_space VA_PTE if mapping is not requested. */
        pt_base = mmg$gq_pt_base[vrnx];
        if (map == 0)
            *va_pte = &(pt_base [(entry->pfn$q_pte_index & ~pte_index_mask)]);
        else
        {
            /* To use the window address simply write the PFN of the page
               table containing the PTE of interest to the PFN field of the
               VA_PTE of MMG$GQ_WINDOW_VA. */
            mmg$gq_window_pte->pte$v_pfn = entry->pfn$i_pt_pfn;

            /* The window translation just changed: invalidate this CPU's
               TB entry for the window VA before the address is used. */
            tbi_data_64 (mmg$gq_window_va, THIS_CPU_ONLY, NO_PCB);

            /* Window VA + byte offset of the PTE within its page table
               page (index scaled to bytes, masked to byte-within-page). */
            *va_pte = (PTE_PQ) ((uint64) mmg$gq_window_va +
                      (( (entry->pfn$q_pte_index & ~pte_index_mask ) <<
                      PTE$C_SHIFT_SIZE) & mmg$gq_bwp_mask));
        }
        break;

    /* PTEs for system pages and global page table pages can always be
       found in page table space since they are valid for every process.
       The map flag of the macro guides the decision on whether the
       corresponding SPT window address should be calculated (for S0/S1
       space VAs only).  GPT pages are simply system space pages.  Mapping
       a global page table page through the window makes little sense since
       it's valid for every process and always available in page table
       space. */
    case PFN$C_GPGTBL :
    case PFN$C_SYSTEM :

        /* System-space pages must carry the system VRNX. */
        if (vrnx != VA$C_VRNX_SYSTEM)
            bug_check (INCONMMGST, FATAL, COLD);

        pt_base = mmg$gq_pt_base[vrnx];
        *va_pte = &(pt_base [(entry->pfn$q_pte_index & ~pte_index_mask)]);

        /* If mapping was requested return the corresponding SVAPTE if the
           VA_PTE maps FFFFFFFF.80000000 or higher. */
        if (map == 1)
        {
            if (*va_pte >= mmg$gq_s0s1base_pte_address)
                *va_pte = va_pte_to_svapte (*va_pte);
        }
        break;

    /* PTEs for global pages and global writable pages are valid for every
       process since they reside in system space.  The map flag is
       meaningless in this case and is therefore ignored. */
    case PFN$C_GLOBAL :
    case PFN$C_GBLWRT :
        *va_pte = &(mmg$gq_gpt_base [(entry->pfn$q_pte_index & ~pte_index_mask)]);
        break;

    /* Compute VA_PTES for these types of pages as long as the a
       backpointer exists.  NOTE(review): for any page type not listed in
       this switch, *va_pte is left unwritten -- presumably no other types
       reach here; confirm against callers. */
    case PFN$C_RESERVED:
    case PFN$C_UNKNOWN:
        pt_base = mmg$gq_pt_base[vrnx];
        if (entry->pfn$i_pt_pfn != 0)
            *va_pte = &(pt_base [(entry->pfn$q_pte_index & ~pte_index_mask)]);
    }
}

#endif /* if __INITIAL_POINTER_SIZE */
#endif /* ifdef __INITIAL_POINTER_SIZE */

/*
   This routine, PFN_TO_ENTRY, is equivalent to its MACRO counterpart.  It
   takes a PFN argument and converts it into an entry address.
*/
static PFN_PQ pfn_to_entry (PFN_T pfn)
{
    /* Base of the PFN database: one entry per physical page frame. */
    extern PFN_PQ const pfn$pq_database;

    /* The entry address is simply the database base plus the frame
       number, scaled by the entry size via pointer arithmetic. */
    return pfn$pq_database + pfn;
}

/*
   This macro allocates a PFN from the free list.  If either the VA or VPN
   argument is specified, a PFN of the corresponding color will be
   allocated.  To allocate a PFN without specifying a page color, both the
   VA and VPN arguments should be zero.  This implies that a PFN cannot be
   allocated to map VA or VPN 0.  Any other scheme that enables a PFN color
   of zero to be specified complicates the interface and was viewed as
   unnecessary; especially since VPN 0 is typically a no access page.

   Note that non-zero values for the both VA and VPN arguments will result
   in an INCONMMGST bugcheck.  This approach was taken since it's not
   possible to check the arguments at compile-time and generate an error.

   Assumptions:

	MMG is held.

   Arguments:

	Entry:	The address of the PFN database entry that corresponds to
		the PFN allocated.

	VA:	Virtual address which will map the allocated PFN.  When
		this argument is specified, a PFN from the corresponding
		color list will be allocated.  The VA and VPN arguments are
		mutually exclusive.  Therefore, if VA if non-zero, the VPN
		argument MUST be zero.

	VPN:	Virtual page number which will map the allocated PFN.  When
		this argument is specified, A PFN from the corresponding
		color list will be allocated.  The VA and VPN arguments are
		mutually exclusive.  Therefore, if VPN is non-zero, the VA
		argument must be zero.

   Returns:

	The PFN returned from the call to MMG$ALLOCATE_PFN.
*/ static PFN_T allocpfn (PFN_PPQ entry, VOID_PQ va, uint64 vpn) { PFN_T pfn; uint64 byte_align; extern const uint64 mmg$gq_bwp_width; // Determine requested byte alignment (0 = no particular alignment) if (va && vpn) bug_check (INCONMMGST, FATAL, COLD); if (!va && !vpn) byte_align = 0; if (vpn) byte_align = vpn<pfn$w_refcnt--) < 0) \ bug_check (REFCNTNEG, FATAL, COLD); \ } /* This macro decrements the share count of the specified PFN database entry and initiates a SHRCNTNET bugcheck if the count becomes negative. Arguments: entry: the PFN database entry whose reference count is to be decremented. */ #define decshr(entry) \ { \ if ((int)((entry)->pfn$l_shrcnt--) < 0) \ bug_check (SHRCNTNEG, FATAL, COLD); \ } /* The following code is only available to modules that compile with the */ /* pointer_size qualifier. Short, long, 32, or 64 are all okay. */ #ifdef __INITIAL_POINTER_SIZE #if __INITIAL_POINTER_SIZE /* This routine establishes or clears the backpointer for the PFN database entry specified. To establish a backpointer means to compute a PTE index off of either MMG$GQ_PT_BASE or MMG$GQ_GPT_BASE, and determine the PFN of the page table that maps the PTE to which this PFN database entry corresponds. To clear a backpointer means to disassociate a VA_PTE with a PFN database entry. IF the VA_PTE argument is zero, the page table PFN field of the specified entry. Arguments: entry: the PFN database entry whose backpointer should be established or cleared. va_pte: the virtual address of a PTE to which this PFN is assigned. If the argument is zero, the page table PFN field of the entry specified will be cleared. svapte: a flag denoting that the address passed in the va_pte argument lies within the SPT window. gpt: a flag denoting that the address passed in the va_pte argument is an address within the global page table. If this flag is set, the macro uses MMG$GQ_GPT_BASE as the base of the page tables for the PTE index calculation. 
	Note that the gpt and svapte arguments can never both be true.

	pt_pfn:	PFN of a page table page containing the PTE that maps this
		page.  This may be supplied if one wishes to override the
		PT_PFN value that otherwise would be calculated
		automatically using the current mapping.  This parameter
		only has meaning when the gpt and svapte are both false.
		Its contents will be ignored if either flag is true.
*/
static void establish_backpointer (PFN_PQ entry, PTE_PQ va_pte, int svapte,
                                   int gpt, PFN_T pt_pfn)
{
    extern PTE_PQ mmg$gq_max_gpte;  /* Not declared as const since GPT can be expanded. */
    extern PTE_PQ const mmg$gq_gpt_base,
                        mmg$gq_s0s1base_pte_address,
                        mmg$gq_sptbase;
    extern PTE_PQ const mmg$gq_pt_base[VA$C_VRNX_COUNT];
    extern const uint64 mmg$gq_ptes_per_page,
                        mmg$gq_level_width,
                        mmg$gq_non_pt_mask;
    extern VOID_PQ const mmg$gq_system_virtual_base;
    PTE_PQ temp;
    uint64 index, pte_index_mask = 0xFFFFFFFFFFFFFFFF;
    int vrnx;
    uint64 ptes_per_page, size_of_pt_space;
    PTE_PQ pt_base, pt_space_hi;
    /* VA overlay used only to pick the VRNX bits out of va_pte; the
       __NEW_STARLET variant holds the address as one quadword, the old
       variant as two longwords. */
#ifdef __NEW_STARLET
    VA v;
#else
    va v;
#endif

    /* pte_index_mask gets 1s in the bits ABOVE the PTE-index field; ANDing
       pfn$q_pte_index with it preserves those upper (non-index) bits when
       the index is rewritten below. */
    pte_index_mask <<= PFN$S_INDEX_WIDTH;

    /* The VA_PTE supplied cannot be both a SVAPTE and GPTE. */
    if (svapte == 1 && gpt == 1)
        bug_check (INCONMMGST, FATAL, COLD);

    /* Clear the backpointer if va_pte is zero. */
    if (va_pte == (PTE_PQ) 0)
    {
        entry->pfn$i_pt_pfn = 0;
        return;
    }

    /* We have a va_pte.  Use it to extract the VRNX. */
# ifdef __NEW_STARLET
    v.va$q_quad = (uint64)va_pte;
# else
    v.va$q_quad[0] = (unsigned int)va_pte;
    v.va$q_quad[1] = (unsigned int)((uint64)va_pte>>32);
# endif
    vrnx = v.va$v_vrnx;
    entry->pfn$v_vrnx = vrnx;

    /* Check if a GPTE was supplied. */
    if (gpt == 1)
    {
        /* Verify that a GPTE address was actually supplied. */
        if (va_pte < mmg$gq_gpt_base || va_pte >= mmg$gq_max_gpte)
            bug_check (INCONMMGST, FATAL, COLD);

        /* Establish the backpointer, while preserving the reference
           count (held in the bits above the index field). */
        index = ((uint64) va_pte - (uint64) mmg$gq_gpt_base) >> PTE$C_SHIFT_SIZE;
        entry->pfn$q_pte_index = (entry->pfn$q_pte_index & pte_index_mask) | index;

        /* The PT_PFN is the PFN recorded in the PTE that maps va_pte. */
        temp = pte_va ((VOID_PQ) va_pte);
        entry->pfn$i_pt_pfn = temp->pte$v_pfn;
        return;
    }

    /* If VA_PTE contains a SVAPTE, compute the corresponding VA_PTE
       address and establish the backpointer. */
    if (svapte == 1)
    {
        PTE_PQ converted_va_pte;

        /* Verify that a SVAPTE was actually supplied. */
        if (va_pte < mmg$gq_sptbase)
            bug_check (INCONMMGST, FATAL, COLD);

        converted_va_pte = svapte_to_va_pte ((PTE *) va_pte);

        /* Establish the backpointer,while preserving the reference count. */
        index = ((uint64) converted_va_pte - (uint64) mmg$gq_pt_base[vrnx]) >> PTE$C_SHIFT_SIZE;
        entry->pfn$q_pte_index = (entry->pfn$q_pte_index & pte_index_mask) | index;
        temp = pte_va ((VOID_PQ) converted_va_pte);
        entry->pfn$i_pt_pfn = temp->pte$v_pfn;
        return;
    }

    /* If execution has progressed this far, the following is true:

	o gpt flag is false.
	o svapte flag is false
	o a va_pte was passed in (needs to be verified).
	o the contents of pt_pfn must be recorded in the PFN database entry
	  if it contains a non-zero value.
    */

    /* Compute the size of PT space given 3 levels of page tables */
    ptes_per_page = mmg$gq_ptes_per_page;
    size_of_pt_space = ptes_per_page*ptes_per_page*ptes_per_page*PTE$C_BYTES_PER_PTE;
    pt_base = mmg$gq_pt_base[vrnx];
    pt_space_hi = (PTE_PQ)((uint64)pt_base + (uint64)size_of_pt_space);

    /* va_pte must lie within this region's page table space. */
    if (va_pte < pt_base || va_pte >= pt_space_hi)
        bug_check (INCONMMGST, FATAL, COLD);

    /* Establish the backpointer.  At this point the gpt and svapte flags
       must be false so consider the value of pt_pfn when updating the PFN
       database entry. */
    index = ((uint64) va_pte - (uint64) pt_base) >> PTE$C_SHIFT_SIZE;
    entry->pfn$q_pte_index = (entry->pfn$q_pte_index & pte_index_mask) | index;
    temp = pte_va (va_pte);
    if (pt_pfn == 0)
        entry->pfn$i_pt_pfn = temp->pte$v_pfn;  /* derive from current mapping */
    else
        entry->pfn$i_pt_pfn = pt_pfn;           /* caller-supplied override */
}

#endif /* if __INITIAL_POINTER_SIZE */
#endif /* ifdef __INITIAL_POINTER_SIZE */

/*
   This macro increments the reference count of the specified PFN database
   entry.

   Arguments:

	entry:	PFN database entry whose reference count is to be
		incremented.
*/
#define incref(entry) ((entry)->pfn$w_refcnt++)

/*
   This macro determines if a PFN is encumbered by comparing the page table
   PFN field to zero.  A zero in this field indicates that the PFN is
   available for allocation.

   Arguments:

	entry:	PFN database entry to be checked for encumbrance.
*/
#define test_backpointer(entry) ((entry)->pfn$i_pt_pfn != 0)

#ifdef __NEW_STARLET

/*
   This routine determines if a PFN is valid by checking if it is either
   larger than MAX_PFN, or within a hole by checking the PFN memory map.

   Arguments:

	pfn:	PFN to check.
*/
static int valid_pfn (PFN_T pfn)
{
    extern const uint64 mmg$gq_maxpfn;
    extern int mmg$gl_pfn_memory_map [2];
    extern uint64 *mmg$gl_syi_pfn_memory_map_64;
    int i;
    PMM64 *memory_map;

    /* If the supplied PFN exceeds the highest numbered PFN on the system,
       it's invalid. */
    if (pfn > mmg$gq_maxpfn)
        return 0;

    /* If the first longword in the memory map is zero, the system contains
       contiguous physical memory; therefore there's nothing more to
       check. */
    if (mmg$gl_pfn_memory_map [0] == 0)
        return 1;

    /* Fetch the pool packet containing the memory map.  For systems
       containing discontiguous physical memory, the high longword of
       mmg$gl_pfn_memory_map contains a pool packet that describes the PFN
       ranges on the system. */
    /* Element [0] of the 64-bit map is the descriptor count; the PMM64
       descriptors themselves start at element [1]. */
    memory_map = (PMM64 *)&mmg$gl_syi_pfn_memory_map_64[1];

    /* The PFN is valid if it falls inside a descriptor's range and that
       range is marked for console and/or OpenVMS use (bitwise | is
       deliberate: either flag suffices). */
    for (i = 0; i < mmg$gl_syi_pfn_memory_map_64 [0]; ++i)
        if ((pfn >= memory_map[i].pmm64$q_start_pfn) &&
            (pfn < memory_map[i].pmm64$q_start_pfn + memory_map[i].pmm64$q_pfn_count) &&
            (memory_map[i].pmm64$v_console | memory_map[i].pmm64$v_openvms))
            return 1;

    return 0;
}

#endif /* __NEW_STARLET */

#ifdef __INITIAL_POINTER_SIZE            /* Defined whenever ptr size pragmas supported */
#pragma __required_pointer_size __restore /* Restore the previously-defined required ptr size */
#endif

#endif /* __PFN_MACROS_LOADED */