#################################################################
##              dCache Site Configuration File                ##
#################################################################

# +++++++++++++++++++++++++++++++++++++++++++
# SECTION 1 - Some General Stuff
# MANDATORY
# +++++++++++++++++++++++++++++++++++++++++++

# Specify the user under which you would like dCache to run.
# If the user is non-root, the init scripts will drop privileges and start dCache
# as that user. Log files will still be generated as root. Ownership of PID files
# will be changed to the unprivileged user, which means they can still be written
# to the default location of /var/run/.
# Please take care that the user under which dCache is executed has
# sufficient privileges. Watch out for the following:
#
# * Pools need write access to the pool directory.
# * SRM, SpaceManager, PinManager, Billing (if enabled), and ChimeraDomain need database access.
# * Billing (unless disabled) needs write access to the billing directory.
# * Statistics needs write access to the statistics directory.
# * Doors and pools need access to the host certificate and host key.
# * gPlazma needs read access to /etc/grid-security/ and the host certificate and host key.
USER="root"

# File System Domain
# Example: fnal.gov
MY_DOMAIN="uchicago.edu"

# Would you like Java to be installed by the dCache install script?
# Options: yes or no
INSTALL_JDK="no"

# SKIP, if you chose 'no' to the question above
# The name of the JDK rpm and an alternate location for installing
# Java. If you do not specify anything here, but chose 'yes' for the
# variable above, it will be set to what is provided by the VDT-dCache
# tarball. Also, by default, Java will be installed under /opt/d-cache/.
JDK_FILENAME="jdk-1.6.0_12-fcs.i586.rpm"
JDK_RELOCATION="/opt/d-cache"

# If you would rather use your own Java, specify its location
JAVA_LOCATION="/usr/java/default/bin/java"

# Would you like to use the dCache/PNFS/Pool/Postgres init.d scripts
# to start/stop various services?
# Options: yes or no
INSTALL_INITD_SCRIPTS="yes"

# Would you like to install/use the Gratia dCache storage and transfer probes?
# Options: yes or no
INSTALL_DCACHE_GRATIA_PROBES="yes"

# Would you like to install/use the srmwatch monitoring tool?
# Options: yes or no
INSTALL_SRMWATCH="yes"
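
# For reference only: if you later switch USER to an unprivileged account, a
# quick sanity check along these lines (run before starting dCache) can confirm
# the access listed above. The account name, pool path, and database user below
# are illustrative assumptions, not values taken from this configuration:
#
#   sudo -u dcache test -w /dcache/pool && echo "pool directory writable"
#   sudo -u dcache test -r /etc/grid-security/hostcert.pem && \
#       sudo -u dcache test -r /etc/grid-security/hostkey.pem && echo "host cert/key readable"
#   sudo -u dcache psql -h localhost -U srmdcache -l >/dev/null && echo "database reachable"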
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# SECTION 2 - dCache Nodes Configuration
# You need to specify Fully Qualified Host Names.
# Example: gwdca04.fnal.gov
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

# Recommended distribution of services based on number of non-pool nodes
# 2 nodes
#   node 1: LOCATION_MANAGER, POOL_MANAGER, ADMIN_DOOR, HTTP_DOMAIN,
#           UTILITY_DOMAIN, GPLAZMA_SERVICE, INFO, SRM, REPLICA_MANAGER
#   node 2: PNFS_MANAGER, DIR_DOMAIN
# 3 nodes
#   node 1: LOCATION_MANAGER, POOL_MANAGER, ADMIN_DOOR, HTTP_DOMAIN,
#           GPLAZMA_SERVICE, INFO, REPLICA_MANAGER
#   node 2: PNFS_MANAGER, DIR_DOMAIN
#   node 3: UTILITY_DOMAIN, SRM
# 4 nodes
#   node 1: LOCATION_MANAGER, POOL_MANAGER, ADMIN_DOOR, HTTP_DOMAIN,
#           GPLAZMA_SERVICE, INFO
#   node 2: PNFS_MANAGER, DIR_DOMAIN
#   node 3: UTILITY_DOMAIN, SRM
#   node 4: REPLICA_MANAGER
# greater than or equal to 5 nodes
#   node 1: LOCATION_MANAGER, POOL_MANAGER, ADMIN_DOOR, GPLAZMA_SERVICE
#   node 2: PNFS_MANAGER, DIR_DOMAIN
#   node 3: UTILITY_DOMAIN, SRM
#   node 4: REPLICA_MANAGER
#   node 5: HTTP_DOMAIN, INFO

# All of the 9 parameters below are MANDATORY

# dCache Admin node
DCACHE_ADMIN="itb-dcache1.uchicago.edu"
DCACHE_ADMIN_DOOR="itb-dcache1.uchicago.edu"

# dCache node running gPlazma
DCACHE_GPLAZMA="itb-dcache1.uchicago.edu"

# Chimera hostname
DCACHE_CHIMERA="itb-dcache2.uchicago.edu"

# dCache node running the Location Manager
DCACHE_LM="itb-dcache1.uchicago.edu"

# dCache node running the Http service
DCACHE_HTTP="itb-dcache1.uchicago.edu"

# dCache node running the Pool Manager
DCACHE_POOL_MANAGER="itb-dcache1.uchicago.edu"

# dCache node running the utility domain
DCACHE_UTILITY="itb-dcache1.uchicago.edu"

# dCache node running the dir domain
DCACHE_DIR_DOMAIN="itb-dcache2.uchicago.edu"

# dCache node running the Info service. Note that this Info
# service is not the same as the info provider service. The
# info provider service will be deprecated in the future, so
# we recommend that you no longer use it.
# Using the Info service is MANDATORY
DCACHE_PROVIDER_INFO="itb-dcache1.uchicago.edu"

# If not set, it simply means these services will not run
#DCACHE_STATISTICS=""
DCACHE_REPLICA_MANAGER="itb-dcache1.uchicago.edu"

# SRM node - MANDATORY
DCACHE_DOOR_SRM="itb-dcache1.uchicago.edu"

# Node on which srmwatch should be installed. MANDATORY, if
# you chose "yes" for the srmwatch install option in Section 1
SRMWATCH_HOST="itb-dcache1.uchicago.edu"

# Make sure at least one door node is specified
DCACHE_DOOR_GSIFTP="itb-dcache1.uchicago.edu"

# The value for GRIDFTP should be the same as what you specified above
# for the GSIFTP door
DCACHE_DOOR_GRIDFTP="itb-dcache1.uchicago.edu"
DCACHE_DOOR_GSIDCAP="itb-dcache1.uchicago.edu"
DCACHE_DOOR_DCAP="itb-dcache1.uchicago.edu"
DCAP_DOORS_PER_NODE="1"

# Pool Nodes Information - MANDATORY
# Format: "hostname1:size1:path1 hostname1:size2:path2 hostname2:size1:path1 hostname2:size2:path2"
# Enter size in gigabytes without units, or "all" to use the entire space of the partition.
# Example: "gwdca01.fnal.gov:all:/storage/disk1 gwdca03.fnal.gov:20:/storage/disk2"
DCACHE_POOLS="uct3-edge6.uchicago.edu:/dcache"

# Needed for PNFS
DCACHE_PNFS="itb-dcache2.uchicago.edu"

# Customizations that you may like to edit
SRM_SPACEMGR_ENABLED="yes"
SRM_IMPLICIT_SPACE_RES="no"
SRM_LINK_GROUP_AUTH_FILE="/opt/d-cache/etc/LinkGroupAuthorization.conf"
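
# For reference only: an illustrative LinkGroupAuthorization.conf entry. The
# link group name and FQANs below are examples, not values from this site.
# Each "LinkGroup <name>" stanza lists the FQANs allowed to make space
# reservations in that link group when SRM_SPACEMGR_ENABLED="yes":
#
#   LinkGroup osg-link-group
#   /osg/Role=production
#   /osg/Role=*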

# Causes billing info to be copied to a database as well as
# the log file. If desired, set to "yes".
BILLING_USE_DB="yes"

# The option below should be set to "yes" only if you do not have Chimera already set up.
# If it is already set up and you have been using it before, make sure this is set to "no".
SETUP_CHIMERA="no"

# Name of the directory that you need to migrate
# Note that the path will look like /pnfs/YOUR-DOMAIN/data/MIGRATION_DIRECTORY
MIGRATION_DIRECTORY="fermilab"

# The option below should be set to "yes" only if you would like to access the Chimera
# filesystem on dcap nodes without specifying a URL-like format.
# Skip, if you did not choose to set up Chimera
MOUNT_FS_DCAP="yes"
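
# For reference only: with MOUNT_FS_DCAP="yes" the Chimera namespace is mounted
# locally, so dcap clients can use a plain path instead of a dcap:// URL.
# Illustrative dccp invocations (hostname, port, and file path are examples only):
#
#   dccp dcap://itb-dcache1.uchicago.edu:22125/pnfs/uchicago.edu/data/test.root /tmp/test.root
#   dccp /pnfs/uchicago.edu/data/test.root /tmp/test.root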

# Password to connect to the Chimera database
# Skip, if you did not choose to set up Chimera
CHIMERA_PASSWORD="chimera"

# In dCache, file permission checking such as for create, read, and
# delete has traditionally been the responsibility of the doors.
# Starting with the 1.9.5 release, this check can optionally be moved
# to PnfsManager. Besides the structural benefits of enforcing permissions
# at a single point, there are performance gains from avoiding extra round
# trips between the door and PnfsManager. To enable permission checking
# inside PnfsManager, specify "yes" as the value of the following; else "no".
PERMISSION_POLICY_ENFORCEMENT_POINT_PNFS="yes"

# Customizations that you do not need to edit, unless you are aware of the implications
DCACHE_DOOR_SRM_IGNORE_ORDER="true"
#SRM_AUTHZ_CACHE_LIFETIME=""

# The maximum number of client streams allowed by gridftp
#MAX_FTP_STREAMS=""

# DO NOT CHANGE THE SETTING BELOW!
# In order to correct a mismatch between gridftp and srm
# parameters that exists in the default settings of dCache, the
# following customization is always done
REMOTE_GSI_FTP_MAX_TRANSFERS="2000"

# The network portion of the IP on the private network of
# the pools. For example, if the pools' IPs are 192.168.1.x,
# enter "192.168.1".
#GRIDFTP_PRIVATE_NETWORK=

# Sets up default, WAN, and LAN queues on the pools, and
# directs gridftp to use WAN and dcap and gsidcap to use LAN.
# If desired, set to "yes".
#USE_MULTI_MOVER_QUEUES=

# Set to a non-default log directory for dCache domain output
# logs. The directory must exist before dCache is started.
#DCACHE_LOG_DIR=

# For the first installation, the following variables must be set to 'yes'.
# DO NOT set these values to yes on existing production services;
# dCache internal databases will be deleted.
#RESET_DCACHE_CONFIGURATION=yes
#RESET_DCACHE_PNFS=yes
#RESET_DCACHE_RDBMS=yes

# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# SECTION 3 - Gratia dCache storage and transfer probes
# Skip this section, if you do not want to use these probes
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++

# Name of the host running your local Gratia repository
# Example: gratia-osg-transfer.opensciencegrid.org:80
GRATIA_SOAP_HOST="gratia-fermi.fnal.gov:8881"

# Name of the storage element
# Example: FNAL_Gridworks
GRATIA_SITE_NAME="FNAL_Gridworks"

# Name of the grid to which this Storage Element belongs
# Options: OSG-ITB or OSG or LOCAL
GRATIA_GRID_NAME="OSG-ITB"

# Name of the database in which transfer information is stored
# The Gratia transfer probe will pull the information from this db
GRATIA_TRANSFER_PROBE_DB="billing"

# Would you like to send notification emails about Gratia probes
# Options: yes or no
GRATIA_SEND_EMAIL="yes"

# SKIP, if you chose 'no' to the question above
# Mail server that should be used to send emails
# Example: smtp.fnal.gov
GRATIA_EMAIL_SRVHOST="smtp.fnal.gov"

# SKIP, if you chose 'no' to the question above
# Sender's name under which you would like the emails to be sent
GRATIA_EMAIL_FROM="dCacheProbe"

# SKIP, if you chose 'no' to the question above
# Single email address or a comma-separated list of email addresses
# to which you would like to send email
GRATIA_EMAIL_TO="neha@fnal.gov,tlevshin@fnal.gov"

# SKIP, if you chose 'no' to installing Gratia probes
# Options: 1 or 0
# If set to 0, the Gratia dCache storage probe will not report info about pools
# If set to 1, it will report info about pools
REPORT_POOL_USAGE="0"

# SKIP, if you chose 'no' to installing Gratia probes
# Options: 1 or 0
# If set to 0, the Gratia dCache transfer probe will send unsummarized information
# If set to 1, it will send summarized information
SUMMARIZE="0"

#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# SECTION 4 - "Core dCache Operations" and "dCache Chronicle" Utilities
# Skip, if you do not care what these are or what they do
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

# Core dCache Operations RPM
# For now, this will be installed on the Admin node. Make sure PNFS is
# mounted on the admin node.
# INFO: It contains utilities for performing several tasks such as:
#   o Collecting disk usage information for all pools from the admin interface
#   o Printing out the total space used, including replicas, the percentage of
#     single-replica files, and the distribution of disk space utilized throughout
#     the namespace.
#   o Providing all the current transfer rates in dCache.
#   o Checking for discrepancies in the disk usage of all pools, by collecting
#     information from individual pools. If the total disk differs from the sum of
#     used disk and free disk, the discrepancy is reported.
#   o Creating some space in a pool which has reached its disk capacity, i.e., is
#     almost full. It can move files from one pool to another, and optionally remove
#     files in the source pool. It works in interactive mode, and requires confirmation
#     for each action.
# REQUIREMENTS: Jython and any server node with PNFS mounted
# Would you like to install/use this rpm?
# Options: yes or no
INSTALL_CORE_DCACHE_OPERATIONS_RPM="yes"

# SKIP the settings below for this rpm, if you chose 'no' to the question above
# If you chose 'yes', it is MANDATORY that you specify all of them
# The site's Cautionary_Level must be one of: { low | medium | high }
#   1. low    : All tools 'can' be run.
#   2. medium : Tools with data removal capability 'cannot' be run.
#   3. high   : Tools with data removal capability as well as tools that
#               perform exhaustive operations, e.g., on all files, 'cannot' be run.
# What cautionary level would you like to set for your site
SITE_CAUTIONARY_LEVEL="medium"

# Jython is required by a subset of the utilities provided by the Core dCache Operations RPM
# BE VERY CAREFUL about the following two questions asked during the install:
#   1) For installation type: choose the "All (everything, including sources)" option
#   2) For target directory: enter the directory location where you would like Jython to
#      be installed. ALSO, enter the same directory location below
JYTHON_LOCATION="/usr/local/jython"

# Whom would you like to send emails generated by these utilities
# Enter a single email address or a comma-separated list
CORE_DCACHE_OPERATIONS_EMAIL_TO="neha@fnal.gov,tanya@fnal.gov"

# PNFS path to a top-level user storage directory (e.g., CMS's store/user).
# EXAMPLE: USER_SE_TOP_DIR="/pnfs/mit.edu/data4/cms/phedex/store/user"
USER_SE_TOP_DIR="/pnfs/fnal.gov/data"

# Port used to connect to the admin interface
ADMINSRV_PORT="2228"

# Login name used to connect to the admin interface
ADMINSRV_LOGIN="admin"

# Password used to connect to the admin interface
ADMINSRV_PASSWORD="dickerelch"
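
# For reference only: the ADMINSRV_* values above are the credentials the Core
# dCache Operations utilities use to reach the dCache admin interface. Assuming
# the legacy ssh admin door of this dCache release, a manual login would look
# roughly like the following (using this site's admin node):
#
#   ssh -c blowfish -p 2228 -l admin itb-dcache1.uchicago.edu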

# dCache Chronicle RPM
# INFO: It provides an autobot emailer that operates daily. It is locally
# configured by a site. The autobot sends an analysis of the overall system
# summary, based on all active PoolGroups in the storage system at the site.
# For each PoolGroup found to have active pools, a total is computed to measure
# overall disk capacity. Totals are also computed for space which is free,
# precious, or cached and removable. The report includes the total number of
# active pools and the disk utilization percentage.
# In the site's configured top-level user-storage directory, it performs an
# analysis of the space consumption of each user area. In addition, for each
# user, it collects a list of 'lost' files. This is a comparison between the
# physical and logical states of consumption. If there are any user files
# which seem to have been lost from disk but are still in the PNFS filesystem,
# this listing is automatically emailed, followed by the chronicle report.
# REQUIREMENTS: A server node with the dCache Core Utilities RPM installed.
# The Jython-based utilities of the dCache Core Utilities RPM are not used,
# so a Jython install and any associated jars are not required.
# Would you like to install/use this rpm?
# Options: yes or no
# If you chose 'yes' here, make sure you chose 'yes' for INSTALL_CORE_DCACHE_OPERATIONS_RPM
# as well
INSTALL_DCACHE_CHRONICLE_RPM="yes"