CESM Coupled Model XML Files

XML files for CESM are located in CIMEROOT/config/cesm.

CIMEROOT/config/cesm

CESM XML settings for the short-term archiver.

<components version="2.0">
  <comp_archive_spec compname="cice" compclass="ice">
    <rest_file_extension>[ri]</rest_file_extension>
    <hist_file_extension>h\d*.*\.nc$</hist_file_extension>
    <rest_history_varname>unset</rest_history_varname>
    <rpointer>
      <rpointer_file>rpointer.ice$NINST_STRING</rpointer_file>
      <rpointer_content>./$CASE.cice$NINST_STRING.r.$DATENAME.nc</rpointer_content>
    </rpointer>
    <test_file_names>
      <tfile disposition="copy">rpointer.ice</tfile>
      <tfile disposition="copy">casename.cice.r.1976-01-01-00000.nc</tfile>
      <tfile disposition="move">casename.cice.h.1976-01-01-00000.nc</tfile>
    </test_file_names>
  </comp_archive_spec>


  <comp_archive_spec compname="pop" compclass="ocn">
    <rest_file_extension>r</rest_file_extension>
    <rest_file_extension>r[ho]</rest_file_extension>
    <hist_file_extension>h\d*.*\.nc$</hist_file_extension>
    <hist_file_extension>d[dovt]</hist_file_extension>
    <rest_history_varname>unset</rest_history_varname>
    <rpointer>
      <rpointer_file>rpointer.ocn$NINST_STRING.restart</rpointer_file>
      <rpointer_content>./$CASE.pop$NINST_STRING.r.$DATENAME.nc,RESTART_FMT=nc</rpointer_content>
    </rpointer>
    <rpointer>
      <rpointer_file>rpointer.ocn$NINST_STRING.ovf</rpointer_file>
      <rpointer_content>./$CASE.pop$NINST_STRING.ro.$DATENAME</rpointer_content>
    </rpointer>
    <rpointer>
      <rpointer_file>rpointer.ocn$NINST_STRING.tavg</rpointer_file>
      <rpointer_content>./$CASE.pop$NINST_STRING.rh.$DATENAME.nc</rpointer_content>
    </rpointer>
    <test_file_names>
      <tfile disposition="copy">rpointer.pop</tfile>
      <tfile disposition="copy">casename.pop_0001.r.1976-01-01-00000.nc</tfile>
      <tfile disposition="copy">casename.pop.r.1976-01-01-00000.nc</tfile>
      <tfile disposition="move">casename.pop.h.1976-01-01-00000.nc</tfile>
      <tfile disposition="move">casename.pop.h.1975-02-01-00000.nc</tfile>
      <tfile disposition="move">casename.pop.h0.1976-01-01-00000.nc</tfile>
      <tfile disposition="move">casename.pop.dd.1976-01-01-00000</tfile>
      <tfile disposition="ignore">casename.pop.r.1975-01-01-00000.nc</tfile>
      <tfile disposition="ignore">anothercasename.pop.r.1976-01-01-00000.nc</tfile>
    </test_file_names>
  </comp_archive_spec>

  <comp_archive_spec compname="cism" compclass="glc">
    <rest_file_extension>[ri]</rest_file_extension>
    <hist_file_extension>h\d*.*\.nc$</hist_file_extension>
    <hist_file_extension>initial_hist</hist_file_extension>
    <rest_history_varname>unset</rest_history_varname>
    <rpointer>
      <rpointer_file>rpointer.glc$NINST_STRING</rpointer_file>
      <rpointer_content>./$CASE.cism$NINST_STRING.r.$DATENAME.nc</rpointer_content>
    </rpointer>
    <test_file_names>
      <!-- Should copy rpointer file(s) -->
      <tfile disposition="copy">rpointer.glc</tfile>
      <tfile disposition="copy">rpointer.glc_9999</tfile>
      <!-- Should only copy last restart file -->
      <tfile disposition="ignore">casename.cism.r.1975-01-01-00000.nc</tfile>
      <tfile disposition="copy">casename.cism.r.1976-01-01-00000.nc</tfile>
      <!-- Should copy all history files -->
      <tfile disposition="move">casename.cism.initial_hist.0001-01-01-00000.nc</tfile>
      <tfile disposition="move">casename.cism.h.1975-01-01-00000.nc</tfile>
      <tfile disposition="move">casename.cism.h.1976-01-01-00000.nc</tfile>
      <!-- Should ignore files created by test suite, files from other cases, etc. -->
      <tfile disposition="ignore">casename.cism.h.1976-01-01-00000.nc.base</tfile>
      <tfile disposition="ignore">anothercasename.cism.r.1976-01-01-00000.nc</tfile>
    </test_file_names>
  </comp_archive_spec>

  <comp_archive_spec compname="ww3" compclass="wav">
    <rest_file_extension>r</rest_file_extension>
    <hist_file_extension>hi.*\.nc$</hist_file_extension>
    <rest_history_varname>unset</rest_history_varname>
    <rpointer>
      <rpointer_file>rpointer.wav$NINST_STRING</rpointer_file>
      <rpointer_content>unset</rpointer_content>
    </rpointer>
  </comp_archive_spec>

  <comp_archive_spec compclass="esp" compname="dart">
    <rest_file_extension>r</rest_file_extension>
    <rest_file_extension>rh\d?</rest_file_extension>
    <hist_file_extension>[ei]</hist_file_extension>
    <rest_history_varname>restart_hist</rest_history_varname>
    <rpointer>
      <rpointer_file>rpointer.unset</rpointer_file>
      <rpointer_content>unset</rpointer_content>
    </rpointer>
    <test_file_names>
      <!-- Copy the little restart file and the files it references -->
      <tfile disposition="copy">casename.dart.r.1976-01-01-00000.nc</tfile>
      <tfile disposition="copy">casename.dart.rh.pop_preassim_priorinf_mean.1976-01-01-00000.nc</tfile>
      <tfile disposition="copy">casename.dart.rh.cam_preassim_priorinf_mean.1976-01-01-00000.nc</tfile>
      <!-- Move all the rest -->
      <tfile disposition="move">casename.dart.e.cam_postassim_mean.1976-01-01-00000.nc</tfile>
      <tfile disposition="move">casename.dart.i.cam_output_mean.1976-01-01-00000.nc</tfile>
      <tfile disposition="move">casename.dart.e.cam_obs_seq_final.1976-01-01-00000.nc</tfile>
    </test_file_names>
  </comp_archive_spec>
  </components>
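
The short-term archiver (case.st_archive) uses these specs to decide which files to copy or move to the archive: the rest_file_extension and hist_file_extension patterns classify files in the run directory, and the $CASE, $NINST_STRING, and $DATENAME placeholders in the rpointer patterns are filled in from the case. As an illustration only (the component name and paths below are hypothetical, not part of the shipped file), a spec for a new component would follow the same structure:

<comp_archive_spec compname="xyz" compclass="atm">
  <rest_file_extension>r</rest_file_extension>
  <hist_file_extension>h\d*.*\.nc$</hist_file_extension>
  <rest_history_varname>unset</rest_history_varname>
  <rpointer>
    <rpointer_file>rpointer.atm$NINST_STRING</rpointer_file>
    <rpointer_content>./$CASE.xyz$NINST_STRING.r.$DATENAME.nc</rpointer_content>
  </rpointer>
</comp_archive_spec>

For a single-instance case named mycase with a restart written at 1976-01-01-00000, $NINST_STRING is typically empty and the rpointer_content pattern would resolve to ./mycase.xyz.r.1976-01-01-00000.nc.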

CESM XML settings for defining CASEROOT env_*.xml file entries.

<?xml version="1.0"?>

<?xml-stylesheet type="text/xsl" ?>

<entry_id version="2.0">

  <entry id="MODEL">
    <type>char</type>
    <default_value>cesm</default_value>
    <group>case_der</group>
    <file>env_case.xml</file>
    <desc>model system name</desc>
  </entry>

  <!-- ============================================================ -->
  <!-- Filenames for case config, grids, machines and pio -->
  <!-- ============================================================ -->

  <entry id="CASEFILE_HEADERS">
    <type>char</type>
    <default_value>$CIMEROOT/config/config_headers.xml</default_value>
    <group>case_der</group>
    <file>env_case.xml</file>
    <desc>contains both header and group information for all the case env_*.xml files </desc>
  </entry>

  <entry id="GRIDS_SPEC_FILE">
    <type>char</type>
    <default_value>$CIMEROOT/config/$MODEL/config_grids.xml</default_value>
    <group>case_last</group>
    <file>env_case.xml</file>
    <desc>file containing specification of all supported model grids, domains and mapping files (for documentation only - DO NOT EDIT)</desc>
    <schema>$CIMEROOT/config/xml_schemas/config_grids_v2.1.xsd</schema>
  </entry>

  <entry id="MACHINES_SPEC_FILE">
    <type>char</type>
    <default_value>$CIMEROOT/config/$MODEL/machines/config_machines.xml</default_value>
    <group>case_last</group>
    <file>env_case.xml</file>
    <desc>file containing machine specifications for target model primary component (for documentation only - DO NOT EDIT)</desc>
    <schema>$CIMEROOT/config/xml_schemas/config_machines.xsd</schema>
  </entry>

  <entry id="BATCH_SPEC_FILE">
    <type>char</type>
    <default_value>$CIMEROOT/config/$MODEL/machines/config_batch.xml</default_value>
    <group>case_last</group>
    <file>env_case.xml</file>
    <desc>file containing batch system details for target system  (for documentation only - DO NOT EDIT)</desc>
    <schema>$CIMEROOT/config/xml_schemas/config_batch.xsd</schema>
  </entry>

  <entry id="WORKFLOW_SPEC_FILE">
    <type>char</type>
    <default_value>$CIMEROOT/config/$MODEL/machines/config_workflow.xml</default_value>
    <group>case_last</group>
    <file>env_case.xml</file>
    <desc>file containing workflow (for documentation only - DO NOT EDIT)</desc>
    <schema>$CIMEROOT/config/xml_schemas/config_workflow.xsd</schema>
  </entry>

  <entry id="INPUTDATA_SPEC_FILE">
    <type>char</type>
    <default_value>$CIMEROOT/config/$MODEL/config_inputdata.xml</default_value>
    <group>case_last</group>
    <file>env_case.xml</file>
    <desc>file containing inputdata server descriptions  (for documentation only - DO NOT EDIT)</desc>
    <schema>$CIMEROOT/config/xml_schemas/config_inputdata.xsd</schema>
  </entry>

  <entry id="COMPILERS_SPEC_FILE">
    <type>char</type>
    <default_value>$CIMEROOT/config/$MODEL/machines/config_compilers.xml</default_value>
    <group>case_last</group>
    <file>env_case.xml</file>
    <desc>file containing compiler specifications for target model primary component (for documentation only - DO NOT EDIT)</desc>
    <schema>$CIMEROOT/config/xml_schemas/config_compilers_v2.xsd</schema>
  </entry>

  <entry id="PIO_SPEC_FILE">
    <type>char</type>
    <default_value>$CIMEROOT/config/$MODEL/machines/config_pio.xml</default_value>
    <group>case_last</group>
    <file>env_case.xml</file>
    <desc>file containing specification of pio settings for target model possible machine, compiler, mpilib, compset and/or grid attributes (for documentation only - DO NOT EDIT)</desc>
  </entry>

  <entry id="CONFIG_TESTS_FILE">
    <type>char</type>
    <values>
      <value>$CIMEROOT/config/config_tests.xml</value>
      <!-- component specific config_tests files -->
      <value component="clm">$COMP_ROOT_DIR_LND/cime_config/config_tests.xml</value>
      <value component="cam">$COMP_ROOT_DIR_ATM/cime_config/config_tests.xml</value>
    </values>
    <group>test</group>
    <file>env_test.xml</file>
    <desc>file containing system test descriptions </desc>
  </entry>

  <!-- ============================================================ -->
  <!-- Filenames for determining compsets and tests file            -->
  <!-- Depends on component attribute value   -->
  <!-- ============================================================ -->

  <entry id="COMP_ROOT_DIR_ATM">
    <type>char</type>
    <values>
      <value component="datm"      >$CIMEROOT/src/components/data_comps/datm</value>
      <value component="satm"      >$CIMEROOT/src/components/stub_comps/satm</value>
      <value component="xatm"      >$CIMEROOT/src/components/xcpl_comps/xatm</value>
      <value component="cam"      >$SRCROOT/components/cam/</value>
      <value component="ufsatm"      >$SRCROOT/components/fv3/</value>
    </values>
    <group>case_comps</group>
    <file>env_case.xml</file>
    <desc>Root directory of the case atmospheric component  </desc>
    <schema>$CIMEROOT/config/xml_schemas/config_compsets.xsd</schema>
  </entry>

  <entry id="COMP_ROOT_DIR_CPL">
    <type>char</type>
    <values>
      <value>$CIMEROOT/src/drivers/$COMP_INTERFACE</value>
    </values>
    <group>case_comps</group>
    <file>env_case.xml</file>
    <desc>Root directory of the case driver/coupler component  </desc>
    <schema>$CIMEROOT/config/xml_schemas/config_compsets.xsd</schema>
  </entry>

  <entry id="COMP_ROOT_DIR_OCN">
    <type>char</type>
    <default_value>unset</default_value>
    <values>
      <value component="pop"       >$SRCROOT/components/pop/</value>
      <value component="mom"       >$SRCROOT/components/mom/</value>
      <value component="nemo"      >$SRCROOT/components/nemo/</value>
      <value component="docn"      >$CIMEROOT/src/components/data_comps/docn</value>
      <value component="socn"      >$CIMEROOT/src/components/stub_comps/socn</value>
      <value component="xocn"      >$CIMEROOT/src/components/xcpl_comps/xocn</value>
    </values>
    <group>case_comps</group>
    <file>env_case.xml</file>
    <desc>Root directory of the case ocean component  </desc>
    <schema>$CIMEROOT/config/xml_schemas/config_compsets.xsd</schema>
  </entry>

  <entry id="COMP_ROOT_DIR_WAV">
    <type>char</type>
    <default_value>unset</default_value>
    <values>
      <value component="ww3"      >$SRCROOT/components/ww3/</value>
      <value component="dwav"      >$CIMEROOT/src/components/data_comps/dwav</value>
      <value component="swav"      >$CIMEROOT/src/components/stub_comps/swav</value>
      <value component="xwav"      >$CIMEROOT/src/components/xcpl_comps/xwav</value>
    </values>
    <group>case_comps</group>
    <file>env_case.xml</file>
    <desc>Root directory of the case wave model component  </desc>
    <schema>$CIMEROOT/config/xml_schemas/config_compsets.xsd</schema>
  </entry>

  <entry id="COMP_ROOT_DIR_GLC">
    <type>char</type>
    <default_value>unset</default_value>
    <values>
      <value component="cism"      >$SRCROOT/components/cism/</value>
      <value component="dglc"      >$CIMEROOT/src/components/data_comps/dglc</value>
      <value component="sglc"      >$CIMEROOT/src/components/stub_comps/sglc</value>
      <value component="xglc"      >$CIMEROOT/src/components/xcpl_comps/xglc</value>
    </values>
    <group>case_comps</group>
    <file>env_case.xml</file>
    <desc>Root directory of the case land ice component  </desc>
    <schema>$CIMEROOT/config/xml_schemas/config_compsets.xsd</schema>
  </entry>

  <entry id="COMP_ROOT_DIR_ICE">
    <type>char</type>
    <default_value>unset</default_value>
    <values>
      <value component="cice"      >$SRCROOT/components/cice/</value>
      <value component="dice"      >$CIMEROOT/src/components/data_comps/dice</value>
      <value component="sice"      >$CIMEROOT/src/components/stub_comps/sice</value>
      <value component="xice"      >$CIMEROOT/src/components/xcpl_comps/xice</value>
    </values>
    <group>case_comps</group>
    <file>env_case.xml</file>
    <desc>Root directory of the case sea ice component  </desc>
    <schema>$CIMEROOT/config/xml_schemas/config_compsets.xsd</schema>
  </entry>

  <entry id="COMP_ROOT_DIR_ROF">
    <type>char</type>
    <default_value>unset</default_value>
    <values>
      <value component="rtm"      >$SRCROOT/components/rtm/</value>
      <value component="mosart"      >$SRCROOT/components/mosart/</value>
      <value component="drof"      >$CIMEROOT/src/components/data_comps/drof</value>
      <value component="srof"      >$CIMEROOT/src/components/stub_comps/srof</value>
      <value component="xrof"      >$CIMEROOT/src/components/xcpl_comps/xrof</value>
    </values>
    <group>case_comps</group>
    <file>env_case.xml</file>
    <desc>Root directory of the case river runoff model component  </desc>
    <schema>$CIMEROOT/config/xml_schemas/config_compsets.xsd</schema>
  </entry>

  <entry id="COMP_ROOT_DIR_LND">
    <type>char</type>
    <default_value>unset</default_value>
    <values>
      <value component="clm"      >$SRCROOT/components/clm/</value>
      <value component="dlnd"      >$CIMEROOT/src/components/data_comps/dlnd</value>
      <value component="slnd"      >$CIMEROOT/src/components/stub_comps/slnd</value>
      <value component="xlnd"      >$CIMEROOT/src/components/xcpl_comps/xlnd</value>
    </values>
    <group>case_comps</group>
    <file>env_case.xml</file>
    <desc>Root directory of the case land model component  </desc>
    <schema>$CIMEROOT/config/xml_schemas/config_compsets.xsd</schema>
  </entry>

  <entry id="COMP_ROOT_DIR_IAC">
    <type>char</type>
    <default_value>unset</default_value>
    <values>
      <value component="siac"      >$CIMEROOT/src/components/stub_comps/siac</value>
      <value component="xiac"      >$CIMEROOT/src/components/xcpl_comps/xiac</value>
    </values>
    <group>case_comps</group>
    <file>env_case.xml</file>
    <desc>Root directory of the case integrated assessment component  </desc>
    <schema>$CIMEROOT/config/xml_schemas/config_compsets.xsd</schema>
  </entry>

  <entry id="COMP_ROOT_DIR_ESP">
    <type>char</type>
    <default_value>unset</default_value>
    <values>
      <value component="desp"      >$CIMEROOT/src/components/data_comps/desp</value>
      <value component="sesp"      >$CIMEROOT/src/components/stub_comps/sesp</value>
    </values>
    <group>case_comps</group>
    <file>env_case.xml</file>
    <desc>Root directory of the case external system processing (esp) component  </desc>
    <schema>$CIMEROOT/config/xml_schemas/config_compsets.xsd</schema>
  </entry>

  <entry id="COMPSETS_SPEC_FILE">
    <type>char</type>
    <default_value>unset</default_value>
    <values>
      <value component="allactive">$SRCROOT/cime_config/config_compsets.xml</value>
      <value component="drv"      >$COMP_ROOT_DIR_CPL/cime_config/config_compsets.xml</value>
      <value component="cam"      >$COMP_ROOT_DIR_ATM/cime_config/config_compsets.xml</value>
      <value component="ufsatm"   >$COMP_ROOT_DIR_ATM/cime_config/config_compsets.xml</value>
      <value component="cism"     >$COMP_ROOT_DIR_GLC/cime_config/config_compsets.xml</value>
      <value component="clm"      >$COMP_ROOT_DIR_LND/cime_config/config_compsets.xml</value>
      <value component="cice"     >$COMP_ROOT_DIR_ICE/cime_config/config_compsets.xml</value>
      <value component="pop"      >$COMP_ROOT_DIR_OCN/cime_config/config_compsets.xml</value>
      <value component="mom"      >$COMP_ROOT_DIR_OCN/cime_config/config_compsets.xml</value>
      <value component="nemo"   >$COMP_ROOT_DIR_OCN/cime_config/config_compsets.xml</value>
    </values>
    <group>case_last</group>
    <file>env_case.xml</file>
    <desc>file containing specification of all compsets for primary component (for documentation only - DO NOT EDIT)</desc>
    <schema>$CIMEROOT/config/xml_schemas/config_compsets.xsd</schema>
  </entry>

  <entry id="PES_SPEC_FILE">
    <type>char</type>
    <default_value>unset</default_value>
    <values>
      <value component="allactive">$SRCROOT/cime_config/config_pes.xml</value>
      <value component="drv"      >$COMP_ROOT_DIR_CPL/cime_config/config_pes.xml</value>
      <value component="cam"      >$COMP_ROOT_DIR_ATM/cime_config/config_pes.xml</value>
      <value component="ufsatm"   >$COMP_ROOT_DIR_ATM/cime_config/config_pes.xml</value>
      <value component="cism"     >$COMP_ROOT_DIR_GLC/cime_config/config_pes.xml</value>
      <value component="clm"      >$COMP_ROOT_DIR_LND/cime_config/config_pes.xml</value>
      <value component="cice"     >$COMP_ROOT_DIR_ICE/cime_config/config_pes.xml</value>
      <value component="pop"      >$COMP_ROOT_DIR_OCN/cime_config/config_pes.xml</value>
      <value component="mom"      >$COMP_ROOT_DIR_OCN/cime_config/config_pes.xml</value>
      <value component="nemo"   >$COMP_ROOT_DIR_OCN/cime_config/config_pes.xml</value>
    </values>
    <group>case_last</group>
    <file>env_case.xml</file>
    <desc>file containing specification of all pe-layouts for primary component (for documentation only - DO NOT EDIT)</desc>
    <schema>$CIMEROOT/config/xml_schemas/config_pes.xsd</schema>
  </entry>

  <entry id="ARCHIVE_SPEC_FILE">
    <type>char</type>
    <values>
      <value>$CIMEROOT/config/cesm/config_archive.xml</value>
      <value component="drv"      >$COMP_ROOT_DIR_CPL/cime_config/config_archive.xml</value>
      <!-- data model components -->
      <value component="drof">$COMP_ROOT_DIR_ROF/cime_config/config_archive.xml</value>
      <value component="datm">$COMP_ROOT_DIR_ATM/cime_config/config_archive.xml</value>
      <value component="dice">$COMP_ROOT_DIR_ICE/cime_config/config_archive.xml</value>
      <value component="dlnd">$COMP_ROOT_DIR_LND/cime_config/config_archive.xml</value>
      <value component="docn">$COMP_ROOT_DIR_OCN/cime_config/config_archive.xml</value>
      <value component="dwav">$COMP_ROOT_DIR_WAV/cime_config/config_archive.xml</value>
      <!-- external model components -->
      <value component="cam"      >$COMP_ROOT_DIR_ATM/cime_config/config_archive.xml</value>
      <value component="cism"     >$COMP_ROOT_DIR_GLC/cime_config/config_archive.xml</value>
      <value component="clm"      >$COMP_ROOT_DIR_LND/cime_config/config_archive.xml</value>
      <value component="cice"     >$COMP_ROOT_DIR_ICE/cime_config/config_archive.xml</value>
      <value component="pop"      >$COMP_ROOT_DIR_OCN/cime_config/config_archive.xml</value>
      <value component="mom"      >$COMP_ROOT_DIR_OCN/cime_config/config_archive.xml</value>
      <value component="nemo"   >$COMP_ROOT_DIR_OCN/cime_config/config_archive.xml</value>
      <value component="rtm"      >$COMP_ROOT_DIR_ROF/cime_config/config_archive.xml</value>
      <value component="mosart"   >$COMP_ROOT_DIR_ROF/cime_config/config_archive.xml</value>
    </values>
    <group>case_last</group>
    <file>env_case.xml</file>
    <desc>file containing specification of archive files for each component (for documentation only - DO NOT EDIT)</desc>
    <schema>$CIMEROOT/config/xml_schemas/config_archive.xsd</schema>
  </entry>

  <entry id="SYSTEM_TESTS_DIR">
    <type>char</type>
    <values>
      <value component="any">$CIMEROOT/scripts/lib/CIME/SystemTests</value>
      <value component="clm">$COMP_ROOT_DIR_LND/cime_config/SystemTests</value>
      <value component="cam">$COMP_ROOT_DIR_ATM/cime_config/SystemTests</value>
      <value component="pop">$COMP_ROOT_DIR_OCN/cime_config/SystemTests</value>
      <value component="mom">$COMP_ROOT_DIR_OCN/cime_config/SystemTests</value>
      <value component="nemo">$COMP_ROOT_DIR_OCN/cime_config/SystemTests</value>
      <value component="cice">$COMP_ROOT_DIR_ICE/cime_config/SystemTests</value>
      <value component="cism">$COMP_ROOT_DIR_GLC/cime_config/SystemTests</value>
      <value component="rtm">$COMP_ROOT_DIR_ROF/cime_config/SystemTests</value>
      <value component="mosart">$COMP_ROOT_DIR_ROF/cime_config/SystemTests</value>
    </values>
    <group>test</group>
    <file>env_test.xml</file>
    <desc>directories containing cime compatible system test modules</desc>
  </entry>

  <entry id="TESTS_SPEC_FILE">
    <type>char</type>
    <default_value>unset</default_value>
    <values>
      <value component="allactive">$SRCROOT/cime_config/testlist_allactive.xml</value>
      <value component="drv"      >$COMP_ROOT_DIR_CPL/cime_config/testdefs/testlist_drv.xml</value>
      <value component="cam"      >$COMP_ROOT_DIR_ATM/cime_config/testdefs/testlist_cam.xml</value>
      <value component="cism"     >$COMP_ROOT_DIR_GLC/cime_config/testdefs/testlist_cism.xml</value>
      <value component="clm"      >$COMP_ROOT_DIR_LND/cime_config/testdefs/testlist_clm.xml</value>
      <value component="cice"     >$COMP_ROOT_DIR_ICE/cime_config/testdefs/testlist_cice.xml</value>
      <value component="pop"      >$COMP_ROOT_DIR_OCN/cime_config/testdefs/testlist_pop.xml</value>
      <value component="mom"      >$COMP_ROOT_DIR_OCN/cime_config/testdefs/testlist_mom.xml</value>
      <value component="nemo"   >$COMP_ROOT_DIR_OCN/cime_config/testdefs/testlist_nemo.xml</value>
      <value component="rtm"      >$COMP_ROOT_DIR_ROF/cime_config/testdefs/testlist_rtm.xml</value>
      <value component="mosart"   >$COMP_ROOT_DIR_ROF/cime_config/testdefs/testlist_mosart.xml</value>
    </values>
    <group>case_last</group>
    <file>env_case.xml</file>
    <desc>file containing specification of all system tests for primary component (for documentation only - DO NOT EDIT)</desc>
    <schema>$CIMEROOT/config/xml_schemas/testlist.xsd</schema>
  </entry>

  <entry id="TESTS_MODS_DIR">
    <type>char</type>
    <default_value>unset</default_value>
    <values>
      <value component="allactive">$SRCROOT/cime_config/testmods_dirs</value>
      <value component="drv"      >$COMP_ROOT_DIR_CPL/cime_config/testdefs/testmods_dirs</value>
      <value component="cam"      >$COMP_ROOT_DIR_ATM/cime_config/testdefs/testmods_dirs</value>
      <value component="cism"     >$COMP_ROOT_DIR_GLC/cime_config/testdefs/testmods_dirs</value>
      <value component="clm"      >$COMP_ROOT_DIR_LND/cime_config/testdefs/testmods_dirs</value>
      <value component="cice"     >$COMP_ROOT_DIR_ICE/cime_config/testdefs/testmods_dirs</value>
      <value component="rtm"      >$COMP_ROOT_DIR_ROF/cime_config/testdefs/testmods_dirs</value>
      <value component="mosart"   >$COMP_ROOT_DIR_ROF/cime_config/testdefs/testmods_dirs</value>
      <value component="pop"      >$COMP_ROOT_DIR_OCN/cime_config/testdefs/testmods_dirs</value>
      <value component="mom"      >$COMP_ROOT_DIR_OCN/cime_config/testdefs/testmods_dirs</value>
      <value component="nemo"   >$COMP_ROOT_DIR_OCN/cime_config/testdefs/testmods_dirs</value>
    </values>
    <group>case_last</group>
    <file>env_case.xml</file>
    <desc>directory containing test modifications for primary component tests (for documentation only - DO NOT EDIT)</desc>
  </entry>

  <entry id="USER_MODS_DIR">
    <type>char</type>
    <default_value>unset</default_value>
    <values>
      <value component="allactive">$SRCROOT/cime_config/usermods_dirs</value>
      <value component="drv"      >$COMP_ROOT_DIR_CPL/cime_config/usermods_dirs</value>
      <value component="cam"      >$COMP_ROOT_DIR_ATM/cime_config/usermods_dirs</value>
      <value component="cism"     >$COMP_ROOT_DIR_GLC/cime_config/usermods_dirs</value>
      <value component="clm"      >$COMP_ROOT_DIR_LND/cime_config/usermods_dirs</value>
      <value component="cice"     >$COMP_ROOT_DIR_ICE/cime_config/usermods_dirs</value>
      <value component="rtm"      >$COMP_ROOT_DIR_ROF/cime_config/usermods_dirs</value>
      <value component="mosart"   >$COMP_ROOT_DIR_ROF/cime_config/usermods_dirs</value>
      <value component="pop"      >$COMP_ROOT_DIR_OCN/cime_config/usermods_dirs</value>
      <value component="mom"      >$COMP_ROOT_DIR_OCN/cime_config/usermods_dirs</value>
      <value component="nemo"   >$COMP_ROOT_DIR_OCN/cime_config/usermods_dirs</value>
    </values>
    <group>case_last</group>
    <file>env_case.xml</file>
    <desc>directory containing user modifications for primary components (for documentation only - DO NOT EDIT)</desc>
  </entry>


  <entry id="NAMELIST_DEFINITION_FILE">
    <type>char</type>
    <default_value>unset</default_value>
    <values>
      <value component="drv"      >$COMP_ROOT_DIR_CPL/cime_config/namelist_definition_drv.xml</value>
      <!-- data model components -->
      <value component="drof">$CIMEROOT/src/components/data_comps/drof/cime_config/namelist_definition_drof.xml</value>
      <value component="datm">$CIMEROOT/src/components/data_comps/datm/cime_config/namelist_definition_datm.xml</value>
      <value component="dice">$CIMEROOT/src/components/data_comps/dice/cime_config/namelist_definition_dice.xml</value>
      <value component="dlnd">$CIMEROOT/src/components/data_comps/dlnd/cime_config/namelist_definition_dlnd.xml</value>
      <value component="docn">$CIMEROOT/src/components/data_comps/docn/cime_config/namelist_definition_docn.xml</value>
      <value component="dwav">$CIMEROOT/src/components/data_comps/dwav/cime_config/namelist_definition_dwav.xml</value>
      <!-- external model components -->
      <!--  TODO
      <value component="cam"      >$COMP_ROOT_DIR_ATM/bld/namelist_files/namelist_definition.xml</value>
      <value component="cism"     >$COMP_ROOT_DIR_GLC/bld/namelist_files/namelist_definition_cism.xml</value>
      <value component="cice"     >$COMP_ROOT_DIR_ICE/cime_config/namelist_definition_cice.xml</value>
      <value component="clm"      >$COMP_ROOT_DIR_LND/bld/namelist_files/namelist_definition_ctsm.xml</value>
      <value component="pop"      >$COMP_ROOT_DIR_OCN/bld/namelist_files/namelist_definition_pop.xml</value>
      <value component="mom"      >$COMP_ROOT_DIR_OCN/bld/namelist_files/namelist_definition_mom.xml</value>
      <value component="nemo"   >$COMP_ROOT_DIR_OCN/bld/namelist_files/namelist_definition_nemo.xml</value>
      -->
    </values>
    <group>case_last</group>
    <file>env_case.xml</file>
    <desc>file containing namelist_definitions for all components </desc>
    <schema>$CIMEROOT/config/xml_schemas/entry_id_namelist.xsd</schema>
  </entry>

  <!-- =============================================================== -->
  <!-- File names for all component specific configuration variables -->
  <!-- =============================================================== -->

  <entry id="CONFIG_CPL_FILE">
    <type>char</type>
    <values>
      <value>$COMP_ROOT_DIR_CPL/cime_config/config_component.xml</value>
    </values>
    <group>case_last</group>
    <file>env_case.xml</file>
    <desc>file containing all non-component specific case configuration variables (for documentation only - DO NOT EDIT)</desc>
    <schema version="2.0">$CIMEROOT/config/xml_schemas/entry_id.xsd</schema>
    <schema version="3.0">$CIMEROOT/config/xml_schemas/entry_id_version3.xsd</schema>
  </entry>

  <entry id="CONFIG_CPL_FILE_MODEL_SPECIFIC">
    <type>char</type>
    <values>
      <value>$CIMEROOT/src/drivers/$COMP_INTERFACE/cime_config/config_component_$MODEL.xml</value>
    </values>
    <group>case_last</group>
    <file>env_case.xml</file>
    <desc>file containing all component specific driver configuration variables (for documentation only - DO NOT EDIT)</desc>
    <schema version="2.0">$CIMEROOT/config/xml_schemas/entry_id.xsd</schema>
    <schema version="3.0">$CIMEROOT/config/xml_schemas/entry_id_version3.xsd</schema>
  </entry>

  <entry id="CONFIG_ATM_FILE">
    <type>char</type>
    <default_value>unset</default_value>
    <values>
      <value>$COMP_ROOT_DIR_ATM/cime_config/config_component.xml</value>
    </values>
    <group>case_last</group>
    <file>env_case.xml</file>
    <desc>file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT)</desc>
    <schema version="2.0">$CIMEROOT/config/xml_schemas/entry_id.xsd</schema>
    <schema version="3.0">$CIMEROOT/config/xml_schemas/entry_id_version3.xsd</schema>
  </entry>

  <entry id="CONFIG_LND_FILE">
    <type>char</type>
    <values>
      <value>$COMP_ROOT_DIR_LND/cime_config/config_component.xml</value>
    </values>
    <group>case_last</group>
    <file>env_case.xml</file>
    <desc>file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT)</desc>
    <schema version="2.0">$CIMEROOT/config/xml_schemas/entry_id.xsd</schema>
    <schema version="3.0">$CIMEROOT/config/xml_schemas/entry_id_version3.xsd</schema>
  </entry>

  <entry id="CONFIG_ROF_FILE">
    <type>char</type>
    <values>
      <value>$COMP_ROOT_DIR_ROF/cime_config/config_component.xml</value>
    </values>
    <group>case_last</group>
    <file>env_case.xml</file>
    <desc>file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT)</desc>
    <schema version="2.0">$CIMEROOT/config/xml_schemas/entry_id.xsd</schema>
    <schema version="3.0">$CIMEROOT/config/xml_schemas/entry_id_version3.xsd</schema>
  </entry>

  <entry id="CONFIG_ICE_FILE">
    <type>char</type>
    <values>
      <value>$COMP_ROOT_DIR_ICE/cime_config/config_component.xml</value>
    </values>
    <group>case_last</group>
    <file>env_case.xml</file>
    <desc>file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT)</desc>
    <schema version="2.0">$CIMEROOT/config/xml_schemas/entry_id.xsd</schema>
    <schema version="3.0">$CIMEROOT/config/xml_schemas/entry_id_version3.xsd</schema>
  </entry>

  <entry id="CONFIG_OCN_FILE">
    <type>char</type>
    <values>
      <value>$COMP_ROOT_DIR_OCN/cime_config/config_component.xml</value>
    </values>
    <group>case_last</group>
    <file>env_case.xml</file>
    <desc>file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT)</desc>
    <schema version="2.0">$CIMEROOT/config/xml_schemas/entry_id.xsd</schema>
    <schema version="3.0">$CIMEROOT/config/xml_schemas/entry_id_version3.xsd</schema>
  </entry>

  <entry id="CONFIG_GLC_FILE">
    <type>char</type>
    <values>
      <value>$COMP_ROOT_DIR_GLC/cime_config/config_component.xml</value>
    </values>
    <group>case_last</group>
    <file>env_case.xml</file>
    <desc>file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT)</desc>
    <schema version="2.0">$CIMEROOT/config/xml_schemas/entry_id.xsd</schema>
    <schema version="3.0">$CIMEROOT/config/xml_schemas/entry_id_version3.xsd</schema>
  </entry>

  <entry id="CONFIG_IAC_FILE">
    <type>char</type>
    <default_value>unset</default_value>
    <values>
      <value>$COMP_ROOT_DIR_IAC/cime_config/config_component.xml</value>
    </values>
    <group>case_last</group>
    <file>env_case.xml</file>
    <desc>file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT)</desc>
    <schema version="2.0">$CIMEROOT/config/xml_schemas/entry_id.xsd</schema>
    <schema version="3.0">$CIMEROOT/config/xml_schemas/entry_id_version3.xsd</schema>
  </entry>

  <entry id="CONFIG_WAV_FILE">
    <type>char</type>
    <values>
      <value>$COMP_ROOT_DIR_WAV/cime_config/config_component.xml</value>
    </values>
    <group>case_last</group>
    <file>env_case.xml</file>
    <desc>file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT)</desc>
    <schema version="2.0">$CIMEROOT/config/xml_schemas/entry_id.xsd</schema>
    <schema version="3.0">$CIMEROOT/config/xml_schemas/entry_id_version3.xsd</schema>
  </entry>

  <entry id="CONFIG_ESP_FILE">
    <type>char</type>
    <values>
      <value >$COMP_ROOT_DIR_ESP/cime_config/config_component.xml</value>
    </values>
    <group>case_last</group>
    <file>env_case.xml</file>
    <desc>file containing specification of component specific definitions and values(for documentation only - DO NOT EDIT)</desc>
    <schema version="2.0">$CIMEROOT/config/xml_schemas/entry_id.xsd</schema>
    <schema version="3.0">$CIMEROOT/config/xml_schemas/entry_id_version3.xsd</schema>
  </entry>

</entry_id>
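
Each entry above defines a variable that is written into one of the CASEROOT env_*.xml files when a case is created; where a <values> block is present, the per-component value matching the active component is selected. As a sketch only (the id, path, and description below are hypothetical), a new file-pointer entry would follow the same pattern of id, type, default_value or values, group, file, and desc children:

<entry id="MY_SPEC_FILE">
  <type>char</type>
  <default_value>$CIMEROOT/config/$MODEL/config_myspec.xml</default_value>
  <group>case_last</group>
  <file>env_case.xml</file>
  <desc>hypothetical entry illustrating the required children</desc>
</entry>

In a created case, the resolved value of any of these entries can normally be inspected with ./xmlquery, for example ./xmlquery GRIDS_SPEC_FILE.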

CESM XML settings for defining supported grids.

<?xml version="1.0"?>

<grid_data version="2.1" xmlns:xi="http://www.w3.org/2001/XInclude">

  <help>
    =========================================
    GRID naming convention
    =========================================
    The notation for the grid longname is
        a%name_l%name_oi%name_r%name_m%mask_g%name_w%name
    where
        a% => atm, l% => lnd, oi% => ocn/ice, r% => river, m% => mask, g% => glc, w% => wav

    Supported out-of-the-box grid configurations are given via alias specification in
    the file "config_grids.xml". Each grid alias can also be associated with the
    following optional attributes:

    compset       (Regular expression for compset matches that are required for this grid)
    not_compset   (Regular expression for compset matches that are not permitted for this grid)

    Using the alias and the optional "compset" and "not_compset" attributes, a grid longname is created.
    Note that the mask is for information only and is not an attribute of the grid.
    By default, if the mask is not specified below, it is set to the ocnice grid.
    If there is no ocnice grid (such as for single-column configurations), the mask is null since it does not mean anything.
  </help>
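
  <!-- Illustrative expansion (not part of the original file): an alias such as f19_g17
       (atm/lnd 1.9x2.5, ocnice gx1v7) combined with, say, the MOSART, CISM2, and WW3
       defaults below would typically yield a longname along the lines of
       a%1.9x2.5_l%1.9x2.5_oi%gx1v7_r%r05_m%gx1v7_g%gland4_w%ww3a
       where the rof/glc/wav pieces come from whichever model_grid_defaults entries
       match the compset. -->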

  <grids>
    <model_grid_defaults>
      <grid name="atm"    compset="SATM"  >null</grid>
      <grid name="lnd"    compset="SLND"  >null</grid>
      <grid name="ocnice" compset="SOCN"  >null</grid>
      <grid name="rof"    compset="SROF"  >null</grid>
      <grid name="rof"    compset="DWAV"  >rx1</grid>
      <grid name="rof"    compset="RTM"	  >r05</grid>
      <grid name="rof"    compset="MOSART">r05</grid>
      <grid name="rof"    compset="DROF"  >rx1</grid>
      <grid name="rof"    compset="DROF%CPLHIST">r05</grid>
      <grid name="rof"    compset="XROF"  >r05</grid>
      <grid name="glc"	  compset="SGLC"  >null</grid>
      <grid name="glc"	  compset="CISM1" >gland5UM</grid>
      <grid name="glc"	  compset="CISM2" >gland4</grid>
      <grid name="glc"    compset="XGLC"  >gland4</grid>
      <grid name="wav"	  compset="SWAV"  >null</grid>
      <grid name="wav"	  compset="DWAV"  >ww3a</grid>
      <grid name="wav"	  compset="WW3"	  >ww3a</grid>
      <grid name="wav"    compset="XWAV"  >ww3a</grid>
      <grid name="iac"    compset="SIAC"  >null</grid>
    </model_grid_defaults>

    <model_grid alias="g16_g16" compset="DATM.+DROF">
      <grid name="atm">gx1v6</grid>
      <grid name="lnd">gx1v6</grid>
      <grid name="ocnice">gx1v6</grid>
      <support>Non-standard grid for testing of the interpolation in DATM rather than coupler</support>
    </model_grid>

    <model_grid alias="g17_g17" compset="DATM.+DROF">
      <grid name="atm">gx1v7</grid>
      <grid name="lnd">gx1v7</grid>
      <grid name="ocnice">gx1v7</grid>
      <support>Non-standard grid for testing of the interpolation in DATM rather than coupler</support>
    </model_grid>

    <model_grid alias="1D_1D" compset="DATM.+DROF">
      <grid name="atm">01col</grid>
      <grid name="lnd">01col</grid>
      <support>Non-standard grid for running POP in true 1D mode</support>
    </model_grid>

    <model_grid alias="CLM_USRDAT" compset="DATM.+CLM">
      <grid name="atm">CLM_USRDAT</grid>
      <grid name="lnd">CLM_USRDAT</grid>
      <grid name="rof">null</grid>
    </model_grid>

    <model_grid alias="1x1_numaIA" compset="DATM.+CLM">
      <grid name="atm">1x1_numaIA</grid>
      <grid name="lnd">1x1_numaIA</grid>
      <grid name="rof">null</grid>
    </model_grid>

    <model_grid alias="1x1_brazil" compset="DATM.+CLM">
      <grid name="atm">1x1_brazil</grid>
      <grid name="lnd">1x1_brazil</grid>
      <grid name="rof">null</grid>
    </model_grid>

    <model_grid alias="1x1_smallvilleIA" compset="DATM.+CLM">
      <grid name="atm">1x1_smallvilleIA</grid>
      <grid name="lnd">1x1_smallvilleIA</grid>
      <grid name="rof">null</grid>
    </model_grid>

    <model_grid alias="1x1_camdenNJ" compset="DATM.+CLM">
      <grid name="atm">1x1_camdenNJ</grid>
      <grid name="lnd">1x1_camdenNJ</grid>
      <grid name="rof">null</grid>
    </model_grid>

    <model_grid alias="1x1_mexicocityMEX" compset="DATM.+CLM">
      <grid name="atm">1x1_mexicocityMEX</grid>
      <grid name="lnd">1x1_mexicocityMEX</grid>
      <grid name="rof">null</grid>
    </model_grid>

    <model_grid alias="1x1_vancouverCAN" compset="DATM.+CLM">
      <grid name="atm">1x1_vancouverCAN</grid>
      <grid name="lnd">1x1_vancouverCAN</grid>
      <grid name="rof">null</grid>
    </model_grid>

    <model_grid alias="1x1_urbanc_alpha" compset="DATM.+CLM">
      <grid name="atm">1x1_urbanc_alpha</grid>
      <grid name="lnd">1x1_urbanc_alpha</grid>
      <grid name="rof">null</grid>
    </model_grid>

    <model_grid alias="5x5_amazon" compset="DATM.+CLM">
      <grid name="atm">5x5_amazon</grid>
      <grid name="lnd">5x5_amazon</grid>
      <grid name="rof">null</grid>
    </model_grid>

    <!-- Regional NLDAS-2 grid over the U.S. (0.125 degree resolution;
         25-53N, 235-293E), with mask from NLDAS-2 atmospheric drivers -->
    <model_grid alias="nldas2_rnldas2_mnldas2" compset="DATM.+CLM">
      <grid name="atm">0.125nldas2</grid>
      <grid name="lnd">0.125nldas2</grid>
      <grid name="ocnice">0.125nldas2</grid>
      <grid name="rof">0.125nldas2</grid>
      <mask>nldas2</mask>
    </model_grid>

    <model_grid alias="hcru_hcru" compset="DATM.+CLM">
      <grid name="atm">360x720cru</grid>
      <grid name="lnd">360x720cru</grid>
    </model_grid>

    <!-- eulerian grids -->

    <model_grid alias="T31_g37">
      <grid name="atm">T31</grid>
      <grid name="lnd">T31</grid>
      <grid name="ocnice">gx3v7</grid>
      <mask>gx3v7</mask>
    </model_grid>

    <model_grid alias="T31_g37_gl4" compset="_CISM">
      <grid name="atm">T31</grid>
      <grid name="lnd">T31</grid>
      <grid name="ocnice">gx3v7</grid>
      <grid name="glc">gland4</grid>
      <mask>gx3v7</mask>
    </model_grid>

    <model_grid alias="T31_g37_gl20" compset="_CISM">
      <grid name="atm">T31</grid>
      <grid name="lnd">T31</grid>
      <grid name="ocnice">gx3v7</grid>
      <grid name="glc">gland20</grid>
      <mask>gx3v7</mask>
    </model_grid>

    <model_grid alias="T31_g37_gl5" compset="_CISM">
      <grid name="atm">T31</grid>
      <grid name="lnd">T31</grid>
      <grid name="ocnice">gx3v7</grid>
      <grid name="glc">gland5UM</grid>
      <mask>gx3v7</mask>
    </model_grid>

    <model_grid alias="T42_T42_musgs" not_compset="_POP">
      <grid name="atm">T42</grid>
      <grid name="lnd">T42</grid>
      <grid name="ocnice">T42</grid>
      <mask>usgs</mask>
    </model_grid>

    <model_grid alias="T42_T42" not_compset="_POP">
      <grid name="atm">T42</grid>
      <grid name="lnd">T42</grid>
      <grid name="ocnice">T42</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="T42_T42_mg16" not_compset="_POP">
      <grid name="atm">T42</grid>
      <grid name="lnd">T42</grid>
      <grid name="ocnice">T42</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="T42_T42_mg17" not_compset="_POP">
      <grid name="atm">T42</grid>
      <grid name="lnd">T42</grid>
      <grid name="ocnice">T42</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="T5_T5_mg37" not_compset="_POP">
      <grid name="atm">T5</grid>
      <grid name="lnd">T5</grid>
      <grid name="ocnice">T5</grid>
      <mask>gx3v7</mask>
    </model_grid>

    <model_grid alias="T85_T85_mg16" not_compset="_POP">
      <grid name="atm">T85</grid>
      <grid name="lnd">T85</grid>
      <grid name="ocnice">T85</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="T85_T85_mg17" not_compset="_POP">
      <grid name="atm">T85</grid>
      <grid name="lnd">T85</grid>
      <grid name="ocnice">T85</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="T85_T85_musgs" not_compset="_POP">
      <grid name="atm">T85</grid>
      <grid name="lnd">T85</grid>
      <grid name="ocnice">T85</grid>
      <mask>usgs</mask>
    </model_grid>

    <model_grid alias="T85_f09_t12">
      <grid name="atm">T85</grid>
      <grid name="lnd">0.9x1.25</grid>
      <grid name="ocnice">tx0.1v2</grid>
      <mask>tx0.1v2</mask>
    </model_grid>

    <model_grid alias="T341_f02_t12">
      <grid name="atm">T341</grid>
      <grid name="lnd">0.23x0.31</grid>
      <grid name="ocnice">tx0.1v2</grid>
      <mask>tx0.1v2</mask>
    </model_grid>

    <model_grid alias="T62_g37" not_compset="_CAM">
      <grid name="atm">T62</grid>
      <grid name="lnd">T62</grid>
      <grid name="ocnice">gx3v7</grid>
      <mask>gx3v7</mask>
    </model_grid>

    <model_grid alias="T62_s11" not_compset="_CAM">
      <grid name="atm">T62</grid>
      <grid name="lnd">T62</grid>
      <grid name="ocnice">tx1v1</grid>
      <mask>tx1v1</mask>
    </model_grid>

    <model_grid alias="T62_n13" not_compset="_CAM">
      <grid name="atm">T62</grid>
      <grid name="lnd">T62</grid>
      <grid name="ocnice">tn1v3</grid>
      <mask>tn1v3</mask>
    </model_grid>

    <model_grid alias="T62_n0253" not_compset="_CAM">
      <grid name="atm">T62</grid>
      <grid name="lnd">T62</grid>
      <grid name="ocnice">tn0.25v3</grid>
      <mask>tn0.25v3</mask>
    </model_grid>

    <model_grid alias="T62_t12" not_compset="_CAM">
      <grid name="atm">T62</grid>
      <grid name="lnd">T62</grid>
      <grid name="ocnice">tx0.1v2</grid>
      <mask>tx0.1v2</mask>
    </model_grid>

    <model_grid alias="T62_t13" not_compset="_CAM">
      <grid name="atm">T62</grid>
      <grid name="lnd">T62</grid>
      <grid name="ocnice">tx0.1v3</grid>
      <mask>tx0.1v3</mask>
    </model_grid>

    <model_grid alias="TL319_g17" compset="DROF%JRA-1p4">
      <grid name="atm">TL319</grid>
      <grid name="lnd">TL319</grid>
      <grid name="ocnice">gx1v7</grid>
      <grid name="rof">JRA025v2</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="TL319_g17" not_compset="_CAM">
      <grid name="atm">TL319</grid>
      <grid name="lnd">TL319</grid>
      <grid name="ocnice">gx1v7</grid>
      <grid name="rof">JRA025</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="TL319_t061" not_compset="_CAM">
      <grid name="atm">TL319</grid>
      <grid name="lnd">TL319</grid>
      <grid name="ocnice">tx0.66v1</grid>
      <grid name="rof">JRA025</grid>
    </model_grid>

    <model_grid alias="TL319_t12" not_compset="_CAM">
      <grid name="atm">TL319</grid>
      <grid name="lnd">TL319</grid>
      <grid name="ocnice">tx0.1v2</grid>
      <grid name="rof">JRA025</grid>
    </model_grid>

    <model_grid alias="TL319_t13" compset="DROF%JRA-1p4">
      <grid name="atm">TL319</grid>
      <grid name="lnd">TL319</grid>
      <grid name="ocnice">tx0.1v3</grid>
      <grid name="rof">JRA025v2</grid>
    </model_grid>

    <model_grid alias="TL319_t13" not_compset="_CAM">
      <grid name="atm">TL319</grid>
      <grid name="lnd">TL319</grid>
      <grid name="ocnice">tx0.1v3</grid>
      <grid name="rof">JRA025</grid>
    </model_grid>

    <model_grid alias="T62_t061" not_compset="_CAM">
      <grid name="atm">T62</grid>
      <grid name="lnd">T62</grid>
      <grid name="ocnice">tx0.66v1</grid>
    </model_grid>

    <model_grid alias="T62_t025" not_compset="_CAM">
      <grid name="atm">T62</grid>
      <grid name="lnd">T62</grid>
      <grid name="ocnice">tx0.25v1</grid>
    </model_grid>
    <model_grid alias="f09_t061">
      <grid name="atm">0.9x1.25</grid>
      <grid name="lnd">0.9x1.25</grid>
      <grid name="ocnice">tx0.66v1</grid>
    </model_grid>

    <model_grid alias="T62_g16" not_compset="_CAM">
      <grid name="atm">T62</grid>
      <grid name="lnd">T62</grid>
      <grid name="ocnice">gx1v6</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="T62_g17" not_compset="_CAM">
      <grid name="atm">T62</grid>
      <grid name="lnd">T62</grid>
      <grid name="ocnice">gx1v7</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="T62_oQU120" not_compset="_CAM">
      <grid name="atm">T62</grid>
      <grid name="lnd">T62</grid>
      <grid name="ocnice">oQU120</grid>
      <mask>oQU120</mask>
    </model_grid>

    <!-- finite volume grids -->

    <model_grid alias="f02_g16">
      <grid name="atm">0.23x0.31</grid>
      <grid name="lnd">0.23x0.31</grid>
      <grid name="ocnice">gx1v6</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="f02_g17">
      <grid name="atm">0.23x0.31</grid>
      <grid name="lnd">0.23x0.31</grid>
      <grid name="ocnice">gx1v7</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="f02_n13">
      <grid name="atm">0.23x0.31</grid>
      <grid name="lnd">0.23x0.31</grid>
      <grid name="ocnice">tn1v3</grid>
      <mask>tn1v3</mask>
    </model_grid>

    <model_grid alias="f02_n0253">
      <grid name="atm">0.23x0.31</grid>
      <grid name="lnd">0.23x0.31</grid>
      <grid name="ocnice">tn0.25v3</grid>
      <mask>tn0.25v3</mask>
    </model_grid>

    <model_grid alias="f02_t12">
      <grid name="atm">0.23x0.31</grid>
      <grid name="lnd">0.23x0.31</grid>
      <grid name="ocnice">tx0.1v2</grid>
      <mask>tx0.1v2</mask>
    </model_grid>

    <model_grid alias="f05_g16">
      <grid name="atm">0.47x0.63</grid>
      <grid name="lnd">0.47x0.63</grid>
      <grid name="ocnice">gx1v6</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="f05_g17">
      <grid name="atm">0.47x0.63</grid>
      <grid name="lnd">0.47x0.63</grid>
      <grid name="ocnice">gx1v7</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="f05_t12">
      <grid name="atm">0.47x0.63</grid>
      <grid name="lnd">0.47x0.63</grid>
      <grid name="ocnice">tx0.1v2</grid>
      <mask>tx0.1v2</mask>
    </model_grid>

    <model_grid alias="f09_g16">
      <grid name="atm">0.9x1.25</grid>
      <grid name="lnd">0.9x1.25</grid>
      <grid name="ocnice">gx1v6</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="f09_g17">
      <grid name="atm">0.9x1.25</grid>
      <grid name="lnd">0.9x1.25</grid>
      <grid name="ocnice">gx1v7</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="f09_n13">
      <grid name="atm">0.9x1.25</grid>
      <grid name="lnd">0.9x1.25</grid>
      <grid name="ocnice">tn1v3</grid>
      <mask>tn1v3</mask>
    </model_grid>

    <model_grid alias="f09_n0253">
      <grid name="atm">0.9x1.25</grid>
      <grid name="lnd">0.9x1.25</grid>
      <grid name="ocnice">tn0.25v3</grid>
      <mask>tn0.25v3</mask>
    </model_grid>

    <model_grid alias="f09_g16_gl4" compset="_CISM">
      <grid name="atm">0.9x1.25</grid>
      <grid name="lnd">0.9x1.25</grid>
      <grid name="ocnice">gx1v6</grid>
      <grid name="glc">gland4</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="f09_g17_gl4" compset="_CISM">
      <grid name="atm">0.9x1.25</grid>
      <grid name="lnd">0.9x1.25</grid>
      <grid name="ocnice">gx1v7</grid>
      <grid name="glc">gland4</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="f09_g16_gl20" compset="_CISM">
      <grid name="atm">0.9x1.25</grid>
      <grid name="lnd">0.9x1.25</grid>
      <grid name="ocnice">gx1v6</grid>
      <grid name="glc">gland20</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="f09_g17_gl20" compset="_CISM">
      <grid name="atm">0.9x1.25</grid>
      <grid name="lnd">0.9x1.25</grid>
      <grid name="ocnice">gx1v7</grid>
      <grid name="glc">gland20</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="f09_g16_gl5" compset="_CISM">
      <grid name="atm">0.9x1.25</grid>
      <grid name="lnd">0.9x1.25</grid>
      <grid name="ocnice">gx1v6</grid>
      <grid name="glc">gland5UM</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="f09_g17_gl5" compset="_CISM">
      <grid name="atm">0.9x1.25</grid>
      <grid name="lnd">0.9x1.25</grid>
      <grid name="ocnice">gx1v7</grid>
      <grid name="glc">gland5UM</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="f09_f09_mnull" compset="_DOCN%SAQUAP|DOCN%DAQUAP" >
      <grid name="atm">0.9x1.25</grid>
      <grid name="lnd">0.9x1.25</grid>
      <grid name="ocnice">0.9x1.25</grid>
      <mask>null</mask>
    </model_grid>

    <model_grid alias="f09_f09_mg16" not_compset="_POP" >
      <grid name="atm">0.9x1.25</grid>
      <grid name="lnd">0.9x1.25</grid>
      <grid name="ocnice">0.9x1.25</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="f09_f09_mg17" not_compset="_POP" >
      <grid name="atm">0.9x1.25</grid>
      <grid name="lnd">0.9x1.25</grid>
      <grid name="ocnice">0.9x1.25</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="f05_f05_mg17" not_compset="_POP" >
      <grid name="atm">0.47x0.63</grid>
      <grid name="lnd">0.47x0.63</grid>
      <grid name="ocnice">0.47x0.63</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="f09_f09_gl5" compset="_CISM" not_compset="_POP">
      <grid name="atm">0.9x1.25</grid>
      <grid name="lnd">0.9x1.25</grid>
      <grid name="ocnice">0.9x1.25</grid>
      <grid name="glc">gland5UM</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="f09_f09_gl5_mg16" compset="_CISM" not_compset="_POP">
      <grid name="atm">0.9x1.25</grid>
      <grid name="lnd">0.9x1.25</grid>
      <grid name="ocnice">0.9x1.25</grid>
      <grid name="glc">gland5UM</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="f09_f09_gl5_mg17" compset="_CISM" not_compset="_POP">
      <grid name="atm">0.9x1.25</grid>
      <grid name="lnd">0.9x1.25</grid>
      <grid name="ocnice">0.9x1.25</grid>
      <grid name="glc">gland5UM</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="f19_g16">
      <grid name="atm">1.9x2.5</grid>
      <grid name="lnd">1.9x2.5</grid>
      <grid name="ocnice">gx1v6</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="f19_g17">
      <grid name="atm">1.9x2.5</grid>
      <grid name="lnd">1.9x2.5</grid>
      <grid name="ocnice">gx1v7</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="f19_g16_r01">
      <grid name="atm">1.9x2.5</grid>
      <grid name="lnd">1.9x2.5</grid>
      <grid name="ocnice">gx1v6</grid>
      <grid name="rof">r01</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="f19_g17_r01">
      <grid name="atm">1.9x2.5</grid>
      <grid name="lnd">1.9x2.5</grid>
      <grid name="ocnice">gx1v7</grid>
      <grid name="rof">r01</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="f19_g16_gl4" compset="_CISM">
      <grid name="atm">1.9x2.5</grid>
      <grid name="lnd">1.9x2.5</grid>
      <grid name="ocnice">gx1v6</grid>
      <grid name="glc">gland4</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="f19_g17_gl4" compset="_CISM">
      <grid name="atm">1.9x2.5</grid>
      <grid name="lnd">1.9x2.5</grid>
      <grid name="ocnice">gx1v7</grid>
      <grid name="glc">gland4</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="f19_g16_gl5" compset="_CISM">
      <grid name="atm">1.9x2.5</grid>
      <grid name="lnd">1.9x2.5</grid>
      <grid name="ocnice">gx1v6</grid>
      <grid name="glc">gland5UM</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="f19_g17_gl5" compset="_CISM">
      <grid name="atm">1.9x2.5</grid>
      <grid name="lnd">1.9x2.5</grid>
      <grid name="ocnice">gx1v7</grid>
      <grid name="glc">gland5UM</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="f19_f19_gl5" compset="_CISM" not_compset="_POP">
      <grid name="atm">1.9x2.5</grid>
      <grid name="lnd">1.9x2.5</grid>
      <grid name="ocnice">1.9x2.5</grid>
      <grid name="glc">gland5UM</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="f19_f19_gl5_mg16" compset="_CISM" not_compset="_POP">
      <grid name="atm">1.9x2.5</grid>
      <grid name="lnd">1.9x2.5</grid>
      <grid name="ocnice">1.9x2.5</grid>
      <grid name="glc">gland5UM</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="f19_f19_gl5_mg17" compset="_CISM" not_compset="_POP">
      <grid name="atm">1.9x2.5</grid>
      <grid name="lnd">1.9x2.5</grid>
      <grid name="ocnice">1.9x2.5</grid>
      <grid name="glc">gland5UM</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="f19_f19_mg16" not_compset="_POP">
      <grid name="atm">1.9x2.5</grid>
      <grid name="lnd">1.9x2.5</grid>
      <grid name="ocnice">1.9x2.5</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="f19_f19" not_compset="_POP">
      <grid name="atm">1.9x2.5</grid>
      <grid name="lnd">1.9x2.5</grid>
      <grid name="ocnice">1.9x2.5</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="f19_f19_mnull" compset="_DOCN%SAQUAP|DOCN%DAQUAP" >
      <grid name="atm">1.9x2.5</grid>
      <grid name="lnd">1.9x2.5</grid>
      <grid name="ocnice">1.9x2.5</grid>
      <mask>null</mask>
    </model_grid>

    <model_grid alias="f19_f19_mg17" not_compset="_POP">
      <grid name="atm">1.9x2.5</grid>
      <grid name="lnd">1.9x2.5</grid>
      <grid name="ocnice">1.9x2.5</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="f45_g37">
      <grid name="atm">4x5</grid>
      <grid name="lnd">4x5</grid>
      <grid name="ocnice">gx3v7</grid>
      <mask>gx3v7</mask>
    </model_grid>

    <model_grid alias="f02_f02_mg16" not_compset="_POP">
      <grid name="atm">0.23x0.31</grid>
      <grid name="lnd">0.23x0.31</grid>
      <grid name="ocnice">0.23x0.31</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="f02_f02_mg17" not_compset="_POP">
      <grid name="atm">0.23x0.31</grid>
      <grid name="lnd">0.23x0.31</grid>
      <grid name="ocnice">0.23x0.31</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="f25_f25_mg16" not_compset="_POP">
      <grid name="atm">2.5x3.33</grid>
      <grid name="lnd">2.5x3.33</grid>
      <grid name="ocnice">2.5x3.33</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="f25_f25_mg17" not_compset="_POP">
      <grid name="atm">2.5x3.33</grid>
      <grid name="lnd">2.5x3.33</grid>
      <grid name="ocnice">2.5x3.33</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="f45_f45_mg37" not_compset="_POP">
      <grid name="atm">4x5</grid>
      <grid name="lnd">4x5</grid>
      <grid name="ocnice">4x5</grid>
      <mask>gx3v7</mask>
    </model_grid>

    <model_grid alias="f10_f10_mg37" not_compset="_POP">
      <grid name="atm">10x15</grid>
      <grid name="lnd">10x15</grid>
      <grid name="ocnice">10x15</grid>
      <mask>gx3v7</mask>
    </model_grid>

    <model_grid alias="f10_f10_musgs" not_compset="_POP">
      <grid name="atm">10x15</grid>
      <grid name="lnd">10x15</grid>
      <grid name="ocnice">10x15</grid>
      <mask>usgs</mask>
    </model_grid>

    <model_grid alias="f10_g37">
      <grid name="atm">10x15</grid>
      <grid name="lnd">10x15</grid>
      <grid name="ocnice">gx3v7</grid>
      <mask>gx3v7</mask>
    </model_grid>

    <!--  spectral element grids -->

    <model_grid alias="ne5_ne5_mg37" not_compset="_POP">
      <grid name="atm">ne5np4</grid>
      <grid name="lnd">ne5np4</grid>
      <grid name="ocnice">ne5np4</grid>
      <mask>gx3v7</mask>
    </model_grid>

    <model_grid alias="ne16_g17">
      <grid name="atm">ne16np4</grid>
      <grid name="lnd">ne16np4</grid>
      <grid name="ocnice">gx1v7</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="ne16_ne16_mg17" not_compset="_POP">
      <grid name="atm">ne16np4</grid>
      <grid name="lnd">ne16np4</grid>
      <grid name="ocnice">ne16np4</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="ne30_g16">
      <grid name="atm">ne30np4</grid>
      <grid name="lnd">ne30np4</grid>
      <grid name="ocnice">gx1v6</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="ne30_g17">
      <grid name="atm">ne30np4</grid>
      <grid name="lnd">ne30np4</grid>
      <grid name="ocnice">gx1v7</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="ne30pg3_g17">
      <grid name="atm">ne30pg3</grid>
      <grid name="lnd">ne30pg3</grid>
      <grid name="ocnice">gx1v7</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="ne30_f19_g16">
      <grid name="atm">ne30np4</grid>
      <grid name="lnd">1.9x2.5</grid>
      <grid name="ocnice">gx1v6</grid>
      <support>For testing tri-grid</support>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="ne30_f19_g17">
      <grid name="atm">ne30np4</grid>
      <grid name="lnd">1.9x2.5</grid>
      <grid name="ocnice">gx1v7</grid>
      <support>For testing tri-grid</support>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="ne30_f09_g16">
      <grid name="atm">ne30np4</grid>
      <grid name="lnd">0.9x1.25</grid>
      <grid name="ocnice">gx1v6</grid>
      <support>For testing tri-grid</support>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="ne30_f09_g17">
      <grid name="atm">ne30np4</grid>
      <grid name="lnd">0.9x1.25</grid>
      <grid name="ocnice">gx1v7</grid>
      <support>For testing tri-grid</support>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="ne30_ne30_mg16" not_compset="_POP">
      <grid name="atm">ne30np4</grid>
      <grid name="lnd">ne30np4</grid>
      <grid name="ocnice">ne30np4</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="ne30_ne30_mg17" not_compset="_POP">
      <grid name="atm">ne30np4</grid>
      <grid name="lnd">ne30np4</grid>
      <grid name="ocnice">ne30np4</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="ne60_g16">
      <grid name="atm">ne60np4</grid>
      <grid name="lnd">ne60np4</grid>
      <grid name="ocnice">gx1v6</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="ne60_g17">
      <grid name="atm">ne60np4</grid>
      <grid name="lnd">ne60np4</grid>
      <grid name="ocnice">gx1v7</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="ne60_ne60_mg16" not_compset="_POP">
      <grid name="atm">ne60np4</grid>
      <grid name="lnd">ne60np4</grid>
      <grid name="ocnice">ne60np4</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="ne120_g16">
      <grid name="atm">ne120np4</grid>
      <grid name="lnd">ne120np4</grid>
      <grid name="ocnice">gx1v6</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="ne120_g17">
      <grid name="atm">ne120np4</grid>
      <grid name="lnd">ne120np4</grid>
      <grid name="ocnice">gx1v7</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="ne120_t12">
      <grid name="atm">ne120np4</grid>
      <grid name="lnd">ne120np4</grid>
      <grid name="ocnice">tx0.1v2</grid>
      <mask>tx0.1v2</mask>
    </model_grid>

    <model_grid alias="ne120_ne120_mg16" not_compset="_POP">
      <grid name="atm">ne120np4</grid>
      <grid name="lnd">ne120np4</grid>
      <grid name="ocnice">ne120np4</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="ne120_ne120_mg17" not_compset="_POP">
      <grid name="atm">ne120np4</grid>
      <grid name="lnd">ne120np4</grid>
      <grid name="ocnice">ne120np4</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="ne240_f02_g16">
      <grid name="atm">ne240np4</grid>
      <grid name="lnd">0.23x0.31</grid>
      <grid name="ocnice">gx1v6</grid>
      <support>For testing high resolution tri-grid</support>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="ne240_f02_g17">
      <grid name="atm">ne240np4</grid>
      <grid name="lnd">0.23x0.31</grid>
      <grid name="ocnice">gx1v7</grid>
      <support>For testing high resolution tri-grid</support>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="ne240_t12">
      <grid name="atm">ne240np4</grid>
      <grid name="lnd">ne240np4</grid>
      <grid name="ocnice">tx0.1v2</grid>
      <mask>tx0.1v2</mask>
    </model_grid>

    <model_grid alias="ne240_ne240_mg16" not_compset="_POP">
      <grid name="atm">ne240np4</grid>
      <grid name="lnd">ne240np4</grid>
      <grid name="ocnice">ne240np4</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="ne240_ne240_mg17" not_compset="_POP">
      <grid name="atm">ne240np4</grid>
      <grid name="lnd">ne240np4</grid>
      <grid name="ocnice">ne240np4</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <!--  spectral element grids with 2x2 FVM physics grid -->

    <model_grid alias="ne30pg2_ne30pg2_mg17">
      <grid name="atm">ne30np4.pg2</grid>
      <grid name="lnd">ne30np4.pg2</grid>
      <grid name="ocnice">ne30np4.pg2</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="ne60pg2_ne60pg2_mg17" not_compset="_POP|_CLM">
      <grid name="atm">ne60np4.pg2</grid>
      <grid name="lnd">ne60np4.pg2</grid>
      <grid name="ocnice">ne60np4.pg2</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="ne120pg2_ne120pg2_mg17" not_compset="_POP|_CLM">
      <grid name="atm">ne120np4.pg2</grid>
      <grid name="lnd">ne120np4.pg2</grid>
      <grid name="ocnice">ne120np4.pg2</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="ne120pg2_ne120pg2_mt12">
      <grid name="atm">ne120np4.pg2</grid>
      <grid name="lnd">ne120np4.pg2</grid>
      <grid name="ocnice">ne120np4.pg2</grid>
      <mask>tx0.1v2</mask>
    </model_grid>

    <model_grid alias="ne240pg2_ne240pg2_mg17" not_compset="_POP|_CLM">
      <grid name="atm">ne240np4.pg2</grid>
      <grid name="lnd">ne240np4.pg2</grid>
      <grid name="ocnice">ne240np4.pg2</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <!--  spectral element grids with 3x3 FVM physics grid -->

    <model_grid alias="ne5pg3_ne5pg3_mg37" not_compset="_POP">
      <grid name="atm">ne5np4.pg3</grid>
      <grid name="lnd">ne5np4.pg3</grid>
      <grid name="ocnice">ne5np4.pg3</grid>
      <mask>gx3v7</mask>
    </model_grid>

    <model_grid alias="ne16pg3_ne16pg3_mg17" not_compset="_POP|_CLM">
      <grid name="atm">ne16np4.pg3</grid>
      <grid name="lnd">ne16np4.pg3</grid>
      <grid name="ocnice">ne16np4.pg3</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="ne30pg3_ne30pg3_mg17" not_compset="_POP">
      <grid name="atm">ne30pg3</grid>
      <grid name="lnd">ne30pg3</grid>
      <grid name="ocnice">ne30pg3</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="ne60pg3_ne60pg3_mg17" not_compset="_POP|_CLM">
      <grid name="atm">ne60np4.pg3</grid>
      <grid name="lnd">ne60np4.pg3</grid>
      <grid name="ocnice">ne60np4.pg3</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="ne120pg3_ne120pg3_mg17" not_compset="_POP">
      <grid name="atm">ne120np4.pg3</grid>
      <grid name="lnd">ne120np4.pg3</grid>
      <grid name="ocnice">ne120np4.pg3</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="ne120pg3_ne120pg3_mt13" not_compset="_POP">
      <grid name="atm">ne120np4.pg3</grid>
      <grid name="lnd">ne120np4.pg3</grid>
      <grid name="ocnice">ne120np4.pg3</grid>
      <mask>tx0.1v3</mask>
    </model_grid>

    <model_grid alias="ne240pg3_ne240pg3_mg17" not_compset="_POP|_CLM">
      <grid name="atm">ne240np4.pg3</grid>
      <grid name="lnd">ne240np4.pg3</grid>
      <grid name="ocnice">ne240np4.pg3</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="ne120pg3_g17">
      <grid name="atm">ne120np4.pg3</grid>
      <grid name="lnd">ne120np4.pg3</grid>
      <grid name="ocnice">gx1v7</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="ne120pg3_t13">
      <grid name="atm">ne120np4.pg3</grid>
      <grid name="lnd">ne120np4.pg3</grid>
      <grid name="ocnice">gx1v7</grid>
      <mask>tx0.1v3</mask>
    </model_grid>

    <!--  spectral element grids with 4x4 FVM physics grid -->

    <model_grid alias="ne30pg4_ne30pg4_mg17" not_compset="_POP|_CLM">
      <grid name="atm">ne30np4.pg4</grid>
      <grid name="lnd">ne30np4.pg4</grid>
      <grid name="ocnice">ne30np4.pg4</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="ne60pg4_ne60pg4_mg17" not_compset="_POP|_CLM">
      <grid name="atm">ne60np4.pg4</grid>
      <grid name="lnd">ne60np4.pg4</grid>
      <grid name="ocnice">ne60np4.pg4</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="ne120pg4_ne120pg4_mg17" not_compset="_POP|_CLM">
      <grid name="atm">ne120np4.pg4</grid>
      <grid name="lnd">ne120np4.pg4</grid>
      <grid name="ocnice">ne120np4.pg4</grid>
      <mask>gx1v7</mask>
    </model_grid>

   <!-- VR-CESM grids with CAM-SE -->

    <model_grid alias="ne0CONUSne30x8_g17">
      <grid name="atm">ne0np4CONUS.ne30x8</grid>
      <grid name="lnd">ne0np4CONUS.ne30x8</grid>
      <grid name="ocnice">gx1v7</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="ne0CONUSne30x8_ne0CONUSne30x8_mg17" not_compset="_POP">
      <grid name="atm">ne0np4CONUS.ne30x8</grid>
      <grid name="lnd">ne0np4CONUS.ne30x8</grid>
      <grid name="ocnice">ne0np4CONUS.ne30x8</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="ne0TESTONLYne5x4_ne0TESTONLYne5x4_mg37" not_compset="_POP">
      <grid name="atm">ne0np4TESTONLY.ne5x4</grid>
      <grid name="lnd">ne0np4TESTONLY.ne5x4</grid>
      <grid name="ocnice">ne0np4TESTONLY.ne5x4</grid>
      <mask>gx3v7</mask>
    </model_grid>

    <model_grid alias="ne0CONUSne30x8_ne0CONUSne30x8_mt12" not_compset="_POP">
      <grid name="atm">ne0np4CONUS.ne30x8</grid>
      <grid name="lnd">ne0np4CONUS.ne30x8</grid>
      <grid name="ocnice">ne0np4CONUS.ne30x8</grid>
      <mask>tx0.1v2</mask>
    </model_grid>

    <model_grid alias="ne0np4.ARCTIC.ne30x4_mt12" not_compset="_POP">
      <grid name="atm">ne0np4.ARCTIC.ne30x4</grid>
      <grid name="lnd">ne0np4.ARCTIC.ne30x4</grid>
      <grid name="ocnice">ne0np4.ARCTIC.ne30x4</grid>
      <mask>tx0.1v2</mask>
    </model_grid>

    <model_grid alias="ne0np4.ARCTICGRIS.ne30x8_mt12" not_compset="_POP">
      <grid name="atm">ne0np4.ARCTICGRIS.ne30x8</grid>
      <grid name="lnd">ne0np4.ARCTICGRIS.ne30x8</grid>
      <grid name="ocnice">ne0np4.ARCTICGRIS.ne30x8</grid>
      <mask>tx0.1v2</mask>
    </model_grid>

    <!-- new runoff grids for data runoff model DROF -->

    <model_grid alias="T31_g37_rx1" compset="_DROF">
      <grid name="atm">T31</grid>
      <grid name="lnd">T31</grid>
      <grid name="ocnice">gx3v7</grid>
      <mask>gx3v7</mask>
    </model_grid>

    <model_grid alias="f45_g37_rx1" compset="_DROF">
      <grid name="atm">4x5</grid>
      <grid name="lnd">4x5</grid>
      <grid name="ocnice">gx3v7</grid>
      <mask>gx3v7</mask>
    </model_grid>

    <model_grid alias="f19_g16_rx1" compset="_DROF">
      <grid name="atm">1.9x2.5</grid>
      <grid name="lnd">1.9x2.5</grid>
      <grid name="ocnice">gx1v6</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="f19_g17_rx1" compset="_DROF">
      <grid name="atm">1.9x2.5</grid>
      <grid name="lnd">1.9x2.5</grid>
      <grid name="ocnice">gx1v7</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="ne30_g16_rx1" compset="_DROF">
      <grid name="atm">ne30np4</grid>
      <grid name="lnd">ne30np4</grid>
      <grid name="ocnice">gx1v6</grid>
      <mask>gx1v6</mask>
    </model_grid>

    <model_grid alias="ne30_g17_rx1" compset="_DROF">
      <grid name="atm">ne30np4</grid>
      <grid name="lnd">ne30np4</grid>
      <grid name="ocnice">gx1v7</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="C24_C24_mg17" >
      <grid name="atm">C24</grid>
      <grid name="lnd">C24</grid>
      <grid name="ocnice">C24</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="C48_C48_mg17" >
      <grid name="atm">C48</grid>
      <grid name="lnd">C48</grid>
      <grid name="ocnice">C48</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="C96_C96_mg17" >
      <grid name="atm">C96</grid>
      <grid name="lnd">C96</grid>
      <grid name="ocnice">gx1v7</grid>
      <grid name="glc">gland4</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="C96_C96_mt061" not_compset="_POP" >
      <grid name="atm">C96</grid>
      <grid name="lnd">C96</grid>
      <grid name="ocnice">C96</grid>
      <mask>tx0.66v1</mask>
    </model_grid>

    <model_grid alias="C96_t061" not_compset="_POP" >
      <grid name="atm">C96</grid>
      <grid name="lnd">C96</grid>
      <grid name="ocnice">tx0.66v1</grid>
      <mask>tx0.66v1</mask>
    </model_grid>

    <model_grid alias="C96_t025" not_compset="_POP" >
      <grid name="atm">C96</grid>
      <grid name="lnd">C96</grid>
      <grid name="ocnice">tx0.25v1</grid>
      <mask>tx0.25v1</mask>
    </model_grid>

    <model_grid alias="C192_C192_mg17" >
      <grid name="atm">C192</grid>
      <grid name="lnd">C192</grid>
      <grid name="ocnice">C192</grid>
      <mask>gx1v7</mask>
    </model_grid>


    <model_grid alias="C384_C384_mg17" >
      <grid name="atm">C384</grid>
      <grid name="lnd">C384</grid>
      <grid name="ocnice">C384</grid>
      <mask>gx1v7</mask>
    </model_grid>

    <model_grid alias="C384_t025" not_compset="_POP" >
      <grid name="atm">C384</grid>
      <grid name="lnd">C384</grid>
      <grid name="ocnice">tx0.25v1</grid>
      <mask>tx0.25v1</mask>
    </model_grid>

    <!-- The following grid is only used for ADWAV testing -->
    <model_grid alias="ww3a" compset="_WW3|DWAV">
      <grid name="wav">ww3a</grid>
    </model_grid>

  </grids>

  <!-- ======================================================== -->
  <!-- Component grid domain specifications -->
  <!-- ======================================================== -->

  <domains>

    <domain name="null">
      <!-- null grid -->
      <nx>0</nx> <ny>0</ny>
      <file>unset</file>
      <desc>null is no grid: </desc>
    </domain>

    <!-- LND domains for single column or regional -->


    <domain name="01col">
      <nx>1</nx> <ny>1</ny>
      <file>domain.ocn.01col.ArcticOcean.20150824.nc</file>
      <desc>01col is a single-column grid for datm and POP:</desc>
    </domain>

    <domain name="CLM_USRDAT">
      <nx>1</nx> <ny>1</ny>
      <file>$DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.${CLM_USRDAT_NAME}_navy.nc</file>
      <mesh driver="nuopc">create_mesh</mesh>
      <desc>user specified domain - only valid for DATM/CLM compset</desc>
    </domain>

    <domain name="1x1_numaIA">
      <nx>1</nx> <ny>1</ny>
      <file grid="atm|lnd">$DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.1x1pt-numaIA_navy.110106.nc</file>
      <mesh driver="nuopc">create_mesh</mesh>
      <desc>1x1 Numa Iowa -- only valid for DATM/CLM compset</desc>
    </domain>

    <domain name="1x1_brazil">
      <nx>1</nx> <ny>1</ny>
      <file grid="atm|lnd">$DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.1x1pt-brazil_navy.090715.nc</file>
      <mesh driver="nuopc">create_mesh</mesh>
      <desc>1x1 Brazil -- only valid for DATM/CLM compset</desc>
    </domain>

    <domain name="1x1_smallvilleIA">
      <nx>1</nx> <ny>1</ny>
      <file grid="atm|lnd">$DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.1x1pt-smallvilleIA_test.110106.nc</file>
      <mesh driver="nuopc">create_mesh</mesh>
      <desc>1x1 Smallville Iowa Crop Test Case -- only valid for DATM/CLM compset</desc>
    </domain>

    <domain name="1x1_camdenNJ">
      <nx>1</nx> <ny>1</ny>
      <file grid="atm|lnd">$DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.1x1pt-camdenNJ_navy.111004.nc</file>
      <mesh driver="nuopc">create_mesh</mesh>
      <desc>1x1 Camden New Jersey -- only valid for DATM/CLM compset</desc>
    </domain>

    <domain name="1x1_mexicocityMEX">
      <nx>1</nx> <ny>1</ny>
      <file grid="atm|lnd">$DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.1x1pt-mexicocityMEX_navy.090715.nc</file>
      <mesh driver="nuopc">create_mesh</mesh>
      <desc>1x1 Mexico City Mexico -- only valid for DATM/CLM compset</desc>
    </domain>

    <domain name="1x1_vancouverCAN">
      <nx>1</nx> <ny>1</ny>
      <file grid="atm|lnd">$DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.1x1pt-vancouverCAN_navy.090715.nc</file>
      <mesh driver="nuopc">create_mesh</mesh>
      <desc>1x1 Vancouver Canada -- only valid for DATM/CLM compset</desc>
    </domain>

    <domain name="1x1_urbanc_alpha">
      <nx>1</nx> <ny>1</ny>
      <file grid="atm|lnd">$DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.1x1pt-urbanc_alpha_test.110201.nc</file>
      <mesh driver="nuopc">create_mesh</mesh>
      <desc>1x1 Urban C Alpha Test Case -- only valid for DATM/CLM compset</desc>
    </domain>

    <domain name="5x5_amazon">
      <nx>1</nx> <ny>1</ny>
      <file grid="atm|lnd">$DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.5x5pt-amazon_navy.090715.nc</file>
      <mesh driver="nuopc">create_mesh</mesh>
      <desc>5x5 Amazon regional case -- only valid for DATM/CLM compset</desc>
    </domain>

    <!-- This grid is also used by ROF -->
    <domain name="0.125nldas2">
      <nx>464</nx> <ny>224</ny>
      <file grid="atm|lnd" mask="nldas2">$DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.0.125nldas2_0.125nldas2.190410.nc</file>
      <file grid="ocnice"  mask="nldas2">$DIN_LOC_ROOT/share/domains/domain.clm/domain.ocn.0.125nldas2.190410.nc</file>
      <desc>Regional NLDAS-2 grid over the U.S. (0.125 degree resolution; 25-53N, 235-293E)</desc>
    </domain>

    <!-- ATM/LND domains global -->

    <domain name="360x720cru">
      <nx>720</nx> <ny>360</ny>
      <file grid="atm|lnd">$DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.360x720_cruncep.100429.nc</file>
      <mesh driver="nuopc">create_mesh</mesh>
      <desc>Exact half-degree CRUNCEP datm forcing grid with CRUNCEP land-mask -- only valid for DATM/CLM compset</desc>
    </domain>

    <domain name="0.23x0.31">
      <nx>1152</nx> <ny>768</ny>
      <file grid="atm|lnd" mask="gx1v6">domain.lnd.fv0.23x0.31_gx1v6.100517.nc</file>
      <file grid="ocnice"  mask="gx1v6">domain.ocn.0.23x0.31_gx1v6_101108.nc</file>
      <file grid="atm|lnd" mask="tn1v3">domain.lnd.fv0.23x0.31_tn1v3.160414.nc</file>
      <file grid="ocnice"  mask="tn1v3">domain.ocn.fv0.23x0.31_tn1v3.160414.nc</file>
      <file grid="atm|lnd" mask="tn0.25v3">domain.lnd.fv0.23x0.31_tn0.25v3.160721.nc</file>
      <file grid="ocnice"  mask="tn0.25v3">domain.ocn.fv0.23x0.31_tn0.25v3.160721.nc</file>
      <desc>0.23x0.31 is FV 1/4-deg grid:</desc>
    </domain>

    <domain name="0.47x0.63">
      <nx>576</nx>  <ny>384</ny>
      <file grid="atm|lnd" mask="gx1v6">domain.lnd.fv0.47x0.63_gx1v6.090407.nc</file>
      <file grid="ocnice"  mask="gx1v6">domain.ocn.0.47x0.63_gx1v6_090408.nc</file>
      <file grid="atm|lnd" mask="gx1v7">domain.lnd.fv0.47x0.63_gx1v7.180521.nc</file>
      <file grid="ocnice"  mask="gx1v7">domain.ocn.fv0.47x0.63_gx1v7.180521.nc</file>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/fv0.47x0.63_141008_ESMFmesh.nc</mesh>
      <desc>0.47x0.63 is FV 1/2-deg grid:</desc>
    </domain>

    <domain name="0.9x1.25">
      <nx>288</nx>  <ny>192</ny>
      <file grid="atm|lnd" mask="gx1v6">domain.lnd.fv0.9x1.25_gx1v6.090309.nc</file>
      <file grid="ocnice"  mask="gx1v6">domain.ocn.0.9x1.25_gx1v6_090403.nc</file>
      <file grid="atm|lnd" mask="gx1v7">domain.lnd.fv0.9x1.25_gx1v7.151020.nc</file>
      <file grid="ocnice"  mask="gx1v7">domain.ocn.fv0.9x1.25_gx1v7.151020.nc</file>
      <file grid="atm|lnd" mask="tx0.66v1">domain.lnd.fv0.9x1.25_tx0.66v1.190314.nc</file>
      <file grid="ocnice"  mask="tx0.66v1">domain.ocn.fv0.9x1.25_tx0.66v1.190314.nc</file>
      <file grid="atm|lnd" mask="tn1v3">domain.lnd.fv0.9x1.25_tn1v3.160414.nc</file>
      <file grid="ocnice"  mask="tn1v3">domain.ocn.fv0.9x1.25_tn1v3.160414.nc</file>
      <file grid="atm|lnd" mask="tn0.25v3">domain.lnd.fv0.9x1.25_tn0.25v3.160721.nc</file>
      <file grid="ocnice"  mask="tn0.25v3">domain.ocn.fv0.9x1.25_tn0.25v3.160721.nc</file>
      <file grid="atm|lnd" mask="null">/glade/u/home/benedict/ys/datain/domain.aqua.fv0.9x1.25.nc</file>
      <file grid="ocnice"  mask="null">/glade/u/home/benedict/ys/datain/domain.aqua.fv0.9x1.25.nc</file>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/fv0.9x1.25_141008_polemod_ESMFmesh.nc</mesh>
      <desc>0.9x1.25 is FV 1-deg grid:</desc>
    </domain>

    <!-- TODO: the lats for the gx1v7 and gx1v6 masks are different at
         the poles; the 141008 should be used for both but would change answers -->
    <domain name="1.9x2.5">
      <nx>144</nx>  <ny>96</ny>
      <file grid="atm|lnd" mask="gx1v6">domain.lnd.fv1.9x2.5_gx1v6.090206.nc</file>
      <file grid="ocnice"  mask="gx1v6">domain.ocn.1.9x2.5_gx1v6_090403.nc</file>
      <file grid="atm|lnd" mask="gx1v7">domain.lnd.fv1.9x2.5_gx1v7.181205.nc</file>
      <file grid="ocnice"  mask="gx1v7">domain.ocn.fv1.9x2.5_gx1v7.181205.nc</file>
      <file grid="ocnice"  mask="null">domain.aqua.fv1.9x2.5.nc</file>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/fv1.9x2.5_141008_ESMFmesh.nc</mesh>
      <desc>1.9x2.5 is FV 2-deg grid:</desc>
    </domain>

    <domain name="4x5">
      <nx>72</nx> <ny>46</ny>
      <file grid="atm|lnd" mask="gx3v7">domain.lnd.fv4x5_gx3v7.091218.nc</file>
      <file grid="ocnice">domain.ocn.4x5_gx3v7_100120.nc</file>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/fv4x5_050615_polemod_ESMFmesh.nc</mesh>
      <desc>4x5 is FV 4-deg grid:</desc>
    </domain>

    <domain name="2.5x3.33">
      <nx>108</nx>  <ny>72</ny>
      <file grid="atm|lnd">domain.lnd.fv2.5x3.33_gx3v7.110223.nc</file>
      <file grid="ocnice">domain.ocn.fv2.5x3.33_gx3v7_110223.nc</file>
      <desc>2.5x3.33 is FV 3-deg grid:</desc>
    </domain>

    <domain name="10x15">
      <nx>24</nx>   <ny>19</ny>
      <file grid="atm|lnd" mask="usgs">$DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.fv10x15_USGS.110713.nc</file>
      <file grid="ocnice"  mask="usgs">$DIN_LOC_ROOT/share/domains/domain.clm/domain.ocn.fv10x15_USGS_070807.nc</file>
      <file grid="atm|lnd" mask="gx3v7">$DIN_LOC_ROOT/share/domains/domain.lnd.fv10x15_gx3v7.180321.nc</file>
      <file grid="ocnice"  mask="gx3v7">$DIN_LOC_ROOT/share/domains/domain.ocn.fv10x15_gx3v7.180321.nc</file>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/10x15_nomask_c110308_ESMFmesh.nc</mesh>
      <desc>10x15 is FV 10-deg grid:</desc>
      <support>For low resolution testing</support>
    </domain>

    <domain name="T341">
      <nx>1024</nx> <ny>512</ny>
      <!-- global spectral (eulerian dycore) grids-->
      <!--- mask for atm is irrelevant -->
      <file grid="atm|lnd" mask="gx1v6">domain.lnd.T341_gx1v6.111226.nc</file>
      <desc>T341 is Gaussian grid:</desc>
      <support>Backward compatible for very high resolution Spectral-dycore experiments</support>
    </domain>

    <domain name="T5">
      <nx>16</nx> <ny>8</ny>
      <file grid="atm|lnd" mask="gx3v7">$DIN_LOC_ROOT/share/domains/domain.lnd.T5_gx3v7.181009.nc</file>
      <file grid="ocnice"  mask="gx3v7">$DIN_LOC_ROOT/share/domains/domain.ocn.T5_gx3v7.181009.nc</file>
      <desc>T5 is Gaussian grid:</desc>
    </domain>

    <domain name="T85">
      <!-- global spectral (eulerian dycore) grids-->
      <nx>256</nx>  <ny>128</ny>
      <file grid="atm|lnd">domain.lnd.T85_gx1v4.060403.nc</file>
      <file grid="ocnice">domain.lnd.T85_gx1v4.060403.nc</file>
      <desc>T85 is Gaussian grid:</desc>
      <support>Backward compatible for high resolution Spectral-dycore experiments</support>
    </domain>

    <domain name="T62">
      <nx>192</nx>  <ny>96</ny>
      <file grid="atm|lnd" mask="gx1v7"   >$DIN_LOC_ROOT/share/domains/domain.lnd.T62_gx1v7.151008.nc</file>
      <file grid="atm|lnd" mask="gx1v6"   >$DIN_LOC_ROOT/share/domains/domain.lnd.T62_gx1v6.090320.nc</file>
      <file grid="atm|lnd" mask="gx3v7"   >$DIN_LOC_ROOT/share/domains/domain.lnd.T62_gx3v7.090911.nc</file>
      <file grid="atm|lnd" mask="tx0.66v1">$DIN_LOC_ROOT/share/domains/domain.lnd.T62_tx0.66v1.190425.nc</file>
      <file grid="atm|lnd" mask="tx1v1"   >$DIN_LOC_ROOT/share/domains/domain.lnd.T62_tx1v1.090122.nc</file>
      <file grid="atm|lnd" mask="tx0.1v2" >$DIN_LOC_ROOT/share/domains/domain.lnd.T62_tx0.1v2_090623.nc</file>
      <file grid="atm|lnd" mask="tx0.1v3" >$DIN_LOC_ROOT/share/domains/domain.lnd.T62_tx0.1v3.170929.nc</file>
      <file grid="atm|lnd" mask="oQU120"  >$DIN_LOC_ROOT/share/domains/domain.lnd.T62_oQU120.160325.nc</file>
      <file grid="ocnice"  mask="gx1v6"   >$DIN_LOC_ROOT/share/domains/domain.ocn.T62_gx1v6.130409.nc</file>
      <file grid="ocnice"  mask="gx1v7"   >$DIN_LOC_ROOT/share/domains/domain.ocn.T62_gx1v7.151008.nc</file>
      <file grid="ocnice"  mask="gx3v7"   >$DIN_LOC_ROOT/share/domains/domain.ocn.T62_gx3v7.130409.nc</file>
      <file grid="ocnice"  mask="tx0.66v1">$DIN_LOC_ROOT/share/domains/domain.ocn.T62_tx0.66v1.190425.nc</file>
      <file grid="atm|lnd" mask="tn1v3">$DIN_LOC_ROOT/share/domains/domain.lnd.T62_tn1v3.160414.nc</file>
      <file grid="atm|lnd" mask="tn0.25v3">$DIN_LOC_ROOT/share/domains/domain.lnd.T62_tn0.25v3.160721.nc</file>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/T62_040121_ESMFmesh.nc</mesh>
      <desc>T62 is Gaussian grid:</desc>
    </domain>

    <domain name="T31">
      <nx>96</nx> <ny>48</ny>
      <file grid="atm|lnd" mask="gx3v7">$DIN_LOC_ROOT/share/domains/domain.lnd.T31_gx3v7.130409.nc</file>
      <file grid="ocnice"  mask="gx3v7">$DIN_LOC_ROOT/share/domains/domain.ocn.T31_gx3v7.130409.nc</file>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/T31_040122_ESMFmesh.nc</mesh>
      <desc>T31 is Gaussian grid:</desc>
    </domain>

    <domain name="T42">
      <nx>128</nx> <ny>64</ny>
      <file grid="atm|lnd" mask="usgs" >$DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.T42_USGS.111004.nc</file>
      <file grid="ocnice"  mask="usgs" >$DIN_LOC_ROOT/atm/cam/ocnfrac/domain.camocn.64x128_USGS_070807.nc</file>
      <file grid="atm|lnd" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.lnd.T42_gx1v7.180727.nc</file>
      <file grid="ocnice"  mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.ocn.T42_gx1v7.180727.nc</file>
      <desc>T42 is Gaussian grid:</desc>
    </domain>

    <domain name="ne5np4">
      <nx>1352</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="gx3v7">$DIN_LOC_ROOT/share/domains/domain.lnd.ne5np4_gx3v7.140810.nc</file>
      <file grid="ocnice"  mask="gx3v7">$DIN_LOC_ROOT/share/domains/domain.ocn.ne5np4_gx3v7.140810.nc</file>
      <desc>ne5np4 is Spectral Elem 6-deg grid:</desc>
      <support>For ultra-low resolution spectral element grid testing</support>
    </domain>

    <domain name="ne5np4.pg3">
      <nx>1350</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="gx3v7">$DIN_LOC_ROOT/share/domains/domain.lnd.ne5np4.pg3_gx3v7.170605.nc</file>
      <file grid="ocnice"  mask="gx3v7">$DIN_LOC_ROOT/share/domains/domain.ocn.ne5np4.pg3_gx3v7.170605.nc</file>
      <desc>ne5np4.pg3 is a Spectral Elem 6-deg grid with a 3x3 FVM physics grid:</desc>
      <support>EXPERIMENTAL FVM physics grid</support>
    </domain>

    <domain name="ne16np4">
      <nx>13826</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.lnd.ne16np4_gx1v7.171018.nc</file>
      <file grid="ocnice"  mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.ocn.ne16np4_gx1v7.171018.nc</file>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/ne16np4_scrip_171002_ESMFmesh.nc</mesh>
      <desc>ne16np4 is Spectral Elem 2-deg grid:</desc>
      <support>For low resolution spectral element grid testing</support>
    </domain>

    <domain name="ne16np4.pg3">
      <nx>13824</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.lnd.ne16pg3_gx1v7.171003.nc</file>
      <file grid="ocnice"  mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.ocn.ne16pg3_gx1v7.171003.nc</file>
      <desc>ne16np4.pg3 is a Spectral Elem 2-deg grid with a 3x3 FVM physics grid:</desc>
      <support>EXPERIMENTAL FVM physics grid</support>
    </domain>

    <domain name="ne30np4">
      <nx>48602</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="gx1v6">$DIN_LOC_ROOT/share/domains/domain.lnd.ne30np4_gx1v6.110905.nc</file>
      <file grid="ocnice"  mask="gx1v6">$DIN_LOC_ROOT/share/domains/domain.ocn.ne30np4_gx1v6_110217.nc</file>
      <file grid="atm|lnd" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.lnd.ne30_gx1v7.171003.nc</file>
      <file grid="ocnice"  mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.ocn.ne30_gx1v7.171003.nc</file>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/ne30np4_091226_pentagons_ESMFmesh.nc</mesh>
      <desc>ne30np4 is Spectral Elem 1-deg grid:</desc>
    </domain>

    <domain name="ne30np4.pg2">
      <nx>21600</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.lnd.ne30np4.pg2_gx1v7.170628.nc</file>
      <file grid="ocnice"  mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.ocn.ne30np4.pg2_gx1v7.170628.nc</file>
      <desc>ne30np4.pg2 is a Spectral Elem 1-deg grid with a 2x2 FVM physics grid:</desc>
      <support>EXPERIMENTAL FVM physics grid</support>
    </domain>

    <domain name="ne30pg3">
      <nx>48600</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.lnd.ne30np4.pg3_gx1v7.170605.nc</file>
      <file grid="ocnice"  mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.ocn.ne30np4.pg3_gx1v7_170605.nc</file>
      <desc>ne30pg3 is a Spectral Elem ne30 grid with a 3x3 FVM physics grid:</desc>
      <support>EXPERIMENTAL FVM physics grid</support>
    </domain>

    <domain name="ne30np4.pg4">
      <nx>86400</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.lnd.ne30np4.pg4_gx1v7.170628.nc</file>
      <file grid="ocnice"  mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.ocn.ne30np4.pg4_gx1v7.170628.nc</file>
      <desc>ne30np4.pg4 is a Spectral Elem 1-deg grid with a 4x4 FVM physics grid:</desc>
      <support>EXPERIMENTAL FVM physics grid</support>
    </domain>

    <domain name="ne60np4">
      <nx>194402</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="gx1v6">$DIN_LOC_ROOT/share/domains/domain.lnd.ne60np4_gx1v6.120406.nc</file>
      <file grid="ocnice"  mask="gx1v6">$DIN_LOC_ROOT/share/domains/domain.ocn.ne60np4_gx1v6.121113.nc</file>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/ne60np4_pentagons_100408_ESMFmesh.nc</mesh>
      <desc>ne60np4 is Spectral Elem 1/2-deg grid:</desc>
    </domain>

    <domain name="ne60np4.pg2">
      <nx>86400</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.lnd.ne60np4.pg2_gx1v7.170628.nc</file>
      <file grid="ocnice"  mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.ocn.ne60np4.pg2_gx1v7.170628.nc</file>
      <desc>ne60np4.pg2 is a Spectral Elem 0.5-deg grid with a 2x2 FVM physics grid:</desc>
      <support>EXPERIMENTAL FVM physics grid</support>
    </domain>

    <domain name="ne60np4.pg3">
      <nx>194400</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.lnd.ne60np4.pg3_gx1v7.170628.nc</file>
      <file grid="ocnice"  mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.ocn.ne60np4.pg3_gx1v7.170628.nc</file>
      <desc>ne60np4.pg3 is a Spectral Elem 0.5-deg grid with a 3x3 FVM physics grid:</desc>
      <support>EXPERIMENTAL FVM physics grid</support>
    </domain>

    <domain name="ne60np4.pg4">
      <nx>345600</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.lnd.ne60np4.pg4_gx1v7.170628.nc</file>
      <file grid="ocnice"  mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.ocn.ne60np4.pg4_gx1v7.170628.nc</file>
      <desc>ne60np4.pg4 is a Spectral Elem 0.5-deg grid with a 4x4 FVM physics grid:</desc>
      <support>EXPERIMENTAL FVM physics grid</support>
    </domain>

    <domain name="ne120np4">
      <nx>777602</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="gx1v6">$DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4_gx1v6.110502.nc</file>
      <file grid="ocnice"  mask="gx1v6">$DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4_gx1v6.121113.nc</file>
      <file grid="atm|lnd" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4_gx1v7.190718.nc</file>
      <file grid="ocnice"  mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4_gx1v7.190718.nc</file>
      <desc>ne120np4 is Spectral Elem 1/4-deg grid:</desc>
    </domain>

    <domain name="ne120np4.pg2">
      <nx>345600</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4.pg2_gx1v7.170629.nc</file>
      <file grid="ocnice"  mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4.pg2_gx1v7.170629.nc</file>
      <file grid="atm|lnd" mask="tx0.1v2">$DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4.pg2_tx0.1v2.200108.nc</file>
      <file grid="ocnice"  mask="tx0.1v2">$DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4.pg2_tx0.1v2.200108.nc</file>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/ne120np4_pentagons_100310_ESMFmesh.nc</mesh>
      <desc>ne120np4.pg2 is a Spectral Elem 0.25-deg grid with a 2x2 FVM physics grid:</desc>
      <support>EXPERIMENTAL FVM physics grid</support>
    </domain>

    <domain name="ne120np4.pg3">
      <nx>777600</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4.pg3_gx1v7.190718.nc</file>
      <file grid="ocnice"  mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4.pg3_gx1v7.190718.nc</file>
      <file grid="atm|lnd" mask="tx0.1v3">$DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4.pg3_tx0.1v3.190820.nc</file>
      <file grid="ocnice"  mask="tx0.1v3">$DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4.pg3_tx0.1v3.190820.nc</file>
      <desc>ne120np4.pg3 is a Spectral Elem 0.25-deg grid with a 3x3 FVM physics grid:</desc>
      <support>EXPERIMENTAL FVM physics grid</support>
    </domain>

    <domain name="ne120np4.pg4">
      <nx>1382400</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4.pg4_gx1v7.170629.nc</file>
      <file grid="ocnice"  mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4.pg4_gx1v7.170629.nc</file>
      <desc>ne120np4.pg4 is a Spectral Elem 0.25-deg grid with a 4x4 FVM physics grid:</desc>
      <support>EXPERIMENTAL FVM physics grid</support>
    </domain>

    <domain name="ne240np4">
      <nx>3110402</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="gx1v6">$DIN_LOC_ROOT/share/domains/domain.lnd.ne240np4_gx1v6.111226.nc</file>
      <file grid="ocnice"  mask="gx1v6">$DIN_LOC_ROOT/share/domains/domain.ocn.ne240np4_gx1v6.111226.nc</file>
      <desc>ne240np4 is Spectral Elem 1/8-deg grid:</desc>
      <support>Experimental for very high resolution experiments</support>
    </domain>

    <domain name="ne0np4TESTONLY.ne5x4">
      <nx>3863</nx> <ny>1</ny>
      <desc>ne0np4TESTONLY.ne5x4 is a low-resolution refined SE grid for testing:</desc>
      <support>Test support only</support>
    </domain>

    <domain name="ne0np4CONUS.ne30x8">
      <nx>174098</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.lnd.ne0CONUSne30x8_gx1v7.190322.nc</file>
      <file grid="ocnice"  mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.ocn.ne0CONUSne30x8_gx1v7.190322.nc</file>
      <file grid="atm|lnd" mask="tx0.1v2">$DIN_LOC_ROOT/share/domains/domain.lnd.ne0CONUSne30x8_tx0.1v2.171010.nc</file>
      <file grid="ocnice"  mask="tx0.1v2">$DIN_LOC_ROOT/share/domains/domain.ocn.ne0CONUSne30x8_tx0.1v2.171010.nc</file>
      <desc>ne0np4CONUS.ne30x8 is a Spectral Elem 1-deg grid with a 1/8 deg refined region over the continental United States:</desc>
      <support>Test support only</support>
    </domain>

    <domain name="ne0np4.ARCTIC.ne30x4">
      <nx>117398</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="tx0.1v2">$DIN_LOC_ROOT/share/domains/domain.lnd.ne0np4.ARCTIC.ne30x4_tx0.1v2.191023.nc</file>
      <file grid="ocnice"  mask="tx0.1v2">$DIN_LOC_ROOT/share/domains/domain.ocn.ne0np4.ARCTIC.ne30x4_tx0.1v2.191023.nc</file>
      <desc>ne0np4.ARCTIC.ne30x4 is a Spectral Elem 1-deg grid with a 1/4 deg refined region over Arctic:</desc>
      <support>Test support only</support>
    </domain>

    <domain name="ne0np4.ARCTICGRIS.ne30x8">
      <nx>152390</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="tx0.1v2">$DIN_LOC_ROOT/share/domains/domain.lnd.ne0np4.ARCTICGRIS.ne30x8_tx0.1v2.191209.nc</file>
      <file grid="ocnice"  mask="tx0.1v2">$DIN_LOC_ROOT/share/domains/domain.ocn.ne0np4.ARCTICGRIS.ne30x8_tx0.1v2.191209.nc</file>
      <desc>ne0np4.ARCTICGRIS.ne30x8 is a Spectral Elem 1-deg grid with a 1/8 deg refined region over Greenland:</desc>
      <support>Test support only</support>
    </domain>

    <domain name="TL319">
      <nx>640</nx> <ny>320</ny>
      <file grid="atm|lnd" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.lnd.TL319_gx1v7.170705.nc</file>
      <file grid="ocnice"  mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.ocn.TL319_gx1v7.170705.nc</file>
      <file grid="atm|lnd" mask="tx0.66v1">$DIN_LOC_ROOT/share/domains/domain.lnd.TL319_tx0.66v1.190425.nc</file>
      <file grid="ocnice"  mask="tx0.66v1">$DIN_LOC_ROOT/share/domains/domain.ocn.TL319_tx0.66v1.190425.nc</file>
      <file grid="atm|lnd" mask="tx0.1v2">$DIN_LOC_ROOT/share/domains/domain.lnd.TL319_tx0.1v2.161014.nc</file>
      <file grid="ocnice"  mask="tx0.1v2">$DIN_LOC_ROOT/share/domains/domain.ocn.tx0.1v2.161014.nc</file>
      <file grid="atm|lnd" mask="tx0.1v3">$DIN_LOC_ROOT/share/domains/domain.lnd.TL319_tx0.1v3.170730.nc</file>
      <file grid="ocnice"  mask="tx0.1v3">$DIN_LOC_ROOT/share/domains/domain.ocn.tx0.1v3.170730.nc</file>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/TL319_151007_ESMFmesh.nc</mesh>
      <desc>TL319 grid for JRA55</desc>
    </domain>

    <domain name="ne240np4.pg2">
      <nx>1382400</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.lnd.ne240np4.pg2_gx1v7.170629.nc</file>
      <file grid="ocnice"  mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.ocn.ne240np4.pg2_gx1v7.170629.nc</file>
      <desc>ne240np4.pg2 is a Spectral Elem 0.125-deg grid with a 2x2 FVM physics grid:</desc>
      <support>EXPERIMENTAL FVM physics grid</support>
    </domain>

    <domain name="ne240np4.pg3">
      <nx>3110400</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.lnd.ne240np4.pg3_gx1v7.170629.nc</file>
      <file grid="ocnice"  mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.ocn.ne240np4.pg3_gx1v7.170629.nc</file>
      <desc>ne240np4.pg3 is a Spectral Elem 0.125-deg grid with a 3x3 FVM physics grid:</desc>
      <support>EXPERIMENTAL FVM physics grid</support>
    </domain>

    <!-- ======================================================== -->
    <!-- OCN/ICE domains -->
    <!-- ======================================================== -->

    <domain name="gx1v6">
      <nx>320</nx>  <ny>384</ny>
      <file grid="atm|lnd">$DIN_LOC_ROOT/share/domains/domain.ocn.gx1v6.090206.nc</file>
      <file grid="ocnice">$DIN_LOC_ROOT/share/domains/domain.ocn.gx1v6.090206.nc</file>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/gx1v6_090205_ESMFmesh.nc</mesh>
      <desc>gx1v6 is displaced Greenland pole v6 1-deg grid:</desc>
    </domain>

    <domain name="gx1v7">
      <nx>320</nx>  <ny>384</ny>
      <file grid="atm|lnd">$DIN_LOC_ROOT/share/domains/domain.ocn.gx1v7.151008.nc</file>
      <file grid="ocnice">$DIN_LOC_ROOT/share/domains/domain.ocn.gx1v7.151008.nc</file>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/gx1v7_151008_ESMFmesh.nc</mesh>
      <desc>gx1v7 is displaced Greenland pole 1-deg grid with Caspian as a land feature:</desc>
    </domain>

    <domain name="gx3v7">
      <nx>100</nx> <ny>116</ny>
      <file grid="ocnice">$DIN_LOC_ROOT/share/domains/domain.ocn.gx3v7.120323.nc</file>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/gx3v7_120309_ESMFmesh.nc</mesh>
      <desc>gx3v7 is displaced Greenland pole v7 3-deg grid:</desc>
    </domain>

    <domain name="tx0.66v1">
      <nx>540</nx> <ny>458</ny>
      <file grid="ocnice">$DIN_LOC_ROOT/share/domains/domain.ocn.tx0.66v1.190425.nc</file>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/tx0.66v1_190314_ESMFmesh.nc</mesh>
      <desc>tx0.66v1 is tripole v1 0.66-deg MOM6 grid:</desc>
      <support>Experimental for MOM6 experiments</support>
    </domain>

    <domain name="tx0.25v1">
      <nx>1440</nx> <ny>1080</ny>
      <file grid="ocnice">$DIN_LOC_ROOT/share/domains/domain.ocn.tx0.25v1.190207.nc</file>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/tx0.25v1_190204_ESMFmesh.nc</mesh>
      <desc>tx0.25v1 is tripole v1 0.25-deg MOM6 grid:</desc>
      <support>Experimental for MOM6 experiments</support>
    </domain>

    <domain name="tx0.1v2">
      <nx>3600</nx> <ny>2400</ny>
      <file grid="ocnice">$DIN_LOC_ROOT/share/domains/domain.ocn.tx0.1v2.161014.nc</file>
      <desc>tx0.1v2 is tripole v2 1/10-deg grid:</desc>
      <support>Experimental for high resolution experiments</support>
    </domain>

    <domain name="tx0.1v3">
      <nx>3600</nx> <ny>2400</ny>
      <file grid="ocnice">$DIN_LOC_ROOT/share/domains/domain.ocn.tx0.1v3.170730.nc</file>
      <desc>tx0.1v3 is tripole v3 1/10-deg grid:</desc>
      <support>Experimental for high resolution experiments</support>
    </domain>

    <domain name="tx1v1">
      <nx>360</nx> <ny>240</ny>
      <file grid="ocnice">$DIN_LOC_ROOT/share/domains/domain.ocn.tx1v1.090122.nc</file>
      <desc>tripole v1 1-deg grid: testing proxy for high-res tripole ocean grids; do not use for scientific experiments</desc>
      <support>Experimental tripole ocean grid</support>
    </domain>

    <domain name="oQU120">
      <nx>28574</nx>  <ny>1</ny>
      <file mask="oQU120">$DIN_LOC_ROOT/share/domains/domain.ocn.oQU120.160325.nc</file>
      <desc>oQU120 is a MPAS ocean grid that is roughly 1 degree resolution:</desc>
      <support>Experimental, under development</support>
    </domain>

    <domain name="tn1v3">
      <nx>360</nx> <ny>291</ny>
      <file grid="ocnice">$DIN_LOC_ROOT/share/domains/domain.ocn.tn1v3.160414.nc</file>
      <desc>tn1v3 is NEMO ORCA1 tripole grid at 1 deg (reduced eORCA):</desc>
      <support>NEMO ORCA1 tripole ocean grid</support>
    </domain>

    <domain name="tn0.25v3">
      <nx>1440</nx> <ny>1050</ny>
      <file grid="ocnice">$DIN_LOC_ROOT/share/domains/domain.ocn.tn0.25v3.160721.nc</file>
      <desc>tn0.25v3 is NEMO ORCA1 tripole grid at 1/4 deg (reduced eORCA):</desc>
      <support>NEMO ORCA1 tripole ocean grid</support>
    </domain>

    <!-- ======================================================== -->
    <!-- ROF domains -->
    <!-- ======================================================== -->

    <domain name="rx1">
      <nx>360</nx> <ny>180</ny>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/rx1_nomask_181022_ESMFmesh.nc</mesh>
      <desc>rx1 is 1 degree river routing grid (only valid for DROF):</desc>
      <support>Can only be used by DROF</support>
    </domain>

    <domain name="r05">
      <nx>720</nx> <ny>360</ny>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/r05_nomask_c110308_ESMFmesh.nc</mesh>
      <desc>r05 is 1/2 degree river routing grid:</desc>
    </domain>

    <domain name="r01">
      <nx>3600</nx> <ny>1800</ny>
      <!-- TODO (mvertens, 2018-12-02): create a domain file for this  -->
      <desc>r01 is 1/10 degree river routing grid:</desc>
      <support>For experimental use by high resolution grids</support>
    </domain>

    <domain name="JRA025v2">
      <nx>1440</nx> <ny>720</ny>
      <desc>JRA 0.25 degree runoff grid for use with JRA-55 runoff data</desc>
    </domain>

    <domain name="JRA025">
      <nx>1440</nx> <ny>720</ny>
      <!-- TODO (mvertens, 2018-12-02): create a domain file for this  -->
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/JRA025m.170209_ESMFmesh.nc</mesh>
      <desc>JRA is 0.25 degree runoff grid for use with JRA-55 runoff data</desc>
    </domain>

    <!-- GLC domains -->

    <domain name="gland20">
      <nx>76</nx> <ny>141</ny>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/gland_20km_c150511_ESMFmesh.nc</mesh>
      <desc>20-km Greenland grid</desc>
    </domain>

    <domain name="gland5UM">
      <nx>301</nx> <ny>561</ny>
      <desc>5-km Greenland grid (new version from U. Montana)</desc>
    </domain>

    <domain name="gland4">
      <nx>416</nx> <ny>704</ny>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/greenland_4km_epsg3413_c170414_ESMFmesh_c20190729.nc</mesh>
      <desc>4-km Greenland grid, for use with the glissade dycore</desc>
    </domain>

    <!-- WW3 domains-->

    <domain name="ww3a">
      <nx>90</nx>  <ny>50</ny>
      <file mask="ww3a">$DIN_LOC_ROOT/share/domains/domain.lnd.ww3a_ww3a.120222.nc</file>
      <file mask="ww3a">$DIN_LOC_ROOT/share/domains/domain.ocn.ww3a_ww3a.120222.nc</file>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/ww3a_120222_ESMFmesh.nc</mesh>
      <desc>WW3 90 x 50 global grid</desc>
      <support>For testing of the WAV model</support>
    </domain>

    <!-- fvcubed domains-->

    <domain name="C24">
      <nx>3456</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="gx1v6">$DIN_LOC_ROOT/share/domains/domain.lnd.C24_gx1v6.181018.nc</file>
      <file grid="ocnice" mask="gx1v6">$DIN_LOC_ROOT/share/domains/domain.ocn.C24_gx1v6.181018.nc</file>
      <file grid="atm|lnd" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.lnd.C24_gx1v7.181018.nc</file>
      <file grid="ocnice" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.ocn.C24_gx1v7.181018.nc</file>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/C24_181018_ESMFmesh.nc</mesh>
      <desc>C24 is a fvcubed xx-deg grid:</desc>
      <support>Experimental for fv3 dycore</support>
    </domain>

    <domain name="C48">
      <nx>13824</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="gx1v6">$DIN_LOC_ROOT/share/domains/domain.lnd.C48_gx1v6.181018.nc</file>
      <file grid="ocnice" mask="gx1v6">$DIN_LOC_ROOT/share/domains/domain.ocn.C48_gx1v6.181018.nc</file>
      <file grid="atm|lnd" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.lnd.C48_gx1v7.181018.nc</file>
      <file grid="ocnice" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.ocn.C48_gx1v7.181018.nc</file>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/C48_181018_ESMFmesh.nc</mesh>
      <desc>C48 is a fvcubed xx-deg grid:</desc>
      <support>Experimental for fv3 dycore</support>
    </domain>

    <domain name="C96">
      <nx>55296</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="gx1v6">$DIN_LOC_ROOT/share/domains/domain.lnd.C96_gx1v6.181018.nc</file>
      <file grid="ocnice"  mask="gx1v6">$DIN_LOC_ROOT/share/domains/domain.ocn.C96_gx1v6.181018.nc</file>
      <file grid="atm|lnd" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.lnd.C96_gx1v7.181018.nc</file>
      <file grid="ocnice" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.ocn.C96_gx1v7.181018.nc</file>
      <file grid="atm|lnd" mask="tx0.66v1">$DIN_LOC_ROOT/share/domains/domain.lnd.C96_tx0.66v1.181210.nc</file>
      <file grid="ocnice"  mask="tx0.66v1">$DIN_LOC_ROOT/share/domains/domain.ocn.C96_tx0.66v1.181210.nc</file>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/C96_181018_ESMFmesh.nc</mesh>
      <desc>C96 is a fvcubed xx-deg grid:</desc>
      <support>Experimental for fv3 dycore</support>
    </domain>

    <domain name="C192">
      <nx>221184</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="gx1v6">$DIN_LOC_ROOT/share/domains/domain.lnd.C192_gx1v6.181018..nc</file>
      <file grid="ocnice" mask="gx1v6">$DIN_LOC_ROOT/share/domains/domain.ocn.C192_gx1v6.181018.nc</file>
      <file grid="atm|lnd" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.lnd.C192_gx1v7.181018..nc</file>
      <file grid="ocnice" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.ocn.C192_gx1v7.181018.nc</file>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/C192_181018_ESMFmesh.nc</mesh>
      <desc>C192 is a fvcubed xx-deg grid:</desc>
      <support>Experimental for fv3 dycore</support>
    </domain>

    <domain name="C384">
      <nx>884736</nx> <ny>1</ny>
      <file grid="atm|lnd" mask="gx1v6">$DIN_LOC_ROOT/share/domains/domain.lnd.C384_gx1v6.181018.nc</file>
      <file grid="ocnice" mask="gx1v6">$DIN_LOC_ROOT/share/domains/domain.ocn.C384_gx1v6.181018.nc</file>
      <file grid="atm|lnd" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.lnd.C384_gx1v7.181018.nc</file>
      <file grid="ocnice" mask="gx1v7">$DIN_LOC_ROOT/share/domains/domain.ocn.C384_gx1v7.181018.nc</file>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/C384_181018_ESMFmesh.nc</mesh>
      <desc>C384 is a fvcubed xx-deg grid:</desc>
      <support>Experimental for fv3 dycore</support>
    </domain>

  </domains>

  <!-- ======================================================== -->
  <!-- Mapping -->
  <!-- ======================================================== -->

  <!-- The following grid maps are required and must not be idmap when the        -->
  <!-- grid1 and grid2 attributes are not equal (an illustrative entry follows the list) -->

  <required_gridmaps>
    <required_gridmap grid1="atm_grid" grid2="ocn_grid">ATM2OCN_FMAPNAME</required_gridmap>
    <required_gridmap grid1="atm_grid" grid2="ocn_grid">ATM2OCN_SMAPNAME</required_gridmap>
    <required_gridmap grid1="atm_grid" grid2="ocn_grid">ATM2OCN_VMAPNAME</required_gridmap>
    <required_gridmap grid1="atm_grid" grid2="ocn_grid">OCN2ATM_FMAPNAME</required_gridmap>
    <required_gridmap grid1="atm_grid" grid2="ocn_grid">OCN2ATM_SMAPNAME</required_gridmap>
    <required_gridmap grid1="atm_grid" grid2="lnd_grid">ATM2LND_FMAPNAME</required_gridmap>
    <required_gridmap grid1="atm_grid" grid2="lnd_grid">ATM2LND_SMAPNAME</required_gridmap>
    <required_gridmap grid1="atm_grid" grid2="lnd_grid">LND2ATM_FMAPNAME</required_gridmap>
    <required_gridmap grid1="atm_grid" grid2="lnd_grid">LND2ATM_SMAPNAME</required_gridmap>
    <required_gridmap grid1="atm_grid" grid2="wav_grid">ATM2WAV_SMAPNAME</required_gridmap>
    <required_gridmap grid1="ocn_grid" grid2="wav_grid">OCN2WAV_SMAPNAME</required_gridmap>
    <required_gridmap grid1="ocn_grid" grid2="wav_grid">ICE2WAV_SMAPNAME</required_gridmap> <!-- ??? -->
    <!-- <required_gridmap grid1="ocn_grid" grid2="rof_grid" not_compset="_POP">ROF2OCN_FMAPNAME</required_gridmap> ?? -->
    <required_gridmap grid1="ocn_grid" grid2="rof_grid">ROF2OCN_LIQ_RMAPNAME</required_gridmap>
    <required_gridmap grid1="ocn_grid" grid2="rof_grid">ROF2OCN_ICE_RMAPNAME</required_gridmap>
    <required_gridmap grid1="lnd_grid" grid2="rof_grid">LND2ROF_FMAPNAME</required_gridmap>
    <required_gridmap grid1="lnd_grid" grid2="rof_grid">ROF2LND_FMAPNAME</required_gridmap>
  </required_gridmaps>
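
  <!-- Illustration only: when grid1 and grid2 differ (for example, atm on ne30np4 and ocn
       on gx1v7), each required name above must resolve to an actual mapping file through a
       gridmap entry, normally supplied by the config_grids_*.xml files included below.
       The element layout follows the usual gridmap conventions; the file names here are
       placeholders, not real inputdata paths:

       <gridmap atm_grid="ne30np4" ocn_grid="gx1v7">
         <map name="ATM2OCN_FMAPNAME">cpl/gridmaps/ne30np4/map_ne30np4_TO_gx1v7_aave.nc</map>
         <map name="ATM2OCN_SMAPNAME">cpl/gridmaps/ne30np4/map_ne30np4_TO_gx1v7_blin.nc</map>
         <map name="OCN2ATM_FMAPNAME">cpl/gridmaps/gx1v7/map_gx1v7_TO_ne30np4_aave.nc</map>
       </gridmap>
  -->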

  <xi:include href="config_grids_mct.xml"/>
  <xi:include href="config_grids_nuopc.xml"/>
  <xi:include href="config_grids_common.xml"/>

</grid_data>

CIMEROOT/config/cesm/machines

CESM XML settings for supported batch queuing systems.

<?xml version="1.0"?>
<config_batch version="2.1">
  <!--
     File:    config_batch.xml
     Purpose: abstract out the parts of run scripts that are different, and use this configuration to
     create cesm run scripts from a single template.

     batch_system:     The batch system type and version.
     batch_query:      The batch query command for each batch system.
     batch_redirect:   Whether a redirect character is needed to submit jobs.
     batch_directive:  The string that prepends a batch directive for the batch system.
     jobid_pattern:    A Perl regular expression used to extract the job id from the output
                       returned by a queue submission.

 ===============================================================
 batch_system
 ===============================================================
 The batch_system and associated tags are meant for configuring batch systems and
 queues across machines.  The batch_system tag denotes the name for a particular
 batch system; these can either be shared by one or more machines, or can be
 defined for a specific machine if need be.

 Machine-specific entries take precedence over generic entries; directives are appended.

 queues:
 One or more queues can be defined per batch_system. If the attribute default="true"
 is set on a queue, that queue is used by default. Alternatively, multiple queues can
 be defined and one is chosen using the following variables (an illustrative entry
 follows this comment block):
 walltimemin: The minimum amount of walltime for the queue.
 walltimemax: The maximum amount of walltime for the queue.
 nodemin:     The minimum node count required to use this queue.
 nodemax:     The maximum node count allowed for this queue.
 jobmin:      The minimum task count required to use this queue. This should only rarely be used, to select queues that use only a fraction of a node. It cannot be used in conjunction with nodemin.
 jobmax:      The maximum task count allowed for this queue. This should only rarely be used, to select queues that use only a fraction of a node. It cannot be used in conjunction with nodemax.
    -->
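
  <!-- Illustration only: a machine-specific entry using the queue-selection variables
       described above might look like the following (compare the machine-specific entries
       later in this file). The machine name, queue names, and limits here are hypothetical:

       <batch_system MACH="examplemach" type="slurm">
         <queues>
           <queue walltimemax="00:30:00" nodemax="2">debug</queue>
           <queue walltimemax="12:00:00" default="true">regular</queue>
         </queues>
       </batch_system>
  -->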
  <batch_system type="template" >
    <batch_query args=""></batch_query>
    <batch_submit></batch_submit>
    <batch_redirect></batch_redirect>
    <batch_directive></batch_directive>
    <directives>
      <directive></directive>
    </directives>
  </batch_system>

  <batch_system type="none" >
    <batch_query args=""></batch_query>
    <batch_submit></batch_submit>
    <batch_redirect></batch_redirect>
    <batch_directive></batch_directive>
    <directives>
      <directive></directive>
    </directives>
  </batch_system>

  <batch_system type="cobalt" >
    <batch_query>qstat</batch_query>
    <batch_submit>qsub</batch_submit>
    <batch_cancel>qdel</batch_cancel>
    <batch_env>-v</batch_env>
    <batch_directive></batch_directive>
    <jobid_pattern>(\d+)</jobid_pattern>
    <depend_string> --dependencies</depend_string>
    <walltime_format>%H:%M:%S</walltime_format>
    <batch_mail_flag>-M</batch_mail_flag>
    <batch_mail_type_flag></batch_mail_type_flag>
    <batch_mail_type></batch_mail_type>
    <submit_args>
      <arg flag="--cwd" name="CASEROOT"/>
      <arg flag="-A" name="PROJECT"/>
      <arg flag="-t" name="JOB_WALLCLOCK_TIME"/>
      <!-- space required at beginning of name -->
      <arg flag="-n" name=" $TOTALPES / $MAX_MPITASKS_PER_NODE"/>
      <arg flag="-q" name="JOB_QUEUE"/>
      <arg flag="--mode script"/>
    </submit_args>
  </batch_system>

  <batch_system type="cobalt_theta" >
    <batch_query>qstat</batch_query>
    <batch_submit>qsub</batch_submit>
    <batch_cancel>qdel</batch_cancel>
    <batch_env>--env</batch_env>
    <batch_directive>#COBALT</batch_directive>
    <jobid_pattern>(\d+)</jobid_pattern>
    <depend_string> --dependencies</depend_string>
    <batch_mail_flag>-M</batch_mail_flag>
    <batch_mail_type_flag></batch_mail_type_flag>
    <batch_mail_type></batch_mail_type>
    <submit_args>
      <arg flag="-A" name="PROJECT"/>
      <arg flag="-t" name="JOB_WALLCLOCK_TIME"/>
      <arg flag="-n" name=" $TOTALPES/$MAX_MPITASKS_PER_NODE"/>
      <arg flag="-q" name="JOB_QUEUE"/>
      <arg flag="--mode script"/>
    </submit_args>
  </batch_system>

  <batch_system type="lsf">
    <batch_query args=" -w" >bjobs</batch_query>
    <batch_submit>bsub</batch_submit>
    <batch_cancel>bkill</batch_cancel>
    <batch_redirect>&lt;</batch_redirect>
    <batch_env> </batch_env>
    <batch_directive>#BSUB</batch_directive>
    <jobid_pattern>&lt;(\d+)&gt;</jobid_pattern>
    <depend_string> -w 'done(jobid)'</depend_string>
    <depend_allow_string> -w 'ended(jobid)'</depend_allow_string>
    <depend_separator>&amp;&amp;</depend_separator>
    <walltime_format>%H:%M</walltime_format>
    <batch_mail_flag>-u</batch_mail_flag>
    <batch_mail_type_flag></batch_mail_type_flag>
    <batch_mail_type></batch_mail_type>
    <directives>
      <directive                       > -J {{ job_id }} </directive>
      <directive                       > -n {{ total_tasks }} </directive>
      <directive                       > -W $JOB_WALLCLOCK_TIME </directive>
      <directive default="cesm.stdout" > -o {{ job_id }}.%J  </directive>
      <directive default="cesm.stderr" > -e {{ job_id }}.%J  </directive>
    </directives>
  </batch_system>

  <batch_system type="pbs" >
    <batch_query args="-f" >qstat</batch_query>
    <batch_submit>qsub </batch_submit>
    <batch_cancel>qdel</batch_cancel>
    <batch_env>-v</batch_env>
    <batch_directive>#PBS</batch_directive>
    <jobid_pattern>^(\S+)$</jobid_pattern>
    <depend_string> -W depend=afterok:jobid</depend_string>
    <depend_allow_string> -W depend=afterany:jobid</depend_allow_string>
    <depend_separator>:</depend_separator>
    <walltime_format>%H:%M:%S</walltime_format>
    <batch_mail_flag>-M</batch_mail_flag>
    <batch_mail_type_flag>-m</batch_mail_type_flag>
    <batch_mail_type>, bea, b, e, a</batch_mail_type>
    <submit_args>
      <arg flag="-q" name="$JOB_QUEUE"/>
      <arg flag="-l walltime=" name="$JOB_WALLCLOCK_TIME"/>
      <arg flag="-A" name="$PROJECT"/>
    </submit_args>
    <directives>
      <directive>-N {{ job_id }}</directive>
      <directive default="n"> -r {{ rerunnable }} </directive>
      <!-- <directive> -j oe {{ job_id }} </directive> -->
      <directive> -j oe </directive>
      <directive> -V </directive>
    </directives>
  </batch_system>

  <batch_system type="slurm" >
    <batch_query per_job_arg="-j">squeue</batch_query>
    <batch_cancel>scancel</batch_cancel>
    <batch_directive>#SBATCH</batch_directive>
    <jobid_pattern>(\d+)$</jobid_pattern>
    <depend_string> --dependency=afterok:jobid</depend_string>
    <depend_allow_string> --dependency=afterany:jobid</depend_allow_string>
    <depend_separator>,</depend_separator>
    <walltime_format>%H:%M:%S</walltime_format>
    <batch_mail_flag>--mail-user</batch_mail_flag>
    <batch_mail_type_flag>--mail-type</batch_mail_type_flag>
    <batch_mail_type>none, all, begin, end, fail</batch_mail_type>
    <directives>
      <directive> --job-name={{ job_id }}</directive>
      <directive> --nodes={{ num_nodes }}</directive>
      <directive> --ntasks-per-node={{ tasks_per_node }}</directive>
      <directive> --output={{ job_id }}   </directive>
      <directive> --exclusive                        </directive>
    </directives>
  </batch_system>

  <batch_system MACH="aleph" type="pbs" >
    <directives>
      <directive>-l nodes={{ num_nodes }}</directive>
      <directive>-q iccp</directive>
    </directives>
    <queues>
      <queue walltimemax="24:00:00" default="true" >iccp</queue>
    </queues>
  </batch_system>

  <!-- athena is lsf -->
  <batch_system MACH="athena" type="lsf">
    <submit_args>
      <arg flag="-q" name="$JOB_QUEUE"/>
      <arg flag="-W" name="$JOB_WALLCLOCK_TIME"/>
      <arg flag="-P" name="$PROJECT"/>
    </submit_args>
    <directives>
      <directive                       > -R "span[ptile={{ tasks_per_node }}]"</directive>
      <directive                       > -N  </directive>
      <directive default="poe"         > -a {{ poe }} </directive>
    </directives>
    <queues>
      <queue walltimemin="00:00" walltimemax="02:00">poe_short</queue>
      <queue walltimemin="02:00" walltimemax="04:00" default="true">poe_medium</queue>
      <queue walltimemin="04:00" walltimemax="08:00">poe_long</queue>
    </queues>
  </batch_system>

  <!-- bluewaters is PBS -->
  <batch_system MACH="bluewaters" type="pbs" >
    <jobid_pattern>(\d+.bw)$</jobid_pattern>
    <directives>
      <directive>-l nodes={{ num_nodes }}:ppn={{ tasks_per_node }}:xe</directive>
      <directive default="/bin/bash" > -S {{ shell }} </directive>
    </directives>
    <queues>
      <queue walltimemax="24:00:00">normal</queue>
      <queue walltimemax="00:30:00" nodemin="1" nodemax="16" default="true">debug</queue>
    </queues>
  </batch_system>

  <batch_system MACH="cheyenne" type="pbs">
    <directives queue="regular">
      <directive default="/bin/bash" > -S {{ shell }}  </directive>
      <directive> -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}</directive>
    </directives>

    <directives queue="premium">
      <directive default="/bin/bash" > -S {{ shell }}  </directive>
      <directive> -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}</directive>
    </directives>

    <directives queue="economy">
      <directive default="/bin/bash" > -S {{ shell }}  </directive>
      <directive> -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}</directive>
    </directives>

    <directives queue="share">
      <directive default="/bin/bash" > -S {{ shell }}  </directive>
      <directive> -l select=1:mpiprocs={{ total_tasks }}:ompthreads={{ thread_count }}</directive>
    </directives>

    <!-- Unknown queues use the batch directives for the regular queue -->
    <unknown_queue_directives>regular</unknown_queue_directives>

    <queues>
      <queue walltimemax="12:00:00" nodemin="1" nodemax="4032">regular</queue>
      <queue walltimemax="12:00:00" nodemin="1" nodemax="4032">premium</queue>
      <queue default="true" walltimemax="06:00:00" jobmin="1" jobmax="18">share</queue>
      <queue walltimemax="12:00:00" nodemin="1" nodemax="4032">economy</queue>
    </queues>
  </batch_system>

  <!-- coeus slurm -->
  <batch_system MACH="coeus" type="slurm" >
    <batch_query per_job_arg="-j">squeue</batch_query>
    <batch_submit>sbatch</batch_submit>
    <batch_cancel>scancel</batch_cancel>
    <batch_directive>#SBATCH</batch_directive>
    <jobid_pattern>(\d+)$</jobid_pattern>
    <depend_separator>,</depend_separator>
    <walltime_format>%H:%M:%S</walltime_format>
    <batch_mail_flag>--mail-user</batch_mail_flag>
    <batch_mail_type_flag>--mail-type</batch_mail_type_flag>
    <batch_mail_type>none, all, begin, end, fail</batch_mail_type>
    <directives>
      <directive> --job-name={{ job_id }}</directive>
      <directive> --nodes={{ num_nodes }}</directive>
      <directive> --ntasks-per-node={{ tasks_per_node }}</directive>
      <directive> --output={{ job_id }}   </directive>
      <directive> --exclusive                        </directive>
    </directives>
    <queues>
      <queue nodemin="1" nodemax="96" default="true">medium</queue>
    </queues>
  </batch_system>

  <batch_system type="slurm" MACH="constance">
    <batch_submit>sbatch</batch_submit>
    <submit_args>
      <arg flag="--time" name="$JOB_WALLCLOCK_TIME"/>
      <arg flag="-p" name="$JOB_QUEUE"/>
      <arg flag="--account" name="$PROJECT"/>
    </submit_args>
  </batch_system>

  <batch_system MACH="cori-haswell" type="slurm" >
    <batch_submit>sbatch</batch_submit>
    <submit_args>
      <arg flag="--time" name="$JOB_WALLCLOCK_TIME"/>
      <arg flag="-q" name="$JOB_QUEUE"/>
      <arg flag="--account" name="$PROJECT"/>
    </submit_args>
    <directives>
      <directive>-C haswell </directive>
    </directives>
    <queues>
      <queue walltimemax="06:00:00" nodemin="1" nodemax="710">regular</queue>
    <!--  <queue walltimemax="00:30:00" nodemin="1" nodemax="3072" default="true">debug</queue> -->
    </queues>
  </batch_system>

  <batch_system MACH="cori-knl" type="slurm" >
    <batch_submit>sbatch</batch_submit>
    <submit_args>
      <arg flag="--time" name="$JOB_WALLCLOCK_TIME"/>
      <arg flag="-q" name="$JOB_QUEUE"/>
      <arg flag="--account" name="$PROJECT"/>
    </submit_args>
    <directives>
      <directive>-C knl,quad,cache </directive>
      <directive>-S 2 </directive>
    </directives>
    <queues>
      <queue walltimemax="02:00:00" nodemin="1" nodemax="177">regular</queue>
    <!--  <queue walltimemax="00:30:00" nodemin="1" nodemax="3072" default="true">debug</queue> -->
    </queues>
  </batch_system>

  <batch_system MACH="daint" type="slurm" >
    <batch_submit>sbatch</batch_submit>
    <submit_args>
      <arg flag="--time" name="$JOB_WALLCLOCK_TIME"/>
      <arg flag="-p" name="$JOB_QUEUE"/>
      <arg flag="--account" name="$PROJECT"/>
    </submit_args>
    <queues>
      <queue default="true">default</queue>
    </queues>
  </batch_system>

  <batch_system MACH="eastwind" type="slurm" >
    <batch_submit>sbatch</batch_submit>
    <submit_args>
      <arg flag="--time" name="$JOB_WALLCLOCK_TIME"/>
      <arg flag="-p" name="$JOB_QUEUE"/>
      <arg flag="--account" name="$PROJECT"/>
    </submit_args>
    <queues>
      <queue nodemin="1" nodemax="833" default="true">batch</queue>
    </queues>
  </batch_system>

  <batch_system MACH="edison" type="slurm" >
    <batch_submit>sbatch</batch_submit>
    <submit_args>
      <arg flag="--time" name="$JOB_WALLCLOCK_TIME"/>
      <arg flag="-q" name="$JOB_QUEUE"/>
      <arg flag="--account" name="$PROJECT"/>
    </submit_args>
    <queues>
      <queue walltimemax="36:00:00" nodemin="1" nodemax="2712" >regular</queue>
      <queue walltimemax="00:30:00" nodemin="1" nodemax="256" default="true">debug</queue>
    </queues>
  </batch_system>

  <!-- euler2 is LSF -->
  <batch_system MACH="euler2" type="lsf" >
    <directives>
      <directive> -R "select[model==XeonE5_2680v3]" </directive>
    </directives>
    <queues>
      <queue walltimemax="23:59:00" default="true">normal.24h</queue>
      <queue walltimemax="03:59:00" >normal.4h</queue>
    </queues>
  </batch_system>

  <!-- euler3 is LSF -->
  <batch_system MACH="euler3" type="lsf" >
    <directives>
      <directive> -R "span[ptile=4] select[model==XeonE3_1585Lv5]" </directive>
    </directives>
    <queues>
      <queue walltimemax="23:59:00" default="true">normal.24h</queue>
      <queue walltimemax="03:59:00" >normal.4h</queue>
    </queues>
  </batch_system>

  <!-- euler4 is LSF -->
  <batch_system MACH="euler4" type="lsf" >
    <directives>
      <directive> -R "select[model==XeonGold_6150]" </directive>
    </directives>
    <queues>
      <queue walltimemax="23:59:00" default="true">normal.24h</queue>
      <queue walltimemax="03:59:00" >normal.4h</queue>
    </queues>
  </batch_system>
  <!-- gaea is PBS -->
  <batch_system MACH="gaea" type="pbs" >
    <directives>
      <directive>-A cpo</directive>
      <directive>-l {{ partition }}</directive>
      <directive>-l size={{ mppsize }}</directive>
      <directive>-E </directive>
      <directive>-d $RUNDIR</directive>
      <directive>-o $RUNDIR/$CASE.out </directive>
      <directive>-S /bin/bash  </directive>
    </directives>
    <queues>
      <queue walltimemax="01:00:00" nodemin="1" nodemax="35">debug</queue>
      <queue walltimemax="24:00:00" nodemin="861" nodemax="4166" default="true">batch</queue>
    </queues>
  </batch_system>

  <!-- hobart is PBS -->
  <batch_system type="pbs" MACH="hobart" >
    <directives>
      <directive>-l nodes={{ num_nodes }}:ppn={{ tasks_per_node }}</directive>
      <directive default="/bin/bash" > -S {{ shell }}  </directive>
    </directives>
    <queues>
      <queue walltimemax="02:00:00"   strict="true" nodemin="1"  nodemax="8">short</queue>
      <queue walltimemax="08:00:00"   strict="true" nodemin="1"  nodemax="6" default="true">medium</queue>
      <queue walltimemax="40:00:00"   strict="true" nodemin="1"  nodemax="8">long</queue>
      <queue walltimemax="80:00:00"   strict="true" nodemin="1"  nodemax="8">verylong</queue>
      <queue walltimemax="32:00:00"   strict="true" nodemax="16" nodemin="1">overnight</queue>
      <queue walltimemax="3000:00:00" strict="true" nodemax="32" nodemin="1">monster</queue>
    </queues>
  </batch_system>

  <batch_system type="pbs" MACH="izumi" >
    <batch_submit>qsub</batch_submit>
    <jobid_pattern>(\d+.izumi.unified.ucar.edu)$</jobid_pattern>
    <directives>
      <directive>-l nodes={{ num_nodes }}:ppn={{ tasks_per_node }}</directive>
      <directive default="/bin/bash" > -S {{ shell }}  </directive>
    </directives>
    <queues>
      <queue walltimemax="02:00:00"   strict="true" nodemin="1"  nodemax="8">short</queue>
      <queue walltimemax="08:00:00"   strict="true" nodemin="1"  nodemax="6" default="true">medium</queue>
      <queue walltimemax="40:00:00"   strict="true" nodemin="1"  nodemax="8">long</queue>
      <queue walltimemax="120:00:00"   strict="true" nodemin="1"  nodemax="8">verylong</queue>
      <queue walltimemax="32:00:00"   strict="true" nodemax="14" nodemin="1">overnight</queue>
      <queue walltimemax="3000:00:00" strict="true" nodemax="14" nodemin="1">monster</queue>
    </queues>
  </batch_system>

  <batch_system MACH="laramie" type="pbs">
    <directives>
      <directive default="/bin/bash" > -S {{ shell }}  </directive>
      <directive> -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}</directive>
    </directives>
    <queues>
      <queue default="true" walltimemax="12:00" nodemin="1" nodemax="72">regular</queue>
    </queues>
  </batch_system>

  <batch_system MACH="lawrencium-lr3" type="slurm">
    <batch_submit>sbatch</batch_submit>
    <directives>
      <directive>--qos=lr_normal</directive>
      <directive>--partition=lr3</directive>
      <directive>--account={{ project }}</directive>
      <directive>--ntasks-per-node={{ tasks_per_node }}</directive>
    </directives>
    <queues>
      <queue walltimemin="00:00:00" walltimemax="72:00:00" nodemin="1" nodemax="64" default="true">lr3</queue>
    </queues>
  </batch_system>

  <batch_system MACH="lawrencium-lr2" type="slurm">
    <batch_submit>sbatch</batch_submit>
    <directives>
       <directive>--qos=lr_normal</directive>
       <directive>--partition=lr2</directive>
       <directive>--account={{ project }}</directive>
       <directive>--ntasks-per-node={{ tasks_per_node }}</directive>
    </directives>
    <queues>
      <queue walltimemin="00:00:00" walltimemax="72:00:00" nodemin="1" nodemax="64" default="true">lr2</queue>
    </queues>
  </batch_system>

  <batch_system MACH="lonestar5" type="slurm" >
    <batch_submit>ssh login1.ls5.tacc.utexas.edu cd $CASEROOT ; sbatch</batch_submit>
    <submit_args>
      <arg flag="--time" name="$JOB_WALLCLOCK_TIME"/>
      <arg flag="-p" name="$JOB_QUEUE"/>
      <arg flag="--account" name="$PROJECT"/>
    </submit_args>
    <queues>
      <queue walltimemax="48:00:00" nodemin="1" nodemax="171" default="true">normal</queue>
      <queue walltimemax="24:00:00" nodemin="160" nodemax="342">large</queue>
      <queue walltimemax="02:00:00" nodemin="1" nodemax="11" >development</queue>
    </queues>
  </batch_system>

  <batch_system MACH="mira" type="cobalt">
    <queues>
      <queue walltimemax="06:00:00" nodemin="1" nodemax="12288" default="true">default</queue>
    </queues>
  </batch_system>

  <!-- modex is PBS -->
  <batch_system MACH="modex" type="pbs">
    <directives>
      <directive>-l nodes={{ num_nodes }}:ppn={{ tasks_per_node }}</directive>
      <directive default="/bin/bash" > -S {{ shell }}  </directive>
    </directives>
    <queues>
      <queue walltimemax="36:00:00" default="true">batch</queue>
    </queues>
  </batch_system>

  <batch_system MACH="olympus" type="slurm">
    <batch_submit>sbatch</batch_submit>
    <submit_args>
      <arg flag="--time" name="$JOB_WALLCLOCK_TIME"/>
      <arg flag="-p" name="$JOB_QUEUE"/>
      <arg flag="--account" name="$PROJECT"/>
    </submit_args>
    <queues>
      <queue walltimemin="0" walltimemax="00:59:00" nodemin="0" nodemax="312" default="true">queue</queue>
    </queues>
  </batch_system>

  <!-- NAS pleiades machines -->
  <batch_system type="pbs" MACH="pleiades-bro" >
    <directives>
      <directive>-W group_list=$PROJECT</directive>
      <directive>-l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:model=bro</directive>
      <directive>-l place=scatter:excl</directive>
      <directive default="/bin/bash" > -S {{ shell }}  </directive>
    </directives>
    <queues>
      <queue walltimemin="" walltimemax="08:00:00" nodemin="0" nodemax="357" default="true">normal</queue>
    </queues>
  </batch_system>

  <batch_system type="pbs" MACH="pleiades-has" >
    <directives>
      <directive>-W group_list=$PROJECT</directive>
      <directive>-l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:model=has</directive>
      <directive>-l place=scatter:excl</directive>
      <directive default="/bin/bash" > -S {{ shell }}  </directive>
    </directives>
    <queues>
      <queue walltimemin="" walltimemax="08:00:00" nodemin="0" nodemax="357" default="true">normal</queue>
    </queues>
  </batch_system>

  <batch_system type="pbs" MACH="pleiades-ivy" >
    <directives>
      <directive>-W group_list=$PROJECT </directive>
      <directive>-l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:model=ivy</directive>
      <directive>-l place=scatter:excl</directive>
      <directive default="/bin/bash" > -S {{ shell }}  </directive>
    </directives>
    <queues>
      <queue walltimemin="" walltimemax="08:00:00" nodemin="0" nodemax="500" default="true">normal</queue>
    </queues>
  </batch_system>

  <batch_system type="pbs" MACH="pleiades-san" >
    <directives>
      <directive>-W group_list=$PROJECT </directive>
      <directive>-l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:model=san</directive>
      <directive>-l place=scatter:excl</directive>
      <directive default="/bin/bash" > -S {{ shell }}  </directive>
    </directives>
    <queues>
      <queue walltimemin="" walltimemax="08:00:00" nodemin="0" nodemax="624" default="true">normal</queue>
    </queues>
  </batch_system>

  <batch_system MACH="stampede2-skx" type="slurm" >
    <batch_submit>ssh stampede2.tacc.utexas.edu cd $CASEROOT ; sbatch</batch_submit>
    <submit_args>
      <arg flag="--time" name="$JOB_WALLCLOCK_TIME"/>
      <arg flag="-p" name="$JOB_QUEUE"/>
      <arg flag="--account" name="$PROJECT"/>
    </submit_args>
    <queues>
      <queue walltimemax="48:00:00" nodemin="1" nodemax="256" default="true">skx-normal</queue>
      <queue walltimemax="02:00:00" nodemin="1" nodemax="4" >skx-dev</queue>
    </queues>
  </batch_system>

  <batch_system MACH="stampede2-knl" type="slurm" >
    <batch_submit>ssh stampede2.tacc.utexas.edu cd $CASEROOT ; sbatch</batch_submit>
    <submit_args>
      <arg flag="--time" name="$JOB_WALLCLOCK_TIME"/>
      <arg flag="-p" name="$JOB_QUEUE"/>
      <arg flag="--account" name="$PROJECT"/>
    </submit_args>
    <queues>
      <queue walltimemax="48:00:00" nodemin="1" nodemax="256" >normal</queue>
      <queue walltimemax="02:00:00" nodemin="1" nodemax="8" default="true">development</queue>
    </queues>
  </batch_system>

  <batch_system MACH="theia" type="slurm" >
    <batch_submit>sbatch</batch_submit>
    <submit_args>
      <arg flag="--time" name="$JOB_WALLCLOCK_TIME"/>
      <arg flag="-q" name="$JOB_QUEUE"/>
      <arg flag="--account" name="$PROJECT"/>
    </submit_args>
    <directives>
      <directive>--partition=theia</directive>
    </directives>
    <queues>
      <queue walltimemax="01:00:00" nodemin="1" nodemax="171">batch</queue>
    </queues>
  </batch_system>

  <batch_system MACH="theta" type="cobalt_theta">
    <queues>
      <queue walltimemax="00:60:00" nodemin="1" nodemax="50" default="true">default</queue>
    </queues>
  </batch_system>
</config_batch>

CESM XML settings for supported compilers.

<?xml version="1.0" encoding="UTF-8"?>
<config_compilers version="2.0">
<!--
========================================================================
This file defines compiler flags for building CESM.  General flags are listed first,
followed by flags specific to particular operating systems, and then flags for particular machines.

More general flags are replaced by more specific flags.

Attributes indicate that an if clause should be added to the Macros so that these flags are added
only under the conditions described by the attribute(s).

The env_mach_specific file may set environment variables or load modules which set environment
variables that are then used in the Makefile.  For example, NETCDF_PATH on many machines is set by a module.
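
For illustration only (a hypothetical fragment, not an entry from this file), a flag
block such as

  <FFLAGS>
    <base> -O2 </base>
    <append DEBUG="TRUE"> -g -O0 </append>
  </FFLAGS>

puts -O2 in FFLAGS unconditionally, while the DEBUG attribute adds an if clause to the
Macros so that -g -O0 is appended only when DEBUG is TRUE.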

========================================================================
Serial/MPI compiler specification
========================================================================

SCC   and SFC   specify the serial C and Fortran compilers
MPICC and MPIFC specify the MPI C and Fortran compilers

if $MPILIB is set to mpi-serial then
CC = $SCC
FC = $SFC
MPICC = $SCC
MPIFC = $SFC
INC_MPI = $(CIMEROOT)/src/externals/mct/mpi-serial
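
As a concrete illustration (values copied from the gnu entry later in this file), a
compiler entry containing

  <SFC> gfortran </SFC>
  <SCC> gcc </SCC>
  <MPIFC> mpif90 </MPIFC>
  <MPICC> mpicc </MPICC>

builds with gfortran/gcc (plus the mpi-serial include path above) when $MPILIB is set
to mpi-serial, and with mpif90/mpicc otherwise.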

========================================================================
Options for including C++ code in the build
========================================================================

SUPPORTS_CXX (TRUE/FALSE): Whether we have defined all the necessary
settings for including C++ code in the build for this compiler (or
this compiler/machine combination). See below for a description of the
necessary settings.

The following are required for a compiler to support the inclusion of
C++ code:

SCXX: serial C++ compiler

MPICXX: mpi C++ compiler

CXX_LINKER (CXX/FORTRAN): When C++ code is included in the build, do
we use a C++ or Fortran linker?

In addition, some compilers require additional libraries or link-time
flags, specified via CXX_LIBS or CXX_LDFLAGS, as in the following
examples:

<CXX_LIBS> -L/path/to/directory -lfoo </CXX_LIBS>

or

<CXX_LDFLAGS> -cxxlib </CXX_LDFLAGS>

Note that these libraries or LDFLAGS will be added on the link line,
regardless of whether we are using a C++ or Fortran linker. For
example, if CXX_LINKER=CXX, then the above CXX_LIBS line should
specify extra libraries needed when linking C++ and fortran code using
a C++ linker. If CXX_LINKER=FORTRAN, then the above CXX_LDFLAGS line
should specify extra LDFLAGS needed when linking C++ and fortran code
using a fortran linker.
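
Putting these together, an entry that supports C++ might contain the following
(illustrative only; compare the intel entry later in this file):

  <SCXX> icpc </SCXX>
  <MPICXX> mpicxx </MPICXX>
  <CXX_LINKER>FORTRAN</CXX_LINKER>
  <CXX_LDFLAGS> -cxxlib </CXX_LDFLAGS>
  <SUPPORTS_CXX>TRUE</SUPPORTS_CXX>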

-->
<!-- Define default values that can be overridden by specific
     compilers -->
<compiler>
  <CPPDEFS>
    <!-- This should be removed AFTER MOM6 cap is fully unified -->
    <append> -DCESMCOUPLED </append>
    <append MODEL="pop"> -D_USE_FLOW_CONTROL </append>
    <append MODEL="ufsatm"> -DSPMD </append>
  </CPPDEFS>

  <INCLDIR>
        <append MODEL="ufsatm"> -I$(EXEROOT)/atm/obj/FMS </append>
  </INCLDIR>
  <FFLAGS>
    <append MODEL="ufsatm"> $(FC_AUTO_R8) </append>
    <append MODEL="mom"> $(FC_AUTO_R8) -Duse_LARGEFILE</append>
  </FFLAGS>
  <SUPPORTS_CXX>FALSE</SUPPORTS_CXX>
</compiler>

<compiler COMPILER="cray">
  <CFLAGS>
    <append compile_threaded="FALSE"> -h noomp </append>
    <append DEBUG="TRUE"> -g -O0 </append>
    <append DEBUG="FALSE"> -O2 </append>
  </CFLAGS>
  <CPPDEFS>
    <!--http://docs.cray.com/cgi-bin/craydoc.cgi?mode=View;id=S-3901-83;idx=books_search;this_sort=;q=;type=books;title=Cray%20Fortran%20Reference%20Manual -->
    <append> -DFORTRANUNDERSCORE -DNO_R16 -DCPRCRAY</append>
    <append MODEL="pop"> -DDIR=NOOP </append>
    <append MODEL="moby"> -DDIR=NOOP </append>
  </CPPDEFS>
  <FC_AUTO_R8>
    <base> -s real64 </base>
  </FC_AUTO_R8>
  <FFLAGS>
    <base> -f free -N 255  -h byteswapio -x dir </base>
    <append compile_threaded="FALSE"> -h noomp </append>
    <append DEBUG="TRUE"> -g -O0 -K trap=fp  -m1 </append>
    <append DEBUG="FALSE"> -O2,ipa2 -em </append>
  </FFLAGS>
  <FFLAGS_NOOPT>
    <base> -O1,fp2,ipa0,scalar0,vector0 </base>
  </FFLAGS_NOOPT>
  <HAS_F2008_CONTIGUOUS>TRUE</HAS_F2008_CONTIGUOUS>
  <LDFLAGS>
    <base> -Wl,--allow-multiple-definition -h byteswapio </base>
  </LDFLAGS>
</compiler>

<compiler COMPILER="gnu">
  <CFLAGS>
    <base> -std=gnu99 </base>
    <append compile_threaded="TRUE"> -fopenmp </append>
    <append DEBUG="TRUE"> -g -Wall -Og -fbacktrace -ffpe-trap=invalid,zero,overflow -fcheck=bounds </append>
    <append DEBUG="FALSE"> -O </append>
  </CFLAGS>
  <CPPDEFS>
    <!-- http://gcc.gnu.org/onlinedocs/gfortran/ -->
    <append> -DFORTRANUNDERSCORE -DNO_R16 -DCPRGNU</append>
  </CPPDEFS>
  <CXX_LINKER>FORTRAN</CXX_LINKER>
  <FC_AUTO_R8>
    <base> -fdefault-real-8 </base>
  </FC_AUTO_R8>
  <FFLAGS>
    <!-- -ffree-line-length-none and -ffixed-line-length-none need to be in FFLAGS rather than in FIXEDFLAGS/FREEFLAGS
       so that these are passed to cmake builds (cmake builds don't use FIXEDFLAGS and FREEFLAGS). -->
    <base>  -fconvert=big-endian -ffree-line-length-none -ffixed-line-length-none </base>
    <append compile_threaded="TRUE"> -fopenmp </append>
    <!-- Ideally, we would also have 'invalid' in the ffpe-trap list. But at
         least with some versions of gfortran (confirmed with 5.4.0, 6.3.0 and
         7.1.0), gfortran's isnan (which is called in cime via the
         CPRGNU-specific shr_infnan_isnan) causes a floating point exception
         when called on a signaling NaN. -->
    <append DEBUG="TRUE"> -g -Wall -Og -fbacktrace -ffpe-trap=zero,overflow -fcheck=bounds </append>
    <append DEBUG="FALSE"> -O </append>
  </FFLAGS>
  <FFLAGS_NOOPT>
    <base> -O0 </base>
  </FFLAGS_NOOPT>
  <FIXEDFLAGS>
    <base>  -ffixed-form </base>
  </FIXEDFLAGS>
  <FREEFLAGS>
    <base> -ffree-form </base>
  </FREEFLAGS>
  <HAS_F2008_CONTIGUOUS>FALSE</HAS_F2008_CONTIGUOUS>
  <LDFLAGS>
    <append compile_threaded="TRUE"> -fopenmp </append>
  </LDFLAGS>
  <MPICC> mpicc  </MPICC>
  <MPICXX> mpicxx </MPICXX>
  <MPIFC> mpif90 </MPIFC>
  <SCC> gcc </SCC>
  <SCXX> g++ </SCXX>
  <SFC> gfortran </SFC>
  <SUPPORTS_CXX>TRUE</SUPPORTS_CXX>
</compiler>

<compiler COMPILER="ibm">
  <CFLAGS>
    <base> -g -qfullpath -qmaxmem=-1 </base>
    <append DEBUG="FALSE"> -O3  </append>
    <append compile_threaded="TRUE"> -qsmp=omp </append>
    <append DEBUG="TRUE" compile_threaded="TRUE"> -qsmp=omp:noopt </append>
  </CFLAGS>
  <CPPDEFS>
    <!-- http://publib.boulder.ibm.com/infocenter/comphelp/v7v91/index.jsp
 Notes:  (see xlf user's guide for the details)
  -lmass          => IBM-tuned intrinsic lib
  -qsmp=noauto    => enable SMP directives, but don't add any
  -qsmp=omp       => enable SMP directives, strict omp
  -qstrict        => don't turn divides into multiplies, etc
  -qhot           => higher-order-transformations (eg. loop padding)
  -qalias=noaryovrlp => assume no array overlap wrt equivalence, etc
  -qmaxmem=-1     => memory available to compiler during optimization
  -qipa=level=2   => InterProcedure Analysis (eg. inlining) => slow compiles
  -p -pg          => enable profiling (use in both FFLAGS and LDFLAGS)
  -qreport        => for smp/omp only
  -g              => always leave it on because overhead is minimal
  -qflttrap=...   => enable default sigtrap (core dump)
  -C              => runtime array bounds checking (runs slow)
  -qinitauto=...  => initializes automatic variables
  -->
    <append> -DFORTRAN_SAME -DCPRIBM </append>
  </CPPDEFS>
  <CPRE>-WF,-D</CPRE>
  <FC_AUTO_R8>
    <base> -qrealsize=8 </base>
  </FC_AUTO_R8>
  <FFLAGS>
    <base> -g -qfullpath -qmaxmem=-1 </base>
    <append DEBUG="FALSE"> -O2 -qstrict -qinline=auto </append>
    <append compile_threaded="TRUE"> -qsmp=omp </append>
    <append DEBUG="TRUE"> -qinitauto=7FF7FFFF -qflttrap=ov:zero:inv:en </append>
    <append DEBUG="TRUE" compile_threaded="TRUE"> -qsmp=omp:noopt </append>
    <append DEBUG="TRUE" MODEL="pop"> -C </append>
  </FFLAGS>
  <FIXEDFLAGS>
    <base>  -qsuffix=f=f -qfixed=132 </base>
  </FIXEDFLAGS>
  <FREEFLAGS>
    <base> -qsuffix=f=f90:cpp=F90  </base>
  </FREEFLAGS>
  <HAS_F2008_CONTIGUOUS>TRUE</HAS_F2008_CONTIGUOUS>
  <LDFLAGS>
    <append compile_threaded="TRUE"> -qsmp=omp </append>
    <append DEBUG="TRUE" compile_threaded="TRUE"> -qsmp=omp:noopt </append>
  </LDFLAGS>
</compiler>

<compiler COMPILER="intel">
  <CFLAGS>
    <base>  -qno-opt-dynamic-align -fp-model precise -std=gnu99 </base>
    <append compile_threaded="TRUE"> -qopenmp </append>
    <append DEBUG="FALSE"> -O2 -debug minimal </append>
    <append DEBUG="TRUE"> -O0 -g </append>
  </CFLAGS>
  <CPPDEFS>
    <!-- http://software.intel.com/en-us/articles/intel-composer-xe/ -->
    <append> -DFORTRANUNDERSCORE -DCPRINTEL</append>
  </CPPDEFS>
  <CXX_LDFLAGS>
    <base> -cxxlib </base>
  </CXX_LDFLAGS>
  <CXX_LINKER>FORTRAN</CXX_LINKER>
  <FC_AUTO_R8>
    <base> -r8 </base>
  </FC_AUTO_R8>
  <FFLAGS>
    <base> -qno-opt-dynamic-align  -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model source  </base>
    <append compile_threaded="TRUE"> -qopenmp </append>
    <append DEBUG="TRUE"> -O0 -g -check uninit -check bounds -check pointers -fpe0 -check noarg_temp_created </append>
    <append DEBUG="FALSE"> -O2 -debug minimal </append>
  </FFLAGS>
  <FFLAGS_NOOPT>
    <base> -O0 </base>
  </FFLAGS_NOOPT>
  <FIXEDFLAGS>
    <base> -fixed  </base>
  </FIXEDFLAGS>
  <FREEFLAGS>
    <base> -free </base>
  </FREEFLAGS>
  <LDFLAGS>
    <append compile_threaded="TRUE"> -qopenmp </append>
  </LDFLAGS>
  <MPICC> mpicc  </MPICC>
  <MPICXX> mpicxx </MPICXX>
  <MPIFC> mpif90 </MPIFC>
  <SCC> icc </SCC>
  <SCXX> icpc </SCXX>
  <SFC> ifort </SFC>
  <SLIBS>
    <append MPILIB="mpich"> -mkl=cluster </append>
    <append MPILIB="mpich2"> -mkl=cluster </append>
    <append MPILIB="mvapich"> -mkl=cluster </append>
    <append MPILIB="mvapich2"> -mkl=cluster </append>
    <append MPILIB="mpt"> -mkl=cluster </append>
    <append MPILIB="openmpi"> -mkl=cluster </append>
    <append MPILIB="impi"> -mkl=cluster </append>
    <append MPILIB="mpi-serial"> -mkl </append>
  </SLIBS>
  <SUPPORTS_CXX>TRUE</SUPPORTS_CXX>
</compiler>

<compiler COMPILER="nag">
  <CFLAGS>
    <base> -std=gnu99 </base>
    <append DEBUG="TRUE"> -g </append>
  </CFLAGS>
  <CPPDEFS>
    <append> -DFORTRANUNDERSCORE -DNO_CRAY_POINTERS -DNO_SHR_VMATH -DCPRNAG </append>
  </CPPDEFS>
  <FC_AUTO_R8>
    <base> -r8 </base>
  </FC_AUTO_R8>
  <FFLAGS>
    <!-- The indirect flag below is to deal with MPI functions that violate    -->
    <!-- the Fortran standard, by adding a large set of arguments from a file. -->
    <base>-Wp,-macro=no_com -convert=BIG_ENDIAN -indirect $ENV{CIMEROOT}/config/cesm/machines/nag_mpi_argument.txt</base>
    <!-- DEBUG vs. non-DEBUG runs.                                             -->
    <append DEBUG="FALSE"> -ieee=full -O2 </append>
    <!-- The "-gline" option is nice, but it doesn't work with OpenMP.         -->
    <!-- Runtime checks with OpenMP (in fact, all OpenMP cases) are WIP.       -->
    <append DEBUG="TRUE"> -C=all -g -time -f2003 -ieee=stop </append>
    <append DEBUG="TRUE" compile_threaded="FALSE"> -gline </append>
    <!-- The SLAP library (which is part of the CISM build) has many instances of
       arguments being passed with mismatched types. So disable argument type
       checking when building CISM. This can be removed once we remove SLAP from
       CISM. -->
    <append MODEL="cism"> -mismatch_all </append>
  </FFLAGS>
  <FFLAGS_NOOPT>
    <base> -O0 </base>
  </FFLAGS_NOOPT>
  <FIXEDFLAGS>
    <base> -fixed </base>
  </FIXEDFLAGS>
  <FREEFLAGS>
    <base> -free </base>
  </FREEFLAGS>
  <HAS_F2008_CONTIGUOUS>FALSE</HAS_F2008_CONTIGUOUS>
  <MPICC> mpicc </MPICC>
  <MPIFC> mpif90 </MPIFC>
  <SCC> gcc </SCC>
  <SFC> nagfor </SFC>

  <LDFLAGS>
    <append> -lpthread </append>
  </LDFLAGS>
  <CONFIG_ARGS>
    <append> FCLIBS="-Wl,--as-needed,--allow-shlib-undefined  -L$(COMPILER_PATH)/lib/NAG_Fortran -lf62rts" </append>
  </CONFIG_ARGS>
</compiler>


<compiler COMPILER="pgi">
  <CFLAGS>
    <base> -gopt  -time </base>
    <append compile_threaded="TRUE"> -mp </append>
  </CFLAGS>
  <CPPDEFS>
    <!-- http://www.pgroup.com/resources/docs.htm                                              -->
    <!-- Notes:  (see pgi man page & user's guide for the details) -->
    <!--  -Mextend        => Allow 132-column source lines -->
    <!--  -Mfixed         => Assume fixed-format source -->
    <!--  -Mfree          => Assume free-format source -->
    <!--  -byteswapio     => Swap byte-order for unformatted i/o (big/little-endian) -->
    <!--  -target=linux   => Sets the target architecture to Compute Node Linux (CNL only) -->
    <!--  -fast           => Chooses generally optimal flags for the target platform -->
    <!--  -Mnovect        => Disables automatic vector pipelining -->
    <!--  -Mvect=nosse    => Don't generate SSE, SSE2, 3Dnow, and prefetch instructions in loops    -->
    <!--  -Mflushz        => Set SSE to flush-to-zero mode (underflow) loops where possible  -->
    <!--  -Kieee          => Perform fp ops in strict conformance with the IEEE 754 standard.  -->
    <!--                     Some optimizations disabled, slightly slower, more accurate math.  -->
    <!--  -mp=nonuma      => Don't use thread/processors affinity (for NUMA architectures)  -->
    <!-- -->
    <!--  -g              => Generate symbolic debug information. Turns off optimization.   -->
    <!--  -gopt           => Generate information for debugger without disabling optimizations  -->
    <!--  -Mbounds        => Add array bounds checking  -->
    <!--  -Ktrap=fp       => Determine IEEE Trap conditions fp => inv,divz,ovf   -->
    <!--                     * inv: invalid operands         -->
    <!--                     * divz divide by zero           -->
    <!--                     * ovf: floating point overflow   -->
    <!--  -Mlist          => Create a listing file             -->
    <!--  -F              => leaves file.f for each preprocessed file.F file  -->
    <!--  -time           => Print execution time for each compiler step  -->
    <append> -DFORTRANUNDERSCORE -DNO_SHR_VMATH -DNO_R16  -DCPRPGI </append>
  </CPPDEFS>
  <CXX_LINKER>CXX</CXX_LINKER>
  <FC_AUTO_R8>
    <base> -r8 </base>
  </FC_AUTO_R8>
  <FFLAGS>
    <base>  -i4 -gopt  -time -Mextend -byteswapio -Mflushz -Kieee  </base>
    <append compile_threaded="TRUE"> -mp </append>
    <append DEBUG="TRUE"> -O0 -g -Ktrap=fp -Mbounds -Kieee </append>
    <append MODEL="datm"> -Mnovect </append>
    <append MODEL="dlnd"> -Mnovect </append>
    <append MODEL="drof"> -Mnovect </append>
    <append MODEL="dwav"> -Mnovect </append>
    <append MODEL="dice"> -Mnovect </append>
    <append MODEL="docn"> -Mnovect </append>
  </FFLAGS>
  <FFLAGS_NOOPT>
    <base> -O0 </base>
  </FFLAGS_NOOPT>
  <FIXEDFLAGS>
    <base> -Mfixed </base>
  </FIXEDFLAGS>
  <FREEFLAGS>
    <base> -Mfree </base>
  </FREEFLAGS>
  <!-- Note that SUPPORTS_CXX is false for pgi in general, because we
       need some machine-specific libraries - see hopper pgi for an
       example -->
  <!-- Technically, PGI does recognize this keyword during parsing,
       but support is either buggy or incomplete, notably in that
       the "contiguous" attribute is incompatible with "intent".-->
  <HAS_F2008_CONTIGUOUS>FALSE</HAS_F2008_CONTIGUOUS>
  <LDFLAGS>
    <base> -time -Wl,--allow-multiple-definition </base>
    <append compile_threaded="TRUE"> -mp </append>
  </LDFLAGS>
  <MPICC> mpicc </MPICC>
  <MPICXX> mpicxx </MPICXX>
  <MPIFC> mpif90 </MPIFC>
  <SCC> pgcc </SCC>
  <SCXX> pgc++ </SCXX>
  <SFC> pgf95 </SFC>
</compiler>

<compiler OS="AIX" COMPILER="ibm">
  <CFLAGS>
    <append> -qarch=auto -qtune=auto -qcache=auto </append>
  </CFLAGS>
  <CONFIG_SHELL> /usr/bin/bash </CONFIG_SHELL>
  <FFLAGS>
    <append> -qarch=auto -qtune=auto -qcache=auto -qsclk=micro </append>
    <append MODEL="cam"> -qspill=6000 </append>
  </FFLAGS>
  <LDFLAGS>
    <append DEBUG="TRUE"> -qsigtrap=xl__trcedump </append>
    <append> -bdatapsize:64K -bstackpsize:64K -btextpsize:32K </append>
  </LDFLAGS>
  <MPICC> mpcc_r </MPICC>
  <MPIFC> mpxlf2003_r </MPIFC>
  <SCC> cc_r </SCC>
  <SFC> xlf2003_r </SFC>
  <SLIBS>
    <append> -lmassv -lessl </append>
    <append DEBUG="FALSE"> -lmass </append>
  </SLIBS>
</compiler>

<compiler OS="BGQ" COMPILER="ibm">
  <CONFIG_ARGS>
    <base> --build=powerpc-bgp-linux --host=powerpc64-suse-linux </base>
  </CONFIG_ARGS>
  <CPPDEFS>
    <append> -DLINUX  </append>
  </CPPDEFS>
  <FFLAGS>
    <base> -g -qfullpath -qmaxmem=-1 -qspillsize=2500 -qextname=flush </base>
    <append DEBUG="FALSE"> -O3 -qstrict -qinline=auto </append>
    <append DEBUG="FALSE" compile_threaded="TRUE"> -qsmp=omp </append>
    <append DEBUG="TRUE" compile_threaded="TRUE"> -qsmp=omp:noopt </append>
  </FFLAGS>
  <LDFLAGS>
    <base>  -Wl,--relax -Wl,--allow-multiple-definition </base>
  </LDFLAGS>
</compiler>

<compiler OS="CNL">
  <CMAKE_OPTS>
    <base> -DCMAKE_SYSTEM_NAME=Catamount</base>
  </CMAKE_OPTS>
  <CPPDEFS>
    <append> -DLINUX </append>
    <append MODEL="gptl"> -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY  </append>
  </CPPDEFS>
  <MPICC> cc </MPICC>
  <MPICXX> CC </MPICXX>
  <MPIFC> ftn </MPIFC>
  <NETCDF_PATH>$ENV{NETCDF_DIR}</NETCDF_PATH>
  <PIO_FILESYSTEM_HINTS>lustre</PIO_FILESYSTEM_HINTS>
  <PNETCDF_PATH>$ENV{PARALLEL_NETCDF_DIR}</PNETCDF_PATH>
  <SCC> cc </SCC>
  <SCXX> CC </SCXX>
  <SFC> ftn </SFC>
</compiler>

<compiler OS="Darwin">
  <CPPDEFS>
    <append> -DSYSDARWIN </append>
  </CPPDEFS>
</compiler>

<compiler OS="Darwin" COMPILER="intel">
  <FFLAGS>
    <append compile_threaded="FALSE"> -heap-arrays </append>
  </FFLAGS>
</compiler>

<compiler MACH="aleph" COMPILER="intel">
  <CONFIG_ARGS>
    <base> --host=Linux </base>
  </CONFIG_ARGS>
  <CFLAGS>
    <append> -xCORE-AVX2 </append>
  </CFLAGS>
  <FFLAGS>
    <append> -xCORE-AVX2 </append>
  </FFLAGS>
  <SLIBS>
    <append> -L$(NETCDF_DIR) -lnetcdff -Wl,--as-needed,-L$(NETCDF_DIR)/lib -lnetcdff -lnetcdf </append>
  </SLIBS>
  <CPPDEFS>
    <append MODEL="gptl"> -DHAVE_PAPI -DHAVE_SLASHPROC </append>
  </CPPDEFS>
  <LDFLAGS>
    <append>-mkl </append>
  </LDFLAGS>
</compiler>

<compiler MACH="athena">
  <CPPDEFS>
    <!-- these flags enable nano timers -->
    <append MODEL="gptl"> -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY </append>
  </CPPDEFS>
  <NETCDF_PATH>$ENV{NETCDF}</NETCDF_PATH>
  <PIO_FILESYSTEM_HINTS>gpfs</PIO_FILESYSTEM_HINTS>
  <PNETCDF_PATH>$ENV{PNETCDF}</PNETCDF_PATH>
  <AR>xiar</AR>
  <ARFLAGS>
    <base>cru</base>
  </ARFLAGS>
</compiler>

<compiler MACH="athena" COMPILER="intel">
  <CFLAGS>
    <append>  -xHost </append>
  </CFLAGS>
  <CPPDEFS>
    <append> -DINTEL_MKL -DHAVE_SSE2 </append>
  </CPPDEFS>
  <FFLAGS>
    <append>  -xHost </append>
  </FFLAGS>
  <FFLAGS>
    <append MODEL="nemo"> $(FC_AUTO_R8) -O3 -assume norealloc_lhs </append>
  </FFLAGS>
  <SLIBS>
    <append> $SHELL{${NETCDF_PATH}/bin/nc-config --flibs}</append>
  </SLIBS>
  <MPICXX MPILIB="mpich2">mpiicpc</MPICXX>
  <MPICC MPILIB="mpich2">mpiicc</MPICC>
  <MPIFC MPILIB="mpich2">mpiifort</MPIFC>
  <SCC>icc</SCC>
  <SFC>ifort</SFC>
  <TRILINOS_PATH MPILIB="mpich2">$ENV{TRILINOS_PATH}</TRILINOS_PATH>
</compiler>

<compiler MACH="bluewaters">
  <CPPDEFS>
    <append MODEL="gptl"> -DHAVE_PAPI </append>
  </CPPDEFS>
  <PIO_FILESYSTEM_HINTS>lustre</PIO_FILESYSTEM_HINTS>
</compiler>

<compiler MACH="bluewaters" COMPILER="intel">
  <HAS_F2008_CONTIGUOUS>FALSE</HAS_F2008_CONTIGUOUS>
  <FFLAGS>
    <append> -dynamic -mkl=sequential -no-fma</append>
  </FFLAGS>
  <CFLAGS>
    <append> -dynamic -mkl=sequential -no-fma</append>
  </CFLAGS>
</compiler>

<compiler MACH="bluewaters" COMPILER="pgi">
  <CFLAGS>
    <append DEBUG="FALSE"> -O2 </append>
    <append> -nofma </append>
  </CFLAGS>
  <CXX_LIBS>
    <base> -lmpichf90_pgi $ENV{PGI_PATH}/linux86-64/$ENV{PGI_VERSION}/lib/f90main.o </base>
  </CXX_LIBS>
  <FFLAGS>
    <append DEBUG="FALSE"> -O2 </append>
    <append> -nofma </append>
  </FFLAGS>
  <SUPPORTS_CXX>TRUE</SUPPORTS_CXX>
</compiler>

<compiler MACH="daint" COMPILER="pgi">
  <FFLAGS>
    <append> -I/project/s824/edavin/OASIS3-MCT_2.0/build.pgi/build/lib/mct -I/project/s824/edavin/OASIS3-MCT_2.0/build.pgi/build/lib/psmile.MPI1 </append>
  </FFLAGS>
  <SLIBS>
    <append> -llapack -lblas </append>
    <append> -L/project/s824/edavin/OASIS3-MCT_2.0/build.pgi/lib -lpsmile.MPI1 -lscrip -lmct_oasis -lmpeu_oasis </append>
  </SLIBS>
</compiler>

<compiler MACH="daint" COMPILER="cray">
  <FFLAGS>
    <append> -I/project/s824/edavin/OASIS3-MCT_2.0/build.cray/build/lib/mct -I/project/s824/edavin/OASIS3-MCT_2.0/build.cray/build/lib/psmile.MPI1 </append>
  </FFLAGS>
  <SLIBS>
    <append> -L/project/s824/edavin/OASIS3-MCT_2.0/build.cray/lib -lpsmile.MPI1 -lscrip -lmct_oasis -lmpeu_oasis </append>
  </SLIBS>
</compiler>
<compiler MACH="centos7-linux">
  <SLIBS>
    <append>-L$(NETCDF_PATH)/lib -Wl,-rpath,$(NETCDF_PATH)/lib -lnetcdff -lnetcdf </append>
  </SLIBS>
</compiler>

<compiler MACH="cheyenne">
  <CPPDEFS>
    <!-- these flags enable nano timers -->
    <append MODEL="gptl"> -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY </append>
  </CPPDEFS>
  <NETCDF_PATH>$ENV{NETCDF}</NETCDF_PATH>
  <PIO_FILESYSTEM_HINTS>gpfs</PIO_FILESYSTEM_HINTS>
  <PNETCDF_PATH>$ENV{PNETCDF}</PNETCDF_PATH>
</compiler>

<compiler MACH="cheyenne" COMPILER="gnu">
  <CPPDEFS>
    <append MODEL="pio1"> -DNO_MPIMOD </append>
  </CPPDEFS>
  <SLIBS>
    <append> -ldl </append>
  </SLIBS>
</compiler>

<compiler MACH="cheyenne" COMPILER="intel">
  <CFLAGS>
    <append> -qopt-report -xCORE_AVX2 -no-fma</append>
  </CFLAGS>
  <FFLAGS>
    <append> -qopt-report -xCORE_AVX2 -no-fma</append>
  </FFLAGS>
  <CMAKE_OPTS>
    <append DEBUG="TRUE"> -DPIO_ENABLE_LOGGING=ON </append>
  </CMAKE_OPTS>
  <PFUNIT_PATH MPILIB="mpi-serial" compile_threaded="FALSE">$ENV{CESMDATAROOT}/tools/pFUnit/pFUnit3.2.8_cheyenne_Intel17.0.1_noMPI_noOpenMP</PFUNIT_PATH>
  <PFUNIT_PATH MPILIB="mpt" compile_threaded="TRUE">$ENV{CESMDATAROOT}/tools/pFUnit/pFUnit3.2.8_cheyenne_Intel17.0.1_MPI_openMP</PFUNIT_PATH>
  <!-- Set to FALSE for intel 17 and 18; TRUE otherwise -->
  <HAS_F2008_CONTIGUOUS>TRUE</HAS_F2008_CONTIGUOUS>
</compiler>

<compiler MACH="cheyenne" COMPILER="pgi">
  <SLIBS>
    <append> -llapack -lblas </append>
    <append MPILIB="mpi-serial"> -ldl </append>
  </SLIBS>
</compiler>

<compiler MACH="coeus" COMPILER="gnu">
  <NETCDF_PATH>/vol/apps/hpc/stow/netcdf/4.4.1.1/gcc-6.3.0</NETCDF_PATH>
  <SLIBS>
    <base> -L${NETCDF_PATH}/lib -lnetcdf -lnetcdff -lpmi </base>
  </SLIBS>
</compiler>

<compiler MACH="constance" COMPILER="intel">
  <CFLAGS>
    <append DEBUG="FALSE"> -O2 </append>
  </CFLAGS>
  <CONFIG_ARGS>
    <base> --host=Linux </base>
  </CONFIG_ARGS>
  <CPPDEFS>
    <append> -DLINUX </append>
  </CPPDEFS>
  <FFLAGS>
    <append DEBUG="FALSE"> -O2 </append>
  </FFLAGS>
  <NETCDF_PATH> $ENV{NETCDF_HOME}</NETCDF_PATH>
  <PIO_FILESYSTEM_HINTS>lustre</PIO_FILESYSTEM_HINTS>
  <SLIBS>
    <base> -L${NETCDF_PATH}/lib -lnetcdf -lnetcdff -lpmi -L$ENV{MKL_PATH} -lmkl_rt</base>
  </SLIBS>
</compiler>

<compiler MACH="constance" COMPILER="pgi">
  <CFLAGS>
    <append DEBUG="FALSE"> -O2 </append>
  </CFLAGS>
  <CONFIG_ARGS>
    <base> --host=Linux </base>
  </CONFIG_ARGS>
  <CPPDEFS>
    <append> -DLINUX </append>
  </CPPDEFS>
  <FFLAGS>
    <append DEBUG="FALSE"> -O2 </append>
  </FFLAGS>
  <NETCDF_PATH> $ENV{NETCDF_HOME}</NETCDF_PATH>
  <PIO_FILESYSTEM_HINTS>lustre</PIO_FILESYSTEM_HINTS>
  <SLIBS>
    <base> -L${NETCDF_PATH}/lib -lnetcdf -lnetcdff -lpmi </base>
  </SLIBS>
</compiler>

<compiler MACH="cori-haswell" COMPILER="intel">
  <CONFIG_ARGS>
    <base> --host=Linux </base>
  </CONFIG_ARGS>
  <CFLAGS>
    <append> -xCORE-AVX2 </append>
  </CFLAGS>
  <FFLAGS>
    <append> -xCORE-AVX2 </append>
  </FFLAGS>
  <SLIBS>
    <append> -L$(NETCDF_DIR) -lnetcdff -Wl,--as-needed,-L$(NETCDF_DIR)/lib -lnetcdff -lnetcdf </append>
  </SLIBS>
  <CPPDEFS>
    <append MODEL="gptl"> -DHAVE_SLASHPROC </append>
  </CPPDEFS>
  <LDFLAGS>
    <append>-mkl </append>
  </LDFLAGS>
  <!-- Bug in the intel/17.0.1 compiler requires this; remove this line when the compiler is updated -->
  <HAS_F2008_CONTIGUOUS>FALSE</HAS_F2008_CONTIGUOUS>
</compiler>

<compiler MACH="cori-knl" COMPILER="intel">
  <CONFIG_ARGS>
    <base> --host=Linux </base>
  </CONFIG_ARGS>
  <CFLAGS>
    <append> -xMIC-AVX512 </append>
  </CFLAGS>
  <FFLAGS>
    <append> -xMIC-AVX512 </append>
  </FFLAGS>
  <SLIBS>
    <append> -L$(NETCDF_DIR) -lnetcdff -Wl,--as-needed,-L$(NETCDF_DIR)/lib -lnetcdff -lnetcdf </append>
  </SLIBS>
  <CPPDEFS>
    <append MODEL="gptl"> -DHAVE_SLASHPROC</append>
  </CPPDEFS>
  <LDFLAGS>
    <append>-mkl -lmemkind -zmuldefs</append>
  </LDFLAGS>
  <!-- Bug in the intel/17.0.1 compiler requires this; remove this line when the compiler is updated -->
  <HAS_F2008_CONTIGUOUS>FALSE</HAS_F2008_CONTIGUOUS>
</compiler>

<compiler MACH="eastwind" COMPILER="intel">
  <CFLAGS>
    <append DEBUG="FALSE"> -O2 </append>
  </CFLAGS>
  <CONFIG_ARGS>
    <base> --host=Linux </base>
  </CONFIG_ARGS>
  <CPPDEFS>
    <append> -DLINUX </append>
  </CPPDEFS>
  <FFLAGS>
    <append DEBUG="FALSE"> -O2 </append>
  </FFLAGS>
  <NETCDF_PATH> $ENV{NETCDF_HOME}</NETCDF_PATH>
  <PIO_FILESYSTEM_HINTS>lustre</PIO_FILESYSTEM_HINTS>
  <SLIBS>
    <base> -L${NETCDF_PATH}/lib -lnetcdf -lnetcdff -lpmi </base>
  </SLIBS>
</compiler>

<compiler MACH="eastwind" COMPILER="pgi">
  <CFLAGS>
    <append DEBUG="FALSE"> -O2 </append>
    <append compile_threaded="FALSE"> -nomp </append>
  </CFLAGS>
  <CONFIG_ARGS>
    <base> --host=Linux </base>
  </CONFIG_ARGS>
  <CPPDEFS>
    <append> -DLINUX </append>
  </CPPDEFS>
  <FFLAGS>
    <append DEBUG="FALSE"> -O2 </append>
    <append compile_threaded="FALSE"> -nomp </append>
  </FFLAGS>
  <LDFLAGS>
    <append compile_threaded="FALSE"> -nomp </append>
  </LDFLAGS>
  <NETCDF_PATH> $ENV{NETCDF_HOME}</NETCDF_PATH>
  <PIO_FILESYSTEM_HINTS>lustre</PIO_FILESYSTEM_HINTS>
  <SLIBS>
    <base> -L${NETCDF_PATH}/lib -lnetcdf -lnetcdff -lpmi </base>
  </SLIBS>
</compiler>

<compiler MACH="edison" COMPILER="intel">
  <CFLAGS>
    <append DEBUG="FALSE"> -O2  </append>
  </CFLAGS>
  <CONFIG_ARGS>
    <base> --host=Linux </base>
  </CONFIG_ARGS>
  <CPPDEFS>
    <append MODEL="gptl"> -DHAVE_PAPI </append>
  </CPPDEFS>
  <FFLAGS>
    <append DEBUG="FALSE"> -O2  </append>
  </FFLAGS>
  <SLIBS>
    <append> -L$ENV{NETCDF_DIR} -lnetcdff -Wl,--as-needed,-L$ENV{NETCDF_DIR}/lib -lnetcdff -lnetcdf </append>
  </SLIBS>
</compiler>

<compiler MACH="euler2">
  <CPPDEFS>
    <append> -DLINUX </append>
  </CPPDEFS>
  <NETCDF_PATH>$ENV{NETCDF}</NETCDF_PATH>
  <PIO_FILESYSTEM_HINTS>lustre</PIO_FILESYSTEM_HINTS>
  <PNETCDF_PATH>$ENV{PNETCDF}</PNETCDF_PATH>
  <SLIBS>
    <base> -L$ENV{NETCDF}/lib -lnetcdf -lnetcdff </base>
  </SLIBS>
</compiler>

<compiler MACH="euler2" COMPILER="intel">
  <CFLAGS>
    <append DEBUG="FALSE"> -xCORE-AVX2 </append>
  </CFLAGS>
  <FFLAGS>
    <append DEBUG="FALSE"> -xCORE-AVX2 </append>
  </FFLAGS>
  <LDFLAGS>
    <append> -mkl </append>
  </LDFLAGS>
</compiler>

<compiler MACH="euler2" COMPILER="pgi">
  <CFLAGS>
    <append DEBUG="FALSE"> -O2 </append>
  </CFLAGS>
  <FFLAGS>
    <append DEBUG="FALSE"> -O2 </append>
  </FFLAGS>
</compiler>

<compiler MACH="euler3">
  <CPPDEFS>
    <append> -DLINUX </append>
  </CPPDEFS>
  <NETCDF_PATH>$ENV{NETCDF}</NETCDF_PATH>
  <PIO_FILESYSTEM_HINTS>lustre</PIO_FILESYSTEM_HINTS>
  <PNETCDF_PATH>$ENV{PNETCDF}</PNETCDF_PATH>
  <SLIBS>
    <base> -L$ENV{NETCDF}/lib -lnetcdf -lnetcdff </base>
  </SLIBS>
</compiler>

<compiler MACH="euler3" COMPILER="intel">
  <CFLAGS>
    <append DEBUG="FALSE"> -xCORE-AVX2 </append>
  </CFLAGS>
  <FFLAGS>
    <append DEBUG="FALSE"> -xCORE-AVX2 </append>
  </FFLAGS>
  <LDFLAGS>
    <append> -mkl </append>
  </LDFLAGS>
</compiler>

<compiler MACH="euler3" COMPILER="pgi">
  <CFLAGS>
    <append DEBUG="FALSE"> -O2 </append>
  </CFLAGS>
  <FFLAGS>
    <append DEBUG="FALSE"> -O2 </append>
  </FFLAGS>
</compiler>

<compiler MACH="euler4">
  <CPPDEFS>
    <append> -DLINUX </append>
  </CPPDEFS>
  <NETCDF_PATH>$ENV{NETCDF}</NETCDF_PATH>
  <PIO_FILESYSTEM_HINTS>lustre</PIO_FILESYSTEM_HINTS>
  <PNETCDF_PATH>$ENV{PNETCDF}</PNETCDF_PATH>
  <SLIBS>
    <base> -L$ENV{NETCDF}/lib -lnetcdf -lnetcdff </base>
  </SLIBS>
</compiler>

<compiler MACH="euler4" COMPILER="intel">
  <CFLAGS>
    <append DEBUG="FALSE"> -xCORE-AVX2 </append>
  </CFLAGS>
  <FFLAGS>
    <append DEBUG="FALSE"> -xCORE-AVX2 </append>
  </FFLAGS>
  <LDFLAGS>
    <append> -mkl </append>
  </LDFLAGS>
</compiler>

<compiler MACH="euler4" COMPILER="pgi">
  <CFLAGS>
    <append DEBUG="FALSE"> -O2 </append>
  </CFLAGS>
  <FFLAGS>
    <append DEBUG="FALSE"> -O2 </append>
  </FFLAGS>
</compiler>

<compiler MACH="hobart">
  <CPPDEFS>
    <append MODEL="gptl"> -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY </append>
  </CPPDEFS>
  <LAPACK_LIBDIR> /usr/lib64 </LAPACK_LIBDIR>
  <MPI_LIB_NAME MPILIB="mvapich2"> mpich</MPI_LIB_NAME>
  <NETCDF_PATH>$ENV{NETCDF_PATH}</NETCDF_PATH>
  <SLIBS>
    <append>-L$NETCDF_PATH/lib -lnetcdff -lnetcdf</append>
  </SLIBS>
</compiler>

<compiler MACH="izumi">
  <CPPDEFS>
    <append MODEL="gptl"> -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY </append>
  </CPPDEFS>
  <LAPACK_LIBDIR> /usr/lib64 </LAPACK_LIBDIR>
  <MPI_LIB_NAME MPILIB="mvapich2"> mpich</MPI_LIB_NAME>
  <NETCDF_PATH>$ENV{NETCDF_PATH}</NETCDF_PATH>
  <SLIBS>
    <append>-L$NETCDF_PATH/lib -lnetcdff -lnetcdf</append>
  </SLIBS>
</compiler>

<compiler MACH="izumi" COMPILER="intel">
  <PFUNIT_PATH MPILIB="mpi-serial" compile_threaded="FALSE">/fs/cgd/csm/tools/pFUnit/pFUnit3.3.3_izumi_Intel19.0.1_noMPI_noOpenMP</PFUNIT_PATH>
</compiler>

<compiler MACH="hobart" COMPILER="intel">
  <CFLAGS>
    <append> -lifcore</append>
  </CFLAGS>
  <FFLAGS>
    <append> -lifcore</append>
    <append MPILIB="mpi-serial"> -mcmodel medium </append>
  </FFLAGS>
  <LDFLAGS>
    <append> -lquadmath </append>
    <append> -Wl,-rpath,${NETCDF_PATH}/lib </append>
    <append> -Wl,-rpath,$ENV{COMPILER_PATH}/lib/intel64 </append>
    <append> -Wl,-rpath,$ENV{COMPILER_PATH}/mkl/lib/intel64 </append>
    <append> -Wl,-rpath,$ENV{MPI_PATH}/lib</append>
    <append> -lifcore</append>
  </LDFLAGS>
  <SLIBS>
    <append MPILIB="mvapich2"> -mkl=cluster </append>
  </SLIBS>
  <PFUNIT_PATH MPILIB="mpi-serial" compile_threaded="FALSE">/fs/cgd/csm/tools/pFUnit/pFUnit3.2.8_hobart_Intel15.0.2_noMPI_noOpenMP</PFUNIT_PATH>
</compiler>

<compiler MACH="hobart" COMPILER="pgi">
  <CFLAGS>
    <append DEBUG="FALSE"> -O0 </append>
  </CFLAGS>
  <FFLAGS>
    <append DEBUG="FALSE"> -O0 </append>
  </FFLAGS>
  <LDFLAGS>
    <append> -lgomp </append>
    <append> -Wl,-R${NETCDF_PATH}/lib</append>
    <append> -Wl,-R$ENV{COMPILER_PATH}/lib</append>
    <append> -Wl,-R$ENV{COMPILER_PATH}/libso</append>
  </LDFLAGS>
</compiler>

<compiler MACH="hobart" COMPILER="gnu">
  <SLIBS>
    <append> -lm -ldl</append>
  </SLIBS>
</compiler>

<compiler MACH="homebrew" COMPILER="gnu">
  <LDFLAGS>
    <!-- These LDFLAGS provide lapack and blas support on a Mac. This
         may require installation of the Apple Developer Tools. -->
    <append> -framework Accelerate -Wl,-rpath $(NETCDF)/lib</append>
  </LDFLAGS>
</compiler>

<compiler MACH="laramie">
  <CPPDEFS>
    <!-- these flags enable nano timers -->
    <append MODEL="gptl"> -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY </append>
  </CPPDEFS>
  <NETCDF_PATH>$ENV{NETCDF}</NETCDF_PATH>
  <PIO_FILESYSTEM_HINTS>gpfs</PIO_FILESYSTEM_HINTS>
  <PNETCDF_PATH>$ENV{PNETCDF}</PNETCDF_PATH>
</compiler>
<compiler MACH="laramie" COMPILER="intel">
  <CFLAGS>
    <append> -vec-report </append>
  </CFLAGS>
  <FFLAGS>
    <append> -vec-report </append>
  </FFLAGS>
  <CMAKE_OPTS>
    <append DEBUG="TRUE"> -DPIO_ENABLE_LOGGING=ON </append>
  </CMAKE_OPTS>
</compiler>

<compiler COMPILER="intel" MACH="lawrencium-lr3">
  <CPPDEFS>
    <append MODEL="gptl"> -DHAVE_VPRINTF -DHAVE_TIMES -DHAVE_GETTIMEOFDAY </append>
  </CPPDEFS>
  <SLIBS>
    <append> -lnetcdff -lnetcdf -mkl </append>
  </SLIBS>
  <FFLAGS>
    <append DEBUG="TRUE"> -ftrapuv </append>
  </FFLAGS>
  <CFLAGS>
    <append DEBUG="TRUE"> -ftrapuv </append>
  </CFLAGS>
  <NETCDF_PATH>$ENV{NETCDF_DIR}</NETCDF_PATH>
  <LAPACK_LIBDIR>/global/software/sl-6.x86_64/modules/intel/2016.1.150/lapack/3.6.0-intel/lib</LAPACK_LIBDIR>
</compiler>

<compiler COMPILER="intel" MACH="lawrencium-lr2">
  <CPPDEFS>
    <append MODEL="gptl"> -DHAVE_VPRINTF -DHAVE_TIMES -DHAVE_GETTIMEOFDAY </append>
  </CPPDEFS>
  <SLIBS>
    <append> -lnetcdff -lnetcdf -mkl </append>
  </SLIBS>
  <FFLAGS>
    <append DEBUG="TRUE"> -ftrapuv </append>
  </FFLAGS>
  <CFLAGS>
    <append DEBUG="TRUE"> -ftrapuv </append>
  </CFLAGS>
  <NETCDF_PATH>$ENV{NETCDF_DIR}</NETCDF_PATH>
  <LAPACK_LIBDIR>/global/software/sl-6.x86_64/modules/intel/2016.1.150/lapack/3.6.0-intel/lib</LAPACK_LIBDIR>
</compiler>

<compiler MACH="melvin" COMPILER="gnu">
  <ALBANY_PATH>/projects/install/rhel6-x86_64/ACME/AlbanyTrilinos/Albany/build/install</ALBANY_PATH>
  <CFLAGS>
    <append DEBUG="FALSE"> -O2  </append>
  </CFLAGS>
  <CONFIG_ARGS>
    <base> --host=Linux </base>
  </CONFIG_ARGS>
  <CXX_LIBS>
    <base>-lstdc++ -lmpi_cxx</base>
  </CXX_LIBS>
  <FFLAGS>
    <append DEBUG="FALSE"> -O2  </append>
  </FFLAGS>
  <NETCDF_PATH>$ENV{NETCDFROOT}</NETCDF_PATH>
  <PNETCDF_PATH>$ENV{PNETCDFROOT}</PNETCDF_PATH>
  <SLIBS>
    <append> $SHELL{${NETCDF_PATH}/bin/nf-config --flibs} -lblas -llapack</append>
  </SLIBS>
</compiler>

<compiler MACH="mira" COMPILER="ibm">
  <CFLAGS>
    <append>-qfloat=nomaf</append>
  </CFLAGS>
  <FFLAGS>
    <append>-qfloat=nomaf</append>
  </FFLAGS>
  <HDF5_PATH>$ENV{HDF5}</HDF5_PATH>
  <!-- This LD is a workaround for darshan initialization on mira (Darshan does -->
  <!-- not run if f90 or higher is used for linking). -->
  <LD> /home/pkcoff/mpich-sandboxes/master/install-production/bin/mpixlf77_r </LD>
  <MPICC> /home/pkcoff/mpich-sandboxes/master/install-production/bin/mpixlc_r </MPICC>
  <MPIFC> /home/pkcoff/mpich-sandboxes/master/install-production/bin/mpixlf2003_r </MPIFC>
  <NETCDF_PATH>/soft/libraries/netcdf/4.3.3-f4.4.1/cnk-xl/current/</NETCDF_PATH>
  <PIO_FILESYSTEM_HINTS>gpfs </PIO_FILESYSTEM_HINTS>
  <PNETCDF_PATH>/soft/libraries/pnetcdf/1.6.1/cnk-xl/current/</PNETCDF_PATH>
  <SCC> /home/pkcoff/mpich-sandboxes/master/install-production/bin/mpixlc_r </SCC>
  <SFC> /home/pkcoff/mpich-sandboxes/master/install-production/bin/mpixlf2003_r </SFC>
  <SLIBS>
    <append>-L${NETCDF_PATH}/lib -lnetcdff -lnetcdf -L$ENV{HDF5}/lib -lhdf5_hl -lhdf5 -L/soft/libraries/alcf/current/xl/ZLIB/lib -lz -L/soft/libraries/alcf/current/xl/LAPACK/lib -llapack -L/soft/libraries/alcf/current/xl/BLAS/lib -lblas -L/bgsys/drivers/ppcfloor/comm/sys/lib </append>
  </SLIBS>
</compiler>

<compiler COMPILER="gnu" MACH="modex">
  <SLIBS>
    <append> -L$ENV{HDF5_HOME}/lib -lhdf5_fortran -lhdf5 -lhdf5_hl -lhdf5hl_fortran </append>
    <append> -L$ENV{NETCDF_PATH}/lib/ -lnetcdff -lnetcdf -lcurl -lblas -llapack</append>
  </SLIBS>
  <CPPDEFS>
     <append MODEL="gptl"> -DHAVE_VPRINTF -DHAVE_GETTIMEOFDAY -DHAVE_BACKTRACE </append>
  </CPPDEFS>
</compiler>

<compiler MACH="olympus" COMPILER="pgi">
  <CFLAGS>
    <append DEBUG="FALSE"> -O2 </append>
  </CFLAGS>
  <CONFIG_ARGS>
    <base> --host=Linux </base>
  </CONFIG_ARGS>
  <CPPDEFS>
    <append> -DLINUX </append>
  </CPPDEFS>
  <FFLAGS>
    <append DEBUG="FALSE"> -O2 </append>
  </FFLAGS>
  <NETCDF_PATH> $ENV{NETCDF_LIB}/..</NETCDF_PATH>
  <PIO_FILESYSTEM_HINTS>lustre</PIO_FILESYSTEM_HINTS>
  <SLIBS>
    <base> -L${NETCDF_PATH}/lib -lnetcdf -lnetcdff -lpmi </base>
  </SLIBS>
</compiler>

<compiler MACH="pleiades-bro">
  <FFLAGS>
    <append DEBUG="FALSE"> -O2 -xCORE-AVX2 </append>
  </FFLAGS>
  <MPICC>icc</MPICC>
  <MPI_LIB_NAME>mpi</MPI_LIB_NAME>
  <MPI_PATH>$ENV{MPI_ROOT}</MPI_PATH>
  <NETCDF_PATH>$ENV{NETCDF}</NETCDF_PATH>
  <SLIBS>
    <append>-L$ENV{NETCDF}/lib -lnetcdff -lnetcdf</append>
  </SLIBS>
  <ESMF_LIBDIR>/home6/fvitt/esmf_7_1_0r/esmf/lib/libO/Linux.intel.64.mpi.default</ESMF_LIBDIR>
</compiler>

<compiler MACH="pleiades-has">
  <FFLAGS>
    <append DEBUG="FALSE"> -O2 -xCORE-AVX2 </append>
  </FFLAGS>
  <MPICC>icc</MPICC>
  <MPI_LIB_NAME>mpi</MPI_LIB_NAME>
  <MPI_PATH>$ENV{MPI_ROOT}</MPI_PATH>
  <NETCDF_PATH>$ENV{NETCDF}</NETCDF_PATH>
  <SLIBS>
    <append>-L$ENV{NETCDF}/lib -lnetcdff -lnetcdf</append>
  </SLIBS>
  <ESMF_LIBDIR>/home6/fvitt/esmf_7_1_0r/esmf/lib/libO/Linux.intel.64.mpi.default</ESMF_LIBDIR>
</compiler>

<compiler MACH="pleiades-ivy">
  <FFLAGS>
    <append DEBUG="FALSE"> -O2 -xAVX </append>
  </FFLAGS>
  <MPICC>icc</MPICC>
  <MPI_LIB_NAME>mpi</MPI_LIB_NAME>
  <MPI_PATH>$ENV{MPI_ROOT}</MPI_PATH>
  <NETCDF_PATH>$ENV{NETCDF}</NETCDF_PATH>
  <SLIBS>
    <append>-L$ENV{NETCDF}/lib -lnetcdff -lnetcdf</append>
  </SLIBS>
  <ESMF_LIBDIR>/home6/fvitt/esmf_7_1_0r/esmf/lib/libO/Linux.intel.64.mpi.default</ESMF_LIBDIR>
</compiler>

<compiler MACH="pleiades-san">
  <FFLAGS>
    <append DEBUG="FALSE"> -O2 -xAVX </append>
  </FFLAGS>
  <MPICC>icc</MPICC>
  <MPI_LIB_NAME>mpi</MPI_LIB_NAME>
  <MPI_PATH>$ENV{MPI_ROOT}</MPI_PATH>
  <NETCDF_PATH>$ENV{NETCDF}</NETCDF_PATH>
  <SLIBS>
    <append>-L$ENV{NETCDF}/lib -lnetcdff -lnetcdf</append>
  </SLIBS>
  <ESMF_LIBDIR>/home6/fvitt/esmf_7_1_0r/esmf/lib/libO/Linux.intel.64.mpi.default</ESMF_LIBDIR>
</compiler>

<compiler MACH="sandiatoss3" COMPILER="intel">
  <CFLAGS>
    <append DEBUG="FALSE"> -O2  </append>
  </CFLAGS>
  <CONFIG_ARGS>
    <base> --host=Linux </base>
  </CONFIG_ARGS>
  <ESMF_LIBDIR>/projects/ccsm/esmf-6.3.0rp1/lib/libO/Linux.intel.64.openmpi.default</ESMF_LIBDIR>
  <FFLAGS>
    <append DEBUG="FALSE"> -O2  </append>
  </FFLAGS>
  <NETCDF_PATH>$ENV{NETCDFROOT}</NETCDF_PATH>
  <PIO_FILESYSTEM_HINTS>lustre </PIO_FILESYSTEM_HINTS>
  <PNETCDF_PATH>$ENV{PNETCDFROOT}</PNETCDF_PATH>
  <SLIBS>
    <append> -L${NETCDF_PATH}/lib -lnetcdff -L/projects/ccsm/BLAS-intel -lblas_LINUX</append>
  </SLIBS>
</compiler>

<compiler MACH="lonestar5">
  <CPPDEFS>
    <append> -DHAVE_NANOTIME </append>
  </CPPDEFS>
  <NETCDF_PATH>$ENV{TACC_NETCDF_DIR}</NETCDF_PATH>
  <PIO_FILESYSTEM_HINTS>lustre</PIO_FILESYSTEM_HINTS>
  <PNETCDF_PATH>$ENV{TACC_PNETCDF_DIR}</PNETCDF_PATH>
  <LDFLAGS>
    <append> -Wl,-rpath,${NETCDF_PATH}/lib </append>
  </LDFLAGS>
  <SLIBS>
    <append> -L${NETCDF_PATH}/lib -lnetcdff -lnetcdf</append>
  </SLIBS>
</compiler>

<compiler MACH="stampede2-skx">
  <CPPDEFS>
    <append> -DHAVE_NANOTIME </append>
  </CPPDEFS>
  <NETCDF_PATH>$ENV{TACC_NETCDF_DIR}</NETCDF_PATH>
  <PIO_FILESYSTEM_HINTS>lustre</PIO_FILESYSTEM_HINTS>
  <PNETCDF_PATH>$ENV{TACC_PNETCDF_DIR}</PNETCDF_PATH>
</compiler>

<compiler MACH="stampede2-skx" COMPILER="intel">
  <CFLAGS>
    <append> -xCOMMON-AVX512 -no-fma </append>
  </CFLAGS>
  <FFLAGS>
    <append> -xCOMMON-AVX512 -no-fma </append>
    <append MPILIB="mpi-serial"> -mcmodel medium </append>
  </FFLAGS>
  <LDFLAGS>
    <append>-L$ENV{TACC_HDF5_LIB} -lhdf5 $(MKL) -zmuldefs -xCOMMON-AVX512</append>
  </LDFLAGS>
  <SLIBS>
    <append>$SHELL{${NETCDF_PATH}/bin/nf-config --flibs} -L$ENV{TACC_HDF5_LIB} -lhdf5</append>
  </SLIBS>
  <TRILINOS_PATH>$ENV{TRILINOS_PATH}</TRILINOS_PATH>
  <HAS_F2008_CONTIGUOUS>FALSE</HAS_F2008_CONTIGUOUS>
</compiler>

<compiler MACH="stampede2-knl">
  <CPPDEFS>
    <append> -DHAVE_NANOTIME </append>
  </CPPDEFS>
  <NETCDF_PATH>$ENV{TACC_NETCDF_DIR}</NETCDF_PATH>
  <PIO_FILESYSTEM_HINTS>lustre</PIO_FILESYSTEM_HINTS>
  <PNETCDF_PATH>$ENV{TACC_PNETCDF_DIR}</PNETCDF_PATH>
</compiler>

<compiler MACH="stampede2-knl" COMPILER="intel">
  <CFLAGS>
    <append> -xCOMMON-AVX512 -no-fma </append>
  </CFLAGS>
  <FFLAGS>
    <append> -xCOMMON-AVX512 -no-fma </append>
    <append MPILIB="mpi-serial"> -mcmodel medium </append>
  </FFLAGS>
  <LDFLAGS>
    <append>-L$ENV{TACC_HDF5_LIB} -lhdf5 $(MKL) -zmuldefs -xCOMMON-AVX512</append>
  </LDFLAGS>
  <SLIBS>
    <append>$SHELL{${NETCDF_PATH}/bin/nf-config --flibs} -L$ENV{TACC_HDF5_LIB} -lhdf5</append>
  </SLIBS>
  <TRILINOS_PATH>$ENV{TRILINOS_PATH}</TRILINOS_PATH>
  <HAS_F2008_CONTIGUOUS>FALSE</HAS_F2008_CONTIGUOUS>
</compiler>

<compiler COMPILER="intel" MACH="theia">
  <MPICC> mpiicc  </MPICC>
  <MPICXX> mpiicpc </MPICXX>
  <MPIFC> mpiifort </MPIFC>
  <NETCDF_PATH>/apps/netcdf/4.3.0-intel</NETCDF_PATH>
</compiler>

<compiler MACH="theta">
  <CFLAGS>
    <append> -xMIC-AVX512 </append>
  </CFLAGS>
  <FFLAGS>
    <append> -xMIC-AVX512 </append>
  </FFLAGS>
  <CONFIG_ARGS>
    <base> --host=Linux </base>
  </CONFIG_ARGS>
  <SLIBS>
    <append>-L$(NETCDF_DIR)/lib -lnetcdff -L$(NETCDF_DIR)/lib -lnetcdf -Wl,-rpath -Wl,$(NETCDF_DIR)/lib </append>
  </SLIBS>
</compiler>

<compiler MACH="userdefined">
  <CONFIG_ARGS>
    <base/>
  </CONFIG_ARGS>
  <CPPDEFS>
    <append/>
  </CPPDEFS>
  <ESMF_LIBDIR/>
  <MPI_LIB_NAME/>
  <MPI_PATH/>
  <NETCDF_PATH> USERDEFINED_MUST_EDIT_THIS</NETCDF_PATH>
  <PNETCDF_PATH/>
  <SLIBS>
    <append># USERDEFINED $SHELL{${NETCDF_PATH}/bin/nc-config --flibs}</append>
  </SLIBS>
</compiler>

</config_compilers>

CESM XML settings for supported machines.

<?xml version="1.0"?>

<!--

===============================================================
COMPILER and COMPILERS
===============================================================
If a machine supports multiple compilers - then
- the settings for COMPILERS should reflect the supported compilers
as a comma separated string
- the setting for COMPILER should be the default compiler
(which is one of the values in COMPILERS)
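
For example, a machine that supports intel, gnu and pgi, with intel as the default,
could set (values here are illustrative only):

  <COMPILERS>intel,gnu,pgi</COMPILERS>
  <COMPILER>intel</COMPILER>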

===============================================================
MPILIB and MPILIBS
===============================================================
If a machine supports only one MPILIB - then
the settings for MPILIB and MPILIBS should be blank ("")
If a machine supports multiple mpi libraries (e.g. mpich and openmpi)
- the settings for MPILIBS should reflect the supported mpi libraries
as a comma separated string

The default settings for COMPILERS and MPILIBS are blank (in config_machines.xml)
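
For example, a machine that supports both mpt and openmpi (as cheyenne below does for intel)
would set:

  <MPILIBS>mpt,openmpi</MPILIBS>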

Normally variable substitutions are not made until the case scripts are run; however, variables
of the form $ENV{VARIABLE_NAME} are substituted in create_newcase from the environment
variable of the same name if it exists.
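
For example, entries such as these (taken verbatim from machine definitions below) are resolved
from the environment when create_newcase runs, provided the variable is set:

  <CIME_OUTPUT_ROOT>$ENV{SCRATCH}</CIME_OUTPUT_ROOT>
  <DIN_LOC_ROOT>$ENV{CESMDATAROOT}/inputdata</DIN_LOC_ROOT>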

===============================================================
PROJECT_REQUIRED
===============================================================
A machine may need the PROJECT xml variable to be defined either because it is
used in some paths, or because it is used to give an account number in the job
submission script. If either of these are the case, then PROJECT_REQUIRED
should be set to TRUE for the given machine.
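
For example, bluewaters below needs an account for job submission and therefore sets:

  <PROJECT>banu</PROJECT>
  <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>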


mpirun: the mpirun command that will be used to actually launch the model.
The attributes used to choose the mpirun command are:

mpilib: can be 'default', the name of an mpi library, or a compiler name, so one can choose the mpirun
based on the mpi library in use.

the 'executable' tag must have arguments required for the chosen mpirun, as well as the executable name.

unit_testing: can be 'true' or 'false'.
This allows using a different mpirun command to launch unit tests.
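
A minimal mpirun block (matching the centos7-linux entry below) looks like:

  <mpirun mpilib="default">
    <executable>mpiexec</executable>
    <arguments>
      <arg name="ntasks"> -np {{ total_tasks }} </arg>
    </arguments>
  </mpirun>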

-->

<config_machines version="2.0">
  <machine MACH="aleph">
    <DESC>XC50 SkyLake, os is CNL, 40 pes/node, batch system is PBSPro</DESC>
    <NODENAME_REGEX>.*eth\d</NODENAME_REGEX>
    <OS>CNL</OS>
    <COMPILERS>intel,gnu,cray</COMPILERS>
    <MPILIBS>mpt,mpi-serial</MPILIBS>
    <CIME_OUTPUT_ROOT>/proj/$ENV{USER}</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>$ENV{DIN_LOC_ROOT}</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>$DIN_LOC_ROOT</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>${CIME_OUTPUT_ROOT}/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>${CIME_OUTPUT_ROOT}/cesm_baselines</BASELINE_ROOT>
    <GMAKE_J>8</GMAKE_J>
    <BATCH_SYSTEM>pbs</BATCH_SYSTEM>
    <SUPPORTED_BY> @ pusan.ac.kr</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>40</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>40</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="default">
      <executable>aprun</executable>
      <arguments>
        <arg name="hyperthreading" default="1"> -j {{ hyperthreading }}</arg>
        <arg name="num_tasks"> -n {{ total_tasks }}</arg>
        <arg name="tasks_per_node"> -N $MAX_MPITASKS_PER_NODE</arg>
        <arg name="tasks_per_numa" > -S {{ tasks_per_numa }}</arg>
        <arg name="thread_count"> -d $ENV{OMP_NUM_THREADS}</arg>
        <arg name="env_thread_count">--mpmd-env OMP_NUM_THREADS=$OMP_NUM_THREADS</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/opt/modules/default/init/perl.pm</init_path>
      <init_path lang="python">/opt/modules/default/init/python.py</init_path>
      <init_path lang="sh">/opt/modules/default/init/sh</init_path>
      <init_path lang="csh">/opt/modules/default/init/csh</init_path>
      <cmd_path lang="perl">/opt/modules/default/bin/modulecmd perl</cmd_path>
      <cmd_path lang="python">/opt/modules/default/bin/modulecmd python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
        <command name="rm">craype-x86-skylake</command>
        <command name="rm">PrgEnv-pgi</command>
        <command name="rm">PrgEnv-intel</command>
        <command name="rm">PrgEnv-cray</command>
        <command name="rm">PrgEnv-gnu</command>
        <command name="rm">cray-netcdf</command>
        <command name="rm">cray-hdf5</command>
        <command name="rm">cray-parallel-netcdf</command>
        <command name="rm">papi</command>
      </modules>
      <modules compiler="intel">
        <command name="load">PrgEnv-intel</command>
        <command name="load">craype-x86-skylake</command>
        <command name="load">craype-hugepages2M</command>
        <command name="rm">perftools-base/7.0.4</command>
        <command name="load">cray-netcdf/4.6.1.3</command>
        <command name="load">cray-hdf5/1.10.2.0</command>
        <command name="load">cray-parallel-netcdf/1.11.1.1</command>
        <command name="load">papi/5.6.0.4</command>
        <command name="load">gridftp/6.0</command>
        <command name="load">cray-python/3.6.5.1</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="OMP_STACKSIZE">256M</env>
      <env name="POSTPROCESS_PATH">/home/jedwards/workflow/CESM_postprocessing</env>
    </environment_variables>
  </machine>

  <machine MACH="athena">
    <DESC>CMCC IBM iDataPlex, os is Linux, 16 pes/node, batch system is LSF</DESC>
    <NODENAME_REGEX>.*.cluster.net</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>intel,intel15</COMPILERS>
    <MPILIBS>mpich2</MPILIBS>
    <CIME_OUTPUT_ROOT>/work/$USER/CESM2</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/users/home/dp16116/CESM2/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>$DIN_LOC_ROOT/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>$ENV{CESMDATAROOT}/ccsm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>/users/home/dp16116/CESM2/cesm2.0.1/cime/tools/cprnc/cprnc</CCSM_CPRNC>
    <PERL5LIB>/usr/lib64/perl5:/usr/share/perl5</PERL5LIB>
    <GMAKE_J>8</GMAKE_J>
    <BATCH_SYSTEM>lsf</BATCH_SYSTEM>
    <SUPPORTED_BY> </SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>30</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>15</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>FALSE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable> mpirun_Impi5 </executable>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/usr/share/Modules/init/perl.pm</init_path>
      <init_path lang="python">/usr/share/Modules/init/python.py</init_path>
      <init_path lang="csh">/usr/share/Modules/init/csh</init_path>
      <init_path lang="sh">/usr/share/Modules/init/sh</init_path>
      <cmd_path lang="perl">/usr/bin/modulecmd perl</cmd_path>
      <cmd_path lang="python">/usr/bin/modulecmd python </cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
        <command name="purge"/>
      </modules>
      <modules compiler="intel">
        <command name="load">ANACONDA2/python2.7</command>
        <command name="load">INTEL/intel_xe_2015.3.187</command>
        <command name="load">SZIP/szip-2.1_int15</command>
      </modules>
      <modules compiler="intel" mpilib="!mpi-serial" DEBUG="TRUE">
        <command name="load">ESMF/esmf-6.3.0rp1-intelmpi-64-g_int15</command>
      </modules>
      <modules compiler="intel" mpilib="!mpi-serial" DEBUG="FALSE">
        <command name="load">ESMF/esmf-6.3.0rp1-intelmpi-64-O_int15</command>
      </modules>
      <modules compiler="intel" mpilib="mpi-serial" DEBUG="TRUE">
        <command name="load">ESMF/esmf-6.3.0rp1-mpiuni-64-g_int15</command>
      </modules>
      <modules compiler="intel" mpilib="mpi-serial" DEBUG="FALSE">
        <command name="load">ESMF/esmf-6.3.0rp1-mpiuni-64-O_int15</command>
      </modules>
      <modules mpilib="mpi-serial">
        <command name="load">HDF5/hdf5-1.8.15-patch1</command>
        <command name="load">NETCDF/netcdf-C_4.3.3.1-F_4.4.2_C++_4.2.1</command>
      </modules>
      <modules mpilib="!mpi-serial">
        <command name="load">HDF5/hdf5-1.8.15-patch1_parallel</command>
        <command name="load">NETCDF/netcdf-C_4.3.3.1-F_4.4.2_C++_4.2.1_parallel</command>
        <command name="load">PARALLEL_NETCDF/parallel-netcdf-1.6.1</command>
      </modules>
      <modules>
        <command name="load">CMAKE/cmake-3.3.0-rc1</command>
      </modules>
      <modules compiler="intel">
        <command name="unload">INTEL/intel_xe_2013.5.192</command>
        <command name="unload">INTEL/intel_xe_2013</command>
        <command name="unload">HDF5/hdf5-1.8.10-patch1</command>
        <command name="load">INTEL/intel_xe_2015.3.187</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="OMP_STACKSIZE">256M</env>
    </environment_variables>
    <environment_variables compiler="intel">
      <env name="I_MPI_EXTRA_FILESYSTEM_LIST">gpfs</env>
      <env name="I_MPI_EXTRA_FILESYSTEM">on</env>
      <env name="I_MPI_PLATFORM">snb</env>
      <env name="I_MPI_HYDRA_BOOTSTRAP">lsf</env>
      <env name="I_MPI_LSF_USE_COLLECTIVE_LAUNCH">1</env>
      <env name="I_MPI_DAPL_UD">on</env>
      <env name="I_MPI_DAPL_SCALABLE_PROGRESS">on</env>
      <env name="XIOS_PATH">/users/home/models/nemo/xios-cmip6/intel_xe_2013</env>
    </environment_variables>
  </machine>

  <machine MACH="bluewaters">
    <DESC>NCSA XE6, os is CNL, 32 pes/node, batch system is PBS</DESC>
    <NODENAME_REGEX>h2o</NODENAME_REGEX>
    <OS>CNL</OS>
    <COMPILERS>intel,pgi,cray,gnu</COMPILERS>
    <MPILIBS>mpich</MPILIBS>
    <PROJECT>banu</PROJECT>
    <CIME_OUTPUT_ROOT>/scratch/sciteam/$USER</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>$ENV{CESMDATAROOT}/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>$ENV{CESMDATAROOT}/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>$ENV{CESMDATAROOT}/ccsm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>$ENV{CESMDATAROOT}/tools/cprnc</CCSM_CPRNC>
    <GMAKE_J> 8</GMAKE_J>
    <BATCH_SYSTEM>pbs</BATCH_SYSTEM>
    <SUPPORTED_BY>cseg</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>32</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>16</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>aprun</executable>
      <arguments>
	<arg name="num_tasks"> -n {{ total_tasks }}</arg>
	<!-- <arg name="tasks_per_numa"> -S {{ tasks_per_numa }}</arg> -->
	<arg name="tasks_per_node"> -N $MAX_MPITASKS_PER_NODE</arg>
	<arg name="thread_count"> -d $ENV{OMP_NUM_THREADS}</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/opt/modules/default/init/perl.pm</init_path>
      <init_path lang="python">/opt/modules/default/init/python.py</init_path>
      <init_path lang="sh">/opt/modules/default/init/sh</init_path>
      <init_path lang="csh">/opt/modules/default/init/csh</init_path>
      <cmd_path lang="perl">/opt/modules/3.2.10.3/bin/modulecmd perl</cmd_path>
      <cmd_path lang="python">/opt/modules/3.2.10.3/bin/modulecmd python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
	<command name="rm">PrgEnv-pgi</command>
	<command name="rm">PrgEnv-intel</command>
	<command name="rm">PrgEnv-cray</command>
	<command name="rm">PrgEnv-gnu</command>
	<command name="rm">pgi</command>
	<command name="rm">cray</command>
	<command name="rm">intel</command>
	<command name="rm">cray-netcdf</command>
	<command name="rm">gcc</command>
      </modules>
      <modules compiler="intel">
	<command name="load">PrgEnv-intel</command>
	<command name="rm">intel</command>
	<command name="load">intel/18.0.3.222</command>
	<!-- the PrgEnv-intel loads a gcc compiler that causes several
	problems -->
	<command name="rm">gcc</command>
      </modules>
      <modules compiler="pgi">
	<command name="load">PrgEnv-pgi</command>
	<command name="switch">pgi pgi/18.7.0</command>
      </modules>
      <modules compiler="gnu">
	<command name="load">PrgEnv-gnu</command>
	<command name="switch">gcc gcc/6.3.0</command>
      </modules>
      <modules compiler="cray">
	<command name="load">PrgEnv-cray</command>
	<command name="switch">cce cce/8.5.8</command>
      </modules>
      <modules>
	<command name="load">papi/5.5.1.1</command>
	<command name="switch">cray-mpich cray-mpich/7.7.1</command>
	<command name="switch">cray-libsci cray-libsci/18.04.1</command>
	<command name="load">torque/6.0.4</command>
      </modules>
      <modules mpilib="!mpi-serial">
	<command name="load">cray-hdf5-parallel/1.10.2.0</command>
	<command name="load">cray-netcdf-hdf5parallel/4.6.1.0</command>
	<command name="load">cray-parallel-netcdf/1.8.1.3</command>
      </modules>
      <modules mpilib="mpi-serial">
	<command name="load">cray-netcdf/4.6.1.0</command>
      </modules>
      <modules>
	<command name="load">cmake/3.1.3</command>
	<command name="rm">darshan</command>
	<command name="use">/sw/modulefiles/CESM</command>
	<command name="load">CESM-ENV</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="OMP_STACKSIZE">64M</env>
      <env name="PATH">$ENV{HOME}/bin:$ENV{PATH}</env>
    </environment_variables>
  </machine>

  <machine MACH="centos7-linux">
    <DESC>
      Example port to centos7 linux system with gcc, netcdf, pnetcdf and mpich
      using modules from http://www.admin-magazine.com/HPC/Articles/Environment-Modules
    </DESC>
    <NODENAME_REGEX>regex.expression.matching.your.machine</NODENAME_REGEX>
    <OS>LINUX</OS>
    <PROXY> https://howto.get.out </PROXY>
    <COMPILERS>gnu</COMPILERS>
    <MPILIBS>mpich</MPILIBS>
    <PROJECT>none</PROJECT>
    <SAVE_TIMING_DIR> </SAVE_TIMING_DIR>
    <CIME_OUTPUT_ROOT>$ENV{HOME}/cesm/scratch</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>$ENV{HOME}/cesm/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>$ENV{HOME}/cesm/inputdata/lmwg</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$ENV{HOME}/cesm/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>$ENV{HOME}/cesm/cesm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>$ENV{HOME}/cesm/tools/cime/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE>make</GMAKE>
    <GMAKE_J>8</GMAKE_J>
    <BATCH_SYSTEM>none</BATCH_SYSTEM>
    <SUPPORTED_BY>me@my.address</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>8</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>8</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>FALSE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>mpiexec</executable>
      <arguments>
	<arg name="ntasks"> -np {{ total_tasks }} </arg>
      </arguments>
    </mpirun>
    <module_system type="module" allow_error="true">
      <init_path lang="perl">/usr/share/Modules/init/perl.pm</init_path>
      <init_path lang="python">/usr/share/Modules/init/python.py</init_path>
      <init_path lang="csh">/usr/share/Modules/init/csh</init_path>
      <init_path lang="sh">/usr/share/Modules/init/sh</init_path>
      <cmd_path lang="perl">/usr/bin/modulecmd perl</cmd_path>
      <cmd_path lang="python">/usr/bin/modulecmd python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
	<command name="purge"/>
      </modules>
      <modules compiler="gnu">
	<command name="load">compiler/gnu/8.2.0</command>
	<command name="load">mpi/3.3/gcc-8.2.0</command>
	<command name="load">tool/netcdf/4.6.1/gcc-8.1.0</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="OMP_STACKSIZE">256M</env>
    </environment_variables>
    <resource_limits>
      <resource name="RLIMIT_STACK">-1</resource>
    </resource_limits>
  </machine>

  <machine MACH="cheyenne">
    <DESC>NCAR SGI platform, os is Linux, 36 pes/node, batch system is PBS</DESC>
    <NODENAME_REGEX>.*.?cheyenne\d?.ucar.edu</NODENAME_REGEX>
    <!-- MPT sometimes times out at model start time; the next two lines cause
    case_run.py to detect the timeout and retry FORCE_SPARE_NODES times -->
    <MPIRUN_RETRY_REGEX>MPT: Launcher network accept (MPI_LAUNCH_TIMEOUT) timed out</MPIRUN_RETRY_REGEX>
    <MPIRUN_RETRY_COUNT>10</MPIRUN_RETRY_COUNT>
    <OS>LINUX</OS>
    <COMPILERS>intel,gnu,pgi</COMPILERS>
    <MPILIBS compiler="intel" >mpt,openmpi</MPILIBS>
    <MPILIBS compiler="pgi" >openmpi,mpt</MPILIBS>
    <MPILIBS compiler="gnu" >openmpi</MPILIBS>
    <CIME_OUTPUT_ROOT>/glade/scratch/$USER</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>$ENV{CESMDATAROOT}/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/glade/p/cgd/tss/CTSM_datm_forcing_data</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>$ENV{CESMDATAROOT}/cesm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>$ENV{CESMDATAROOT}/tools/cime/tools/cprnc/cprnc.cheyenne</CCSM_CPRNC> 
    <GMAKE_J>8</GMAKE_J>
    <BATCH_SYSTEM>pbs</BATCH_SYSTEM>
    <SUPPORTED_BY>cseg</SUPPORTED_BY>
    <!-- have not seen any performance benefit in smt -->
    <MAX_TASKS_PER_NODE>36</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>36</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>mpiexec_mpt</executable>
      <arguments>
	<arg name="labelstdout">-p "%g:"</arg>
	<arg name="num_tasks"> -np {{ total_tasks }}</arg>
	<!-- the omplace argument needs to be last -->
	<arg name="zthreadplacement"> omplace -tm open64 </arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="mpt" queue="share">
      <executable>mpirun `hostname`</executable>
      <arguments>
	<arg name="anum_tasks"> -np {{ total_tasks }}</arg>
	<!-- the omplace argument needs to be last -->
	<arg name="zthreadplacement"> omplace -tm open64 </arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="openmpi">
      <executable>mpirun</executable>
      <arguments>
	<arg name="anum_tasks"> -np {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="default" unit_testing="true">
      <!-- The only place we can build and run the unit tests is on cheyenne's
	   shared nodes. However, running mpi jobs on the shared nodes currently
	   requires some workarounds; these workarounds are implemented here -->
      <executable>/opt/sgi/mpt/mpt-2.15/bin/mpirun $ENV{UNIT_TEST_HOST} -np 1 </executable>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/glade/u/apps/ch/opt/lmod/7.5.3/lmod/lmod/init/perl</init_path>
      <init_path lang="python">/glade/u/apps/ch/opt/lmod/7.5.3/lmod/lmod/init/env_modules_python.py</init_path>
      <init_path lang="csh">/glade/u/apps/ch/opt/lmod/7.5.3/lmod/lmod/init/csh</init_path>
      <init_path lang="sh">/glade/u/apps/ch/opt/lmod/7.5.3/lmod/lmod/init/sh</init_path>
      <cmd_path lang="perl">/glade/u/apps/ch/opt/lmod/7.5.3/lmod/lmod/libexec/lmod perl</cmd_path>
      <cmd_path lang="python">/glade/u/apps/ch/opt/lmod/7.5.3/lmod/lmod/libexec/lmod python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
	<command name="purge"/>
	<command name="load">ncarenv/1.2</command>
      </modules>
      <modules compiler="intel">
	<command name="load">intel/19.0.2</command>
	<command name="load">esmf_libs</command>
	<command name="load">mkl</command>
      </modules>
      <modules compiler="intel" mpilib="!mpi-serial" DEBUG="TRUE" comp_interface="mct">
        <command name="load">esmf-7.1.0r-defio-mpi-g</command>
      </modules>
      <modules compiler="intel" mpilib="!mpi-serial" DEBUG="FALSE" comp_interface="mct">
        <command name="load">esmf-7.1.0r-defio-mpi-O</command>
      </modules>
      <modules compiler="intel" mpilib="mpi-serial" DEBUG="TRUE" comp_interface="mct">
        <command name="load">esmf-7.1.0r-ncdfio-uni-g</command>
      </modules>
      <modules compiler="intel" mpilib="mpi-serial" DEBUG="FALSE" comp_interface="mct">
        <command name="load">esmf-7.1.0r-ncdfio-uni-O</command>
      </modules>
      <modules compiler="intel" mpilib="!mpi-serial" DEBUG="TRUE" comp_interface="nuopc">
        <command name="use">/glade/work/turuncu/PROGS/modulefiles/esmfpkgs/intel/19.0.2</command>
        <command name="load">esmf-8.0.0-ncdfio-mpt-g</command>
      </modules>
      <modules compiler="intel" mpilib="!mpi-serial" DEBUG="FALSE" comp_interface="nuopc">
        <command name="use">/glade/work/turuncu/PROGS/modulefiles/esmfpkgs/intel/19.0.2</command>
        <command name="load">esmf-8.0.0-ncdfio-mpt-O</command>
      </modules>
      <modules compiler="intel" mpilib="mpi-serial" DEBUG="TRUE" comp_interface="nuopc">
        <command name="use">/glade/work/turuncu/PROGS/modulefiles/esmfpkgs/intel/19.0.2</command>
        <command name="load">esmf-8.0.0-ncdfio-mpiuni-g</command>
      </modules>
      <modules compiler="intel" mpilib="mpi-serial" DEBUG="FALSE" comp_interface="nuopc">
        <command name="use">/glade/work/turuncu/PROGS/modulefiles/esmfpkgs/intel/19.0.2</command>
        <command name="load">esmf-8.0.0-ncdfio-mpiuni-O</command>
      </modules>
      <modules compiler="pgi">
	<command name="load">pgi/19.3</command>
      </modules>
      <modules compiler="gnu">
        <command name="load">gnu/8.3.0</command>
        <command name="load">openblas/0.3.6</command>
      </modules>
      <modules mpilib="mpt" compiler="gnu">
	<command name="load">mpt/2.19</command>
	<command name="load">netcdf-mpi/4.7.1</command>
      </modules>
      <modules mpilib="mpt" compiler="intel">
	<command name="load">mpt/2.19</command>
	<!-- known failure in parallel netcdf with mpt, use serial -->
	<command name="load">netcdf/4.7.1</command>
	<command name="load">pnetcdf/1.11.0</command>
      </modules>
      <modules mpilib="mpt" compiler="pgi">
	<command name="load">mpt/2.19</command>
	<command name="load">netcdf-mpi/4.7.1</command>
	<command name="load">pnetcdf/1.11.1</command>
      </modules>
      <modules mpilib="openmpi" compiler="pgi">
	<command name="load">openmpi/3.1.4</command>
	<command name="load">netcdf/4.7.1</command>
      </modules>
      <modules mpilib="openmpi" compiler="gnu">
        <command name="load">openmpi/3.1.4</command>
        <command name="load">netcdf/4.7.1</command>
      </modules>
      <modules>
	<command name="load">ncarcompilers/0.5.0</command>
      </modules>
      <modules compiler="gnu" mpilib="mpi-serial">
	<command name="load">netcdf/4.7.1</command>
      </modules>
      <modules compiler="intel" mpilib="mpi-serial">
	<command name="load">netcdf/4.7.1</command>
      </modules>
      <modules compiler="pgi" mpilib="mpi-serial">
	<command name="load">netcdf/4.7.1</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="OMP_STACKSIZE">256M</env>
      <env name="TMPDIR">/glade/scratch/$USER</env>
      <env name="MPI_TYPE_DEPTH">16</env>
      <env name="MPI_IB_CONGESTED">1</env>
      <env name="MPI_USE_ARRAY"/>
    </environment_variables>
    <environment_variables comp_interface="nuopc">
      <env name="ESMF_RUNTIME_PROFILE">ON</env>
      <env name="ESMF_RUNTIME_PROFILE_OUTPUT">SUMMARY</env>
      <env name="UGCSINPUTPATH">/glade/work/turuncu/FV3GFS/benchmark-inputs/2012010100/gfs/fcst</env>
      <env name="UGCSFIXEDFILEPATH">/glade/work/turuncu/FV3GFS/fix_am</env>
      <env name="UGCSADDONPATH">/glade/work/turuncu/FV3GFS/addon</env>
    </environment_variables>
    <environment_variables unit_testing="true">
      <env name="MPI_USE_ARRAY">false</env>
    </environment_variables>
    <environment_variables queue="share">
      <env name="TMPDIR">/glade/scratch/$USER</env>
      <env name="MPI_USE_ARRAY">false</env>
    </environment_variables>
    <resource_limits>
      <resource name="RLIMIT_STACK">-1</resource>
    </resource_limits>
  </machine>

  <machine MACH="coeus">
    <DESC>
      Portland State University Coeus Cluster Dec 2019 CentOS 7
    </DESC>
    <NODENAME_REGEX>(login[1,2].cluster|compute[0-9]*.cluster)</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>gnu</COMPILERS>
    <MPILIBS>mvapich2</MPILIBS>
    <PROJECT>none</PROJECT>
    <CIME_OUTPUT_ROOT>$ENV{CESMDATAROOT}/$USER</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>$ENV{CESMDATAROOT}/inputdata</DIN_LOC_ROOT>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>$ENV{CESMDATAROOT}/cesm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>/vol/apps/hpc/src/cesm-2.1.0/cime/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE>make</GMAKE>
    <GMAKE_J>8</GMAKE_J>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>oit-rc-groups@pdx.edu</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>40</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>20</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>FALSE</PROJECT_REQUIRED>
    <mpirun mpilib="mvapich2">
      <executable>srun</executable>
      <arguments>
        <arg name="num_tasks">--ntasks={{ total_tasks }}</arg>
        <arg name="cpu_bind">--cpu_bind=sockets --cpu_bind=verbose</arg>
        <arg name="kill-on-bad-exit">--kill-on-bad-exit</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl"></init_path>
      <init_path lang="python"></init_path>
      <init_path lang="csh">/usr/share/Modules/init/csh</init_path>
      <init_path lang="sh">/usr/share/Modules/init/sh</init_path>
      <cmd_path lang="perl">/usr/bin/modulecmd perl</cmd_path>
      <cmd_path lang="python">/usr/bin/modulecmd python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
        <command name="purge"/>
      </modules>
      <modules compiler="gnu">
        <command name="load">gcc-6.3.0</command>
        <command name="load">mvapich2-2.2-psm/gcc-6.3.0</command>
        <command name="load">General/netcdf/4.4.1.1/gcc-6.3.0</command>
        <command name="load">Python/2.7.13/gcc-6.3.0</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="OMP_STACKSIZE">256M</env>
      <env name="NETCDF_HOME">/vol/apps/hpc/stow/netcdf/4.4.1.1/gcc-6.3.0/</env>
    </environment_variables>
    <resource_limits>
      <resource name="RLIMIT_STACK">-1</resource>
    </resource_limits>
  </machine>

  <machine MACH="constance">
    <DESC>PNL Haswell cluster, OS is Linux, batch system is SLURM</DESC>
    <OS>LINUX</OS>
    <COMPILERS>intel,pgi</COMPILERS>
    <MPILIBS>mvapich2,openmpi,intelmpi,mvapich</MPILIBS>
    <CIME_OUTPUT_ROOT>/pic/scratch/$USER</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/pic/scratch/tcraig/IRESM/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/pic/scratch/tcraig/IRESM/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/pic/scratch/$USER/cases/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/pic/scratch/tcraig/IRESM/ccsm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>/people/tcraig/bin/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>tcraig -at- ucar.edu</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>24</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>24</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>FALSE</PROJECT_REQUIRED>
    <mpirun mpilib="mvapich2">
      <executable>srun</executable>
      <arguments>
	<arg name="mpi">--mpi=none</arg>
	<arg name="num_tasks">--ntasks={{ total_tasks }}</arg>
	<arg name="cpu_bind">--cpu_bind=sockets --cpu_bind=verbose</arg>
	<arg name="kill-on-bad-exit">--kill-on-bad-exit</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="mvapich">
      <executable>srun</executable>
      <arguments>
	<arg name="num_tasks">--ntasks={{ total_tasks }}</arg>
	<arg name="cpu_bind">--cpu_bind=sockets --cpu_bind=verbose</arg>
	<arg name="kill-on-bad-exit">--kill-on-bad-exit</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="intelmpi">
      <executable>mpirun</executable>
      <arguments>
	<arg name="num_tasks">-n {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="openmpi">
      <executable>mpirun</executable>
      <arguments>
	<arg name="num_tasks">-n {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/share/apps/modules/Modules/3.2.10/init/perl.pm</init_path>
      <init_path lang="csh">/etc/profile.d/modules.csh</init_path>
      <init_path lang="sh">/etc/profile.d/modules.sh</init_path>
      <cmd_path lang="perl">/share/apps/modules/Modules/3.2.10/bin/modulecmd perl </cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
	<command name="purge"/>
      </modules>
      <modules>
	<command name="load">perl/5.20.0</command>
	<command name="load">cmake/2.8.12</command>
      </modules>
      <modules compiler="intel">
	<command name="load">intel/15.0.1</command>
	<command name="load">netcdf/4.3.2</command>
	<command name="load">mkl/15.0.1</command>
      </modules>
      <modules compiler="pgi">
	<command name="load">pgi/14.10</command>
	<command name="load">netcdf/4.3.2</command>
      </modules>
      <modules mpilib="mvapich">
	<command name="load">mvapich2/2.1</command>
      </modules>
      <modules mpilib="mvapich2">
	<command name="load">mvapich2/2.1</command>
      </modules>
      <modules mpilib="intelmpi">
	<command name="load">intelmpi/5.0.1.035</command>
      </modules>
      <modules mpilib="openmpi">
	<command name="load">openmpi/1.8.3</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="OMP_STACKSIZE">64M</env>
    </environment_variables>
    <environment_variables compiler="intel">
      <env name="MKL_PATH">$MLIB_LIB</env>
      <env name="NETCDF_HOME">/share/apps/netcdf/4.3.2/intel/15.0.1</env>
    </environment_variables>
    <environment_variables compiler="pgi">
      <env name="NETCDF_HOME">/share/apps/netcdf/4.3.2/pgi/14.10</env>
    </environment_variables>
  </machine>

  <machine MACH="cori-haswell">
    <!-- NODENAME_REGEX makes haswell the default machine for cori -->
    <!-- to make knl the default, comment this line and uncomment the one in cori-knl -->
    <DESC>NERSC XC40 Haswell, os is CNL, 32 pes/node, batch system is Slurm</DESC>
    <NODENAME_REGEX>cori</NODENAME_REGEX>
    <OS>CNL</OS>
    <COMPILERS>intel,gnu,cray</COMPILERS>
    <MPILIBS>mpt</MPILIBS>
    <CIME_OUTPUT_ROOT>$ENV{SCRATCH}</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/project/projectdirs/ccsm1/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/project/projectdirs/ccsm1/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/project/projectdirs/ccsm1/ccsm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>/project/projectdirs/ccsm1/tools/cprnc.corip1/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>cseg</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>64</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>32</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="default">
      <executable>srun</executable>
      <arguments>
	<arg name="label"> --label</arg>
	<arg name="num_tasks" > -n {{ total_tasks }}</arg>
	<arg name="binding"> -c {{ srun_binding }}</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/opt/modules/default/init/perl.pm</init_path>
      <init_path lang="python">/opt/modules/default/init/python.py</init_path>
      <init_path lang="sh">/opt/modules/default/init/sh</init_path>
      <init_path lang="csh">/opt/modules/default/init/csh</init_path>
      <cmd_path lang="perl">/opt/modules/default/bin/modulecmd perl</cmd_path>
      <cmd_path lang="python">/opt/modules/default/bin/modulecmd python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
	<command name="rm">PrgEnv-intel</command>
	<command name="rm">PrgEnv-cray</command>
	<command name="rm">PrgEnv-gnu</command>
	<command name="rm">intel</command>
	<command name="rm">cce</command>
	<command name="rm">cray-parallel-netcdf</command>
	<command name="rm">cray-parallel-hdf5</command>
	<command name="rm">pmi</command>
	<command name="rm">cray-libsci</command>
	<command name="rm">cray-mpich2</command>
	<command name="rm">cray-mpich</command>
	<command name="rm">cray-netcdf</command>
	<command name="rm">cray-hdf5</command>
	<command name="rm">cray-netcdf-hdf5parallel</command>
	<command name="rm">craype-sandybridge</command>
	<command name="rm">craype-ivybridge</command>
	<command name="rm">craype</command>
      </modules>

      <modules compiler="intel">
	<command name="load">PrgEnv-intel</command>
	<command name="switch">intel intel/19.0.3.199</command>
	<command name="use">/global/project/projectdirs/ccsm1/modulefiles/cori</command>
      </modules>
      <modules compiler="intel" mpilib="!mpi-serial" >
	<command name="load">esmf/7.1.0r-defio-intel18.0.1.163-mpi-O-cori-haswell</command>
      </modules>
      <modules compiler="intel" mpilib="mpi-serial" >
	<command name="load">esmf/7.1.0r-netcdf-intel18.0.1.163-mpiuni-O-haswell</command>
      </modules>

      <modules compiler="cray">
	<command name="load">PrgEnv-cray</command>
	<command name="switch">cce cce/8.6.5</command>
      </modules>
      <modules compiler="gnu">
	<command name="load">PrgEnv-gnu</command>
	<command name="switch">gcc gcc/7.3.0</command>
      </modules>
      <modules>
	<command name="load">cray-memkind</command>
	<command name="swap">craype craype/2.5.18</command>
      </modules>
      <modules>
	<command name="switch">cray-libsci/19.02.1</command>
      </modules>
      <modules>
	<command name="load">cray-mpich/7.7.8</command>
      </modules>
      <modules mpilib="mpi-serial">
	<command name="load">cray-hdf5/1.10.5.0</command>
	<command name="load">cray-netcdf/4.6.3.0</command>
      </modules>
      <modules mpilib="!mpi-serial">
	<command name="load">cray-netcdf-hdf5parallel/4.6.3.0</command>
	<command name="load">cray-hdf5-parallel/1.10.5.0</command>
	<command name="load">cray-parallel-netcdf/1.11.1.0</command>
      </modules>
      <modules>
	<command name="load">cmake/3.14.4</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="OMP_STACKSIZE">256M</env>
      <env name="OMP_PROC_BIND">spread</env>
      <env name="OMP_PLACES">threads</env>
    </environment_variables>
  </machine>
  <machine MACH="cori-knl">
    <!-- NODENAME_REGEX is commented out here so that haswell remains the default machine for cori -->
    <!-- to make knl the default, uncomment the line below and comment the one in cori-haswell -->
    <!-- <NODENAME_REGEX>cori</NODENAME_REGEX> -->
    <DESC>NERSC XC40 KNL, os is CNL, 68 pes/node, batch system is Slurm</DESC>
    <OS>CNL</OS>
    <COMPILERS>intel,gnu,cray</COMPILERS>
    <MPILIBS>mpt</MPILIBS>
    <CIME_OUTPUT_ROOT>$ENV{SCRATCH}</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/project/projectdirs/ccsm1/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/project/projectdirs/ccsm1/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/project/projectdirs/ccsm1/ccsm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>/project/projectdirs/ccsm1/tools/cprnc.corip1/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>cseg</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>256</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>64</MAX_MPITASKS_PER_NODE>
    <COSTPES_PER_NODE>68</COSTPES_PER_NODE>
    <mpirun mpilib="default">
      <executable>srun</executable>
      <arguments>
	<arg name="label"> --label</arg>
	<arg name="num_tasks" > -n {{ total_tasks }}</arg>
	<arg name="binding"> -c {{ srun_binding }} --cpu_bind=cores</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/opt/modules/default/init/perl.pm</init_path>
      <init_path lang="python">/opt/modules/default/init/python.py</init_path>
      <init_path lang="sh">/opt/modules/default/init/sh</init_path>
      <init_path lang="csh">/opt/modules/default/init/csh</init_path>
      <cmd_path lang="perl">/opt/modules/default/bin/modulecmd perl</cmd_path>
      <cmd_path lang="python">/opt/modules/default/bin/modulecmd python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
	<command name="rm">craype-mic-knl</command>
	<command name="rm">craype-haswell</command>
	<command name="rm">PrgEnv-intel</command>
	<command name="rm">PrgEnv-cray</command>
	<command name="rm">PrgEnv-gnu</command>
	<command name="rm">intel</command>
	<command name="rm">cce</command>
	<command name="rm">cray-parallel-netcdf</command>
	<command name="rm">cray-parallel-hdf5</command>
	<command name="rm">pmi</command>
	<command name="rm">cray-libsci</command>
	<command name="rm">cray-mpich2</command>
	<command name="rm">cray-mpich</command>
	<command name="rm">cray-netcdf</command>
	<command name="rm">cray-hdf5</command>
	<command name="rm">cray-netcdf-hdf5parallel</command>
      </modules>

      <modules compiler="intel">
	<command name="load">PrgEnv-intel</command>
	<command name="switch">intel intel/19.0.3.199</command>
	<command name="use">/global/project/projectdirs/ccsm1/modulefiles/cori</command>
      </modules>
      <modules compiler="intel" mpilib="!mpi-serial" >
	<command name="load">esmf/7.1.0r-defio-intel18.0.1.163-mpi-O-cori-knl</command>
      </modules>
      <modules compiler="intel" mpilib="mpi-serial" >
	<command name="load">esmf/7.1.0r-netcdf-intel18.0.1.163-mpiuni-O-knl</command>
      </modules>

      <modules compiler="cray">
	<command name="load">PrgEnv-cray</command>
	<command name="switch">cce cce/8.6.5</command>
      </modules>
      <modules compiler="gnu">
	<command name="load">PrgEnv-gnu</command>
	<command name="switch">gcc gcc/7.3.0</command>
      </modules>
      <modules>
	<command name="load">cray-memkind</command>
	<command name="swap">craype craype/2.5.18</command>
	<command name="load">craype-mic-knl</command>
      </modules>
      <modules>
	<command name="switch">cray-libsci/19.02.1</command>
      </modules>
      <modules>
	<command name="load">cray-mpich/7.7.8</command>
      </modules>
      <modules mpilib="mpi-serial">
	<command name="load">cray-hdf5/1.10.5.0</command>
	<command name="load">cray-netcdf/4.6.3.0</command>
      </modules>
      <modules mpilib="!mpi-serial">
	<command name="load">cray-netcdf-hdf5parallel/4.6.3.0</command>
	<command name="load">cray-hdf5-parallel/1.10.5.0</command>
	<command name="load">cray-parallel-netcdf/1.11.1.0</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="OMP_STACKSIZE">256M</env>
      <env name="OMP_PROC_BIND">spread</env>
      <env name="OMP_PLACES">threads</env>
    </environment_variables>
  </machine>

  <machine MACH="daint">
    <DESC>CSCS Cray XC50, os is SUSE SLES, 12 pes/node, batch system is SLURM</DESC>
    <OS>CNL</OS>
    <COMPILERS>pgi,cray,gnu</COMPILERS>
    <MPILIBS>mpich</MPILIBS>
    <CIME_OUTPUT_ROOT>/scratch/snx3000/$USER</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/project/s824/cesm_inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/project/s824/cesm_inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/project/s824/$USER/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/project/s824/ccsm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>/project/s824/cesm_tools/ccsm_cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>12</GMAKE_J>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>edouard.davin -at- env.ethz.ch</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>12</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>12</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="default">
      <executable>srun</executable>
      <arguments>
	<arg name="num_tasks"> -n {{ total_tasks }}</arg>
	<arg name="thread_count"> -d $ENV{OMP_NUM_THREADS}</arg>
      </arguments>
    </mpirun>
    <module_system type="none"/>
    <environment_variables>
      <env name="OMP_STACKSIZE">64M</env>
    </environment_variables>
  </machine>

  <machine MACH="eastwind">
    <DESC>PNL IBM Xeon cluster, os is Linux (pgi), batch system is SLURM</DESC>
    <OS>LINUX</OS>
    <COMPILERS>pgi,intel</COMPILERS>
    <MPILIBS>mvapich2,mvapich</MPILIBS>
    <CIME_OUTPUT_ROOT>/lustre/$USER</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/lustre/tcraig/IRESM/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/lustre/tcraig/IRESM/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/lustre/$USER/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/lustre/tcraig/IRESM/ccsm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>/lustre/tcraig/IRESM/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>tcraig -at- ucar.edu</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>12</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>12</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="mvapich">
      <executable>srun</executable>
      <arguments>
	<arg name="num_tasks"> --ntasks={{ total_tasks }}</arg>
	<arg name="cpubind"> --cpu_bind=sockets</arg>
	<arg name="cpubind"> --cpu_bind=verbose</arg>
	<arg name="killonbadexit"> --kill-on-bad-exit</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="mvapich2">
      <executable>srun</executable>
      <arguments>
	<arg name="mpinone"> --mpi=none</arg>
	<arg name="num_tasks"> --ntasks={{ total_tasks }}</arg>
	<arg name="cpubind"> --cpu_bind=sockets</arg>
	<arg name="cpubind"> --cpu_bind=verbose</arg>
	<arg name="killonbadexit"> --kill-on-bad-exit</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/etc/profile.d/modules.perl</init_path>
      <init_path lang="sh">/etc/profile.d/modules.sh</init_path>
      <init_path lang="csh">/etc/profile.d/modules.csh</init_path>
      <cmd_path lang="perl">/share/apps/modules/Modules/3.2.7/bin/modulecmd perl</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
	<command name="purge"/>
	<command name="load">perl/5.20.7</command>
	<command name="load">cmake/3.0.0</command>
	<command name="load">pgi/15.5</command>
	<command name="load">mpi/mvapich2/1.5.1p1/pgi11.3</command>
	<command name="load">netcdf/4.1.2/pgi</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="OMP_STACKSIZE">64M</env>
    </environment_variables>
  </machine>

  <machine MACH="edison">
    <DESC>NERSC XC30, os is CNL, 24 pes/node, batch system is SLURM</DESC>
    <NODENAME_REGEX>edison</NODENAME_REGEX>
    <OS>CNL</OS>
    <COMPILERS>intel,gnu,cray</COMPILERS>
    <MPILIBS>mpt</MPILIBS>
    <CIME_OUTPUT_ROOT>$ENV{CSCRATCH}</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/project/projectdirs/ccsm1/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/project/projectdirs/ccsm1/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/project/projectdirs/ccsm1/ccsm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>/project/projectdirs/ccsm1/tools/cprnc.edison/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>cseg</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>48</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>24</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="default">
      <executable>srun</executable>
      <arguments>
	<arg name="label"> --label</arg>
	<arg name="num_tasks" > -n {{ total_tasks }}</arg>
	<arg name="thread_count" > -c {{ srun_binding }}</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/opt/modules/default/init/perl.pm</init_path>
      <init_path lang="python">/opt/modules/default/init/python.py</init_path>
      <init_path lang="sh">/opt/modules/default/init/sh</init_path>
      <init_path lang="csh">/opt/modules/default/init/csh</init_path>
      <cmd_path lang="perl">/opt/modules/default/bin/modulecmd perl</cmd_path>
      <cmd_path lang="python">/opt/modules/default/bin/modulecmd python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
	<command name="rm">PrgEnv-intel</command>
	<command name="rm">PrgEnv-cray</command>
	<command name="rm">PrgEnv-gnu</command>
	<command name="rm">intel</command>
	<command name="rm">cce</command>
	<command name="rm">cray-parallel-netcdf</command>
	<command name="rm">cray-parallel-hdf5</command>
	<command name="rm">pmi</command>
	<command name="rm">cray-libsci</command>
	<command name="rm">cray-mpich2</command>
	<command name="rm">cray-mpich</command>
	<command name="rm">cray-netcdf</command>
	<command name="rm">cray-hdf5</command>
	<command name="rm">cray-netcdf-hdf5parallel</command>
	<command name="rm">craype-sandybridge</command>
	<command name="rm">craype-ivybridge</command>
	<command name="rm">craype</command>
      </modules>

      <modules compiler="intel">
	<command name="load">PrgEnv-intel</command>
	<command name="switch">intel intel/18.0.1.163</command>
	<command name="rm">cray-libsci</command>
	<command name="use">/global/project/projectdirs/ccsm1/modulefiles/edison</command>
      </modules>
      <modules compiler="intel" mpilib="!mpi-serial" >
	<command name="load">esmf/7.1.0r-defio-intel18.0.1.163-mpi-O</command>
      </modules>
      <modules compiler="intel" mpilib="mpi-serial" >
	<command name="load">esmf/6.3.0rp1-defio-intel17.0-mpiuni-O</command>
      </modules>
      <modules compiler="cray">
	<command name="load">PrgEnv-cray</command>
	<command name="switch">cce cce/8.6.5</command>
	<command name="switch">cray-libsci/18.03.1</command>
      </modules>
      <modules compiler="gnu">
	<command name="load">PrgEnv-gnu</command>
	<command name="switch">gcc gcc/7.3.0</command>
	<command name="switch">cray-libsci/18.03.1</command>
      </modules>
      <modules>
	<command name="load">papi/5.5.1.4</command>
	<command name="swap">craype craype/2.5.14</command>
	<command name="load">craype-ivybridge</command>
      </modules>
      <modules>
	<command name="load">cray-mpich/7.7.0</command>
      </modules>
      <modules mpilib="mpi-serial">
	<command name="load">cray-hdf5/1.10.1.1</command>
	<command name="load">cray-netcdf/4.4.1.1.6</command>
      </modules>
      <modules mpilib="!mpi-serial">
	<command name="load">cray-netcdf-hdf5parallel/4.4.1.1.6</command>
	<command name="load">cray-hdf5-parallel/1.10.1.1</command>
	<command name="load">cray-parallel-netcdf/1.8.1.3</command>
      </modules>
    </module_system>

    <environment_variables>
      <env name="OMP_STACKSIZE">64M</env>
      <env name="OMP_PROC_BIND">spread</env>
      <env name="OMP_PLACES">threads</env>
    </environment_variables>

  </machine>

  <machine MACH="euler2">
    <DESC>Euler II Linux Cluster ETH, 24 pes/node, InfiniBand, XeonE5_2680v3, batch system LSF</DESC>
    <OS>LINUX</OS>
    <COMPILERS>intel,pgi</COMPILERS>
    <MPILIBS>openmpi,mpich</MPILIBS>
    <CIME_OUTPUT_ROOT>/cluster/work/climate/$USER</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/cluster/work/climate/cesm/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/cluster/work/climate/cesm/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/cluster/work/climate/$USER/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/cluster/work/climate/cesm/ccsm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>/cluster/work/climate/cesm/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>1</GMAKE_J>
    <BATCH_SYSTEM>lsf</BATCH_SYSTEM>
    <SUPPORTED_BY>urs.beyerle -at- env.ethz.ch</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>24</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>24</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="mpich">
      <executable>mpirun</executable>
      <arguments>
	<arg name="machine_file">-hostfile $ENV{PBS_JOBID}</arg>
	<arg name="tasks_per_node"> -ppn $MAX_MPITASKS_PER_NODE</arg>
	<arg name="num_tasks"> -n {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="openmpi">
      <executable>mpirun</executable>
      <arguments>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="python">/cluster/apps/modules/init/python.py</init_path>
      <init_path lang="sh">/etc/profile.d/modules.sh</init_path>
      <init_path lang="csh">/etc/profile.d/modules.csh</init_path>
      <cmd_path lang="python">/cluster/apps/modules/bin/modulecmd python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
	<command name="purge"/>
      </modules>
      <modules>
	<command name="load">new</command>
      </modules>
      <modules compiler="intel">
	<command name="load">intel/2018.1</command>
      </modules>
      <modules>
	<command name="load">netcdf/4.3.1</command>
      </modules>
      <modules compiler="pgi">
	<command name="load">pgi/14.1</command>
      </modules>
      <modules mpilib="mpich">
	<command name="load">mvapich2/1.8.1</command>
      </modules>
      <modules mpilib="openmpi">
	<command name="load">open_mpi/1.6.5</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="OMP_STACKSIZE">64M</env>
    </environment_variables>
  </machine>

  <machine MACH="euler3">
    <DESC>Euler III Linux Cluster ETH, 4 pes/node, Ethernet, XeonE3_1585Lv5, batch system LSF</DESC>
    <OS>LINUX</OS>
    <COMPILERS>intel,pgi</COMPILERS>
    <MPILIBS>openmpi,mpich</MPILIBS>
    <CIME_OUTPUT_ROOT>/cluster/work/climate/$USER</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/cluster/work/climate/cesm/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/cluster/work/climate/cesm/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/cluster/work/climate/$USER/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/cluster/work/climate/cesm/ccsm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>/cluster/work/climate/cesm/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>1</GMAKE_J>
    <BATCH_SYSTEM>lsf</BATCH_SYSTEM>
    <SUPPORTED_BY>urs.beyerle -at- env.ethz.ch</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>4</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>4</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="mpich">
      <executable>mpirun</executable>
      <arguments>
	<arg name="machine_file">-hostfile $ENV{PBS_JOBID}</arg>
	<arg name="tasks_per_node"> -ppn $MAX_MPITASKS_PER_NODE</arg>
	<arg name="num_tasks"> -n {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="openmpi">
      <executable>mpirun</executable>
      <arguments>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="python">/cluster/apps/modules/init/python.py</init_path>
      <init_path lang="sh">/etc/profile.d/modules.sh</init_path>
      <init_path lang="csh">/etc/profile.d/modules.csh</init_path>
      <cmd_path lang="python">/cluster/apps/modules/bin/modulecmd python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
	<command name="purge"/>
      </modules>
      <modules>
	<command name="load">new</command>
      </modules>
      <modules>
	<command name="load">interconnect/ethernet</command>
      </modules>
      <modules compiler="intel">
	<command name="load">intel/2018.1</command>
      </modules>
      <modules>
	<command name="load">netcdf/4.3.1</command>
      </modules>
      <modules compiler="pgi">
	<command name="load">pgi/14.1</command>
      </modules>
      <modules mpilib="mpich">
	<command name="load">mvapich2/1.8.1</command>
      </modules>
      <modules mpilib="openmpi">
	<command name="load">open_mpi/1.6.5</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="OMP_STACKSIZE">64M</env>
    </environment_variables>
  </machine>

  <machine MACH="euler4">
    <DESC>Euler IV Linux Cluster ETH, 36 pes/node, InfiniBand, XeonGold_6150, batch system LSF</DESC>
    <OS>LINUX</OS>
    <COMPILERS>intel,pgi</COMPILERS>
    <MPILIBS>openmpi,mpich</MPILIBS>
    <CIME_OUTPUT_ROOT>/cluster/work/climate/$USER</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/cluster/work/climate/cesm/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/cluster/work/climate/cesm/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/cluster/work/climate/$USER/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/cluster/work/climate/cesm/ccsm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>/cluster/work/climate/cesm/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>1</GMAKE_J>
    <BATCH_SYSTEM>lsf</BATCH_SYSTEM>
    <SUPPORTED_BY>urs.beyerle -at- env.ethz.ch</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>36</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>36</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="mpich">
      <executable>mpirun</executable>
      <arguments>
	<arg name="machine_file">-hostfile $ENV{PBS_JOBID}</arg>
	<arg name="tasks_per_node"> -ppn $MAX_MPITASKS_PER_NODE</arg>
	<arg name="num_tasks"> -n {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="openmpi">
      <executable>mpirun</executable>
      <arguments>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="python">/cluster/apps/modules/init/python.py</init_path>
      <init_path lang="sh">/etc/profile.d/modules.sh</init_path>
      <init_path lang="csh">/etc/profile.d/modules.csh</init_path>
      <cmd_path lang="python">/cluster/apps/modules/bin/modulecmd python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
	<command name="purge"/>
      </modules>
      <modules>
	<command name="load">new</command>
      </modules>
      <modules compiler="intel">
	<command name="load">intel/2018.1</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="OMP_STACKSIZE">64M</env>
    </environment_variables>
  </machine>

  <machine MACH="gaea">
    <DESC>NOAA XE6, os is CNL, 24 pes/node, batch system is PBS</DESC>
    <OS>CNL</OS>
    <COMPILERS>pgi</COMPILERS>
    <MPILIBS>mpich</MPILIBS>
    <CIME_OUTPUT_ROOT>/lustre/fs/scratch/Julio.T.Bacmeister</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/lustre/fs/scratch/Julio.T.Bacmeister/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/lustre/fs/scratch/Julio.T.Bacmeister/inputdata</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/lustre/fs/scratch/Julio.T.Bacmeister/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>UNSET</BASELINE_ROOT>
    <CCSM_CPRNC>UNSET</CCSM_CPRNC>
    <GMAKE_J> 8</GMAKE_J>
    <BATCH_SYSTEM>pbs</BATCH_SYSTEM>
    <SUPPORTED_BY>julio -at- ucar.edu</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>24</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>24</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="default">
      <executable>aprun</executable>
      <arguments>
	<arg name="hyperthreading" default="2"> -j {{ hyperthreading }}</arg>
	<arg name="num_tasks" > -n {{ total_tasks }}</arg>
	<arg name="tasks_per_numa" > -S {{ tasks_per_numa }}</arg>
	<arg name="tasks_per_node" > -N $MAX_MPITASKS_PER_NODE</arg>
	<arg name="thread_count" > -d $ENV{OMP_NUM_THREADS}</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/opt/modules/default/init/perl.pm</init_path>
      <init_path lang="csh">/opt/modules/default/init/csh</init_path>
      <init_path lang="sh">/opt/modules/default/init/sh</init_path>
      <cmd_path lang="perl">/opt/modules/default/bin/modulecmd perl</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <modules>
	<command name="rm">PrgEnv-pgi</command>
	<command name="rm">PrgEnv-cray</command>
	<command name="rm">PrgEnv-gnu</command>
	<command name="rm">pgi</command>
	<command name="rm">cray</command>
      </modules>
      <modules compiler="pgi">
	<command name="load">PrgEnv-pgi</command>
	<command name="switch">pgi pgi/12.5.0</command>
      </modules>
      <modules compiler="gnu">
	<command name="load">PrgEnv-gnu</command>
	<command name="load">torque</command>
      </modules>
      <modules compiler="cray">
	<command name="load">PrgEnv-cray/4.0.36</command>
	<command name="load">cce/8.0.2</command>
      </modules>
      <modules>
	<command name="load">torque/4.1.3</command>
	<command name="load">netcdf-hdf5parallel/4.2.0</command>
	<command name="load">parallel-netcdf/1.2.0</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="OMP_STACKSIZE">64M</env>
      <env name="MPICH_ENV_DISPLAY">1</env>
    </environment_variables>
  </machine>

  <machine MACH="hobart">
    <DESC>NCAR CGD Linux Cluster, 48 pes/node, batch system is PBS</DESC>
    <NODENAME_REGEX>^h.*\.cgd\.ucar\.edu</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>intel,pgi,nag,gnu</COMPILERS>
    <MPILIBS>mvapich2,openmpi</MPILIBS>
    <CIME_OUTPUT_ROOT>/scratch/cluster/$USER</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/fs/cgd/csm/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/project/tss</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/scratch/cluster/$USER/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/fs/cgd/csm/ccsm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>/fs/cgd/csm/tools/cime/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE>gmake --output-sync</GMAKE>
    <GMAKE_J>4</GMAKE_J>
    <BATCH_SYSTEM>pbs</BATCH_SYSTEM>
    <SUPPORTED_BY> cseg </SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>48</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>48</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="mvapich2">
      <executable>mpiexec</executable>
      <arguments>
	<arg name="machine_file">--machinefile $ENV{PBS_NODEFILE}</arg>
	<arg name="num_tasks"> -n {{ total_tasks }} </arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="openmpi">
      <executable>mpiexec</executable>
      <arguments>
	<arg name="num_tasks"> -n {{ total_tasks }} </arg>
      </arguments>
    </mpirun>
    <module_system type="module" allow_error="true">
      <init_path lang="perl">/usr/share/Modules/init/perl.pm</init_path>
      <init_path lang="python">/usr/share/Modules/init/python.py</init_path>
      <init_path lang="csh">/usr/share/Modules/init/csh</init_path>
      <init_path lang="sh">/usr/share/Modules/init/sh</init_path>
      <cmd_path lang="perl">/usr/bin/modulecmd perl</cmd_path>
      <cmd_path lang="python">/usr/bin/modulecmd python</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <modules>
	<command name="purge"></command>
      </modules>
      <modules compiler="intel">
	<command name="load">compiler/intel/18.0.3</command>
	<command name="load">tool/netcdf/4.6.1/intel</command>
      </modules>
      <modules compiler="intel" mpilib="mvapich2">
	<command name="load">mpi/intel/mvapich2-2.3rc2-intel-18.0.3</command>
      </modules>
      <modules compiler="pgi">
	<command name="load">compiler/pgi/18.1</command>
	<command name="load">tool/netcdf/4.6.1/pgi</command>
      </modules>
      <modules compiler="nag">
	<command name="load">compiler/nag/6.2</command>
	<command name="load">tool/netcdf/4.6.1/nag</command>
      </modules>
      <modules compiler="nag" mpilib="mvapich2">
	<command name="load">mpi/nag/mvapich2-2.3rc2</command>
      </modules>
      <modules compiler="nag" mpilib="openmpi">
	<command name="load">mpi/nag/openmpi-3.1.0</command>
      </modules>
      <modules compiler="gnu">
	<command name="load">compiler/gnu/8.1.0</command>
	<command name="load">tool/netcdf/4.6.1/gcc</command>
      </modules>
      <modules compiler="gnu" mpilib="openmpi">
	<command name="load">mpi/gcc/openmpi-3.1.0a</command>
      </modules>
      <modules compiler="gnu" mpilib="mvapich2">
	<command name="load">mpi/gcc/mvapich2-2.3rc2-qlc</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="OMP_STACKSIZE">64M</env>
      <!-- The following is needed to access qsub from the compute nodes -->
      <env name="PATH">$ENV{PATH}:/cluster/torque/bin</env>
      <env name="ESMFMKFILE">/home/dunlap/ESMF-INSTALL/8.0.0bs16/lib/libg/Linux.intel.64.mvapich2.default/esmf.mk</env>
    </environment_variables>
    <resource_limits>
      <resource name="RLIMIT_STACK">-1</resource>
    </resource_limits>
  </machine>

  <machine MACH="homebrew">
    <DESC>

      Customize these fields as appropriate for your system,
      particularly changing MAX_TASKS_PER_NODE and MAX_MPITASKS_PER_NODE to the
      number of cores on your machine.  You may also want to change
      instances of '$ENV{HOME}/projects' to your desired directory
      organization.  You can use this entry in either of two ways:
      (1) without making any changes, by adding `--machine homebrew`
      to create_newcase or create_test; or (2) by copying this into a
      config_machines.xml file in your personal .cime directory and
      then changing the machine name (MACH="homebrew") to
      your machine name and the NODENAME_REGEX to something matching
      your machine's hostname.  With (2), you should not need the
      `--machine` argument, because the machine should be determined
      automatically.  However, with (2), you will also need to copy the
      homebrew-specific settings in config_compilers.xml into a
      config_compilers.xml file in your personal .cime directory, again
      changing the machine name (MACH="homebrew") to your machine name.
      A brief sketch of option (2) follows this machine entry.

    </DESC>
    <NODENAME_REGEX> something.matching.your.machine.hostname </NODENAME_REGEX>
    <OS>Darwin</OS>
    <COMPILERS>gnu</COMPILERS>
    <MPILIBS>mpich</MPILIBS>
    <CIME_OUTPUT_ROOT>$ENV{HOME}/projects/scratch</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>$ENV{HOME}/projects/cesm-inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>$ENV{HOME}/projects/ptclm-data</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$ENV{HOME}/projects/scratch/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>$ENV{HOME}/projects/baselines</BASELINE_ROOT>
    <CCSM_CPRNC>$ENV{HOME}/cesm/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE>make</GMAKE>
    <GMAKE_J>4</GMAKE_J>
    <BATCH_SYSTEM>none</BATCH_SYSTEM>
    <SUPPORTED_BY>__YOUR_NAME_HERE__</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>8</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>4</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="default">
      <executable>mpirun</executable>
      <arguments>
	<arg name="anum_tasks"> -np {{ total_tasks }}</arg>
	<arg name="labelstdout">-prepend-rank</arg>
      </arguments>
    </mpirun>
    <module_system type="none"/>
    <environment_variables>
      <env name="NETCDF_PATH">/usr/local</env>
    </environment_variables>
  </machine>
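
  <!--
    A minimal sketch of option (2) from the homebrew description above:
    a personal .cime/config_machines.xml built by copying that entry.
    The machine name "mylaptop", the hostname pattern, and the contact
    address are hypothetical placeholders; the sketch assumes the same
    <config_machines version="2.0"> root element used by the system-wide
    file, and the elements elided at the end are carried over unchanged
    from the homebrew entry.  With such a file in place, the machine is
    detected from the hostname, so the "machine" option to create_newcase
    or create_test is no longer needed.

    <config_machines version="2.0">
      <machine MACH="mylaptop">
        <DESC>Personal workstation adapted from the homebrew template</DESC>
        <NODENAME_REGEX>mylaptop.local</NODENAME_REGEX>
        <OS>Darwin</OS>
        <COMPILERS>gnu</COMPILERS>
        <MPILIBS>mpich</MPILIBS>
        <SUPPORTED_BY>your.name -at- example.org</SUPPORTED_BY>
        [remaining elements, e.g. the input/output roots, GMAKE settings,
        MAX_TASKS_PER_NODE / MAX_MPITASKS_PER_NODE, the mpirun block,
        module_system, and environment_variables, copied unchanged from
        the homebrew entry above]
      </machine>
    </config_machines>
  -->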

  <machine MACH="izumi">
    <DESC>NCAR CGD Linux Cluster, 48 pes/node, batch system is PBS</DESC>
    <NODENAME_REGEX>^i.*\.ucar\.edu</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>intel,pgi,nag,gnu</COMPILERS>
    <MPILIBS>mvapich2,openmpi</MPILIBS>
    <CIME_OUTPUT_ROOT>/scratch/cluster/$USER</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/fs/cgd/csm/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/project/tss</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/scratch/cluster/$USER/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/fs/cgd/csm/ccsm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>/fs/cgd/csm/tools/cime/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE>gmake --output-sync</GMAKE>
    <GMAKE_J>4</GMAKE_J>
    <BATCH_SYSTEM>pbs</BATCH_SYSTEM>
    <SUPPORTED_BY> cseg </SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>48</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>48</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="mvapich2">
      <executable>mpiexec</executable>
      <arguments>
	<arg name="machine_file">--machinefile $ENV{PBS_NODEFILE}</arg>
	<arg name="num_tasks"> -n {{ total_tasks }} </arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="openmpi">
      <executable>mpiexec</executable>
      <arguments>
	<arg name="num_tasks"> -n {{ total_tasks }} </arg>
      </arguments>
    </mpirun>
    <module_system type="module" allow_error="true">
      <init_path lang="perl">/usr/share/Modules/init/perl.pm</init_path>
      <init_path lang="python">/usr/share/Modules/init/python.py</init_path>
      <init_path lang="csh">/usr/share/Modules/init/csh</init_path>
      <init_path lang="sh">/usr/share/Modules/init/sh</init_path>
      <cmd_path lang="perl">/usr/bin/modulecmd perl</cmd_path>
      <cmd_path lang="python">/usr/bin/modulecmd python</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <modules>
	<command name="purge"></command>
      </modules>
      <modules compiler="intel">
	<command name="load">compiler/intel/19.0.2</command>
	<command name="load">tool/netcdf/4.6.1/intel</command>
      </modules>
      <modules compiler="intel" mpilib="mvapich2">
	<command name="load">mvapich2/2.3/intel-cluster-19.0.1</command>
      </modules>
      <modules compiler="pgi">
	<command name="load">compiler/pgi/18.10</command>
	<command name="load">tool/netcdf/4.6.1/pgi</command>
      </modules>
      <modules compiler="nag">
	<command name="load">compiler/nag/6.2</command>
	<command name="load">tool/netcdf/4.6.1/nag</command>
      </modules>
      <modules compiler="nag" mpilib="mvapich2">
	<command name="load">mvapich2/2.3/nag-6.2</command>
      </modules>
      <modules compiler="nag" mpilib="openmpi">
	<command name="load">openmpi/4.0.0/nag-6.2</command>
      </modules>
      <modules compiler="gnu">
	<command name="load">compiler/gnu/8.2.0</command>
	<command name="load">tool/netcdf/4.6.1/gcc</command>
      </modules>
      <modules compiler="gnu" mpilib="openmpi">
	<command name="load">openmpi/4.0.0/gnu-8.2.0</command>
      </modules>
      <modules compiler="gnu" mpilib="mvapich2">
	<command name="load">mvapich2/2.3/gnu-8.2.0</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="OMP_STACKSIZE">64M</env>
      <!-- The following is needed in order to run qsub from the compute nodes -->
      <env name="PATH">$ENV{PATH}:/cluster/torque/bin</env>
    </environment_variables>
    <resource_limits>
      <resource name="RLIMIT_STACK">-1</resource>
    </resource_limits>
  </machine>

  <machine MACH="laramie">
    <DESC>NCAR SGI test platform, os is Linux, 36 pes/node, batch system is PBS</DESC>
    <NODENAME_REGEX>.*.laramie.ucar.edu</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>intel,gnu</COMPILERS>
    <MPILIBS>mpt</MPILIBS>
    <CIME_OUTPUT_ROOT>/picnic/scratch/$USER</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>$ENV{CESMDATAROOT}/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>$ENV{CESMDATAROOT}/lmwg</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>$ENV{CESMDATAROOT}/cesm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>$ENV{CESMDATAROOT}/tools/cime/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <BATCH_SYSTEM>pbs</BATCH_SYSTEM>
    <SUPPORTED_BY>cseg</SUPPORTED_BY>
    <!-- have not seen any performance benefit in smt -->
    <MAX_TASKS_PER_NODE>36</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>36</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>FALSE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>mpiexec_mpt</executable>
      <arguments>
	<arg name="labelstdout">-p "%g:"</arg>
	<arg name="threadplacement"> omplace </arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/picnic/u/apps/la/opt/lmod/8.1.7/lmod/lmod/init/perl</init_path>
      <init_path lang="python">/picnic/u/apps/la/opt/lmod/8.1.7/lmod/lmod/init/env_modules_python.py</init_path>
      <init_path lang="csh">/picnic/u/apps/la/opt/lmod/8.1.7/lmod/lmod/init/csh</init_path>
      <init_path lang="sh">/picnic/u/apps/la/opt/lmod/8.1.7/lmod/lmod/init/sh</init_path>
      <cmd_path lang="perl">/picnic/u/apps/la/opt/lmod/8.1.7/lmod/lmod/libexec/lmod perl</cmd_path>
      <cmd_path lang="python">/picnic/u/apps/la/opt/lmod/8.1.7/lmod/lmod/libexec/lmod python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
	<command name="purge"/>
	<command name="load">ncarenv/1.3</command>
	<command name="load">cmake/3.16.4</command>
      </modules>
      <modules compiler="intel">
	<command name="load">intel/19.0.5</command>
	<command name="load">mkl</command>
      </modules>
      <modules compiler="gnu">
        <command name="load">gnu/9.1.0</command>
        <command name="load">openblas/0.3.6</command>
      </modules>
      <modules mpilib="mpt">
	<command name="load">mpt/2.21</command>
	<command name="load">netcdf-mpi/4.7.3</command>
      </modules>
      <modules mpilib="mpt" compiler="intel">
	<command name="load">pnetcdf/1.12.1</command>
	<command name="load">pio/2.4.4</command>
      </modules>
      <modules mpilib="openmpi">
	<command name="load">openmpi/3.1.4</command>
	<command name="load">netcdf-mpi/4.7.3</command>
      </modules>
      <modules>
	<command name="load">ncarcompilers/0.5.0</command>
      </modules>
      <modules mpilib="mpi-serial">
	<command name="load">netcdf/4.7.3</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="OMP_STACKSIZE">256M</env>
      <env name="MPI_TYPE_DEPTH">16</env>
    </environment_variables>
  </machine>

  <machine MACH="lawrencium-lr3">
    <DESC>Lawrencium LR3 cluster at LBL, OS is Linux (intel), batch system is SLURM</DESC>
    <OS>LINUX</OS>
    <COMPILERS>intel</COMPILERS>
    <MPILIBS>openmpi</MPILIBS>
    <CIME_OUTPUT_ROOT>/global/scratch/$ENV{USER}</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/global/scratch/$ENV{USER}/cesm_input_datasets/</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/global/scratch/$ENV{USER}/cesm_input_datasets/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/cesm_archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>$CIME_OUTPUT_ROOT/cesm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>$CIME_OUTPUT_ROOT/cesm_tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>4</GMAKE_J>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>rgknox at lbl dot gov</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>16</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>16</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>mpirun</executable>
      <arguments>
	<arg name="num_tasks">-np {{ total_tasks }}</arg>
	<arg name="tasks_per_node"> -npernode $MAX_MPITASKS_PER_NODE </arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="sh">/etc/profile.d/modules.sh</init_path>
      <init_path lang="csh">/etc/profile.d/modules.csh</init_path>
      <init_path lang="perl">/usr/Modules/init/perl.pm</init_path>
      <init_path lang="python">/usr/Modules/python.py</init_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="perl">/usr/Modules/bin/modulecmd perl</cmd_path>
      <cmd_path lang="python">/usr/Modules/bin/modulecmd python</cmd_path>
      <modules>
        <command name="purge"/>
        <command name="load">cmake</command>
        <command name="load">perl xml-libxml switch python/2.7</command>
      </modules>
      <modules compiler="intel">
        <command name="load">intel/2016.4.072</command>
        <command name="load">mkl</command>
      </modules>
      <modules compiler="intel" mpilib="mpi-serial">
        <command name="load">netcdf/4.4.1.1-intel-s</command>
      </modules>
      <modules compiler="intel" mpilib="!mpi-serial">
        <command name="load">openmpi</command>
        <command name="load">netcdf/4.4.1.1-intel-p</command>
      </modules>
    </module_system>
  </machine>

  <machine MACH="lawrencium-lr2">
    <DESC>Lawrencium LR2 cluster at LBL, OS is Linux (intel), batch system is SLURM</DESC>
    <OS>LINUX</OS>
    <COMPILERS>intel</COMPILERS>
    <MPILIBS>openmpi</MPILIBS>
    <CIME_OUTPUT_ROOT>/global/scratch/$ENV{USER}</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/global/scratch/$ENV{USER}/cesm_input_datasets/</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/global/scratch/$ENV{USER}/cesm_input_datasets/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/cesm_archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>$CIME_OUTPUT_ROOT/cesm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>$CIME_OUTPUT_ROOT/cesm_tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>4</GMAKE_J>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>rgknox and gbisht at lbl dot gov</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>12</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>12</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>mpirun</executable>
      <arguments>
        <arg name="num_tasks"> -np {{ total_tasks }}</arg>
        <arg name="tasks_per_node"> -npernode $MAX_MPITASKS_PER_NODE</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="sh">/etc/profile.d/modules.sh</init_path>
      <init_path lang="csh">/etc/profile.d/modules.csh</init_path>
      <init_path lang="perl">/usr/Modules/init/perl.pm</init_path>
      <init_path lang="python">/usr/Modules/python.py</init_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="perl">/usr/Modules/bin/modulecmd perl</cmd_path>
      <cmd_path lang="python">/usr/Modules/bin/modulecmd python</cmd_path>
      <modules>
        <command name="purge"/>
        <command name="load">cmake</command>
        <command name="load">perl xml-libxml switch python/2.7</command>
      </modules>
      <modules compiler="intel">
        <command name="load">intel/2016.4.072</command>
        <command name="load">mkl</command>
      </modules>
      <modules compiler="intel" mpilib="mpi-serial">
        <command name="load">netcdf/4.4.1.1-intel-s</command>
      </modules>
      <modules compiler="intel" mpilib="!mpi-serial">
        <command name="load">openmpi</command>
        <command name="load">netcdf/4.4.1.1-intel-p</command>
      </modules>
    </module_system>
  </machine>

  <machine MACH="lonestar5">
    <DESC>Lonestar5 cluster at TACC, OS is Linux (intel), batch system is SLURM</DESC>
    <NODENAME_REGEX>.*ls5\.tacc\.utexas\.edu</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>intel</COMPILERS>
    <MPILIBS>mpich</MPILIBS>
    <CIME_OUTPUT_ROOT>$ENV{SCRATCH}</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/work/02503/edwardsj/CESM/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/work/02503/edwardsj/CESM/inputdata/lmwg</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/cesm_archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/work/02503/edwardsj/CESM/cesm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>/work/02503/edwardsj/CESM/cime/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>4</GMAKE_J>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>cseg</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>48</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>24</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>FALSE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>srun</executable>
      <arguments>
	<arg name="num_tasks">--ntasks={{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <!-- allow ls5 modules to write to stderr without cime error -->
    <module_system type="module" allow_error="true">
      <init_path lang="perl">/opt/apps/lmod/lmod/init/perl</init_path>
      <init_path lang="python">/opt/apps/lmod/lmod/init/env_modules_python.py</init_path>
      <init_path lang="sh">/opt/apps/lmod/lmod/init/sh</init_path>
      <init_path lang="csh">/opt/apps/lmod/lmod/init/csh</init_path>
      <cmd_path lang="perl">/opt/apps/lmod/lmod/libexec/lmod perl</cmd_path>
      <cmd_path lang="python">/opt/apps/lmod/lmod/libexec/lmod python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>

      <modules>
        <command name="reset"/>
        <command name="load">cmake</command>
      </modules>
      <modules compiler="intel">
        <command name="load">intel/18.0.2</command>
      </modules>
      <modules mpilib="mpi-serial">
        <command name="load">netcdf/4.6.2</command>
      </modules>
      <modules mpilib="mpich">
	<command name="load">cray_mpich</command>
      </modules>
      <modules mpilib="!mpi-serial">
        <command name="load">pnetcdf/1.8.0</command>
	<command name="load">parallel-netcdf/4.6.2</command>
      </modules>
    </module_system>
  </machine>


  <machine MACH="melvin">
    <DESC>Linux workstation for Jenkins testing</DESC>
    <NODENAME_REGEX>(melvin|watson)</NODENAME_REGEX>
    <OS>LINUX</OS>
    <PROXY>sonproxy.sandia.gov:80</PROXY>
    <COMPILERS>gnu</COMPILERS>
    <MPILIBS>openmpi</MPILIBS>
    <SAVE_TIMING_DIR>/sems-data-store/ACME/timings</SAVE_TIMING_DIR>
    <CIME_OUTPUT_ROOT>$ENV{HOME}/acme/scratch</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/sems-data-store/ACME/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/sems-data-store/ACME/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/sems-data-store/ACME/baselines</BASELINE_ROOT>
    <CCSM_CPRNC>/sems-data-store/ACME/cprnc/build/cprnc</CCSM_CPRNC>
    <GMAKE>make</GMAKE>
    <GMAKE_J>32</GMAKE_J>
    <TESTS>acme_developer</TESTS>
    <BATCH_SYSTEM>none</BATCH_SYSTEM>
    <SUPPORTED_BY>jgfouca at sandia dot gov</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>64</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>64</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="default">
      <executable>mpirun</executable>
      <arguments>
        <arg name="num_tasks"> -np {{ total_tasks }}</arg>
        <arg name="tasks_per_node"> --map-by ppr:{{ tasks_per_numa }}:socket:PE=$ENV{OMP_NUM_THREADS} --bind-to hwthread</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="python">/usr/share/Modules/init/python.py</init_path>
      <init_path lang="perl">/usr/share/Modules/init/perl.pm</init_path>
      <init_path lang="sh">/usr/share/Modules/init/sh</init_path>
      <init_path lang="csh">/usr/share/Modules/init/csh</init_path>
      <cmd_path lang="python">/usr/bin/modulecmd python</cmd_path>
      <cmd_path lang="perl">/usr/bin/modulecmd perl</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <modules>
        <command name="purge"/>
        <command name="load">sems-env</command>
        <command name="load">acme-env</command>
        <command name="load">sems-git</command>
        <command name="load">sems-python/2.7.9</command>
        <command name="load">sems-cmake/2.8.12</command>
      </modules>
      <modules compiler="gnu">
        <command name="load">sems-gcc/5.3.0</command>
      </modules>
      <modules compiler="intel">
        <command name="load">sems-intel/16.0.3</command>
      </modules>
      <modules mpilib="mpi-serial">
        <command name="load">sems-netcdf/4.4.1/exo</command>
        <command name="load">acme-pfunit/3.2.8/base</command>
      </modules>
      <modules mpilib="!mpi-serial">
        <command name="load">sems-openmpi/1.8.7</command>
        <command name="load">sems-netcdf/4.4.1/exo_parallel</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="NETCDFROOT">$ENV{SEMS_NETCDF_ROOT}</env>
      <env name="OMP_STACKSIZE">64M</env>
      <env name="OMP_PROC_BIND">spread</env>
      <env name="OMP_PLACES">threads</env>
    </environment_variables>
    <environment_variables mpilib="!mpi-serial">
      <env name="PNETCDFROOT">$ENV{SEMS_NETCDF_ROOT}</env>
    </environment_variables>
  </machine>

  <machine MACH="mira">
    <DESC>ANL IBM BG/Q, os is BGQ, 16 pes/node, batch system is cobalt</DESC>
    <NODENAME_REGEX>.*.fst.alcf.anl.gov</NODENAME_REGEX>
    <OS>BGQ</OS>
    <COMPILERS>ibm</COMPILERS>
    <MPILIBS>ibm</MPILIBS>
    <CIME_OUTPUT_ROOT>/projects/$PROJECT/usr/$ENV{USER}</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/projects/ccsm/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/projects/ccsm/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/projects/$PROJECT/usr/$USER/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/projects/ccsm/ccsm_baselines/</BASELINE_ROOT>
    <CCSM_CPRNC>/projects/ccsm/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>4</GMAKE_J>
    <BATCH_SYSTEM>cobalt</BATCH_SYSTEM>
    <SUPPORTED_BY> cseg </SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>64</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>8</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>/usr/bin/runjob</executable>
      <arguments>
	<arg name="label"> --label short</arg>
	<!-- Ranks per node!! -->
	<arg name="tasks_per_node"> --ranks-per-node $MAX_MPITASKS_PER_NODE</arg>
	<!-- Total MPI Tasks -->
	<arg name="num_tasks"> --np {{ total_tasks }}</arg>
	<arg name="locargs">--block $COBALT_PARTNAME --envs OMP_WAIT_POLICY=active --envs BG_SMP_FAST_WAKEUP=yes $LOCARGS</arg>
	<arg name="bg_threadlayout"> --envs BG_THREADLAYOUT=1</arg>
	<arg name="omp_stacksize"> --envs OMP_STACKSIZE=32M</arg>
	<arg name="thread_count"> --envs OMP_NUM_THREADS=$ENV{OMP_NUM_THREADS}</arg>
      </arguments>
    </mpirun>
    <module_system type="soft">
      <init_path lang="csh">/etc/profile.d/00softenv.csh</init_path>
      <init_path lang="sh">/etc/profile.d/00softenv.sh</init_path>
      <cmd_path lang="csh">soft</cmd_path>
      <cmd_path lang="sh">soft</cmd_path>
      <modules>
	<command name="add">+mpiwrapper-xl</command>
	<command name="add">@ibm-compilers-2015-02</command>
	<command name="add">+cmake</command>
	<command name="add">+python</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="MPI_TYPE_MAX">10000</env>
      <env name="OMP_DYNAMIC">FALSE</env>
      <env name="OMP_STACKSIZE">64M</env>
      <env name="HDF5">/soft/libraries/hdf5/1.8.14/cnk-xl/current</env>
    </environment_variables>
  </machine>

  <machine MACH="modex">
      <DESC>Medium-sized Linux cluster at BNL, Torque (PBS) scheduler</DESC>
      <OS>LINUX</OS>
      <COMPILERS>gnu</COMPILERS>
      <MPILIBS>openmpi,mpi-serial</MPILIBS>
      <CIME_OUTPUT_ROOT>/data/$ENV{USER}</CIME_OUTPUT_ROOT>
      <DIN_LOC_ROOT>/data/Model_Data/cesm_input_datasets/</DIN_LOC_ROOT>
      <DIN_LOC_ROOT_CLMFORC>/data/Model_Data/cesm_input_datasets/atm/datm7</DIN_LOC_ROOT_CLMFORC>
      <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/cesm_archive/$CASE</DOUT_S_ROOT>
      <BASELINE_ROOT>$CIME_OUTPUT_ROOT/cesm_baselines</BASELINE_ROOT>
      <CCSM_CPRNC>/data/software/cesm_tools/cprnc/cprnc</CCSM_CPRNC>
      <GMAKE_J>4</GMAKE_J>
      <BATCH_SYSTEM>pbs</BATCH_SYSTEM>
      <SUPPORTED_BY>rgknox at lbl dot gov and sserbin at bnl dot gov</SUPPORTED_BY>
      <MAX_TASKS_PER_NODE>12</MAX_TASKS_PER_NODE>
      <MAX_MPITASKS_PER_NODE>12</MAX_MPITASKS_PER_NODE>
      <COSTPES_PER_NODE>12</COSTPES_PER_NODE>
      <PROJECT_REQUIRED>FALSE</PROJECT_REQUIRED>
      <mpirun mpilib="default">
          <executable>mpirun</executable>
          <arguments>
              <arg name="num_tasks">-np {{ total_tasks }}</arg>
              <arg name="tasks_per_node">-npernode $MAX_TASKS_PER_NODE</arg>
          </arguments>
      </mpirun>
      <module_system type="module">
          <init_path lang="sh">/etc/profile.d/modules.sh</init_path>
          <init_path lang="csh">/etc/profile.d/modules.csh</init_path>
          <init_path lang="perl">/usr/share/Modules/init/perl.pm</init_path>
          <init_path lang="python">/usr/share/Modules/init/python.py</init_path>
          <cmd_path lang="sh">module</cmd_path>
          <cmd_path lang="csh">module</cmd_path>
          <cmd_path lang="perl">/usr/bin/modulecmd perl</cmd_path>
          <cmd_path lang="python">/usr/bin/modulecmd python</cmd_path>
          <modules>
              <command name="purge"/>
              <command name="load">perl/5.22.1</command>
              <command name="load">libxml2/2.9.2</command>
              <command name="load">maui/3.3.1</command>
              <command name="load">python/2.7.13</command>
          </modules>
          <modules compiler="gnu">
              <command name="load">gcc/5.4.0</command>
              <command name="load">gfortran/5.4.0</command>
              <command name="load">hdf5/1.8.19fates</command>
              <command name="load">netcdf/4.4.1.1-gnu540-fates</command>
              <command name="load">openmpi/2.1.1-gnu540</command>
          </modules>
          <modules compiler="gnu" mpilib="!mpi-serial">
              <command name="load">openmpi/2.1.1-gnu540</command>
          </modules>
      </module_system>
       <environment_variables>
         <env name="HDF5_HOME">/data/software/hdf5/1.8.19fates</env>
         <env name="NETCDF_PATH">/data/software/netcdf/4.4.1.1-gnu540-fates</env>
       </environment_variables>
  </machine>

  <machine MACH="olympus">
    <DESC>PNL cluster, os is Linux (pgi), batch system is SLURM</DESC>
    <OS>LINUX</OS>
    <COMPILERS>pgi</COMPILERS>
    <MPILIBS>mpich</MPILIBS>
    <CIME_OUTPUT_ROOT>/pic/scratch/$USER</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/pic/scratch/tcraig/IRESM/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/pic/scratch/tcraig/IRESM/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/pic/scratch/$USER/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/pic/scratch/tcraig/IRESM/ccsm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>/pic/scratch/tcraig/IRESM/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>tcraig -at- ucar.edu</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>32</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>32</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>FALSE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>mpiexec_mpt</executable>
      <arguments>
	<arg name="mpi">--mpi=none</arg>
	<arg name="num_tasks">-n={{ total_tasks }}</arg>
	<arg name="kill-on-bad-exit">--kill-on-bad-exit</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/share/apps/modules/Modules/3.2.7/init/perl.pm</init_path>
      <init_path lang="csh">/share/apps/modules/Modules/3.2.7/init/csh</init_path>
      <init_path lang="sh">/share/apps/modules/Modules/3.2.7/init/sh</init_path>
      <cmd_path lang="perl">/share/apps/modules/Modules/3.2.7/bin/modulecmd perl</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <modules>
	<command name="purge"/>
	<command name="load">precision/i4</command>
	<command name="load">pgi/11.8</command>
	<command name="load">mvapich2/1.7</command>
	<command name="load">netcdf/4.1.3</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="OMP_STACKSIZE">64M</env>
    </environment_variables>
  </machine>

  <machine MACH="pleiades-bro">
    <DESC>NASA/AMES Linux Cluster, Linux (x86_64), 2.4 GHz Broadwell Intel Xeon E5-2680v4 processors, 28 pes/node (two 14-core processors) and 128 GB of memory/node, batch system is PBS</DESC>
    <OS>LINUX</OS>
    <COMPILERS>intel</COMPILERS>
    <MPILIBS>mpt</MPILIBS>
    <CIME_OUTPUT_ROOT>/nobackup/$USER</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/nobackup/fvitt/csm/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/nobackup/fvitt/csm/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/nobackup/$USER/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/nobackup/fvitt/cesm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>/u/fvitt/bin/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <BATCH_SYSTEM>pbs</BATCH_SYSTEM>
    <SUPPORTED_BY>fvitt -at- ucar.edu</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>28</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>28</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>mpiexec_mpt</executable>
      <arguments>
	<arg name="num_tasks">-n {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/usr/share/Modules/3.2.10/init/perl.pm</init_path>
      <init_path lang="sh">/usr/share/Modules/3.2.10/init/sh</init_path>
      <init_path lang="csh">/usr/share/Modules/3.2.10/init/csh</init_path>
      <init_path lang="python">/usr/share/Modules/3.2.10/init/python.py</init_path>
      <cmd_path lang="perl">/usr/bin/modulecmd perl</cmd_path>
      <cmd_path lang="python">/usr/bin/modulecmd python</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <modules>
	<command name="purge"/>
	<command name="load">nas</command>
	<command name="load">pkgsrc</command>
	<command name="load">comp-intel/2018.3.222</command>
	<command name="load">mpi-sgi/mpt.2.15r20</command>
	<command name="load">szip/2.1.1</command>
	<command name="load">hdf4/4.2.12</command>
	<command name="load">hdf5/1.8.18_mpt</command>
	<command name="load">netcdf/4.4.1.1_mpt</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="MPI_GROUP_MAX">1024</env>
      <env name="MPI_TYPE_MAX">100000</env>
      <env name="MPI_TYPE_DEPTH">16</env>
      <env name="KMP_AFFINITY">noverbose,disabled</env>
      <env name="KMP_SCHEDULE">static,balanced</env>
      <env name="OMP_STACKSIZE">256M</env>
    </environment_variables>
  </machine>

  <machine MACH="pleiades-has">
    <DESC>NASA/AMES Linux Cluster, Linux (x86_64), 2.5 GHz Haswell Intel Xeon E5-2680v3 processors, 24 pes/node (two 12-core processors) and 128 GB of memory/node, batch system is PBS</DESC>
    <OS>LINUX</OS>
    <COMPILERS>intel</COMPILERS>
    <MPILIBS>mpt</MPILIBS>
    <CIME_OUTPUT_ROOT>/nobackup/$USER</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/nobackup/fvitt/csm/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/nobackup/fvitt/csm/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/nobackup/$USER/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/nobackup/fvitt/cesm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>/u/fvitt/bin/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <BATCH_SYSTEM>pbs</BATCH_SYSTEM>
    <SUPPORTED_BY>fvitt -at- ucar.edu</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>24</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>24</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>mpiexec_mpt</executable>
      <arguments>
	<arg name="num_tasks">-n {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/usr/share/Modules/3.2.10/init/perl.pm</init_path>
      <init_path lang="sh">/usr/share/Modules/3.2.10/init/sh</init_path>
      <init_path lang="csh">/usr/share/Modules/3.2.10/init/csh</init_path>
      <init_path lang="python">/usr/share/Modules/3.2.10/init/python.py</init_path>
      <cmd_path lang="perl">/usr/bin/modulecmd perl</cmd_path>
      <cmd_path lang="python">/usr/bin/modulecmd python</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <modules>
	<command name="purge"/>
	<command name="load">nas</command>
	<command name="load">pkgsrc</command>
	<command name="load">comp-intel/2018.3.222</command>
	<command name="load">mpi-sgi/mpt.2.15r20</command>
	<command name="load">szip/2.1.1</command>
	<command name="load">hdf4/4.2.12</command>
	<command name="load">hdf5/1.8.18_mpt</command>
	<command name="load">netcdf/4.4.1.1_mpt</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="MPI_GROUP_MAX">1024</env>
      <env name="MPI_TYPE_MAX">100000</env>
      <env name="MPI_TYPE_DEPTH">16</env>
      <env name="KMP_AFFINITY">noverbose,disabled</env>
      <env name="KMP_SCHEDULE">static,balanced</env>
      <env name="OMP_STACKSIZE">256M</env>
    </environment_variables>
  </machine>

  <machine MACH="pleiades-san">
    <DESC>NASA/AMES Linux Cluster, Linux (x86_64), Altix ICE, 2.6 GHz Sandy Bridge processors, 16 cores/node and 32 GB of memory/node, batch system is PBS</DESC>
    <OS>LINUX</OS>
    <COMPILERS>intel</COMPILERS>
    <MPILIBS>mpt</MPILIBS>
    <CIME_OUTPUT_ROOT>/nobackup/$USER</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/nobackup/fvitt/csm/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/nobackup/fvitt/csm/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/nobackup/$USER/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/nobackup/fvitt/cesm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>/u/fvitt/bin/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <BATCH_SYSTEM>pbs</BATCH_SYSTEM>
    <SUPPORTED_BY>fvitt -at- ucar.edu</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>16</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>16</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>mpiexec_mpt</executable>
      <arguments>
	<arg name="num_tasks">-n {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/usr/share/Modules/3.2.10/init/perl.pm</init_path>
      <init_path lang="sh">/usr/share/Modules/3.2.10/init/sh</init_path>
      <init_path lang="csh">/usr/share/Modules/3.2.10/init/csh</init_path>
      <init_path lang="python">/usr/share/Modules/3.2.10/init/python.py</init_path>
      <cmd_path lang="perl">/usr/bin/modulecmd perl</cmd_path>
      <cmd_path lang="python">/usr/bin/modulecmd python</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <modules>
	<command name="purge"/>
	<command name="load">nas</command>
	<command name="load">pkgsrc</command>
	<command name="load">comp-intel/2018.3.222</command>
	<command name="load">mpi-sgi/mpt.2.15r20</command>
	<command name="load">szip/2.1.1</command>
	<command name="load">hdf4/4.2.12</command>
	<command name="load">hdf5/1.8.18_mpt</command>
	<command name="load">netcdf/4.4.1.1_mpt</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="MPI_GROUP_MAX">1024</env>
      <env name="MPI_TYPE_MAX">100000</env>
      <env name="MPI_TYPE_DEPTH">16</env>
      <env name="KMP_AFFINITY">noverbose,disabled</env>
      <env name="KMP_SCHEDULE">static,balanced</env>
      <env name="OMP_STACKSIZE">256M</env>
    </environment_variables>
  </machine>

  <machine MACH="pleiades-ivy">
    <DESC>NASA/AMES Linux Cluster, Linux (x86_64), Altix ICE, 2.8 GHz Ivy Bridge processors, 20 cores/node and 3.2 GB of memory per core, batch system is PBS</DESC>
    <OS>LINUX</OS>
    <COMPILERS>intel</COMPILERS>
    <MPILIBS>mpich</MPILIBS>
    <CIME_OUTPUT_ROOT>/nobackup/$USER</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/nobackup/fvitt/csm/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/nobackup/fvitt/csm/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>/nobackup/$USER/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/nobackup/fvitt/cesm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>/u/fvitt/bin/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <BATCH_SYSTEM>pbs</BATCH_SYSTEM>
    <SUPPORTED_BY>fvitt -at- ucar.edu</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>20</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>20</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>mpiexec_mpt</executable>
      <arguments>
	<arg name="num_tasks">-n {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/usr/share/Modules/3.2.10/init/perl.pm</init_path>
      <init_path lang="sh">/usr/share/Modules/3.2.10/init/sh</init_path>
      <init_path lang="csh">/usr/share/Modules/3.2.10/init/csh</init_path>
      <init_path lang="python">/usr/share/Modules/3.2.10/init/python.py</init_path>
      <cmd_path lang="perl">/usr/bin/modulecmd perl</cmd_path>
      <cmd_path lang="python">/usr/bin/modulecmd python</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <modules>
	<command name="purge"/>
	<command name="load">nas</command>
	<command name="load">pkgsrc</command>
	<command name="load">comp-intel/2018.3.222</command>
	<command name="load">mpi-sgi/mpt.2.15r20</command>
	<command name="load">szip/2.1.1</command>
	<command name="load">hdf4/4.2.12</command>
	<command name="load">hdf5/1.8.18_mpt</command>
	<command name="load">netcdf/4.4.1.1_mpt</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="MPI_GROUP_MAX">1024</env>
      <env name="MPI_TYPE_MAX">100000</env>
      <env name="MPI_TYPE_DEPTH">16</env>
      <env name="KMP_AFFINITY">noverbose,disabled</env>
      <env name="KMP_SCHEDULE">static,balanced</env>
      <env name="OMP_STACKSIZE">256M</env>
    </environment_variables>
  </machine>

  <machine MACH="sandia-srn-sems">
    <DESC>Linux workstation at Sandia on SRN with SEMS TPL modules</DESC>
    <NODENAME_REGEX>(s999964|climate|penn)</NODENAME_REGEX>
    <OS>LINUX</OS>
    <PROXY>wwwproxy.sandia.gov:80</PROXY>
    <COMPILERS>gnu</COMPILERS>
    <MPILIBS>openmpi</MPILIBS>
    <SAVE_TIMING_DIR>/sems-data-store/ACME/timings</SAVE_TIMING_DIR>
    <CIME_OUTPUT_ROOT>$ENV{HOME}/acme/scratch</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/sems-data-store/ACME/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/sems-data-store/ACME/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/sems-data-store/ACME/baselines</BASELINE_ROOT>
    <CCSM_CPRNC>/sems-data-store/ACME/cprnc/build/cprnc</CCSM_CPRNC>
    <GMAKE>make</GMAKE>
    <GMAKE_J>32</GMAKE_J>
    <TESTS>acme_developer</TESTS>
    <BATCH_SYSTEM>none</BATCH_SYSTEM>
    <SUPPORTED_BY>jgfouca at sandia dot gov</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>64</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>64</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="default">
      <executable>mpirun</executable>
      <arguments>
	<arg name="num_tasks"> -np {{ total_tasks }}</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="python">/usr/share/Modules/init/python.py</init_path>
      <init_path lang="perl">/usr/share/Modules/init/perl.pm</init_path>
      <init_path lang="sh">/usr/share/Modules/init/sh</init_path>
      <init_path lang="csh">/usr/share/Modules/init/csh</init_path>
      <cmd_path lang="python">/usr/bin/modulecmd python</cmd_path>
      <cmd_path lang="perl">/usr/bin/modulecmd perl</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <modules>
	<command name="purge"/>
	<command name="load">sems-env</command>
	<command name="load">sems-git</command>
	<command name="load">sems-python/2.7.9</command>
	<command name="load">sems-gcc/5.1.0</command>
	<command name="load">sems-openmpi/1.8.7</command>
	<command name="load">sems-cmake/2.8.12</command>
	<command name="load">sems-netcdf/4.3.2/parallel</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="NETCDFROOT">$ENV{SEMS_NETCDF_ROOT}</env>
      <env name="PNETCDFROOT">$ENV{SEMS_NETCDF_ROOT}</env>
    </environment_variables>
  </machine>

  <machine MACH="sandiatoss3">
    <DESC>SNL TOSS3 clusters (skybridge, chama), OS is Linux, batch system is SLURM</DESC>
    <NODENAME_REGEX>(skybridge|chama)-login</NODENAME_REGEX>
    <OS>LINUX</OS>
    <PROXY>wwwproxy.sandia.gov:80</PROXY>
    <COMPILERS>intel</COMPILERS>
    <MPILIBS>openmpi</MPILIBS>
    <SAVE_TIMING_DIR>/projects/ccsm/timings</SAVE_TIMING_DIR>
    <CIME_OUTPUT_ROOT>/gscratch/$USER/acme_scratch/$MACH</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/projects/ccsm/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/projects/ccsm/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>               <!-- complete path to a short term archiving directory -->
    <BASELINE_ROOT>/projects/ccsm/ccsm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>/projects/ccsm/cprnc/build/cprnc_wrap</CCSM_CPRNC>                <!-- path to the cprnc tool used to compare netcdf history files in testing -->
    <GMAKE_J>8</GMAKE_J>
    <TESTS>acme_integration</TESTS>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>jgfouca at sandia dot gov</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>16</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>16</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>

    <mpirun mpilib="default">
      <executable>mpirun</executable>
      <arguments>
	<arg name="num_tasks"> -np {{ total_tasks }}</arg>
	<arg name="tasks_per_node"> -npernode $MAX_MPITASKS_PER_NODE</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="python">/usr/share/Modules/init/python.py</init_path>
      <init_path lang="perl">/usr/share/Modules/init/perl.pm</init_path>
      <init_path lang="sh">/usr/share/Modules/init/sh</init_path>
      <init_path lang="csh">/usr/share/Modules/init/csh</init_path>
      <cmd_path lang="python">/usr/bin/modulecmd python</cmd_path>
      <cmd_path lang="perl">/usr/bin/modulecmd perl</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <modules>
	<command name="purge"/>
	<command name="load">sems-env</command>
	<command name="load">sems-git</command>
	<command name="load">sems-python/2.7.9</command>
	<command name="load">gnu/4.9.2</command>
	<command name="load">intel/intel-15.0.3.187</command>
	<command name="load">libraries/intel-mkl-15.0.2.164</command>
	<command name="load">libraries/intel-mkl-15.0.2.164</command>
      </modules>
      <modules mpilib="!mpi-serial">
	<command name="load" >openmpi-intel/1.8</command>
	<command name="load" >sems-hdf5/1.8.12/parallel</command>
	<command name="load" >sems-netcdf/4.3.2/parallel</command>
	<command name="load" >sems-hdf5/1.8.12/base</command>
	<command name="load" >sems-netcdf/4.3.2/base</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="NETCDFROOT">$ENV{SEMS_NETCDF_ROOT}</env>
      <env name="OMP_STACKSIZE">64M</env>
    </environment_variables>
    <environment_variables mpilib="!mpi-serial">
      <env name="PNETCDFROOT">$ENV{SEMS_NETCDF_ROOT}</env>
    </environment_variables>
  </machine>

  <machine MACH="stampede2-skx">
    <DESC>Intel Xeon Platinum 8160 ("Skylake"), 48 cores on two sockets (24 cores/socket), batch system is SLURM</DESC>
    <NODENAME_REGEX>.*stampede2</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>intel</COMPILERS>
    <MPILIBS>impi,mvapich2</MPILIBS>
    <PROJECT>TG-ATM180016</PROJECT>
    <CIME_OUTPUT_ROOT>$ENV{SCRATCH}</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/work/02503/edwardsj/CESM/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/work/02503/edwardsj/CESM/inputdata/lmwg</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$ENV{WORK}/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/work/02503/edwardsj/CESM/cesm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>/work/02503/edwardsj/CESM/cime/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>4</GMAKE_J>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>cseg</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>96</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>48</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="impi">
      <executable>ibrun</executable>
      <arguments>
	<arg name="ntasks"> -n {{ total_tasks }} </arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="mvapich2">
      <executable>ibrun</executable>
      <arguments>
	<arg name="ntasks"> -n {{ total_tasks }} </arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/opt/apps/lmod/lmod/init/perl</init_path>
      <init_path lang="python">/opt/apps/lmod/lmod/init/env_modules_python.py</init_path>
      <init_path lang="sh">/opt/apps/lmod/lmod/init/sh</init_path>
      <init_path lang="csh">/opt/apps/lmod/lmod/init/csh</init_path>
      <cmd_path lang="perl">/opt/apps/lmod/lmod/libexec/lmod perl</cmd_path>
      <cmd_path lang="python">/opt/apps/lmod/lmod/libexec/lmod python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
        <command name="purge"></command>
        <command name="load">TACC</command>
        <command name="load">python/2.7.13</command>
        <command name="load">intel/18.0.2</command>
        <command name="load">cmake/3.16.1</command>
      </modules>
      <modules mpilib="mvapich2">
        <command name="load">mvapich2/2.3.1</command>
        <command name="load">pnetcdf/1.11</command>
        <command name="load">parallel-netcdf/4.6.2</command>
      </modules>
      <modules mpilib="impi">
        <command name="rm">mvapich2</command>
        <command name="load">impi/18.0.2</command>
        <command name="load">pnetcdf/1.11</command>
        <command name="load">parallel-netcdf/4.6.2</command>
      </modules>
      <modules mpilib="mpi-serial">
        <command name="load">netcdf/4.3.3.1</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="OMP_STACKSIZE">256M</env>
     </environment_variables>
    <environment_variables comp_interface="nuopc" mpilib="impi">
      <env name="ESMFMKFILE">/work/01118/tg803972/stampede2/ESMF-INSTALL/8.0.0bs38/lib/libO/Linux.intel.64.intelmpi.default/esmf.mk</env>
    </environment_variables>
    <environment_variables comp_interface="nuopc">
      <env name="ESMF_RUNTIME_PROFILE">ON</env>
      <env name="ESMF_RUNTIME_PROFILE_OUTPUT">SUMMARY</env>
      <env name="UGCSINPUTPATH">/work/06242/tg855414/stampede2/FV3GFS/benchmark-inputs/2012010100/gfs/fcst</env>
      <env name="UGCSFIXEDFILEPATH">/work/06242/tg855414/stampede2/FV3GFS/fix_am</env>
      <env name="UGCSADDONPATH">/work/06242/tg855414/stampede2/FV3GFS/addon</env>
    </environment_variables>
  </machine>


  <machine MACH="stampede2-knl">
    <DESC>Intel Xeon Phi 7250 ("Knights Landing"), batch system is SLURM</DESC>
    <OS>LINUX</OS>
    <COMPILERS>intel</COMPILERS>
    <MPILIBS>impi,mvapich2</MPILIBS>
    <CIME_OUTPUT_ROOT>$ENV{SCRATCH}</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/work/02503/edwardsj/CESM/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/work/02503/edwardsj/CESM/inputdata/lmwg</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$ENV{WORK}/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/work/02503/edwardsj/CESM/cesm_baselines</BASELINE_ROOT>
    <CCSM_CPRNC>/work/02503/edwardsj/CESM/cime/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>4</GMAKE_J>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>cseg</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>256</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>64</MAX_MPITASKS_PER_NODE>
    <mpirun mpilib="impi">
      <executable>ibrun</executable>
    </mpirun>
    <mpirun mpilib="mvapich2">
      <executable>ibrun</executable>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/opt/apps/lmod/lmod/init/perl</init_path>
      <init_path lang="python">/opt/apps/lmod/lmod/init/env_modules_python.py</init_path>
      <init_path lang="sh">/opt/apps/lmod/lmod/init/sh</init_path>
      <init_path lang="csh">/opt/apps/lmod/lmod/init/csh</init_path>
      <cmd_path lang="perl">/opt/apps/lmod/lmod/libexec/lmod perl</cmd_path>
      <cmd_path lang="python">/opt/apps/lmod/lmod/libexec/lmod python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
        <command name="purge"></command>
        <command name="load">TACC</command>
        <command name="load">python/2.7.13</command>
        <command name="load">intel/17.0.4</command>
        <command name="load">cmake/3.10.2</command>
      </modules>
      <modules mpilib="mvapich2">
        <command name="load">mvapich2/2.3b</command>
        <command name="load">pnetcdf/1.11.0</command>
        <command name="load">parallel-netcdf/4.3.3.1</command>
      </modules>
      <modules mpilib="impi">
        <command name="rm">mvapich2</command>
        <command name="load">impi/17.0.3</command>
        <command name="load">pnetcdf/1.11.0</command>
        <command name="load">parallel-netcdf/4.3.3.1</command>
      </modules>
      <modules mpilib="mpi-serial">
        <command name="load">netcdf/4.3.3.1</command>
      </modules>
    </module_system>
    <environment_variables>
      <env name="OMP_STACKSIZE">256M</env>
    </environment_variables>
  </machine>

  <machine MACH="theia">
    <DESC>NOAA Theia Linux cluster, 24 pes/node, batch system is SLURM</DESC>
    <NODENAME_REGEX>tfe</NODENAME_REGEX>
    <OS>LINUX</OS>
    <COMPILERS>intel</COMPILERS>
    <MPILIBS>impi</MPILIBS>
    <PROJECT>nems</PROJECT>
    <SAVE_TIMING_DIR/>
    <CIME_OUTPUT_ROOT>/scratch4/NCEPDEV/nems/noscrub/$USER/cimecases</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/cesmdataroot/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/cesmdataroot/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/BASELINES</BASELINE_ROOT>
    <CCSM_CPRNC>/scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/cesmdataroot/tools/cprnc</CCSM_CPRNC>
    <GMAKE>make</GMAKE>
    <GMAKE_J>8</GMAKE_J>
    <BATCH_SYSTEM>slurm</BATCH_SYSTEM>
    <SUPPORTED_BY>cseg</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>24</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>24</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>srun</executable>
      <arguments>
	<arg name="num_tasks">-n $TOTALPES</arg>
      </arguments>
    </mpirun>
    <mpirun mpilib="mpi-serial">
      <executable></executable>
    </mpirun>
    <module_system type="module">
      <init_path lang="sh">/apps/lmod/lmod/init/sh</init_path>
      <init_path lang="csh">/apps/lmod/lmod/init/csh</init_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <cmd_path lang="python">/apps/lmod/lmod/libexec/lmod python</cmd_path>
      <modules compiler="intel">
        <command name="purge"/>
        <command name="load">intel/15.1.133</command>
        <command name="load">impi/5.1.1.109</command>
        <command name="load">netcdf/4.3.0</command>
        <command name="load">pnetcdf</command>
        <command name="use">/scratch4/NCEPDEV/nems/noscrub/emc.nemspara/soft/modulefiles</command>
	<command name="load">yaml-cpp</command>
        <command name="load">esmf/8.0.0bs29g</command>
      </modules>
    </module_system>
    <environment_variables comp_interface="nuopc">
      <env name="ESMF_RUNTIME_PROFILE">ON</env>
      <env name="ESMF_RUNTIME_PROFILE_OUTPUT">SUMMARY</env>
      <env name="UGCSINPUTPATH">/scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/INPUTDATA/benchmark-inputs/2012010100/gfs/fcst</env>
      <env name="UGCSFIXEDFILEPATH">/scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/INPUTDATA/fix_am</env>
      <env name="UGCSADDONPATH">/scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/INPUTDATA/addon</env>
    </environment_variables>
  </machine>

  <machine MACH="theta">
    <DESC>ALCF Cray XC40 ("Theta") KNL, OS is CNL, 64 pes/node, batch system is Cobalt</DESC>
    <NODENAME_REGEX>theta.*</NODENAME_REGEX>
    <OS>CNL</OS>
    <COMPILERS>intel,gnu,cray</COMPILERS>
    <MPILIBS>mpt</MPILIBS>
    <PROJECT>CESM_Highres_Testing</PROJECT>
    <CIME_OUTPUT_ROOT>/projects/CESM_Highres_Testing/cesm/scratch/$USER</CIME_OUTPUT_ROOT>
    <DIN_LOC_ROOT>/projects/CESM_Highres_Testing/cesm/inputdata</DIN_LOC_ROOT>
    <DIN_LOC_ROOT_CLMFORC>/projects/CESM_Highres_Testing/cesm/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
    <DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
    <BASELINE_ROOT>/projects/CESM_Highres_Testing/cesm/baselines</BASELINE_ROOT>
    <CCSM_CPRNC>/projects/CESM_Highres_Testing/cesm/tools/cprnc/cprnc</CCSM_CPRNC>
    <GMAKE_J>8</GMAKE_J>
    <BATCH_SYSTEM>cobalt_theta</BATCH_SYSTEM>
    <SUPPORTED_BY>cseg</SUPPORTED_BY>
    <MAX_TASKS_PER_NODE>64</MAX_TASKS_PER_NODE>
    <MAX_MPITASKS_PER_NODE>64</MAX_MPITASKS_PER_NODE>
    <PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
    <mpirun mpilib="default">
      <executable>aprun</executable>
      <arguments>
	<arg name="num_tasks" >-n {{ total_tasks }}</arg>
	<arg name="tasks_per_node" >-N {{ tasks_per_node }} </arg>
	<arg name="thread_count">--cc depth -d $OMP_NUM_THREADS</arg>
	<arg name="env_omp_stacksize">-e OMP_STACKSIZE=64M</arg>
	<arg name="env_thread_count">-e OMP_NUM_THREADS=$OMP_NUM_THREADS</arg>
      </arguments>
    </mpirun>
    <module_system type="module">
      <init_path lang="perl">/opt/modules/default/init/perl.pm</init_path>
      <init_path lang="python">/opt/modules/default/init/python.py</init_path>
      <init_path lang="sh">/opt/modules/default/init/sh</init_path>
      <init_path lang="csh">/opt/modules/default/init/csh</init_path>
      <cmd_path lang="perl">/opt/modules/default/bin/modulecmd perl</cmd_path>
      <cmd_path lang="python">/opt/modules/default/bin/modulecmd python</cmd_path>
      <cmd_path lang="sh">module</cmd_path>
      <cmd_path lang="csh">module</cmd_path>
      <modules>
	<command name="rm">craype-mic-knl</command>
	<command name="rm">PrgEnv-intel</command>
	<command name="rm">PrgEnv-cray</command>
	<command name="rm">PrgEnv-gnu</command>
	<command name="rm">intel</command>
	<command name="rm">cce</command>
	<command name="rm">cray-parallel-netcdf</command>
	<command name="rm">cray-hdf5-parallel</command>
	<command name="rm">pmi</command>
	<command name="rm">cray-libsci</command>
	<command name="rm">cray-mpich</command>
	<command name="rm">cray-netcdf</command>
	<command name="rm">cray-hdf5</command>
	<command name="rm">cray-netcdf-hdf5parallel</command>
	<command name="rm">craype</command>
	<command name="rm">papi</command>
      </modules>

      <modules compiler="intel">
        <command name="load">PrgEnv-intel/6.0.4</command>
        <command name="switch">intel intel/18.0.0.128</command>
        <command name="rm">cray-libsci</command>
      </modules>

      <modules compiler="cray">
        <command name="load">PrgEnv-cray/6.0.4</command>
        <command name="switch">cce cce/8.7.0</command>
      </modules>
      <modules compiler="gnu">
        <command name="load">PrgEnv-gnu/6.0.4</command>
        <command name="switch">gcc gcc/7.3.0</command>
      </modules>
      <modules>
        <command name="load">papi/5.6.0.1</command>
        <command name="swap">craype craype/2.5.14</command>
      </modules>
      <modules compiler="!intel">
        <command name="switch">cray-libsci/18.04.1</command>
      </modules>
      <modules>
        <command name="load">cray-mpich/7.7.0</command>
      </modules>
      <modules mpilib="mpt">
        <command name="load">cray-netcdf-hdf5parallel/4.4.1.1.6</command>
        <command name="load">cray-hdf5-parallel/1.10.1.1</command>
        <command name="load">cray-parallel-netcdf/1.8.1.3</command>
      </modules>
    </module_system>
  </machine>

  <default_run_suffix>
    <default_run_exe>${EXEROOT}/cesm.exe </default_run_exe>
    <default_run_misc_suffix> >> cesm.log.$LID 2>&amp;1 </default_run_misc_suffix>
  </default_run_suffix>

</config_machines>

CESM XML settings for Parallel Input/Output (PIO) library.

<?xml version="1.0"?>

<config_pio version="1.0">

  <!--- uncomment and fill in relevant sections
  <entry id="PIO_CONFIG_OPTS">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="PIO_ASYNC_INTERFACE">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <entry id="PIO_STRIDE">
    <values>
      <value>$MAX_MPITASKS_PER_NODE</value>
      <value mach="yellowstone" grid="a%ne120.+oi%gx1">60</value>
    </values>
  </entry>

  <!--
  <entry id="PIO_VERSION">
    <values>
      <value mach="stampede2-skx">1</value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="PIO_ROOT">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="PIO_NUMTASKS">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <entry id="PIO_TYPENAME">
    <values>
      <value>pnetcdf</value>
      <value mpilib="mpi-serial">netcdf</value>
    </values>
  </entry>

  <entry id="PIO_REARRANGER">
    <values>
      <!-- Note: there is a problem when the box rearranger is not used with the nuopc caps -->
      <!-- <value>$PIO_VERSION</value> -->
      <value>1</value>
    </values>
  </entry>

  <!--- uncomment and fill in relevant sections
  <entry id="PIO_DEBUG_LEVEL">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="PIO_BLOCKSIZE">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="PIO_BUFFER_SIZE_LIMIT">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="OCN_PIO_STRIDE">
    <values>
      <value grid="a%ne120.+oi%gx1">60</value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="OCN_PIO_ROOT">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="OCN_PIO_NUMTASKS">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="OCN_PIO_TYPENAME">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="LND_PIO_STRIDE">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="LND_PIO_ROOT">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="LND_PIO_NUMTASKS">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="LND_PIO_TYPENAME">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="ROF_PIO_STRIDE">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="ROF_PIO_ROOT">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="ROF_PIO_NUMTASKS">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="ROF_PIO_TYPENAME">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="ICE_PIO_STRIDE">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="ICE_PIO_ROOT">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="ICE_PIO_NUMTASKS">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="ICE_PIO_TYPENAME">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="ATM_PIO_STRIDE">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="ATM_PIO_ROOT">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="ATM_PIO_NUMTASKS">
    <values>
      <value></value>
    </values>
  </entry>
  -->


  <entry id="ATM_PIO_TYPENAME">
    <values>
      <value compset="DATM">netcdf</value>
    </values>
  </entry>


  <!--- uncomment and fill in relevant sections
  <entry id="CPL_PIO_STRIDE">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="CPL_PIO_ROOT">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="CPL_PIO_NUMTASKS">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="CPL_PIO_TYPENAME">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="GLC_PIO_STRIDE">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="GLC_PIO_ROOT">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="GLC_PIO_NUMTASKS">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="GLC_PIO_TYPENAME">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="WAV_PIO_STRIDE">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="WAV_PIO_ROOT">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="WAV_PIO_NUMTASKS">
    <values>
      <value></value>
    </values>
  </entry>
  -->

  <!--- uncomment and fill in relevant sections
  <entry id="WAV_PIO_TYPENAME">
    <values>
      <value></value>
    </values>
  </entry>
  -->

</config_pio>
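
In this file, a <value> element with no attributes supplies the default, and values carrying mach, grid, compset, or mpilib attributes take precedence when they match the case. As a minimal sketch of a site-specific override (the machine name "mymachine" is hypothetical), an entry could be extended like this:

  <entry id="PIO_TYPENAME">
    <values>
      <value>pnetcdf</value>
      <value mpilib="mpi-serial">netcdf</value>
      <!-- hypothetical machine-specific override -->
      <value mach="mymachine">netcdf</value>
    </values>
  </entry>

After a case is created, the same settings can usually be adjusted per case with xmlchange, for example: ./xmlchange PIO_TYPENAME=netcdf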

allactive SRCROOT/cime_config

The CESM all-active model settings are stored in the CESM cime_config GitHub repository, which includes the following XML files.

CESM XML settings for all-active component set (compset) configurations.
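
Each compset in that file is defined by a short alias plus a long name that encodes the component configuration. An illustrative (not verbatim) entry, with component versions that may differ between CESM releases:

<compset>
  <alias>B1850</alias>
  <lname>1850_CAM60_CLM50%BGC-CROP_CICE_POP2%ECO_MOSART_CISM2%NOEVOLVE_WW3_BGC%BDRD</lname>
</compset>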

CESM XML settings for all-active test configurations.
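
Each test entry ties a test type, compset, and grid to the machines, compilers, and test categories that exercise it. A sketch of one entry (the machine, compiler, and category values are illustrative):

<test compset="B1850" grid="f09_g17" name="SMS_Ld1">
  <machines>
    <machine name="cheyenne" compiler="intel" category="prealpha"/>
  </machines>
</test>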

CESM XML settings for optimized processor elements (PEs) layout configurations.
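
Each PE layout entry is keyed by grid, machine, and compset and assigns per-component MPI task counts, thread counts, and root PEs. A trimmed sketch with placeholder values (only two components shown; in CIME, negative task counts are typically interpreted as whole nodes):

<grid name="any">
  <mach name="any">
    <pes compset="any" pesize="any">
      <comment>illustrative placeholder layout</comment>
      <ntasks>
        <ntasks_atm>-4</ntasks_atm>
        <ntasks_ocn>-4</ntasks_ocn>
      </ntasks>
      <nthrds>
        <nthrds_atm>1</nthrds_atm>
      </nthrds>
      <rootpe>
        <rootpe_atm>0</rootpe_atm>
      </rootpe>
    </pes>
  </mach>
</grid>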