<?xml version="1.0" encoding="UTF-8"?>
<!-- generator="FeedCreator 1.8" -->
<?xml-stylesheet href="https://hpcwiki.uark.edu/lib/exe/css.php?s=feed" type="text/css"?>
<rdf:RDF
    xmlns="http://purl.org/rss/1.0/"
    xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
    xmlns:slash="http://purl.org/rss/1.0/modules/slash/"
    xmlns:dc="http://purl.org/dc/elements/1.1/">
    <channel rdf:about="https://hpcwiki.uark.edu/feed.php">
        <title>Arkansas High Performance Computing Center [hpcwiki]</title>
        <description></description>
        <link>https://hpcwiki.uark.edu/</link>
        <image rdf:resource="https://hpcwiki.uark.edu/lib/exe/fetch.php?media=wiki:dokuwiki.svg" />
       <dc:date>2026-04-30T03:17:52+00:00</dc:date>
        <items>
            <rdf:Seq>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=100g_tuning&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=abinit&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=abinit9&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=abyss&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=ahpcc_slurmjob_watcher&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=backups&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=batch&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=bioinformatics_catalog&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=blast&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=bowtie2&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=cloud_bursting&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=compiling&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=compression&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=comsol&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=conda&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=condo_queues&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=containers&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=cudnn&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=environment_modules&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=equipment&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=etiquette&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=facilities&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=faq&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=federated_identity_cirrus&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=federated_identity_login&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=gamess&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=getting_started&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=get_an_account&amp;rev=1777471972&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=github&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=gpu&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=graphics&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=gromacs&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=gromacs2023&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=hpcportal1_login&amp;rev=1777471064&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=hpcwiki&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=infernal&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=installed_software&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=interactive&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=jargon&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=jellyfish&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=kallisto&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=karpinski_usage&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=lammps&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=learning_resources&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=logging_in&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=login_advisor&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=login_servers&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=markdowntest&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=matlab&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=migration&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=mkl_library&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=moving_data&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=mpi&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=mpiblast&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=mpi_old&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=mpi_python&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=mxnet&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=namd&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=namd2023&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=nebula&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=newfront&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=non_bioinformatics_catalog&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=nwchem&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=off-campsu_access-dmz&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=off-campus_access&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=off_campus_access_dmz&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=off_campus_new&amp;rev=1777501340&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=ollama_llm&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=optimization&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=orca&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=package_managers&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=parabricks&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=perl&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=pinnacle&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=pinnacle_usage&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=portal&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=portal_login&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=portal_login_new&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=python&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=python_catalog&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=python_old&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=qe&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=quantum_espresso&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=quast&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=queueing_system&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=queues&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=r&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=research&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=resource_selection&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=salmon&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=sapplication_software&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=sas&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=scratch_output&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=selecting_resources&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=sickle&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=singularity-apptainer&amp;rev=1771955478&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=singularity&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=slurm_interactive&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=slurm_queues&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=slurm_sbatch_srun&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=slurm_scripts&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=software&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=spades&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=spark&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=ssh_2fa&amp;rev=1777501287&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=ssh_certificates&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=ssh_keys&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=ssh_login&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=ssh_login_new&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=ssh_login_nopass&amp;rev=1777501222&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=start&amp;rev=1777496993&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=storage&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=storage_old&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=sub_node_jobs&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=support_requests&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=tensorflow&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=tophat&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=torque_slurm_scripts&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=trinity&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=updates&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=vasp&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=vasp_old&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=vectorization&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=velvet&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=viennarna&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=virtual_machines&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=virtual_machines_old&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=walltime_extensions&amp;rev=1760557911&amp;do=diff"/>
                <rdf:li rdf:resource="https://hpcwiki.uark.edu/doku.php?id=windows&amp;rev=1760557911&amp;do=diff"/>
            </rdf:Seq>
        </items>
    </channel>
    <image rdf:about="https://hpcwiki.uark.edu/lib/exe/fetch.php?media=wiki:dokuwiki.svg">
        <title>Arkansas High Performance Computing Center [hpcwiki]</title>
        <link>https://hpcwiki.uark.edu/</link>
        <url>https://hpcwiki.uark.edu/lib/exe/fetch.php?media=wiki:dokuwiki.svg</url>
    </image>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=100g_tuning&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>100g_tuning</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=100g_tuning&amp;rev=1760557911&amp;do=diff</link>
        <description>100Gbit Ethernet Tuning

A summary of internet resources.  Background: A 100 Gb Ethernet adapter will not do anything near 100 Gb without tuning.  A new installation over 200 miles shows 12 GB/s.

 fasterdata.es.net

 romoreira

 Stanford

 Redhat-1

 Redhat-2 

 Redhat-3

 NVidia (https://docs.nvidia.com/networking/display/winof2v320/Performance+Tuning)

 bbr 

 PerfSonar 

 Intel 

What PerfSonar does automatically</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=abinit&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>abinit</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=abinit&amp;rev=1760557911&amp;do=diff</link>
        <description>Abinit

ABINIT is an open-source suite of programs for materials science. It implements density functional theory, using a plane wave basis set and pseudopotentials, to compute the electronic density and derived properties of materials ranging from molecules to surfaces to solids. More information and tutorials can be found</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=abinit9&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>abinit9</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=abinit9&amp;rev=1760557911&amp;do=diff</link>
        <description>Abinit9

Production version is 9.10.3


module load intel/21.7.0 mkl/21.3.0 impi/19.0.9 hdf5/ts/1.10.8 netcdf-c-2/4.9.0 netcdf-f-2/4.6.0 libxc-2/6.0.0 wannier90-2/3.1.0 omp/1 abinit/9.10.3


Run times with a sample problem on pairs of different CPUs. In each case OMP__</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=abyss&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>abyss</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=abyss&amp;rev=1760557911&amp;do=diff</link>
        <description>Abyss

ABySS is a de novo, parallel, paired-end sequence assembler that is designed for short reads. You can find more information on abyss here.

Environment Setup

To work with abyss, first we need to load abyss and its dependencies. The easiest way to do this is to modify the</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=ahpcc_slurmjob_watcher&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>ahpcc_slurmjob_watcher</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=ahpcc_slurmjob_watcher&amp;rev=1760557911&amp;do=diff</link>
        <description>AHPCC has a custom tool to help you interface with the SLURM scheduler. 
This tool is designed to retrieve the most commonly sought-after metrics of your SLURM jobs with much less effort from the user. 

The script can be found here:
/path/to/ahpcc_slurmjob_watcher.sh/on/pinnacle</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=backups&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>backups</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=backups&amp;rev=1760557911&amp;do=diff</link>
        <description>Backups

The razor and trestles clusters' /home areas are automatically and independently backed up each Sunday morning if they fit the size criterion (&lt;150 GB).  Each cluster home area is only backed up if its size is less than 150 GB (150 million KB). This backup limit is independent of any quota.</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=batch&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>batch</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=batch&amp;rev=1760557911&amp;do=diff</link>
        <description>Batch Jobs

A batch job is simply a set of commands grouped in a single file which are to be executed on a node or set of nodes assigned to the job by the scheduler.  Below is an example batch job which contains a single command hostname.  This command prints out the name of the host running the command.</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=bioinformatics_catalog&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>bioinformatics_catalog</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=bioinformatics_catalog&amp;rev=1760557911&amp;do=diff</link>
        <description>Bioinformatics Software Catalog

Not including OpenHPC and other RPM software in /opt and R/python/perl modules</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=blast&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>blast</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=blast&amp;rev=1760557911&amp;do=diff</link>
        <description>blast+ , blastall

NCBI Blast+ is a shared-memory program that runs on a single node with multiple threads. The Intel processors on the razor cluster will run blast about three times as fast as the AMD processors on trestles (but trestles has twice as many per node). Razor 12-core nodes are sufficient since blast+ scales to about 8 threads as shown by user/real time, but the number of cores actually present is used as the threads variable in each example.</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=bowtie2&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>bowtie2</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=bowtie2&amp;rev=1760557911&amp;do=diff</link>
        <description>Bowtie2

Bowtie 2 is an ultrafast and memory-efficient tool for aligning sequencing reads to long reference sequences. You can find more information about Bowtie2 here.

Environment and Setup

First we must load the bowtie2 module. The easiest way to do this is to add it to your</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=cloud_bursting&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>cloud_bursting</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=cloud_bursting&amp;rev=1760557911&amp;do=diff</link>
        <description>Cloud Bursting

Nebula &lt;http://nebula.uark.edu&gt; is a University of Arkansas privately hosted OpenStack &lt;https://www.openstack.org/&gt; software platform for cloud computing.  Some of the advantages of using a cloud computing environment are:

	*  the ability to bring up virtual machines using a wide range of operating systems.</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=compiling&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>compiling</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=compiling&amp;rev=1760557911&amp;do=diff</link>
        <description>Compiling and MPI

Compiler Modules

Gnu

(all gnu compilers by devtools or AHPCC: gcc/g++/gfortran) gcc/4.8.5, 7.3.1, 8.3.1, 8.4.0, 9.3.1

Version 4.8.5 is the base gcc rpm installation with CentOS 7.  The module does nothing except satisfy the compiler-module prerequisite that an MPI module expects (see below).  It is out-of-date and we don't recommend it for most purposes.
Versions 7.3.1, 8.3.1, 9.3.1 are RedHat devtools distributions with debugger, eclipse, etc.</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=compression&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>compression</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=compression&amp;rev=1760557911&amp;do=diff</link>
        <description>Compression/Decompression

Using gzip, zip, pigz with 8 threads, and crabz with 8 threads on a 6.5 GB test file, one of the Blast NR tar files. crabz is very much faster for compression than the alternatives.


method compress uncompress 
gzip    335 s      74 s
zip     315 s      70 s
pigz     41 s      43 s
crabz    21 s      50 s</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=comsol&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>comsol</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=comsol&amp;rev=1760557911&amp;do=diff</link>
        <description>Comsol 5.2.1

Comsol is licensed software; users will need to supply their own license or have permission from the license holder.  The performance of Comsol is highly sensitive to how you allocate the available cores between MPI processes (comsol -nn runs MPI like mpirun -np) and OpenMP threads, and Comsol is generally much faster on Intel than on AMD. We have tested on our nodes as follows:</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=conda&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>conda</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=conda&amp;rev=1760557911&amp;do=diff</link>
        <description>Conda Package Manager

Conda

Conda is an open source package management system and environment management system. Conda as a package manager helps you find and install packages. If you need a package that requires a different version of Python, you do not need to switch to a different environment manager, because conda is also an environment manager. With just a few commands, you can set up a totally separate environment to run that different version of Python, while continuing to run your usua…</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=condo_queues&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>condo_queues</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=condo_queues&amp;rev=1760557911&amp;do=diff</link>
        <description>Condo Queues

The Condo program allows individual users to purchase computing hardware with their own funds and use AHPCC staff to provide installation, power, cooling, networking and maintenance services.  The condo hardware owner has priority access to the condo nodes and has no job walltime restriction.  However, the condo nodes are also available for public use for up to 6 hour walltime jobs.  The 6 hour limit may be extended upon request if there are no condo owner jobs waiting in the queue…</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=containers&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>containers</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=containers&amp;rev=1760557911&amp;do=diff</link>
        <description>Singularity/Containers

Singularity &lt;http://singularity.lbl.gov/&gt; is a software container system.  It allows users to build and run entire scientific workflows, software and libraries using a specific distribution and version of Linux all packaged into a single image file.  It is based on the Linux</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=cudnn&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>cudnn</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=cudnn&amp;rev=1760557911&amp;do=diff</link>
        <description>cudnn

NVidia cuDNN is installed with Cuda 11.5.  Example:


pinnacle-l1:rfeynman:$ git clone https://github.com/Hardware-Alchemy/cuDNN-sample
pinnacle-l1:rfeynman:$ cd cuDNN-sample/cudnn_samples_v7/conv_sample
pinnacle-l1:rfeynman:$ srun --nodes=1 --ntasks-per-node=1 --cpus-per-task=32 \
  --partition gpu06 --qos gpu --time=6:00:00 --pty /bin/bash
c1715:rfeynman:/cuDNN-sample/cudnn_samples_v7/conv_sample$ module load gcc/9.3.1 cuda/11.5
c1715:rfeynman:/cuDNN-sample/cudnn_samples_v7/conv_sample$…</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=environment_modules&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>environment_modules</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=environment_modules&amp;rev=1760557911&amp;do=diff</link>
        <description>Environment Modules, .bashrc

Modules

When multiple versions of software are installed, some method is needed to use the version you want to use.  The 
Modules package is the overwhelming choice of HPC centers for this.
Modules is supplied on the system to manipulate the user's environment variables to run a choice of the needed programs and versions. The most important of these variables are</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=equipment&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>equipment</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=equipment&amp;rev=1760557911&amp;do=diff</link>
        <description>Equipment/Selecting Resources

We describe the resources available at AHPCC and how to select the best one for your computing job.
Computing resources are presently divided into four clusters that use separate schedulers.  This will be condensed in the future, as all logins will be moved to</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=etiquette&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>etiquette</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=etiquette&amp;rev=1760557911&amp;do=diff</link>
        <description>System Etiquette on a Shared Research Resource

Welcome to the AHPCC Pinnacle HPC system! As Pinnacle is a shared resource used by many concurrent researchers at any given time, it's very important to observe a few basic rules to ensure that everyone enjoys a favorable user experience</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=facilities&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>facilities</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=facilities&amp;rev=1760557911&amp;do=diff</link>
        <description>Facilities

The Pinnacle Phase I (2019), Phase II (2022), and Phase III (2024) clusters are the major compute resources at AHPCC. 

Pinnacle Phase I consists of 106 mostly Intel Skylake based compute nodes with a total of 26 NVidia mostly Volta V100 GPUs. Phase II consists of 79 AMD Zen based compute nodes with a total of 74 NVidia mostly Ampere A100 GPUs. Phase III consists of 36 AMD Zen based compute nodes with 4 Ampere L40 GPUs.  An awarded CC* grant will augment Phase III with non-GPU nodes …</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=faq&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>faq</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=faq&amp;rev=1760557911&amp;do=diff</link>
        <description>FAQ

1. How do I find my /scratch/ files during/after the job?

/scratch/${SLURMJOBID}/ is not really a directory but is a softlink to either /scr1/${SLURMJOBID}/ for $SLURMJOBID odd, or /scr2/${SLURMJOBID}/ for ${SLURMJOBID} even.  In the future there may be more scratch drives. The separation and link of scratch partitions makes a failure of a scratch disk affect fewer jobs.  We try to make the link from scr[1-2] to scratch on compute nodes in the job and on head nodes but it may not be presen…</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=federated_identity_cirrus&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>federated_identity_cirrus</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=federated_identity_cirrus&amp;rev=1760557911&amp;do=diff</link>
        <description>Federated Identity login

Authentication of user accounts for students, faculty and staff of the University of Arkansas in Fayetteville (UAF) is handled by the UAF Identity Provider (IdP) service.  An IdP service verifies a user's identity by requiring a user name, password and a two-factor authentication step (2FA) - an additional time-limited security token usually obtained by a phone application.</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=federated_identity_login&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>federated_identity_login</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=federated_identity_login&amp;rev=1760557911&amp;do=diff</link>
        <description>Federated Identity with Globus ID

Globus ID is an identity provider operated by Globus.org, a cloud-based bulk data transfer solution used by AHPCC.  A Globus ID account is required to use the Globus data transfer system, which is the recommended solution for</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=gamess&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>gamess</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=gamess&amp;rev=1760557911&amp;do=diff</link>
        <description>Gamess

The General Atomic and Molecular Electronic Structure System (GAMESS) is a general ab initio quantum chemistry package. It is maintained by Mark Gordon's research group at Iowa State University. Source code, documentation and examples are available at the</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=getting_started&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>getting_started</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=getting_started&amp;rev=1760557911&amp;do=diff</link>
        <description>Getting Started

Get an Account

AHPCC is available for research and instructional use to faculty and students of any Arkansas university and their research collaborators.  There is no charge for use of our computing resources.  (Priority access to our resources is available through our</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=get_an_account&amp;rev=1777471972&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2026-04-29T14:12:52+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>get_an_account</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=get_an_account&amp;rev=1777471972&amp;do=diff</link>
        <description>Get an Account

AHPCC is available for research and instructional use to faculty and students of any Arkansas university and their research collaborators.  There is no charge for use of our computing resources.  (Priority access to storage and compute resources is available through our</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=github&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>github</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=github&amp;rev=1760557911&amp;do=diff</link>
        <description>Git - version control tool

'git' is an open source version control tool available to all users on Pinnacle. It is used to keep track of changes made to files in a specific directory.  The basic commands used for this purpose are:


git init     - specify a directory for version control with git
git add      - add a file to a list of files for version control
git commit   - create a snapshot of the current version of files 
git branch   - create an alternate version of files 
git checkout - s…</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=gpu&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>gpu</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=gpu&amp;rev=1760557911&amp;do=diff</link>
        <description>AHPCC GPU Nodes

This describes the NVidia GPU nodes on the Razor cluster and how to use them.

Hardware and Queues

There are nine NVidia GPU nodes. Five have dual Intel E5520 2.27 GHz CPUs, 12 GB main memory, and dual NVidia GTX 480 GPUs.  One has dual Intel E5-2630v3 2.40</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=graphics&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>graphics</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=graphics&amp;rev=1760557911&amp;do=diff</link>
        <description>Viewing remote X graphics from the cluster

The HPC clusters are not designed for graphics, but it is possible.  The compute nodes, except for GPU nodes, have low-capability embedded graphics cards.  Simple graphics are possible.  Complex graphics like grid generation and Matlab plots are usually better done on a graphics workstation (though moderately complex graphics from a GPU node are possible; see the end of this page).</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=gromacs&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>gromacs</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=gromacs&amp;rev=1760557911&amp;do=diff</link>
        <description>Gromacs

Updated by gromacs2023.

Several versions of the &lt;http://www.gromacs.org&gt; molecular dynamics program are installed. The most complete and latest is version 2016.3.

Gromacs makes heavy use of the &lt;http://www.fftw.org&gt; FFT package.  Both Gromacs and FFTW make heavy use of Intel SSE/AVX vector instructions.  Our modules for gromacs 2016.3 and fftw 3.3.6 automatically select the proper vector type for our compute nodes at runtime (SSE2, AVX, AVX2 for FFTW, same plus SSE4.1 for Gromacs). Th…</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=gromacs2023&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>gromacs2023</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=gromacs2023&amp;rev=1760557911&amp;do=diff</link>
        <description>Gromacs 2023.2/2023.3

Several versions of the &lt;http://www.gromacs.org&gt; molecular dynamics program are installed. The most complete and latest is version 2023.2.  Only the single precision version is installed at this time.  The various computers are compared with the GMX50 bare water benchmark __</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=hpcportal1_login&amp;rev=1777471064&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2026-04-29T13:57:44+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>hpcportal1_login</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=hpcportal1_login&amp;rev=1777471064&amp;do=diff</link>
        <description>Portal Login

&lt;https://hpc-portal1.hpc.uark.edu&gt; is an alternative to our main  OpenOnDemand  portal (&lt;https://hpc-portal2.hpc.uark.edu&gt;). It uses a new authentication procedure which no longer relies on the UofA's locally maintained LDAP or Active Directory services.

 &lt;https://hpc-portal1.hpc.uark.edu&gt;

Users from any of the University of Arkansas System (UASys)</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=hpcwiki&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>hpcwiki</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=hpcwiki&amp;rev=1760557911&amp;do=diff</link>
        <description>Arkansas HPCC User Support Wiki

Return to AHPCC Web Site

Getting Started with AHPCC Clusters

Pinnacle &quot;How to Use&quot; Summary 

Trestles &quot;How to Use&quot; Summary

Razor &quot;How to Use&quot; Summary

Karpinski &quot;How to Use&quot; Summary

 Virtual Machines on Pinnacle 

Razor GPU nodes &quot;How to Use&quot; Summary

Environment Modules

How to file a support request

Installed Software

Learning Resources

Storage

Moving Data

Backups

Queues

Queueing System

Cloud Computing

Grant-Supported Condo Computing

Vectorizatio…</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=infernal&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>infernal</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=infernal&amp;rev=1760557911&amp;do=diff</link>
        <description>Infernal

Infernal (“INFERence of RNA ALignment”) is for searching DNA sequence databases for RNA structure and sequence similarities. You can find more information about Infernal here.

Environment Setup

To use Infernal the modules for its software and other dependencies need to be loaded first. The easiest way to do this is editing your</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=installed_software&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>installed_software</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=installed_software&amp;rev=1760557911&amp;do=diff</link>
        <description>Abinit

abyss

blast

Bowtie2

Matlab

mpiblast

comsol

Gamess

gromacs

Infernal

JellyFish

Kallisto

NWChem

namd

ORCA

python

quantum_espresso

Quast

Parabricks

R

Salmon

SAS

Sickle

Singularity

spades

spark

TopHat

Trinity

velvet

ViennaRNA

Libraries

MKL library

tensorflow

mxnet</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=interactive&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>interactive</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=interactive&amp;rev=1760557911&amp;do=diff</link>
        <description>Interactive Jobs

To start an interactive job, use the qsub command with the -I option: 


razor-l1:pwolinsk:$ qsub -I 
qsub: waiting for job 1451274.sched to start
qsub: job 1451274.sched ready

PBS_NODEFILE=/var/spool/torque/aux/1451274.sched PBS_NUM_NODES=walltime=03 PBS_PPN=walltime=03:00:00 PBS_PPA=12
Currently Loaded Modulefiles:
  1) os/el6
compute1141:pwolinsk:$</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=jargon&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>jargon</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=jargon&amp;rev=1760557911&amp;do=diff</link>
        <description>HPC Jargon

See also &lt;https://computing.llnl.gov/tutorials/parallel_comp/#Terminology&gt;

HPC

high performance computing. Implies a higher percentage of CPU and memory usage than typical administrative computing, or implies a program too large for, or that takes too long to reasonably run on, a desktop computer.  In academia, used for and implies computing for research. Related, HTC, high throughput computing, essentially similar but oriented to processing large data files and/or many separate in…</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=jellyfish&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>jellyfish</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=jellyfish&amp;rev=1760557911&amp;do=diff</link>
        <description>JellyFish

JELLYFISH is a tool for fast, memory-efficient counting of k-mers in DNA. It was developed by Guillaume Marçais and Carl Kingsford. You can find more information on JellyFish here.

Environment Setup

To work with jellyfish first we need to load the module. The easiest way to do this is to modify the</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=kallisto&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>kallisto</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=kallisto&amp;rev=1760557911&amp;do=diff</link>
        <description>Kallisto

Kallisto is a program for quantifying abundances of transcripts from RNA-Seq data, or more generally of target sequences using high-throughput sequencing reads. More information on kallisto can be found here

Environment Setup

Edit your $HOME/.bashrc</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=karpinski_usage&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>karpinski_usage</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=karpinski_usage&amp;rev=1760557911&amp;do=diff</link>
        <description>How to use the Karpinski Cluster

This is a brief “how to” summary of usage for users of the Karpinski cluster.

Karpinski has 18 compute nodes.  Each node has a single E5-2620 v4 CPU, 32 GB of RAM and an NVidia T4 GPU card.  The cluster is primarily intended for the use of the Computer Science &amp; Computer Engineering (CSCE) students and faculty as a teaching and training resource in two areas:</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=lammps&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>lammps</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=lammps&amp;rev=1760557911&amp;do=diff</link>
        <description>lammps

For a reference Intel compilation, see
/scrfs/apps/lammps/lammps-16Mar18/src/lmp_pinnacle-threaded.  Lammps modules are


/scrfs/apps/lammps/lammps-16Mar18/src$ make ps | grep &quot;YES:&quot;
Installed YES: package CLASS2
Installed YES: package KSPACE
Installed YES: package MANYBODY
Installed YES: package MOLECULE
Installed YES: package USER-FEP
Installed YES: package USER-INTEL
Installed YES: package USER-MESO
Installed YES: package USER-MISC
Installed YES: package USER-MOLFILE
Installed YES: pa…</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=learning_resources&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>learning_resources</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=learning_resources&amp;rev=1760557911&amp;do=diff</link>
        <description>UA credit classes relevant to HPC and Computational Science
Class Name / Semester / Instructor / Class Page:
BIOL 480V Special Topics (Landscape Ecology) - Spring - Kusum Naithani
BIOL 5153 Practical Programming for Biologists - Spring - Andy Alverson
BIOL 5253 Genomics and Bioinformatics</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=logging_in&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>logging_in</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=logging_in&amp;rev=1760557911&amp;do=diff</link>
        <description>Logging In

AHPCC clusters can be accessed via the SSH protocol (ssh hpc-portal2.hpc.uark.edu) and via the  OpenOnDemand  web portal &lt;https://hpc-portal2.hpc.uark.edu&gt;.

The common ssh text login host is  hpc-portal2.hpc.uark.edu.  Once logged in, users are redirected to one of 10 login nodes, pinnacle-l1..pinnacle-l10.</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=login_advisor&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>login_advisor</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=login_advisor&amp;rev=1760557911&amp;do=diff</link>
        <description>AHPCC Login Advisor

Due to network security concerns, UAF ITS networking team restricts access to AHPCC resources.  IP addresses of AHPCC clients are divided into 3 groups:

	*  UAF campus
	*  Research network 
	*  Commodity internet

The appropriate login point and login method to AHPCC depend on which of these 3 IP groups the source IP belongs to.</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=login_servers&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>login_servers</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=login_servers&amp;rev=1760557911&amp;do=diff</link>
        <description>Login Servers and Networks

Various external networks pass through varying amounts of security on the way to the cluster login servers.
These networks are, in order of increasing security measures:

	* UA Campus and machines logged into the  UA VPN :
These will have IP addresses of 130.184.x.x or 10.x.x.x.  You should be able to web connect https: to</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=markdowntest&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>markdowntest</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=markdowntest&amp;rev=1760557911&amp;do=diff</link>
        <description>Namd

The namd-verbs-smp binary
&lt;https://web.archive.org/web/20181127065652/http://www.ks.uiuc.edu/Research/namd/benchmarks/&gt;
version 2.11 or 2.12 is installed in /share/apps/NAMD on razor
and trestles. It does not use MPI.

This is for multiple-node runs with charmrun as the distributed
component and namd2 on each compute node. We have found most runs are
faster with the **</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=matlab&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>matlab</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=matlab&amp;rev=1760557911&amp;do=diff</link>
        <description>Matlab

Matlab is a matrix-oriented computing language.  UA/Fayetteville has a campus-wide license.  We have permission from Mathworks to serve this program to in-state users but not to users outside of Arkansas who are not affiliated with UAF.

The</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=migration&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>migration</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=migration&amp;rev=1760557911&amp;do=diff</link>
        <description>Migration

/scratch filesystems are relatively small and expensive.  Unused data will be migrated out (razor) or deleted (trestles).

Razor: Top level subdirectories under /scratch/$USER are searched for files modified less than 60 days ago. If none are found, the subdirectories are migrated to /storage/$USER/ if there is no name conflict, or to /storage/$USER/scratch if there is a name conflict.  This may erroneously move read-only data that doesn't hit the</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=mkl_library&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>mkl_library</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=mkl_library&amp;rev=1760557911&amp;do=diff</link>
        <description>Intel MKL

Intel Math Kernel Library (Intel MKL) is a library of optimized math routines for science, engineering, and financial applications. Core math functions include BLAS, LAPACK, ScaLAPACK, sparse solvers, fast Fourier transforms, and vector math. The routines in MKL are hand-optimized specifically for Intel processors.</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=moving_data&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>moving_data</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=moving_data&amp;rev=1760557911&amp;do=diff</link>
        <description>Data Transfer to and from AHPCC Clusters

Small files (&lt;100MB)

Three data transfer protocols are supported to move data to and from the main storage on the AHPCC clusters (a brief example follows the list):

	* scp (secure copy)
	* sftp (secure ftp)
	* rsync
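
A minimal sketch of pushing data from a workstation to cluster storage; the username rfeynman, the file names, and the destination path are placeholders:


scp results.tar.gz rfeynman@hpc-portal2.hpc.uark.edu:/storage/rfeynman/
rsync -av data/ rfeynman@hpc-portal2.hpc.uark.edu:/storage/rfeynman/data/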

In addition the wget and</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=mpi&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>mpi</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=mpi&amp;rev=1760557911&amp;do=diff</link>
        <description>MPI

The great majority of multi-node programs in HPC (as well as many single-node programs) use the MPI (Message Passing Interface) parallel software &lt;https://www.mcs.anl.gov/research/projects/mpi/&gt;.  There are many possible options in configuring MPI for a particular set of hardware (the help file ./configure ________</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=mpiblast&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>mpiblast</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=mpiblast&amp;rev=1760557911&amp;do=diff</link>
        <description>mpiBLAST



mpiBLAST is a freely available, open-source, parallel implementation of NCBI BLAST. mpiBLAST takes advantage of shared parallel computing resources, i.e. a cluster, which gives it access to more available resources than NCBI BLAST, which can only take advantage of shared-memory multiprocessors (SMPs).</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=mpi_old&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>mpi_old</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=mpi_old&amp;rev=1760557911&amp;do=diff</link>
        <description>MPI types, MPI examples, MPI-threaded hybrid, HPL

Here are some MPI examples for different flavors. Each also illustrates (a) setting modules and environment variables in the batch file, and (b) hybrid MPI/MKL threads. Hybrid MPI/OpenMP is run in the same way as MPI/MKL, except the relevant environment variable is **</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=mpi_python&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>mpi_python</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=mpi_python&amp;rev=1760557911&amp;do=diff</link>
        <description>MPI-python

Referring to the mpi page, we will use openmpi/4.1.4 and mvapich2/2.3.7 MPI modules, and the same form for mpirun/mpiexec. Referring to the python page, we will create a conda environment to match each MPI variant.
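
A minimal sketch of one such pairing (the environment name and Python version are placeholders, and this is not necessarily the exact recipe from the python page); mpi4py is built from source so that it links against the loaded MPI module:


module load openmpi/4.1.4
conda create -n ompi-py python=3.10 -y
conda activate ompi-py
MPICC=$(which mpicc) pip install --no-binary=mpi4py mpi4py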

Mixing modules and conda can be a little tricky as both try to take over the current environment, most importantly ___</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=mxnet&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>mxnet</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=mxnet&amp;rev=1760557911&amp;do=diff</link>
        <description>MXNet

MXNet is a deep learning library with support for Python, R, C++, Scala, Julia, Matlab and JavaScript.  It can run on CPUs and GPUs as well as on multiple cluster nodes.  Please see the project website for detailed information:

&lt;https://mxnet.readthedocs.io/en/latest/&gt;

On Razor, MXNet is compiled with GPU support as a python package and installed in</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=namd&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>namd</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=namd&amp;rev=1760557911&amp;do=diff</link>
        <description>Namd

The namd-verbs-smp binary &lt;https://web.archive.org/web/20181127065652/http://www.ks.uiuc.edu/Research/namd/benchmarks/&gt; version 2.11 or 2.12 is installed in /share/apps/NAMD on razor and trestles.  It does not use MPI.

This is for multiple-node runs with charmrun as the distributed component and namd2 on each compute node. We have found most runs are faster with the</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=namd2023&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>namd2023</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=namd2023&amp;rev=1760557911&amp;do=diff</link>
        <description>namd 2023

Here is an update on namd for the shared memory one-node version namd2/namd3 and the multi-node version charmrun++.  The standard NAMD benchmark apoa1 is too small to show the scaling on  a reasonably modern system, so here we use a user's lipid simulation for 25k steps until it prints its</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=nebula&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>nebula</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=nebula&amp;rev=1760557911&amp;do=diff</link>
        <description>Nebula/OpenStack

Nebula &lt;http://nebula.uark.edu&gt; is a University of Arkansas privately hosted OpenStack &lt;https://www.openstack.org/&gt; software platform for cloud computing.  Some of the advantages of using a cloud computing environment are:

	*  the ability to bring up virtual machines using a wide range of operating systems.</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=newfront&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>newfront</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=newfront&amp;rev=1760557911&amp;do=diff</link>
        <description>ARP User Support Wiki wiki.hpc.arkansas.edu

Our mission: Enabling computational academic research at greater than desktop scale.

ARP is available for all Arkansas academic researchers and educators, and most services are free of charge.

Get an account

Login by portal</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=non_bioinformatics_catalog&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>non_bioinformatics_catalog</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=non_bioinformatics_catalog&amp;rev=1760557911&amp;do=diff</link>
        <description>Non-bioinformatics Software Catalog

Not including OpenHPC and other RPM software in /opt and R/python/perl modules</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=nwchem&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>nwchem</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=nwchem&amp;rev=1760557911&amp;do=diff</link>
        <description>NWChem

NWChem is a scalable computational tool for large-scale molecular simulations. Along with computational chemistry, the package also includes quantum chemical and molecular dynamics functionality. It was developed at the Pacific Northwest National Laboratory. More documentation is available at the</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=off-campsu_access-dmz&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>off-campsu_access-dmz</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=off-campsu_access-dmz&amp;rev=1760557911&amp;do=diff</link>
        <description>Off-campus Access DMZ

SSH access

To accommodate users logging in from other locations, a new access portal, hpc-portal2.hpc.uark.edu, is available to any user holding an active AHPCC account.  This service, upon successful login, registers the source IP address of the user's client machine and adds that IP to the list of addresses allowed to pass network traffic to the AHPCC login nodes.  To use the service, ssh into port 2022 of hpc-portal2.hpc.uark.edu.
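
A minimal example, with the username rfeynman as a placeholder:


ssh -p 2022 rfeynman@hpc-portal2.hpc.uark.edu</description>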
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=off-campus_access&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>off-campus_access</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=off-campus_access&amp;rev=1760557911&amp;do=diff</link>
        <description>Off-campus Access

SSH access

Direct Secure Shell (SSH) access to AHPCC login nodes is only allowed from:

	* UAF campus network
	* via UAF Virtual Private Network (VPN &lt;https://its.uark.edu/network-access/vpn/index.php&gt;) connection
	* Other research organization networks, with membership determined by the Arkansas Research and Education Optical Network (ARE-ON</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=off_campus_access_dmz&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>off_campus_access_dmz</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=off_campus_access_dmz&amp;rev=1760557911&amp;do=diff</link>
        <description>Off-campus Access DMZ

SSH access

To accommodate users logging in from other locations, a new access portal, hpc-portal2.hpc.uark.edu, is available to any user holding an active AHPCC account.


First Step

(You may skip this step if you are located at UA or UAMS, or using their VPNs, since those addresses are pre-registered, as are addresses that ARE-ON recognizes as research networks, including most university networks and Ozarks Go.)</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=off_campus_new&amp;rev=1777501340&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2026-04-29T22:22:20+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>off_campus_new</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=off_campus_new&amp;rev=1777501340&amp;do=diff</link>
        <description>Off UofA Campus Network SSH Access

Secure Shell (SSH) access to Pinnacle from the University of Arkansas, Fayetteville campus network and the local broadband providers in Fayetteville (Ozarks Go and Cox Internet) is not restricted.  Unrestricted access also includes selected, well known IP ranges from UAMS, UALR, ASU, UAPB, UCA, UAMC, ATU, SAU and HARDING.  To enable SSH access from outside of these networks you must take an additional step to register your source IP address and add it to the l…</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=ollama_llm&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>ollama_llm</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=ollama_llm&amp;rev=1760557911&amp;do=diff</link>
        <description>Ollama LLM

Ollama is an open source inference engine for running LLM models. &lt;https://ollama.com&gt;.
To run a local LLM, you need two ingredients: the model itself, and the inference engine, which is the piece of software that runs the model. Conceptually, the inference engine processes the input (a text prompt), feeds it through the neural network of the model, and retrieves the response. Ollama is different from ChatGPT and Gemini in that it runs locally without accessing the internet for model da…
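
A minimal sketch of the command-line workflow; the model name is used only as an example:


ollama pull llama3
ollama run llama3</description>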
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=optimization&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>optimization</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=optimization&amp;rev=1760557911&amp;do=diff</link>
        <description>Optimization/Making your code faster

Here we focus on compiling someone else's code in Linux for scientific computing. Writing your own code expands the problem considerably.  For that you might check the free textbooks and supplemental material at</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=orca&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>orca</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=orca&amp;rev=1760557911&amp;do=diff</link>
        <description>ORCA

ORCA is an ab initio, DFT and semiempirical SCF-MO quantum chemistry package.  Detailed information about the package is available on the software home page:

&lt;https://orcaforum.cec.mpg.de/&gt;

Example input file

Orca-example.inp


! BP86 def2-SVP Opt 
# BP86 is here the method (DFT functional), def2-SVP is the basis set and Opt is
# the jobtype (geometry optimization). Order of the keywords is not important.

*xyz 0 1
H 0.0 0.0 0.0
H 0.0 0.0 1.0
*</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=package_managers&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>package_managers</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=package_managers&amp;rev=1760557911&amp;do=diff</link>
        <description>Package and Environment Managers

A brief explanation of the various ways to install and manage software on an HPC system. Constraints are

	* HPC users can only install software in their dedicated areas
	* HPC staff can install software in shared system areas</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=parabricks&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>parabricks</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=parabricks&amp;rev=1760557911&amp;do=diff</link>
        <description>Parabricks

Parabricks is a GPU accelerated software suite for performing secondary analysis of next generation sequencing (NGS) DNA data. A major benefit of Parabricks is that it is designed to deliver results at blazing fast speeds and low cost. Parabricks can analyze whole human genomes in about 45 minutes, compared to about 30 hours for 30x WGS data. The best part is the output results exactly match the commonly used software. So, it's fairly simple to verify the accuracy of the output.</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=perl&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>perl</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=perl&amp;rev=1760557911&amp;do=diff</link>
        <description>Perl

Perl can be accessed by modules (currently 5.24.0,5.36.0) or you can install and modify perl through conda python.


module load perl/5.36.0
perl


To install your own perl modules through cpan, you will need to tell perl where to put them and look for them in your disk area.  For example, installing
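
One common pattern (a sketch only, not necessarily the layout used on AHPCC) is to install under a directory in your home area and point perl at it, here using $HOME/perl5 and a placeholder module name:


export PERL5LIB=$HOME/perl5/lib/perl5
export PERL_MM_OPT=INSTALL_BASE=$HOME/perl5
export PERL_MB_OPT=&quot;--install_base $HOME/perl5&quot;
cpan Some::Module</description>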
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=pinnacle&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>pinnacle</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=pinnacle&amp;rev=1760557911&amp;do=diff</link>
        <description>How to use the Pinnacle Cluster

Equipment

Pinnacle has 98 compute nodes. GPU and GPU-ready nodes are Dell R740, other nodes are Dell R640. There is no user-side difference between R740 (GPU-ready) and R640 nodes.

Public nodes number 75, of which 7 nodes have 768</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=pinnacle_usage&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>pinnacle_usage</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=pinnacle_usage&amp;rev=1760557911&amp;do=diff</link>
        <description>How to use the Pinnacle Cluster

This is a brief “how to” summary of usage for users of the Pinnacle cluster.

Pinnacle has 101 compute nodes. 30 GPU and GPU-ready nodes are Dell R740, 69 nodes are Dell R640, two nodes are Dell R7425. There is no user-side difference between R740 (GPU-ready) and R640 nodes.</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=portal&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>portal</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=portal&amp;rev=1760557911&amp;do=diff</link>
        <description>Portal Login

An  OpenOnDemand  web portal is available at &lt;https://pinnacle-portal.uark.edu&gt;.  pinnacle-portal requires a Uark id for login at this time.  If you go to the web page, a login popup will accept your Uark ID (no @uark.edu) and Uark password.

At the portal home page, you can select Files</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=portal_login&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>portal_login</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=portal_login&amp;rev=1760557911&amp;do=diff</link>
        <description>Portal Login

An  OpenOnDemand  graphical web portal is available at &lt;https://hpc-portal2.hpc.uark.edu&gt;. 

If you go to the web page of either portal, a login popup will accept your AHPCC id and password (Your id may be the first part of your email, use “rfeynman” not “rfeynman@caltech.edu”</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=portal_login_new&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>portal_login_new</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=portal_login_new&amp;rev=1760557911&amp;do=diff</link>
        <description>Portal Login

AHPCC has several  OpenOnDemand  web portals.  The internet address will vary depending on your location, see  login servers .  The only software requirement on your workstation is a web browser. Chrome and Firefox work.  Safari may or may not work.

To login, enter the portal name in your browser tab. Example</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=python&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>python</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=python&amp;rev=1760557911&amp;do=diff</link>
        <description>Python

AHPCC maintains a number of Python versions:  Anaconda, Intel/Anaconda, and compiled from source.

Terminology: Python modules are prewritten programs in Python.  environment_modules are scripts on the system that set up different versions of software.

 Anaconda Python  is what we recommend for most purposes as it has wide compatibility and easy installation with Python modules.  Recommended</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=python_catalog&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>python_catalog</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=python_catalog&amp;rev=1760557911&amp;do=diff</link>
        <description>Python Software Catalog

Not including OpenHPC and other RPM software in /opt and R/python/perl modules</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=python_old&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>python_old</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=python_old&amp;rev=1760557911&amp;do=diff</link>
        <description>Python Versions

AHPCC maintains Python versions from source: 2.7.5, 2.7.11, and 3.5.2; 2.7.11 is the most used.
Anaconda Python 2.7.13 is installed on razor.
Intel Python 2.7.13 is installed on razor.

“python” at the prompt (with no python environment modules loaded) defaults to /usr/bin/python, which is the CentOS RPM installation of Python 2.6 with very few Python modules (installable code used with</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=qe&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>qe</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=qe&amp;rev=1760557911&amp;do=diff</link>
        <description>qe/quantum espresso

With version 6.5 from GitHub, building on trestles to use the oldest environment for xHOST:


module load intel/19.0.5 impi/19.0.5 mkl/19.0.5
$ ./install/configure  \
MPIF90=mpiifort F90=ifort F77=ifort FC=ifort CC=icc  \
SCALAPACK_LIBS=&quot;-lmkl_scalapack_lp64 -lmkl_blacs_intelmpi_lp64&quot;  \
LAPACK_LIBS=&quot;-lmkl_lapack95_lp64 -lmkl_blas95_lp64&quot;  \
BLAS_LIBS=&quot;-lmkl_intel_lp64  -lmkl_intel_thread -lmkl_core&quot; \
FFT_LIBS=&quot;-L${MKL_ROOT}/interfaces/fftw3xf -lfftw3xf_intel&quot;  \
FCFLAGS=&quot;-O…</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=quantum_espresso&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>quantum_espresso</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=quantum_espresso&amp;rev=1760557911&amp;do=diff</link>
        <description>Quantum Espresso

Versions 6.8/7.1

 Compilation 
With Intel compiler, Intel MPI, and MKL


#COMPUTER=skylake
#OPT=&quot;-xHOST&quot;
COMPUTER=bulldozer
OPT=&quot;-msse3 -axsse3,sse4.2,AVX,core-AVX2,CORE-AVX512&quot;
VERSION=7.1
HDF5=1.12.0
module purge
module load intel/19.0.5 mkl/20.0.4 impi/17.0.4
OMP=&quot;--enable-openmp&quot;
make clean
./install/configure MPIF90=mpiifort F90=ifort F77=ifort FC=ifort CC=icc \
SCALAPACK_LIBS=&quot;-L$MKLROOT/lib/intel64 -lmkl_scalapack_lp64 -lmkl_blacs_intelmpi_lp64&quot; \
LAPACK_LIBS=&quot;-L$MKLROO…</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=quast&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>quast</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=quast&amp;rev=1760557911&amp;do=diff</link>
        <description>Quast

QUAST stands for QUality ASsessment Tool. The tool evaluates genome assemblies by computing various metrics. Quast was developed by the Algorithmic Biology Lab at the St. Petersburg Academic University of the Russian Academy of Sciences. For more documentation on Quast click</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=queueing_system&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>queueing_system</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=queueing_system&amp;rev=1760557911&amp;do=diff</link>
        <description>Queueing System

All jobs on AHPCC clusters which require a significant amount of CPU or memory should be submitted through the queueing system.  In general, two types of jobs may be passed into the queue:


	*  A batch job - a specific command is executed on the node(s) assigned to the job without the need for user interaction.  The vast majority of jobs run on the HPC clusters are batch jobs.</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=queues&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>queues</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=queues&amp;rev=1760557911&amp;do=diff</link>
        <description>Queueing System

All jobs on AHPCC clusters which require a significant amount of CPU or memory should be submitted through the queueing system.  In general, two types of jobs may be passed into the queue:


	*  A batch job - a specific command is executed on the node(s) assigned to the job without the need for user interaction.  The vast majority of jobs run on the HPC clusters are batch jobs.</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=r&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>r</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=r&amp;rev=1760557911&amp;do=diff</link>
        <description>R

The “R” statistical/programming package is installed on the clusters.  The greatest number of add-on packages is installed in the 4.0.2 version.  Also included are older saved versions, newer R 4.1.0 and 4.2.2, and a slightly modified Microsoft R Open 4.0.2
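
A minimal sketch, assuming the environment-module name follows the usual name/version pattern (the exact module name may differ):


module load R/4.0.2
R --version</description>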
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=research&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>research</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=research&amp;rev=1760557911&amp;do=diff</link>
        <description>HPC Researcher News

2024-07-31 Khodadadi selected for NIH Summer Course &lt;https://news.uark.edu/articles/70822/ph-d-student-ehsan-khodadadi-accepted-into-prestigious-nih-summer-course&gt;

2024-07-01 Research report from House, Hasan, Asnayanti, Alrubaye, Pummill, Rhoads &lt;https://www.researchgate.net/publication/382672799_Phylogenomic_Analyses_of_Three_Distinct_Lineages_Uniting_Staphylococcus_cohnii_and_Staphylococcus_urealyticus_from_Diverse_Hosts&gt;

2024-06-17 Research report from Yamashita, Rhoad…</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=resource_selection&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>resource_selection</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=resource_selection&amp;rev=1760557911&amp;do=diff</link>
        <description>Resource_Selection

Selecting an appropriate resource for your program is required when using AHPCC resources.  The purpose of this policy is to use very expensive resources such as GPU nodes and high-memory nodes to run programs that require those capabilities, and to use less expensive resources to run programs for which they are suited, thus producing the most computing throughput per dollar. Some latitude is given when the exact resources are not known because a similar job has not been run …</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=salmon&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>salmon</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=salmon&amp;rev=1760557911&amp;do=diff</link>
        <description>Salmon

Salmon is a tool for transcript quantification from RNA-seq data. To use Salmon all you need is a FASTA file containing your reference transcripts, and a file or files containing your FASTA/FASTQ reads. There are other files that can be used to run Salmon; documentation on this can be found</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=sapplication_software&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>sapplication_software</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=sapplication_software&amp;rev=1760557911&amp;do=diff</link>
        <description>Application Software

Locating and using software has been made a little more complicated by some then-reasonable decisions made 50 years ago for Unix: /usr/local for applications, environment variables $PATH to find an executable, $LD_LIBRARY_PATH to find dynamic link libraries, the file _</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=sas&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>sas</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=sas&amp;rev=1760557911&amp;do=diff</link>
        <description>SAS

Statistical Analysis System is a statistical software suite developed by SAS Institute for data management, advanced analytics, multivariate analysis, business intelligence, criminal investigation, and predictive analytics.

Example Job

An example job is stored in</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=scratch_output&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>scratch_output</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=scratch_output&amp;rev=1760557911&amp;do=diff</link>
        <description>Efficient Program Output to /scratch

“Large” program output should not be written by a job directly to main storage /storage[x]/ or /[x]home/. This is because most computational programs are very inefficient at writing output. This results in a huge load on the output file system (that is, opening the file, writing one line, and closing the file, millions of times). System utilities such as
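
A minimal sketch of one common pattern (the program name and result paths are placeholders): write heavy output under /scratch during the job, then copy the finished files back to main storage once at the end.


cd /scratch/${SLURM_JOB_ID}
./myprogram &gt; output.dat
cp output.dat /storage/rfeynman/results/</description>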
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=selecting_resources&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>selecting_resources</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=selecting_resources&amp;rev=1760557911&amp;do=diff</link>
        <description>Equipment/Selecting Resources/Slurm Parameters

There are six to seven different Slurm parameters that must be specified to pick a computational resource and run a job.  Additional Slurm parameters are optional.

Partitions are

Comments/Rules

	* Each set of</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=sickle&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>sickle</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=sickle&amp;rev=1760557911&amp;do=diff</link>
        <description>Sickle

Sickle is a tool that uses sliding windows along with quality and length thresholds to determine when quality is sufficiently low to trim the 3'-end of reads, and also determines when the quality is sufficiently high to trim the 5'-end of reads. You can find more information on sickle</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=singularity-apptainer&amp;rev=1771955478&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2026-02-24T17:51:18+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>singularity-apptainer</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=singularity-apptainer&amp;rev=1771955478&amp;do=diff</link>
        <description>Apptainer/Singularity

Apptainer from &lt;https://apptainer.org/&gt;, formerly Sylabs Singularity, is a container system for HPC systems.  In many respects it is similar to &lt;https://www.docker.com/&gt;, but Docker is too insecure for use with parallel file systems.  Containers allow a specific distribution and version of linux and application software to be set in the container image while running on the HPC system. Containers are very useful for applications that were written on personal workstations (o…</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=singularity&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>singularity</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=singularity&amp;rev=1760557911&amp;do=diff</link>
        <description>Singularity

Singularity &lt;http://singularity.lbl.gov/&gt; is a software container system.  It allows users to build and run entire scientific workflows, software and libraries using a specific distribution and version of Linux all packaged into a single image file.  It is based on the Linux</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=slurm_interactive&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>slurm_interactive</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=slurm_interactive&amp;rev=1760557911&amp;do=diff</link>
        <description>Slurm Interactive

On Pinnacle/Karpinski we currently only support interactive jobs on single nodes, from one core up to the number of cores in the node.  So always use --nodes=1 with srun.  MPI is supported, just not multiple-node interactive MPI.

slurm</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=slurm_queues&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>slurm_queues</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=slurm_queues&amp;rev=1760557911&amp;do=diff</link>
        <description>Slurm Queues Pinnacle/Karpinski

See  Selecting Resources  for help on choosing the best node/queue for your work.

Updates: 


tres288 queue added with 288 hour/12 day maximum
tres72 time limit changed to 288 hours, same as tres288, retained for existing scripts
csce-k2-72 queue added for new csce Pinnacle-2 nodes</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=slurm_sbatch_srun&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>slurm_sbatch_srun</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=slurm_sbatch_srun&amp;rev=1760557911&amp;do=diff</link>
        <description>Slurm sbatch/srun scripts

Slurm jobs may be submitted by:

	* Slurm batch scripts submitted by sbatch
	* PBS batch scripts submitted by sbatch or qsub
	* Slurm interactive submitted by srun
	* Slurm interactive and graphical submitted by  OpenOnDemand 

Essential slurm subcommands and available values are described in</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=slurm_scripts&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>slurm_scripts</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=slurm_scripts&amp;rev=1760557911&amp;do=diff</link>
        <description>Slurm Commands and Scripts

Basic slurm commands are:
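
For example (the script name and job id are placeholders):


sbatch myjob.sh       # submit a batch script
squeue -u $USER       # list your pending and running jobs
scancel 1234567       # cancel a job by id
sinfo                 # show partition and node status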

A Torque compatibility layer also offers some torque commands such as qstat and qsub.  A basic script in slurm looks like:


#!/bin/bash
#SBATCH --job-name=mpi
#SBATCH --output=zzz.slurm
#SBATCH --partition comp06
#SBATCH --nodes=2
#SBATCH --tasks-per-node=32
#SBATCH --time=6:00:00
cd $SLURM_SUBMIT_DIR
module purge
module load intel/18.0.1 impi/18.0.1 mkl/18.0.1
mpirun -np $SLURM_NTASKS -machinefile /scratch/${SLURM_JOB_ID}/machinefile_${SLU…</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=software&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>software</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=software&amp;rev=1760557911&amp;do=diff</link>
        <description>Application Software

lammps

matlab

 NVidia cuDNN 

namd

Ollama LLM

perl

python

R

quantum espresso

vasp

bioinformatics_catalog

non_bioinformatics_catalog

python_catalog</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=spades&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>spades</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=spades&amp;rev=1760557911&amp;do=diff</link>
        <description>Spades

SPAdes – St. Petersburg genome assembler – is an assembly toolkit containing various assembly pipelines. You can find more information on SPAdes here.

Environment Setup

To use spades edit your $HOME/.bashrc to include the spades module.


module load spades</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=spark&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>spark</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=spark&amp;rev=1760557911&amp;do=diff</link>
        <description>Spark

Apache Spark version 1.6.1 is installed in /share/apps/spark. Spark is a fast and general-purpose cluster computing system. It provides high-level APIs in Java, Scala, Python and R, and an optimized engine that supports general execution graphs. It allows users to combine the memory and CPUs of multiple compute nodes into a Spark cluster and use the aggregated cluster memory and CPUs to run a single task.</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=ssh_2fa&amp;rev=1777501287&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2026-04-29T22:21:27+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>ssh_2fa</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=ssh_2fa&amp;rev=1777501287&amp;do=diff</link>
        <description>2FA SSH Access

Two Factor Authentication (2FA) is an additional layer of security that can be configured automatically for Secure Shell (SSH) access for your account on Pinnacle.  To enable it


1. log into the web portal at: &lt;https://hpc-portal1.hpc.uark.edu&gt;

2. In the top menu go to</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=ssh_certificates&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>ssh_certificates</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=ssh_certificates&amp;rev=1760557911&amp;do=diff</link>
        <description>SSH Certificate Access

SSH Certificates can be used to log into Pinnacle without using password authentication.  A set of two files has to be present on the SSH client computer:

	* SSH private key (key-ed25519)
	* SSH public certificate (key-ed25519-cert.pub)</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=ssh_keys&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>ssh_keys</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=ssh_keys&amp;rev=1760557911&amp;do=diff</link>
        <description>SSH Keys

SSH keys are a convenient way to perform passwordless authentication with the cluster's ssh interface.  They are generated using ssh-keygen on your Linux-like workstation (Linux, Mac, or Windows WSL).  You can generate a key either with or without a local passphrase; a passphrase provides more security, although it slows some single-command functions like
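
A minimal sketch of generating a key and installing it on the cluster, with the username rfeynman as a placeholder (if ssh-copy-id is not available, append the .pub file to ~/.ssh/authorized_keys on the cluster by hand):


ssh-keygen -t ed25519 -f ~/.ssh/id_ed25519
ssh-copy-id -i ~/.ssh/id_ed25519.pub rfeynman@hpc-portal2.hpc.uark.edu</description>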
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=ssh_login&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>ssh_login</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=ssh_login&amp;rev=1760557911&amp;do=diff</link>
        <description>SSH Login

AHPCC clusters are available on several ssh login hosts and from a web portal, see portal_login. The ssh common account text login host is hpc-portal2.hpc.uark.edu.

Workstation Set-up:
An ssh client is needed on your workstation to access the ssh login hosts.  The https web portal</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=ssh_login_new&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>ssh_login_new</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=ssh_login_new&amp;rev=1760557911&amp;do=diff</link>
        <description>SSH Login

Most experienced users use the fast SSH text interface to submit batch jobs. The alternative  OpenOnDemand  web interface is needed for graphical jobs and is usually easier for new users (and can also provide an SSH terminal in a browser window).

SSH text login will require a SSH client on your workstation and one of several</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=ssh_login_nopass&amp;rev=1777501222&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2026-04-29T22:20:22+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>ssh_login_nopass</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=ssh_login_nopass&amp;rev=1777501222&amp;do=diff</link>
        <description>SSH Password-less Login

The secure shell (SSH) server on hpc-portal1.hpc.uark.edu does not accept passwords as an authentication method. Instead, SSH key files are used for access. A public SSH key file is stored in your account on Pinnacle. A matching private key file (pinnacle.key) has to be supplied as a parameter to the ssh command on the client to log in.
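
A minimal sketch, assuming the private key has been saved as ~/.ssh/pinnacle.key and with the username rfeynman as a placeholder:


ssh -i ~/.ssh/pinnacle.key rfeynman@hpc-portal1.hpc.uark.edu</description>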
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=start&amp;rev=1777496993&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2026-04-29T21:09:53+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>start</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=start&amp;rev=1777496993&amp;do=diff</link>
        <description>Arkansas HPCC User Support Wiki

Return to AHPCC Web Site

See  Sitemap  for items from old wiki

Note: tres288 12-day trestles queue available; see  queues 

Get Started: Get an Account | SSH Login | Portal Login |  Off-campus Access DMZ | System Etiquette

Alternate Login: Portal Login | SSH Password-less Login |  Off-campus Access | 2FA for SSH

Run Jobs: Selecting Resources |  Slurm Scripts 


	*  Slurm scheduler: Pinnacle/Karpinski  Pinnacle  |  Karpinski  | Queues  | Interactive

Data:</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=storage&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>storage</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=storage&amp;rev=1760557911&amp;do=diff</link>
        <description>Storage/Filesystems/Quotas/Backup

Pinnacle, Trestles and Razor clusters share the same Lustre storage.  The Karpinski cluster has separate storage because it lacks InfiniBand over which the Lustre storage works.

storage

The main bulk storage area for user</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=storage_old&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>storage_old</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=storage_old&amp;rev=1760557911&amp;do=diff</link>
        <description>Storage

Razor and Trestles clusters have separate sets of file systems.  Each cluster has 3 main storage areas: /home, /scratch and /storage.  The purpose and various properties of each file system for both clusters are summarized below.

 Razor File Systems</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=sub_node_jobs&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>sub_node_jobs</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=sub_node_jobs&amp;rev=1760557911&amp;do=diff</link>
        <description>Sub-node Compute Jobs

As compute nodes grow in size (our latest equipment in purchasing will have 96 cores per node, and that is not the highest core count available), we face the issue of efficiently scheduling jobs of one core up to less than one node.  Slurm doesn't do well when everything from single-core to multi-node jobs is sent to the same partition.  Invariably Slurm leaves one core running on an otherwise empty node.</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=support_requests&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>support_requests</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=support_requests&amp;rev=1760557911&amp;do=diff</link>
        <description>Support Requests

An example of how to file a support request for which we don't have to search for the needed info: 

email hpc-support@listserv.uark.edu with


cluster: razor  
job number : 1774768 (not necessary if we have the scheduler output as below)
directory: /storage/rfeynman/RICE/RNA_Seq/Julie-Sep_2016
scheduler script: myjob.pbs
scheduler output: razor.1774768.sched
issue: error message from htseq-count</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=tensorflow&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>tensorflow</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=tensorflow&amp;rev=1760557911&amp;do=diff</link>
        <description>Tensorflow

Tensorflow is an open-source deep learning software library for numerical computation using data flow graphs.  Detailed information about the software is available on the project website:

&lt;https://www.tensorflow.org/&gt;

The library is available as a python package.  The cpu version is installed for</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=tophat&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>tophat</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=tophat&amp;rev=1760557911&amp;do=diff</link>
        <description>TopHat

 TopHat is a fast splice junction mapper for RNA-Seq reads. TopHat is a collaborative effort among Daehwan Kim and Steven Salzberg in the Center for Computational Biology at Johns Hopkins University, and Cole Trapnell in the Genome Sciences Department at the University of Washington. You can find more information on TopHat</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=torque_slurm_scripts&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>torque_slurm_scripts</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=torque_slurm_scripts&amp;rev=1760557911&amp;do=diff</link>
        <description>Slurm Commands and Scripts

Basic slurm commands are:

A basic script in slurm looks like:


#!/bin/bash
#SBATCH --job-name=mpi
#SBATCH --output=zzz.slurm
#SBATCH --partition comp06
#SBATCH --nodes=2
#SBATCH --tasks-per-node=32
#SBATCH --time=6:00:00
cd $SLURM_SUBMIT_DIR
module purge
module load intel/18.0.1 impi/18.0.1 mkl/18.0.1
mpirun -np $SLURM_NTASKS -machinefile /scratch/${SLURM_JOB_ID}/machinefile_${SLURM_JOB_ID} ./mympiexe -inputfile MA4um.mph -outputfile MA4um-output.mph</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=trinity&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>trinity</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=trinity&amp;rev=1760557911&amp;do=diff</link>
        <description>Trinity

Trinity combines three independent software modules: Inchworm, Chrysalis, and Butterfly, applied sequentially to process large volumes of RNA-seq reads. Trinity was developed at the Broad Institute and the Hebrew University of Jerusalem. You can find more information on Trinity here.

Environment Setup</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=updates&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>updates</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=updates&amp;rev=1760557911&amp;do=diff</link>
        <description>2017/03/21 Maximum queued jobs per user set to 1000 for both clusters.

2016/12/13 PacBio GenomicConsensus and multiple prerequisites/components (quiver, ccs, blasr, bax2bam, bam2bax, pbbams, samtools) are installed on Trestles. Use module load gcc/4.9.1 mkl/16.0.1 python/2.7.3 pitchfork/5.3</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=vasp&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>vasp</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=vasp&amp;rev=1760557911&amp;do=diff</link>
        <description>vasp

For use by authorized users only.

The local copy of 5.4.4 (for Intel) is modified from the distribution as follows:


installs$ diff -r VASP-REFERENCE/vasp.5.4.4/src VASP/5.4.4/vasp.5.4.4/build/std | grep -v &quot;\.o$&quot; | grep -v &quot;\.f90$&quot; | grep -v &quot;\.mod$&quot;
Only in VASP-REFERENCE/vasp.5.4.4/src: CUDA
Only in VASP-REFERENCE/vasp.5.4.4/src: fftlib
Only in VASP/5.4.4/vasp.5.4.4/build/std/lib: libdmy.a
Only in VASP/5.4.4/vasp.5.4.4/build/std/lib: makefile.include
diff -r VASP-REFERENCE/vasp.5.4.4/src/makefil…</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=vasp_old&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>vasp_old</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=vasp_old&amp;rev=1760557911&amp;do=diff</link>
        <description>VASP 5.4.4

under construction

For licensed research groups only.  Please contact hpc-support with license info to be added to the vasp user group for access to the compiled versions.

 Compilation 


module load intel/18.0.2 impi/18.0.2 mkl/18.0.2
$ diff arch/makefile.include.linux_intel build/std/makefile.include
7d6
&lt;              -Davoidalloc \
10a10
&gt; #            -Davoidalloc \
20c20
&lt; OFLAG      = -O2
---
&gt; OFLAG      = -O2 -xsse3 -axsse4.2,AVX,COREAVX512</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=vectorization&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>vectorization</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=vectorization&amp;rev=1760557911&amp;do=diff</link>
        <description>Vectorization

Here we test single-core vectorization using several different CPUs and several different versions of the Intel compiler. The test problem is a linear solve DGESV of order 4000, compiled against the reference Fortran BLAS rather than one of the optimized BLAS libraries that would be used in practice. Most of the work for DGESV is done in the DGEMM matrix-multiply routine. Tested compiler options are -O1, -O2, -O3, and -O3 with -xsse3,-xssse3,-xsse4.1,-xsse4.2, a…</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=velvet&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>velvet</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=velvet&amp;rev=1760557911&amp;do=diff</link>
        <description>Velvet

Velvet is a de novo genomic assembler specially designed for short read sequencing technologies, such as Solexa or 454, developed by Daniel Zerbino and Ewan Birney at the European Bioinformatics Institute (EMBL-EBI), near Cambridge, in the United Kingdom. You can find out more about Velvet</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=viennarna&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>viennarna</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=viennarna&amp;rev=1760557911&amp;do=diff</link>
        <description>ViennaRNA

The ViennaRNA package provides RNA secondary structure prediction through energy minimization, along with other functions, and was developed by the Theoretical Biochemistry Group at the Institute for Theoretical Chemistry. You can find more information on the ViennaRNA package</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=virtual_machines&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>virtual_machines</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=virtual_machines&amp;rev=1760557911&amp;do=diff</link>
        <description>Virtual Machines on Pinnacle/Karpinski

One of the new features introduced in the Pinnacle/Karpinski clusters is the ability to spin up virtual machines.  Virtual machines allow users to:

	* run operating system versions other than the one installed on the Pinnacle compute nodes</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=virtual_machines_old&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>virtual_machines_old</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=virtual_machines_old&amp;rev=1760557911&amp;do=diff</link>
        <description>Virtual Machines on Pinnacle

One of the new features introduced in the Pinnacle cluster is the ability to spin up virtual machines.  Virtual machines allow users to:

	* run operating system versions other than the one installed on the Pinnacle compute nodes</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=walltime_extensions&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>walltime_extensions</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=walltime_extensions&amp;rev=1760557911&amp;do=diff</link>
        <description>Job Walltime Extensions

Each user is initially allotted 25 walltime job extensions for the Razor and Trestles clusters combined. Additional extensions must be approved by the AHPCC advisory committee. To petition the AHPCC advisory committee for additional extensions, please send an email to HPC-SUPPORT@listserv.uark.edu with a subject line</description>
    </item>
    <item rdf:about="https://hpcwiki.uark.edu/doku.php?id=windows&amp;rev=1760557911&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:51:51+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>windows</title>
        <link>https://hpcwiki.uark.edu/doku.php?id=windows&amp;rev=1760557911&amp;do=diff</link>
        <description>Windows

We recommend using WSL; see SSH Login. Because WSL closely imitates a Linux system, it avoids several Windows-specific issues. However, if you still keep your files on a Windows filesystem, some of those issues remain. Behavior can also vary depending on which version of WSL your Linux VM is running in</description>
    </item>
</rdf:RDF>
