<?xml version="1.0" encoding="UTF-8"?>
<!-- generator="FeedCreator 1.8" -->
<?xml-stylesheet href="https://www.limulus-computing.com/Limulus-Manual/lib/exe/css.php?s=feed" type="text/css"?>
<rdf:RDF
    xmlns="http://purl.org/rss/1.0/"
    xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
    xmlns:slash="http://purl.org/rss/1.0/modules/slash/"
    xmlns:dc="http://purl.org/dc/elements/1.1/">
    <channel rdf:about="https://www.limulus-computing.com/Limulus-Manual/feed.php">
        <title>Systems Manual</title>
        <description></description>
        <link>https://www.limulus-computing.com/Limulus-Manual/</link>
        <image rdf:resource="https://www.limulus-computing.com/Limulus-Manual/lib/tpl/dokuwiki/images/favicon.ico" />
        <dc:date>2026-04-30T18:47:53+00:00</dc:date>
        <items>
            <rdf:Seq>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=adding_users&amp;rev=1619711921&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=becoming_the_administrative_user&amp;rev=1621440972&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=desk-side_case&amp;rev=1757954410&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=essential_quick_start&amp;rev=1595952356&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=essential_quick_start_data_analytics_systems&amp;rev=1621440205&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=essential_quick_start_hpc_systems&amp;rev=1619795848&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=fan_control&amp;rev=1760555992&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=functional_diagram&amp;rev=1620053269&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=general_questions&amp;rev=1761250807&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=getting_help&amp;rev=1619619356&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=hadoop_distributed_file_system&amp;rev=1624041056&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=hdfs_management&amp;rev=1624041263&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=hpc_logs_and_log_management&amp;rev=1621529608&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=limulus_compute_blades&amp;rev=1757948398&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=limulus_design_philosophy&amp;rev=1620051865&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=logging_in&amp;rev=1591716673&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=monitoring_system_resources&amp;rev=1619796811&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=node_power_control&amp;rev=1624041232&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=open_hpc_components&amp;rev=1593623069&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=powering_up_down_nodes&amp;rev=1757342219&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=provision_listing&amp;rev=1593290893&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=rack-mount_case&amp;rev=1591725129&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=software_management&amp;rev=1597333124&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=start&amp;rev=1757953624&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=submitting_jobs_to_the_slurm_workload_manager&amp;rev=1619621498&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=system-wide_administration_commands_pdsh&amp;rev=1621436360&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=system_images&amp;rev=1757947735&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=unpacking_and_startup&amp;rev=1757440137&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=user_software&amp;rev=1591716575&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=using_software_modules&amp;rev=1619620582&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=using_the_apache_ambari_cluster_manager&amp;rev=1621442121&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=using_the_zeppelin_web_notebook&amp;rev=1760556091&amp;do=diff"/>
                <rdf:li rdf:resource="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=warewulf_worker_node_images&amp;rev=1621531101&amp;do=diff"/>
            </rdf:Seq>
        </items>
    </channel>
    <image rdf:about="https://www.limulus-computing.com/Limulus-Manual/lib/tpl/dokuwiki/images/favicon.ico">
        <title>Systems Manual</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/</link>
        <url>https://www.limulus-computing.com/Limulus-Manual/lib/tpl/dokuwiki/images/favicon.ico</url>
    </image>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=adding_users&amp;rev=1619711921&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2021-04-29T15:58:41+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>adding_users</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=adding_users&amp;rev=1619711921&amp;do=diff</link>
        <description>Adding and Deleting Users

Adding users can be done using two convenience scripts. (See below for deleting users.)
 AddUser
 DelUser  
Both of these scripts can operate in either text or graphical mode. In text mode, they must be started in a terminal window as the</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=becoming_the_administrative_user&amp;rev=1621440972&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2021-05-19T16:16:12+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>becoming_the_administrative_user</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=becoming_the_administrative_user&amp;rev=1621440972&amp;do=diff</link>
        <description>Becoming The Administrative User

If you have been approved as an administrator, you should be able to run the following command from a terminal on the administrative node. (Assume USERNAME is your username on the cluster.)
$ sudo -i
[sudo] password for USERNAME: 
#</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=desk-side_case&amp;rev=1757954410&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-09-15T16:40:10+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>desk-side_case</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=desk-side_case&amp;rev=1757954410&amp;do=diff</link>
        <description>Please see System Images  for pictures and a description of the case.</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=essential_quick_start&amp;rev=1595952356&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2020-07-28T16:05:56+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>essential_quick_start</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=essential_quick_start&amp;rev=1595952356&amp;do=diff</link>
        <description>Limulus HPC Systems Quick Start

The following is basic information about your Limulus system. Please consult the other sections in this manual for further details.

The Case:

The case has two doors on both sides and a removable front bezel. The front bezel can be</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=essential_quick_start_data_analytics_systems&amp;rev=1621440205&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2021-05-19T16:03:25+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>essential_quick_start_data_analytics_systems</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=essential_quick_start_data_analytics_systems&amp;rev=1621440205&amp;do=diff</link>
        <description>Limulus Data Analytics (Hadoop/Spark/Kafka)  Systems Quick Start

The following is basic information about your Limulus system. Please consult the other sections in this manual for further details. A general description of the systems can be found by consulting the</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=essential_quick_start_hpc_systems&amp;rev=1619795848&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2021-04-30T15:17:28+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>essential_quick_start_hpc_systems</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=essential_quick_start_hpc_systems&amp;rev=1619795848&amp;do=diff</link>
        <description>Limulus HPC Systems Quick Start

The following is basic information about your Limulus system. Please consult the other sections in this manual for further details. A general description of the systems can be found by consulting the Functional Diagram.

Adding Users:</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=fan_control&amp;rev=1760555992&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:19:52+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>fan_control</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=fan_control&amp;rev=1760555992&amp;do=diff</link>
        <description>Limulus Fan Control

There are two fans that pull air from under the case. The fans are directly under the blades and ensure there is adequate airflow to the blade CPUs.

The fans are connected to a single main motherboard auxiliary fan connector using a</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=functional_diagram&amp;rev=1620053269&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2021-05-03T14:47:49+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>functional_diagram</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=functional_diagram&amp;rev=1620053269&amp;do=diff</link>
        <description>Functional Diagram



The figure illustrates the functional parts that are packaged in a standard Limulus system (either desk-side or rack-mount). The “double-wide” Limulus diagram is forthcoming. It is similar and has eight total motherboards (one main motherboard), two 1-GbE switches, two USB Power Relay boards, and an additional 25-GbE NIC to facilitate the 25-GbE switchless network.</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=general_questions&amp;rev=1761250807&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-23T20:20:07+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>general_questions</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=general_questions&amp;rev=1761250807&amp;do=diff</link>
        <description>General Limulus Systems Questions

The following topics about Limulus personal HPC and Data Analytics Systems are discussed as a series of questions and answers.

	*  Audience and Market
	*  Hardware
	*  Power Heat Noise
	*  Software
	*  Support

Audience and Market

Is this a real cluster?

Yes, it is a real cluster. The basic system has four motherboards which are cluster nodes in a standard off-the-shelf case with a single power supply. The main motherboard is always powered on and functions …</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=getting_help&amp;rev=1619619356&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2021-04-28T14:15:56+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>getting_help</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=getting_help&amp;rev=1619619356&amp;do=diff</link>
        <description>Getting Support

For customers who have  Limulus Computing Support you may submit questions to our  Question and Answer Page. 

You must register with a valid Machine ID that can be found by running dmidecode -s system-uuid. A long hexadecimal string will be printed, similar to e.g. e5b27ff5-49c4-eeb0-2caa-3c7c3f81d9d7</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=hadoop_distributed_file_system&amp;rev=1624041056&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2021-06-18T18:30:56+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>hadoop_distributed_file_system</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=hadoop_distributed_file_system&amp;rev=1624041056&amp;do=diff</link>
        <description>HDFS Management</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=hdfs_management&amp;rev=1624041263&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2021-06-18T18:34:23+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>hdfs_management</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=hdfs_management&amp;rev=1624041263&amp;do=diff</link>
        <description>HDFS Management

coming soon</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=hpc_logs_and_log_management&amp;rev=1621529608&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2021-05-20T16:53:28+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>hpc_logs_and_log_management</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=hpc_logs_and_log_management&amp;rev=1621529608&amp;do=diff</link>
        <description>HPC Logs and Log Management

What Logs Should I Check?

The logs have been configured so that all important information can be found on the headnode. There should not be a need to log  in to nodes and check log files. The following is some general guidance when looking for problems.</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=limulus_compute_blades&amp;rev=1757948398&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-09-15T14:59:58+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>limulus_compute_blades</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=limulus_compute_blades&amp;rev=1757948398&amp;do=diff</link>
        <description>Limulus Compute Blades

Introduction

The Limulus 2.0 Micro-ATX (μATX) blade represents a new generation of local, low-cost, and high-performance computing (e.g. for an office, lab, classroom, factory, a.k.a. edge computing). The modular design allows virtually any standard μATX motherboard to be combined into a dense high-performance computing resource using Limulus desk-side and rack-mount enclosures. The blade design provides an economic and flexible platform on which to build high performanc…</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=limulus_design_philosophy&amp;rev=1620051865&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2021-05-03T14:24:25+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>limulus_design_philosophy</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=limulus_design_philosophy&amp;rev=1620051865&amp;do=diff</link>
        <description>Limulus Design Philosophy

The Limulus design is based on several concepts. Many of these concepts, including commodity hardware and open software, were born during the early days of the  Beowulf Cluster revolution. While conventional cluster designs rightfully grew to support more users and bigger problem sizes, the Limulus approach is designed to leverage quality low-cost hardware and open software to build high-performance</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=logging_in&amp;rev=1591716673&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2020-06-09T15:31:13+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>logging_in</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=logging_in&amp;rev=1591716673&amp;do=diff</link>
        <description>Console Login (GUI)

Remote Terminal Login</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=monitoring_system_resources&amp;rev=1619796811&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2021-04-30T15:33:31+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>monitoring_system_resources</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=monitoring_system_resources&amp;rev=1619796811&amp;do=diff</link>
        <description>HPC Cluster Monitoring

Ganglia

On HPC systems the popular Ganglia monitoring tool is available. To bring up the Ganglia interface simply enter:


http://localhost/ganglia/


In Firefox (or any other browser that is installed on the system). The default screen is shown below.</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=node_power_control&amp;rev=1624041232&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2021-06-18T18:33:52+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>node_power_control</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=node_power_control&amp;rev=1624041232&amp;do=diff</link>
        <description>Node Power Control

Each of the three worker nodes (seven nodes on the double-wide Limulus) can be powered up or down from the login node. This feature is generally not used in the Data Analytics systems because most of the daemons must be constantly running. In particular, the</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=open_hpc_components&amp;rev=1593623069&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2020-07-01T17:04:29+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>open_hpc_components</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=open_hpc_components&amp;rev=1593623069&amp;do=diff</link>
        <description>OpenHPC Components


slurm-slurmd-ohpc-18.08.8-4.1.ohpc.1.3.8.1.x86_64
slurm-example-configs-ohpc-18.08.8-4.1.ohpc.1.3.8.1.x86_64
warewulf-provision-ohpc-3.8.1-56.1.ohpc.1.3.9.x86_64
mrsh-rsh-compat-ohpc-2.12-19.1.x86_64
ohpc-ganglia-1.3.8-3.1.ohpc.1.3.8.x86_64
lua-posix-ohpc-33.2.1-4.1.x86_64
warewulf-cluster-ohpc-3.8.1-10.5.ohpc.1.3.6.x86_64
pdsh-mod-slurm-ohpc-2.33-97.1.ohpc.1.3.7.x86_64
munge-ohpc-0.5.13-7.1.ohpc.1.3.7.x86_64
lua-filesystem-ohpc-1.6.3-4.1.x86_64
slurm-ohpc-18.08.8-4.1.ohpc.1…</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=powering_up_down_nodes&amp;rev=1757342219&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-09-08T14:36:59+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>powering_up_down_nodes</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=powering_up_down_nodes&amp;rev=1757342219&amp;do=diff</link>
        <description>Powering Nodes Up and Down

Each Limulus system consists of one login-node and three or seven worker nodes. As shipped, the login-node has the alias name headnode (or limulus) and the worker node names are “n0, n1, and n2” and “n0, n1, n2, n3, n4, n5, and n6</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=provision_listing&amp;rev=1593290893&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2020-06-27T20:48:13+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>provision_listing</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=provision_listing&amp;rev=1593290893&amp;do=diff</link>
        <description># wwsh provision print
#### n0 #######################################################################
             n0: BOOTSTRAP        = 5.4.1-1.el7.elrepo.x86_64
             n0: VNFS             = co7_base
             n0: FILES            = Limulus-node-startup.sh,gmond.conf,group,idmapd.conf,munge.key,passwd,report-ganglia-temp,resolv.conf,slurm.conf
             n0: PRESHELL         = FALSE
             n0: POSTSHELL        = FALSE
             n0: CONSOLE          = UNDEF
             n0…</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=rack-mount_case&amp;rev=1591725129&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2020-06-09T17:52:09+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>rack-mount_case</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=rack-mount_case&amp;rev=1591725129&amp;do=diff</link>
        <description>To be completed, contact Limulus Computing for more information</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=software_management&amp;rev=1597333124&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2020-08-13T15:38:44+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>software_management</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=software_management&amp;rev=1597333124&amp;do=diff</link>
        <description>Software Management

Installing, Updating, and Removing Packages

Using Yum

Using RPM

Creating Module Files (lmod package)

 lmod documentation</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=start&amp;rev=1757953624&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-09-15T16:27:04+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>start</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=start&amp;rev=1757953624&amp;do=diff</link>
        <description>Welcome to the Limulus Systems Manual

Limulus Cluster Computing systems are available in either four- or eight-node desk-side cases or in a 4-node rack-mount unit. Multiple systems can be connected together to work as a larger cluster. There are two software stacks available: High Performance Computing (HPC) or scalable data analytics (Hadoop, Spark, Kafka, etc.).</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=submitting_jobs_to_the_slurm_workload_manager&amp;rev=1619621498&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2021-04-28T14:51:38+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>submitting_jobs_to_the_slurm_workload_manager</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=submitting_jobs_to_the_slurm_workload_manager&amp;rev=1619621498&amp;do=diff</link>
        <description>Submitting Jobs to the Slurm Workload Manager

All Limulus HPC systems use a workflow (user job) scheduler called Slurm. The purpose of the workflow scheduler is to distribute user programs across the cluster based on the amount of resources needed by each job, since the amount of user programs may exceed the amount of resources. For example, if each program needs one core and the cluster has 32 cores, then running more than 32 programs will oversubscribe the cluster resources. To manage a possi…</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=system-wide_administration_commands_pdsh&amp;rev=1621436360&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2021-05-19T14:59:20+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>system-wide_administration_commands_pdsh</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=system-wide_administration_commands_pdsh&amp;rev=1621436360&amp;do=diff</link>
        <description>System Wide Commands using pdsh

All Limulus systems have a mechanism for executing commands on all worker nodes at the same time. The pdsh command allows the same command to be run on any combination of Limulus nodes. By default, and without any arguments, the</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=system_images&amp;rev=1757947735&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-09-15T14:48:55+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>system_images</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=system_images&amp;rev=1757947735&amp;do=diff</link>
        <description>System Component Views and Identification

The Limulus Cluster Workstation is a desk-side high performance machine. Views of the case are pictured below. Design details are available on the  Functional Diagram  page.

The case has two side doors, a removable front panel, and is on castors for easy movement. The power button (for the main motherboard), indicator lights, and USB ports are across the top. Immediately below, there is a removable SSD drive cage.</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=unpacking_and_startup&amp;rev=1757440137&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-09-09T17:48:57+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>unpacking_and_startup</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=unpacking_and_startup&amp;rev=1757440137&amp;do=diff</link>
        <description>Unpacking The System

The Limulus system may arrive in a double cardboard shipping box or on a mini-pallet.

It is best to have two people unpack the system. It weighs approximately 60 pounds (27 kilograms).
Once the system is unpacked and standing upright on the castors,</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=user_software&amp;rev=1591716575&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2020-06-09T15:29:35+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>user_software</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=user_software&amp;rev=1591716575&amp;do=diff</link>
        <description>HPC Software

Data Analytics Software</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=using_software_modules&amp;rev=1619620582&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2021-04-28T14:36:22+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>using_software_modules</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=using_software_modules&amp;rev=1619620582&amp;do=diff</link>
        <description>Using Software Modules

Many HPC systems support multiple libraries and library versions. For instance, by default, HPC Limulus systems support both Open MPI (Message Passing Interface) and MPICH. Each of these packages provides the MPI API for programmers and applications. These libraries are supplied as RPM packages as part of the</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=using_the_apache_ambari_cluster_manager&amp;rev=1621442121&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2021-05-19T16:35:21+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>using_the_apache_ambari_cluster_manager</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=using_the_apache_ambari_cluster_manager&amp;rev=1621442121&amp;do=diff</link>
        <description>Introduction

The Ambari web-based tool can be started by opening a browser on the head node and entering &quot;http://localhost:8080&quot;. After login (password is provided by Limulus Computing), the Ambari Dash Board (control panel) similar to the image below will be displayed.



The left side menu allows the values, services and nodes of the cluster to be viewed. An example of the HDFS  (Hadoop Distributed File System) is shown in the following figure:</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=using_the_zeppelin_web_notebook&amp;rev=1760556091&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2025-10-15T19:21:31+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>using_the_zeppelin_web_notebook</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=using_the_zeppelin_web_notebook&amp;rev=1760556091&amp;do=diff</link>
        <description>TBC</description>
    </item>
    <item rdf:about="https://www.limulus-computing.com/Limulus-Manual/doku.php?id=warewulf_worker_node_images&amp;rev=1621531101&amp;do=diff">
        <dc:format>text/html</dc:format>
        <dc:date>2021-05-20T17:18:21+00:00</dc:date>
        <dc:creator>Anonymous (anonymous@undisclosed.example.com)</dc:creator>
        <title>warewulf_worker_node_images</title>
        <link>https://www.limulus-computing.com/Limulus-Manual/doku.php?id=warewulf_worker_node_images&amp;rev=1621531101&amp;do=diff</link>
        <description>Warewulf Worker Node Images

All Limulus HPC systems use the Warewulf Toolkit to manage worker node images. The Warewulf toolkit allows worker nodes to boot “disk-less” using a RAM disk. The RAM disk and bootable kernel are managed by Warewulf. The following is a description of how Warewulf is configured and run on Limulus systems. Consult the Warewulf site for more detailed information.</description>
    </item>
</rdf:RDF>
