diff --git a/docs/.pages.yml b/docs/.pages.yml index 6cca8cbda..b391a0079 100644 --- a/docs/.pages.yml +++ b/docs/.pages.yml @@ -1,6 +1,5 @@ nav: - - Getting_Started - - General - - Scientific_Computing - - Storage - - NeSI_Service_Subscriptions + - Access + - High_Performance_Computing + - Researcher_Developer_Cloud + - Capability_&_Skills diff --git a/docs/Access/.pages.yml b/docs/Access/.pages.yml new file mode 100644 index 000000000..9ec6cb51e --- /dev/null +++ b/docs/Access/.pages.yml @@ -0,0 +1,2 @@ +nav: + - ... diff --git a/docs/Getting_Started/Accounts-Projects_and_Allocations/.pages.yml b/docs/Access/Accounts-Projects_and_Allocations/.pages.yml similarity index 69% rename from docs/Getting_Started/Accounts-Projects_and_Allocations/.pages.yml rename to docs/Access/Accounts-Projects_and_Allocations/.pages.yml index 21114c7e8..f35fdaca2 100644 --- a/docs/Getting_Started/Accounts-Projects_and_Allocations/.pages.yml +++ b/docs/Access/Accounts-Projects_and_Allocations/.pages.yml @@ -3,6 +3,8 @@ nav: - Creating_a_NeSI_Account_Profile.md - Applying_for_a_new_NeSI_project.md - Applying_to_join_an_existing_NeSI_project.md + - Setting_Up_and_Resetting_Your_Password.md + - Setting_Up_Two_Factor_Authentication.md - What_is_an_allocation.md - Quarterly_allocation_periods.md - ... 
diff --git a/docs/General/NeSI_Policies/Account_Requests_for_non_Tuakiri_Members.md b/docs/Access/Accounts-Projects_and_Allocations/Account_Requests_for_non_Tuakiri_Members.md similarity index 79% rename from docs/General/NeSI_Policies/Account_Requests_for_non_Tuakiri_Members.md rename to docs/Access/Accounts-Projects_and_Allocations/Account_Requests_for_non_Tuakiri_Members.md index b2860a659..d32fc0ff2 100644 --- a/docs/General/NeSI_Policies/Account_Requests_for_non_Tuakiri_Members.md +++ b/docs/Access/Accounts-Projects_and_Allocations/Account_Requests_for_non_Tuakiri_Members.md @@ -21,7 +21,7 @@ affiliated with an organisation supported by the federation, you can request access via [my.nesi.org.nz/register](https://my.nesi.org.nz/register). -![mceclip0.png](../../assets/images/Account_Requests_for_non_Tuakiri_Members.png) +![mceclip0.png](Account_Requests_for_non_Tuakiri_Members.png) !!! prerequisite The email address you use on your application must be your @@ -43,6 +43,6 @@ my.nesi.org.nz. If you still can't find the email, {% include "partials/support_request.html" %}. !!! note "What next?" - - [Project Eligibility](../../General/NeSI_Policies/Allocation_classes.md) - - [Applying for a new project.](../../Getting_Started/Accounts-Projects_and_Allocations/Applying_for_a_new_NeSI_project.md) - - [Applying to join an existing project](../../Getting_Started/Accounts-Projects_and_Allocations/Applying_to_join_an_existing_NeSI_project.md). + - [Project Eligibility](Allocation_classes.md) + - [Applying for a new project.](Applying_for_a_new_NeSI_project.md) + - [Applying to join an existing project](Applying_to_join_an_existing_NeSI_project.md). 
diff --git a/docs/Getting_Started/Accounts-Projects_and_Allocations/Adding_members_to_your_NeSI_project.md b/docs/Access/Accounts-Projects_and_Allocations/Adding_members_to_your_NeSI_project.md similarity index 66% rename from docs/Getting_Started/Accounts-Projects_and_Allocations/Adding_members_to_your_NeSI_project.md rename to docs/Access/Accounts-Projects_and_Allocations/Adding_members_to_your_NeSI_project.md index 11d249110..5c6a4620b 100644 --- a/docs/Getting_Started/Accounts-Projects_and_Allocations/Adding_members_to_your_NeSI_project.md +++ b/docs/Access/Accounts-Projects_and_Allocations/Adding_members_to_your_NeSI_project.md @@ -9,15 +9,15 @@ description: How to add a new member to your NeSI project. --- !!! prerequisite - - Have a [NeSI Account profile](./Creating_a_NeSI_Account_Profile.md). - - Be the **owner** of a [NeSI project](./Applying_for_a_new_NeSI_project.md). + - Have a [NeSI Account profile](Creating_a_NeSI_Account_Profile.md). + - Be the **owner** of a [NeSI project](Applying_for_a_new_NeSI_project.md). 1. Log in to [my.nesi.org.nz](https://my.nesi.org.nz/) via your browser. 2. Under **List Projects**, click on the project you want to add members to. 3. When the page is loaded, scroll down to the section **Project Members** and select the **+** button (you will need to be the owner of the project). 4. Enter the Username of the new member, select a project role from the drop-down options, and click **Submit**. - ![Adding_Members.png](../../assets/images/Adding_Members.png) + ![Adding_Members.png](Adding_Members.png) !!! prerequisite "What Next?" - - The new team member will now be able to access your project on NeSI, provided they have [set a NeSI account password](../Accessing_the_HPCs/Setting_Up_and_Resetting_Your_Password.md). + - The new team member will now be able to access your project on NeSI, provided they have [set a NeSI account password](Setting_Up_and_Resetting_Your_Password.md). 
diff --git a/docs/General/NeSI_Policies/Allocation_classes.md b/docs/Access/Accounts-Projects_and_Allocations/Allocation_classes.md similarity index 98% rename from docs/General/NeSI_Policies/Allocation_classes.md rename to docs/Access/Accounts-Projects_and_Allocations/Allocation_classes.md index 4fa852ee2..997525c65 100644 --- a/docs/General/NeSI_Policies/Allocation_classes.md +++ b/docs/Access/Accounts-Projects_and_Allocations/Allocation_classes.md @@ -14,7 +14,7 @@ general terms, the allocation class granted to your project is used to decide who pays for that aspect of your project's consumption of NeSI services. - +
@@ -208,4 +208,4 @@ take a while to set up. Our team is happy to answer any questions you may have throughout the application process. -For more information, see [how we review applications](../NeSI_Policies/How_we_review_applications.md). +For more information, see [how we review applications](How_we_review_applications.md). diff --git a/docs/Getting_Started/Accounts-Projects_and_Allocations/Applying_for_a_new_NeSI_project.md b/docs/Access/Accounts-Projects_and_Allocations/Applying_for_a_new_NeSI_project.md similarity index 82% rename from docs/Getting_Started/Accounts-Projects_and_Allocations/Applying_for_a_new_NeSI_project.md rename to docs/Access/Accounts-Projects_and_Allocations/Applying_for_a_new_NeSI_project.md index 48a410db0..c396879b4 100644 --- a/docs/Getting_Started/Accounts-Projects_and_Allocations/Applying_for_a_new_NeSI_project.md +++ b/docs/Access/Accounts-Projects_and_Allocations/Applying_for_a_new_NeSI_project.md @@ -12,7 +12,7 @@ zendesk_section_id: 360000196195 --- !!! prerequisite - - Have a [NeSI Account profile](../../Getting_Started/Accounts-Projects_and_Allocations/Creating_a_NeSI_Account_Profile.md). + - Have a [NeSI Account profile](Creating_a_NeSI_Account_Profile.md). - NIWA researchers only: read and follow the [NIWA internal documentation for gaining access to the HPCs](https://one.niwa.co.nz/display/ONE/High+Performance+Computing+Facility+Services) (this link is only valid from within the NIWA network or VPN). @@ -24,11 +24,9 @@ zendesk_section_id: 360000196195 Carpentry](https://swcarpentry.github.io/shell-novice/), to help you and your project team gain the necessary skills. 
- Become familiar with foundational HPC skills, for example by - attending a NeSI introductory workshop, one of our [weekly - introductory sessions (or watching the - recording)](../../Getting_Started/Getting_Help/Introductory_Material.md), + attending a NeSI introductory workshop, one of our weekly introductory sessions (or watching the recording), or having one or more of your project team members do so. - - Review our [allocation classes](../../General/NeSI_Policies/Allocation_classes.md). If + - Review our [allocation classes](Allocation_classes.md). If you don't think you currently qualify for any class other than Proposal Development, please {% include "partials/support_request.html" %} as soon as possible to discuss your options. Your institution may be in a @@ -70,7 +68,7 @@ information: research programme's current or expected funding) - Details of how your project is funded (this will help determine whether you are eligible for an allocation from our - [Merit](../../General/NeSI_Policies/Merit_allocations.md) class) + [Merit](Merit_allocations.md) class) - Your previous HPC experience - Whether you would like expert scientific programming support on your project @@ -82,8 +80,8 @@ is relevant. !!! prerequisite "What Next?" - Your NeSI Project proposal will be - [reviewed](../../General/NeSI_Policies/How_we_review_applications.md), + [reviewed](How_we_review_applications.md), after which you will be informed of the outcome. - We may contact you if further details are required. - - When your project is approved you will be able to [set your Linux - Password](../../Getting_Started/Accessing_the_HPCs/Setting_Up_and_Resetting_Your_Password.md). + - When your project is approved you will be able to + [set your Linux Password](Setting_Up_and_Resetting_Your_Password.md). 
diff --git a/docs/Getting_Started/Accounts-Projects_and_Allocations/Applying_to_join_an_existing_NeSI_project.md b/docs/Access/Accounts-Projects_and_Allocations/Applying_to_join_an_existing_NeSI_project.md similarity index 64% rename from docs/Getting_Started/Accounts-Projects_and_Allocations/Applying_to_join_an_existing_NeSI_project.md rename to docs/Access/Accounts-Projects_and_Allocations/Applying_to_join_an_existing_NeSI_project.md index 653bc279e..146ef673e 100644 --- a/docs/Getting_Started/Accounts-Projects_and_Allocations/Applying_to_join_an_existing_NeSI_project.md +++ b/docs/Access/Accounts-Projects_and_Allocations/Applying_to_join_an_existing_NeSI_project.md @@ -13,8 +13,7 @@ zendesk_section_id: 360000196195 --- !!! prerequisite - - You must have a [NeSI - account](../../Getting_Started/Accounts-Projects_and_Allocations/Creating_a_NeSI_Account_Profile.md). + - You must have a [NeSI account](Creating_a_NeSI_Account_Profile.md). ## How to join an existing project on NeSI @@ -24,9 +23,9 @@ If you do not know your NeSI account username: 1. Log in to [my.nesi.org.nz](https://my.nesi.org.nz/) via your browser. 2. In the left side panel, under Account, click My HPC Account. Your Username will appear at the top of the page. - ![authentication\_factor\_setup.png](../../assets/images/Setting_Up_and_Resetting_Your_Password.png) + ![authentication\_factor\_setup.png](Setting_Up_and_Resetting_Your_Password.png) !!! prerequisite "What Next?" - The project owner will add your username to the project. - - Once it is done, you will be able to [set your NeSI account - password](../../Getting_Started/Accessing_the_HPCs/Setting_Up_and_Resetting_Your_Password.md). + - Once it is done, you will be able to + [set your NeSI account password](Setting_Up_and_Resetting_Your_Password.md). 
diff --git a/docs/Getting_Started/Accounts-Projects_and_Allocations/Creating_a_NeSI_Account_Profile.md b/docs/Access/Accounts-Projects_and_Allocations/Creating_a_NeSI_Account_Profile.md similarity index 54% rename from docs/Getting_Started/Accounts-Projects_and_Allocations/Creating_a_NeSI_Account_Profile.md rename to docs/Access/Accounts-Projects_and_Allocations/Creating_a_NeSI_Account_Profile.md index b649679c6..47f3b16c3 100644 --- a/docs/Getting_Started/Accounts-Projects_and_Allocations/Creating_a_NeSI_Account_Profile.md +++ b/docs/Access/Accounts-Projects_and_Allocations/Creating_a_NeSI_Account_Profile.md @@ -14,15 +14,14 @@ zendesk_section_id: 360000196195 !!! prerequisite - Either an active login at a Tuakiri member institution, or [a Tuakiri - Virtual Home account in respect of your current place of work or - study](../../General/NeSI_Policies/Account_Requests_for_non_Tuakiri_Members.md). + Either an active login at a Tuakiri member institution, or + [a Tuakiri Virtual Home account](Account_Requests_for_non_Tuakiri_Members.md) in respect of your current place of work or study. 1. Access [my.nesi.org.nz](https://my.nesi.org.nz) via your browser and log in with either your institutional credentials, or your Tuakiri Virtual Home account, whichever applies. -2. If this is your first time logging in to my.nesi and you do not have +2. If this is your first time logging in to MyNeSI and you do not have an entry in our database (you have not previously had a NeSI account) you will be asked to fill out some fields, such as your role at your institution and contact telephone number, and submit @@ -30,6 +29,5 @@ zendesk_section_id: 360000196195 our records. !!! prerequisite "What next?" 
- - [Apply for Access](../../Getting_Started/Accounts-Projects_and_Allocations/Applying_for_a_new_NeSI_project.md), - either submit an application for a new project or - [join an existing project](../../Getting_Started/Accounts-Projects_and_Allocations/Applying_to_join_an_existing_NeSI_project.md). + - [Apply for Access](Applying_for_a_new_NeSI_project.md). + - [Join an existing project](Applying_to_join_an_existing_NeSI_project.md). diff --git a/docs/General/NeSI_Policies/Institutional_allocations.md b/docs/Access/Accounts-Projects_and_Allocations/Institutional_allocations.md similarity index 77% rename from docs/General/NeSI_Policies/Institutional_allocations.md rename to docs/Access/Accounts-Projects_and_Allocations/Institutional_allocations.md index 74ec80e47..5a130a649 100644 --- a/docs/General/NeSI_Policies/Institutional_allocations.md +++ b/docs/Access/Accounts-Projects_and_Allocations/Institutional_allocations.md @@ -26,11 +26,11 @@ from your institution. If you are a postgraduate student at a NeSI collaborator, your project will likely be considered for an Institutional allocation rather than a -[Merit](../../General/NeSI_Policies/Merit_allocations.md) or -[Postgraduate](../../General/NeSI_Policies/Postgraduate_allocations.md) +[Merit](Merit_allocations.md) or +[Postgraduate](Postgraduate_allocations.md) allocation. -Read more about [how we review applications](../../General/NeSI_Policies/How_we_review_applications.md). +Read more about [how we review applications](How_we_review_applications.md). To learn more about NeSI Projects or to apply for a new project, please -read our article [Applying for a NeSI Project](../../Getting_Started/Accounts-Projects_and_Allocations/Applying_for_a_new_NeSI_project.md). +read our article [Applying for a NeSI Project](Applying_for_a_new_NeSI_project.md). 
diff --git a/docs/Getting_Started/Getting_Help/Job_efficiency_review.md b/docs/Access/Accounts-Projects_and_Allocations/Job_efficiency_review.md similarity index 96% rename from docs/Getting_Started/Getting_Help/Job_efficiency_review.md rename to docs/Access/Accounts-Projects_and_Allocations/Job_efficiency_review.md index 55a99ef30..d80bb0f61 100644 --- a/docs/Getting_Started/Getting_Help/Job_efficiency_review.md +++ b/docs/Access/Accounts-Projects_and_Allocations/Job_efficiency_review.md @@ -31,7 +31,7 @@ following outcomes: - For "quick wins" we may be able to achieve these improvements within the scope of the job efficiency review - For larger pieces of work, we would assist you in applying for a - [NeSI Consultancy](../../Getting_Started/Getting_Help/Consultancy.md) + [NeSI Consultancy](../../Training/Consultancy.md) project, where we would work with you on a longer term project to implement any agreed changes diff --git a/docs/General/NeSI_Policies/Merit_allocations.md b/docs/Access/Accounts-Projects_and_Allocations/Merit_allocations.md similarity index 76% rename from docs/General/NeSI_Policies/Merit_allocations.md rename to docs/Access/Accounts-Projects_and_Allocations/Merit_allocations.md index 8bfb769c2..fbe75596f 100644 --- a/docs/General/NeSI_Policies/Merit_allocations.md +++ b/docs/Access/Accounts-Projects_and_Allocations/Merit_allocations.md @@ -14,8 +14,7 @@ zendesk_section_id: 360000224835 This is the highest award given for use of NeSI services. A Merit allocation is intended for highly skilled research teams carrying out high quality research funded via a peer review process that supports the -[New Zealand Government's Science -Goals](https://www.mbie.govt.nz/science-and-technology/science-and-innovation/funding-information-and-opportunities/national-statement-of-science-investment/). 
+[New Zealand Government's Science Goals](https://www.mbie.govt.nz/science-and-technology/science-and-innovation/funding-information-and-opportunities/national-statement-of-science-investment/). Merit allocations may be made for the HPC Compute & Analytics and Consultancy services. @@ -24,8 +23,7 @@ must meet the following criteria: - The underpinning research programme (that requires access to NeSI HPC services to achieve the objectives of the research) must support - the [Government’s Science - Goals](https://www.mbie.govt.nz/science-and-technology/science-and-innovation/funding-information-and-opportunities/national-statement-of-science-investment/). + the [Government’s Science Goals](https://www.mbie.govt.nz/science-and-technology/science-and-innovation/funding-information-and-opportunities/national-statement-of-science-investment/). - To demonstrate research quality and alignment with national research priorities, the research funding must have come from a peer-reviewed, contestable process at an institutional, regional or @@ -51,8 +49,7 @@ must meet the following criteria: discretion consider your application for a Merit award if your supervisor is a named investigator. -Read more about [how we review -applications](../../General/NeSI_Policies/How_we_review_applications.md). +Read more about [how we review applications](How_we_review_applications.md). To learn more about NeSI Projects or to apply for a new project, please -read our article [Applying for a NeSI Project](../../Getting_Started/Accounts-Projects_and_Allocations/Applying_for_a_new_NeSI_project.md). +read our article [Applying for a NeSI Project](Applying_for_a_new_NeSI_project.md). 
diff --git a/docs/General/NeSI_Policies/Postgraduate_allocations.md b/docs/Access/Accounts-Projects_and_Allocations/Postgraduate_allocations.md similarity index 85% rename from docs/General/NeSI_Policies/Postgraduate_allocations.md rename to docs/Access/Accounts-Projects_and_Allocations/Postgraduate_allocations.md index 682990ddb..887ce2827 100644 --- a/docs/General/NeSI_Policies/Postgraduate_allocations.md +++ b/docs/Access/Accounts-Projects_and_Allocations/Postgraduate_allocations.md @@ -36,8 +36,7 @@ project an allocation from the Postgraduate class: until a later time, if there is insufficient computing capacity available to meet demand. -Read more about [how we review -applications](../../General/NeSI_Policies/How_we_review_applications.md). +Read more about [how we review applications](How_we_review_applications.md). To learn more about NeSI Projects, and to apply please review the -content of the section entitled [Applying for a NeSI Project](../../Getting_Started/Accounts-Projects_and_Allocations/Applying_for_a_new_NeSI_project.md). +content of the section entitled [Applying for a NeSI Project](Applying_for_a_new_NeSI_project.md). 
diff --git a/docs/Getting_Started/Accounts-Projects_and_Allocations/Project_Extensions_and_New_Allocations_on_Existing_Projects.md b/docs/Access/Accounts-Projects_and_Allocations/Project_Extensions_and_New_Allocations_on_Existing_Projects.md similarity index 88% rename from docs/Getting_Started/Accounts-Projects_and_Allocations/Project_Extensions_and_New_Allocations_on_Existing_Projects.md rename to docs/Access/Accounts-Projects_and_Allocations/Project_Extensions_and_New_Allocations_on_Existing_Projects.md index 28e9f4d28..c236eee0b 100644 --- a/docs/Getting_Started/Accounts-Projects_and_Allocations/Project_Extensions_and_New_Allocations_on_Existing_Projects.md +++ b/docs/Access/Accounts-Projects_and_Allocations/Project_Extensions_and_New_Allocations_on_Existing_Projects.md @@ -16,7 +16,7 @@ for a new project to carry on the same work. We currently offer two sorts of extensions: - A new allocation of computing resources (usually compute units on - Mahuika or node hours on Māui) + Mahuika) - A project extension without a new allocation of computing resources. ## Will my project qualify for an extension? @@ -58,16 +58,14 @@ extension request. You can submit a request for an extension using or by {% include "partials/support_request.html" %}. -Please see [Requesting to renew an allocation via -my.nesi.org.nz](../../Getting_Started/my-nesi-org-nz/Requesting_to_renew_an_allocation_via_my-nesi-org-nz.md) +Please see [Requesting to renew an allocation via my.nesi.org.nz](Requesting_to_renew_an_allocation_via_my-nesi-org-nz.md) for more details. 
You will receive a series of automated emails inviting you to apply for a new allocation (or, alternatively, clean up your project data) in the following circumstances: -- In the lead-up to the end of the [call - window](https://www.nesi.org.nz/news/2018/04/new-application-process-merit-postgraduate-allocations) +- In the lead-up to the end of the [call window](https://www.nesi.org.nz/news/2018/04/new-application-process-merit-postgraduate-allocations) immediately before your currently active allocation is scheduled to end. - In the lead-up to the end of your allocation. @@ -104,7 +102,7 @@ If this is your situation, please let us know when you request your project extension. Please note that we are unlikely to let a project continue without a compute allocation for more than six months at a time. If you expect that your project will be suspended for more than -six months, we encourage you to enquire about our [Long-Term Storage -Service](../../Storage/Nearline_long_term_storage/Nearline_Long_Term_Storage_Service.md) or -to move your research data off our facility and make arrangements with +six months, we encourage you to enquire about our +[Long-Term Storage Service](Nearline_Long_Term_Storage_Service.md) +or to move your research data off our facility and make arrangements with your project's host institution for long-term data storage. 
diff --git a/docs/General/NeSI_Policies/Proposal_Development_allocations.md b/docs/Access/Accounts-Projects_and_Allocations/Proposal_Development_allocations.md similarity index 77% rename from docs/General/NeSI_Policies/Proposal_Development_allocations.md rename to docs/Access/Accounts-Projects_and_Allocations/Proposal_Development_allocations.md index 451523ccd..da707248f 100644 --- a/docs/General/NeSI_Policies/Proposal_Development_allocations.md +++ b/docs/Access/Accounts-Projects_and_Allocations/Proposal_Development_allocations.md @@ -15,7 +15,8 @@ A Proposal Development allocation is a short-term allocation of up to (on Māui) or both, for up to six months. During your Proposal Development allocation you can find out: -- whether your software can run on a [NeSI HPC](../../Scientific_Computing/The_NeSI_High_Performance_Computers/index.md), +- whether your software can run on a +[NeSI HPC](../../Scientific_Computing/Software/index.md), - how your software scales to multiple cores or across compute nodes, - approximately how many compute units or node hours your research project is likely to need. @@ -34,11 +35,11 @@ Proposal Development allocation. Once you have completed your Proposal Development allocation, you are welcome to apply for a further allocation. If you are successful, the -project's next allocation will be from another of the [allocation -classes](../../General/NeSI_Policies/Allocation_classes.md). +project's next allocation will be from another of the +[allocation classes](Allocation_classes.md). The [How Applications are Reviewed](How_we_review_applications.md) section provides additional important information for applicants. To learn more about NeSI Projects, and to apply please review the -content of the section entitled [Applying for a NeSI Project](../../Getting_Started/Accounts-Projects_and_Allocations/Applying_for_a_new_NeSI_project.md). +content of the section entitled [Applying for a NeSI Project](Applying_for_a_new_NeSI_project.md). 
diff --git a/docs/Getting_Started/Accounts-Projects_and_Allocations/Quarterly_allocation_periods.md b/docs/Access/Accounts-Projects_and_Allocations/Quarterly_allocation_periods.md similarity index 91% rename from docs/Getting_Started/Accounts-Projects_and_Allocations/Quarterly_allocation_periods.md rename to docs/Access/Accounts-Projects_and_Allocations/Quarterly_allocation_periods.md index d24eb366b..002afcc29 100644 --- a/docs/Getting_Started/Accounts-Projects_and_Allocations/Quarterly_allocation_periods.md +++ b/docs/Access/Accounts-Projects_and_Allocations/Quarterly_allocation_periods.md @@ -24,7 +24,7 @@ Allocations will start on the first day of the next month and run for one year. The diagram below illustrates how these quarterly call periods are scheduled during the year: -![Quarterly\_Allocation\_Periods\_2021\_\_1\_.png](../../assets/images/Quarterly_allocation_periods.png) +![Quarterly\_Allocation\_Periods\_2021\_\_1\_.png](Quarterly_allocation_periods.png) For example, if you apply for a new allocation on your existing project in the month of October, we will review your application in October or @@ -32,7 +32,7 @@ early November, you will be notified of your allocation by the end of November, and your allocation will start on 1 December (as shown in the graphic below). -![Blank\_Diagram\_\_1\_.png](../../assets/images/Quarterly_allocation_periods_0.png) +![Blank\_Diagram\_\_1\_.png](Quarterly_allocation_periods_0.png) ## Existing allocations @@ -54,4 +54,4 @@ month. wait for the following call before your request is considered. 
If you have questions about the review cycles or other steps involved -with getting access to NeSI, {% include "partials/support_request.html" %} \ No newline at end of file +with getting access to NeSI, {% include "partials/support_request.html" %} diff --git a/docs/Getting_Started/Accessing_the_HPCs/Setting_Up_Two_Factor_Authentication.md b/docs/Access/Accounts-Projects_and_Allocations/Setting_Up_Two_Factor_Authentication.md similarity index 68% rename from docs/Getting_Started/Accessing_the_HPCs/Setting_Up_Two_Factor_Authentication.md rename to docs/Access/Accounts-Projects_and_Allocations/Setting_Up_Two_Factor_Authentication.md index 6c8995e1b..3aaca1992 100644 --- a/docs/Getting_Started/Accessing_the_HPCs/Setting_Up_Two_Factor_Authentication.md +++ b/docs/Access/Accounts-Projects_and_Allocations/Setting_Up_Two_Factor_Authentication.md @@ -12,9 +12,9 @@ zendesk_section_id: 360000034315 --- !!! prerequisite - - Have a [NeSI account](../../Getting_Started/Accounts-Projects_and_Allocations/Creating_a_NeSI_Account_Profile.md). - - Be a member of an [active project](../../Getting_Started/Accounts-Projects_and_Allocations/Creating_a_NeSI_Account_Profile.md). - - Have [set up your NeSI account password](../../Getting_Started/Accessing_the_HPCs/Setting_Up_and_Resetting_Your_Password.md). + - Have a [NeSI account](Creating_a_NeSI_Account_Profile.md). + - Be a member of an [active project](Creating_a_NeSI_Account_Profile.md). + - Have [set up your NeSI account password](Setting_Up_and_Resetting_Your_Password.md). - Have a device with an authentication app. ##  Authentication App @@ -33,10 +33,10 @@ If you some reason you can't do this, please contact NeSI support. 2. Click **My HPC Account** on left hand panel  and then **Setup Two-Factor Authentication device** - ![authentication\_factor\_setup.png](../../assets/images/Setting_Up_Two_Factor_Authentication.png) + ![authentication\_factor\_setup.png](Setting_Up_Two_Factor_Authentication.png) 3. 
Click the "**Setup Two-Factor Authentication device**" link. - ![set\_up\_2fa\_device.png](../../assets/images/Setting_Up_Two_Factor_Authentication_0.png) + ![set\_up\_2fa\_device.png](Setting_Up_Two_Factor_Authentication_0.png) 4. After clicking on "Continue" you will retrieve the QR code. @@ -54,4 +54,4 @@ This means that you can only try logging in to the lander node once every 30 seconds. !!! tip "What next?" - [Getting access to the cluster](../../Getting_Started/Accessing_the_HPCs/Choosing_and_Configuring_Software_for_Connecting_to_the_Clusters.md) + [Getting access to the cluster](Choosing_and_Configuring_Software_for_Connecting_to_the_Clusters.md) diff --git a/docs/Getting_Started/Accessing_the_HPCs/Setting_Up_and_Resetting_Your_Password.md b/docs/Access/Accounts-Projects_and_Allocations/Setting_Up_and_Resetting_Your_Password.md similarity index 57% rename from docs/Getting_Started/Accessing_the_HPCs/Setting_Up_and_Resetting_Your_Password.md rename to docs/Access/Accounts-Projects_and_Allocations/Setting_Up_and_Resetting_Your_Password.md index cc0f33e87..771f59c94 100644 --- a/docs/Getting_Started/Accessing_the_HPCs/Setting_Up_and_Resetting_Your_Password.md +++ b/docs/Access/Accounts-Projects_and_Allocations/Setting_Up_and_Resetting_Your_Password.md @@ -13,9 +13,8 @@ zendesk_section_id: 360000034315 !!! prerequisite - - Have a [NeSI - account](../../Getting_Started/Accounts-Projects_and_Allocations/Creating_a_NeSI_Account_Profile.md). - - Be a member of an [active project.](../../Getting_Started/Accounts-Projects_and_Allocations/Applying_for_a_new_NeSI_project.md) + - Have a [NeSI account](Creating_a_NeSI_Account_Profile.md). + - Be a member of an [active project.](Applying_for_a_new_NeSI_project.md) ## Setting NeSI Password @@ -26,19 +25,18 @@ zendesk_section_id: 360000034315 Password** (If you are resetting your password this will read **Reset Password**). Note your **Username**. 
- ![authentication\_factor\_setup.png](../../assets/images/Setting_Up_and_Resetting_Your_Password.png) + ![authentication\_factor\_setup.png](Setting_Up_and_Resetting_Your_Password.png) 3. Enter and verify your new password, making sure it follows the - [password - policy](../../General/NeSI_Policies/NeSI_Password_Policy.md). - ![SetNeSIaccountPassword.png](../../assets/images/Setting_Up_and_Resetting_Your_Password_0.png) + [password policy](NeSI_Password_Policy.md). + ![SetNeSIaccountPassword.png](Setting_Up_and_Resetting_Your_Password_0.png) 4. If the password set was successful, following confirmation label will appear on the same page within few seconds - ![change\_success.png](../../assets/images/Setting_Up_and_Resetting_Your_Password_1.png) + ![change\_success.png](Setting_Up_and_Resetting_Your_Password_1.png) 5. Followed by an email confirmation similar to below - ![password\_set\_confirmation.png](../../assets/images/Setting_Up_and_Resetting_Your_Password_2.png) + ![password\_set\_confirmation.png](Setting_Up_and_Resetting_Your_Password_2.png) ## Resetting NeSI Password via my NeSI Portal @@ -55,11 +53,10 @@ zendesk_section_id: 360000034315 4. If the password **reset** was successful, following confirmation label will appear on the same page within few seconds - ![change\_success.png](../../assets/images/Setting_Up_and_Resetting_Your_Password_3.png) + ![change\_success.png](Setting_Up_and_Resetting_Your_Password_3.png) 5. Followed by an email confirmation similar to below -![password\_set\_confirmation.png](../../assets/images/Setting_Up_and_Resetting_Your_Password_4.png) +![password\_set\_confirmation.png](Setting_Up_and_Resetting_Your_Password_4.png) !!! prerequisite "What next?" 
- - Set up [Second Factor - Authentication.](../../Getting_Started/Accessing_the_HPCs/Setting_Up_Two_Factor_Authentication.md) + - Set up [Second Factor Authentication.](Setting_Up_Two_Factor_Authentication.md) diff --git a/docs/Getting_Started/Accounts-Projects_and_Allocations/What_is_an_allocation.md b/docs/Access/Accounts-Projects_and_Allocations/What_is_an_allocation.md similarity index 80% rename from docs/Getting_Started/Accounts-Projects_and_Allocations/What_is_an_allocation.md rename to docs/Access/Accounts-Projects_and_Allocations/What_is_an_allocation.md index 5eeca06fb..c1b80d10d 100644 --- a/docs/Getting_Started/Accounts-Projects_and_Allocations/What_is_an_allocation.md +++ b/docs/Access/Accounts-Projects_and_Allocations/What_is_an_allocation.md @@ -16,15 +16,15 @@ different allocation criteria. An allocation will come from one of our allocation classes. We will decide what class of allocation is most suitable for you and your -research programme, however you're welcome to review [our article on -allocation classes](../../General/NeSI_Policies/Allocation_classes.md) +research programme, however you're welcome to review +[our article on allocation classes](Allocation_classes.md) to find out what class you're likely eligible for. ## An important note on CPU hour allocations You may continue to submit jobs even if you have used all your CPU-hour allocation. The effect of 0 remaining CPU hours allocation is a -[lower fairshare](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Fair_Share_How_jobs_get_prioritised.md), +[lower fairshare](Fair_Share_How_jobs_get_prioritised.md), not the inability to use CPUs. Your ability to submit jobs will only be removed when your project's allocation expires, not when core-hours are exhausted. @@ -38,8 +38,8 @@ plus one kind of compute allocation) in order to be valid and active. 
Compute allocations are expressed in terms of a number of units, to be consumed or reserved between a set start date and time and a set end -date and time. For allocations of computing power, we use [Fair -Share](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Fair_Share_How_jobs_get_prioritised.md) +date and time. For allocations of computing power, we use +[Fair Share](Fair_Share_How_jobs_get_prioritised.md) to balance work between different projects. NeSI allocations and the relative "prices" of resources used by those allocations should not be taken as any indicator of the real NZD costs of purchasing or running @@ -48,7 +48,7 @@ the associated infrastructure and services. ### Mahuika allocations Allocations on -[Mahuika](../../Scientific_Computing/The_NeSI_High_Performance_Computers/Mahuika.md) +[Mahuika](Mahuika.md) are measured in Mahuika compute units. A job uses one Mahuika compute unit if it runs for one hour on one physical Mahuika CPU core (two logical CPUs), using 3 GB of RAM and no GPU devices. This means a single @@ -75,7 +75,7 @@ depend on your contractual arrangements with the NeSI host. Note that the minimum number of logical cores a job can take on Mahuika is two -(see [Hyperthreading](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Hyperthreading.md) for +(see [Hyperthreading](Hyperthreading.md) for details). Therefore: - the lowest possible price for a CPU-only job is 0.70 compute units @@ -87,20 +87,6 @@ details). Therefore: In reality, every job must request at least some RAM. -### Māui allocations - -The compute capacity of the -[Māui](../../Scientific_Computing/The_NeSI_High_Performance_Computers/Maui.md) -supercomputer is allocated by node-hours. Though some Māui nodes have -more RAM than others, we do not currently distinguish between low-memory -and high-memory nodes for allocation, billing or Fair Share purposes. 
- -Each allocation on Māui includes an entitlement to use the Māui -ancillary nodes equally with other NeSI projects having Māui allocations -at that time. - -One Māui node hour is roughly equivalent to 40 Mahuika compute units. - ### Online storage allocations An online storage allocation, unlike compute allocations, is more like a diff --git a/docs/Access/Accounts-Projects_and_Allocations/index.md b/docs/Access/Accounts-Projects_and_Allocations/index.md new file mode 100644 index 000000000..e69de29bb diff --git a/docs/General/NeSI_Policies/.pages.yml b/docs/Access/NeSI_Policies/.pages.yml similarity index 100% rename from docs/General/NeSI_Policies/.pages.yml rename to docs/Access/NeSI_Policies/.pages.yml diff --git a/docs/General/NeSI_Policies/Acceptable_Use_Policy.md b/docs/Access/NeSI_Policies/Acceptable_Use_Policy.md similarity index 100% rename from docs/General/NeSI_Policies/Acceptable_Use_Policy.md rename to docs/Access/NeSI_Policies/Acceptable_Use_Policy.md diff --git a/docs/General/NeSI_Policies/Access_Policy.md b/docs/Access/NeSI_Policies/Access_Policy.md similarity index 90% rename from docs/General/NeSI_Policies/Access_Policy.md rename to docs/Access/NeSI_Policies/Access_Policy.md index 7bead9529..0028990ef 100644 --- a/docs/General/NeSI_Policies/Access_Policy.md +++ b/docs/Access/NeSI_Policies/Access_Policy.md @@ -15,7 +15,7 @@ Our Access Policy provides essential information for researchers accessing the following NeSI services: - HPC Compute and Analytics – provides access to - [HPC platforms](../../Scientific_Computing/The_NeSI_High_Performance_Computers/index.md) + [HPC platforms](index.md) that host a broad range of high-performance [software applications and libraries](https://www.nesi.org.nz/services/high-performance-computing/software). 
- Consultancy and Training – provides access to diff --git a/docs/General/NeSI_Policies/Acknowledgement-Citation_and_Publication.md b/docs/Access/NeSI_Policies/Acknowledgement-Citation_and_Publication.md similarity index 100% rename from docs/General/NeSI_Policies/Acknowledgement-Citation_and_Publication.md rename to docs/Access/NeSI_Policies/Acknowledgement-Citation_and_Publication.md diff --git a/docs/General/NeSI_Policies/How_we_review_applications.md b/docs/Access/NeSI_Policies/How_we_review_applications.md similarity index 95% rename from docs/General/NeSI_Policies/How_we_review_applications.md rename to docs/Access/NeSI_Policies/How_we_review_applications.md index 3b402a52c..16bd4a497 100644 --- a/docs/General/NeSI_Policies/How_we_review_applications.md +++ b/docs/Access/NeSI_Policies/How_we_review_applications.md @@ -52,8 +52,8 @@ new projects is as follows: of GPU hours or access to ancillary nodes or virtual labs. 6. **Decision and notification:** If we approve an initial allocation for your project, we will typically award the project an - [allocation of Mahuika compute units, Māui node hours, or both, and also an online storage allocation](../../Getting_Started/Accounts-Projects_and_Allocations/What_is_an_allocation.md), - from one of [our allocation classes](../../General/NeSI_Policies/Allocation_classes.md). + [allocation of Mahuika compute units, Māui node hours, or both, and also an online storage allocation](What_is_an_allocation.md), + from one of [our allocation classes](Allocation_classes.md). In an case, we will send you an email telling you about our decision. 
Our review process for requests for new allocations on existing projects diff --git a/docs/General/NeSI_Policies/NeSI_Application_Support_Model.md b/docs/Access/NeSI_Policies/NeSI_Application_Support_Model.md similarity index 100% rename from docs/General/NeSI_Policies/NeSI_Application_Support_Model.md rename to docs/Access/NeSI_Policies/NeSI_Application_Support_Model.md diff --git a/docs/General/NeSI_Policies/NeSI_Licence_Policy.md b/docs/Access/NeSI_Policies/NeSI_Licence_Policy.md similarity index 100% rename from docs/General/NeSI_Policies/NeSI_Licence_Policy.md rename to docs/Access/NeSI_Policies/NeSI_Licence_Policy.md diff --git a/docs/General/NeSI_Policies/NeSI_Password_Policy.md b/docs/Access/NeSI_Policies/NeSI_Password_Policy.md similarity index 100% rename from docs/General/NeSI_Policies/NeSI_Password_Policy.md rename to docs/Access/NeSI_Policies/NeSI_Password_Policy.md diff --git a/docs/General/NeSI_Policies/NeSI_Privacy_Policy.md b/docs/Access/NeSI_Policies/NeSI_Privacy_Policy.md similarity index 100% rename from docs/General/NeSI_Policies/NeSI_Privacy_Policy.md rename to docs/Access/NeSI_Policies/NeSI_Privacy_Policy.md diff --git a/docs/General/NeSI_Policies/Total_HPC_Resources_Available.md b/docs/Access/NeSI_Policies/Total_HPC_Resources_Available.md similarity index 96% rename from docs/General/NeSI_Policies/Total_HPC_Resources_Available.md rename to docs/Access/NeSI_Policies/Total_HPC_Resources_Available.md index a4157d8fa..119a89b04 100644 --- a/docs/General/NeSI_Policies/Total_HPC_Resources_Available.md +++ b/docs/Access/NeSI_Policies/Total_HPC_Resources_Available.md @@ -9,15 +9,15 @@ zendesk_section_id: 360000224835 --- NeSI resources available for allocation each year combined across both -[Mahuika](../../Scientific_Computing/The_NeSI_High_Performance_Computers/Mahuika.md) and -[Māui](../../Scientific_Computing/The_NeSI_High_Performance_Computers/Maui.md) HPC systems +[Mahuika](Mahuika.md) and +[Māui](Maui.md) HPC systems include 152 million x86 
CPU Core-hours and 112 thousand GPGPU-hours (equivalent to 400 million Cuda Core-hours) per annum and are divided between Allocation Classes as specified in Table 1, and Table 2. Table 1: NeSI HPC resources (physical core-hours) available per annum. Note: (1) One Node-h on Māui is equivalent to 40 Core-hs; (2) -Allocations on Mahuika (cloud) will be available in Q4, 2018. +Allocations on Mahuika (cloud) will be available in Q4, 2018.
diff --git a/docs/NeSI_Service_Subscriptions/.pages.yml b/docs/Access/NeSI_Service_Subscriptions/.pages.yml similarity index 100% rename from docs/NeSI_Service_Subscriptions/.pages.yml rename to docs/Access/NeSI_Service_Subscriptions/.pages.yml diff --git a/docs/NeSI_Service_Subscriptions/Contracts_and_billing_processes/.pages.yml b/docs/Access/NeSI_Service_Subscriptions/Contracts_and_billing_processes/.pages.yml similarity index 100% rename from docs/NeSI_Service_Subscriptions/Contracts_and_billing_processes/.pages.yml rename to docs/Access/NeSI_Service_Subscriptions/Contracts_and_billing_processes/.pages.yml diff --git a/docs/NeSI_Service_Subscriptions/Contracts_and_billing_processes/Billing_process.md b/docs/Access/NeSI_Service_Subscriptions/Contracts_and_billing_processes/Billing_process.md similarity index 100% rename from docs/NeSI_Service_Subscriptions/Contracts_and_billing_processes/Billing_process.md rename to docs/Access/NeSI_Service_Subscriptions/Contracts_and_billing_processes/Billing_process.md diff --git a/docs/NeSI_Service_Subscriptions/Contracts_and_billing_processes/Types_of_contracts.md b/docs/Access/NeSI_Service_Subscriptions/Contracts_and_billing_processes/Types_of_contracts.md similarity index 100% rename from docs/NeSI_Service_Subscriptions/Contracts_and_billing_processes/Types_of_contracts.md rename to docs/Access/NeSI_Service_Subscriptions/Contracts_and_billing_processes/Types_of_contracts.md diff --git a/docs/NeSI_Service_Subscriptions/Overview/.pages.yml b/docs/Access/NeSI_Service_Subscriptions/Overview/.pages.yml similarity index 100% rename from docs/NeSI_Service_Subscriptions/Overview/.pages.yml rename to docs/Access/NeSI_Service_Subscriptions/Overview/.pages.yml diff --git a/docs/NeSI_Service_Subscriptions/Overview/Pricing.md b/docs/Access/NeSI_Service_Subscriptions/Overview/Pricing.md similarity index 100% rename from docs/NeSI_Service_Subscriptions/Overview/Pricing.md rename to 
docs/Access/NeSI_Service_Subscriptions/Overview/Pricing.md diff --git a/docs/NeSI_Service_Subscriptions/Overview/Questions.md b/docs/Access/NeSI_Service_Subscriptions/Overview/Questions.md similarity index 100% rename from docs/NeSI_Service_Subscriptions/Overview/Questions.md rename to docs/Access/NeSI_Service_Subscriptions/Overview/Questions.md diff --git a/docs/NeSI_Service_Subscriptions/Overview/What_is_a_Subscription.md b/docs/Access/NeSI_Service_Subscriptions/Overview/What_is_a_Subscription.md similarity index 100% rename from docs/NeSI_Service_Subscriptions/Overview/What_is_a_Subscription.md rename to docs/Access/NeSI_Service_Subscriptions/Overview/What_is_a_Subscription.md diff --git a/docs/NeSI_Service_Subscriptions/Service_Governance/.pages.yml b/docs/Access/NeSI_Service_Subscriptions/Service_Governance/.pages.yml similarity index 100% rename from docs/NeSI_Service_Subscriptions/Service_Governance/.pages.yml rename to docs/Access/NeSI_Service_Subscriptions/Service_Governance/.pages.yml diff --git a/docs/NeSI_Service_Subscriptions/Service_Governance/Allocation_approvals.md b/docs/Access/NeSI_Service_Subscriptions/Service_Governance/Allocation_approvals.md similarity index 100% rename from docs/NeSI_Service_Subscriptions/Service_Governance/Allocation_approvals.md rename to docs/Access/NeSI_Service_Subscriptions/Service_Governance/Allocation_approvals.md diff --git a/docs/NeSI_Service_Subscriptions/Service_Governance/Service_Governance_contact.md b/docs/Access/NeSI_Service_Subscriptions/Service_Governance/Service_Governance_contact.md similarity index 100% rename from docs/NeSI_Service_Subscriptions/Service_Governance/Service_Governance_contact.md rename to docs/Access/NeSI_Service_Subscriptions/Service_Governance/Service_Governance_contact.md diff --git a/docs/NeSI_Service_Subscriptions/Service_Governance/Subscriber_Monthly_Usage_Reports.md b/docs/Access/NeSI_Service_Subscriptions/Service_Governance/Subscriber_Monthly_Usage_Reports.md similarity index 100% 
rename from docs/NeSI_Service_Subscriptions/Service_Governance/Subscriber_Monthly_Usage_Reports.md rename to docs/Access/NeSI_Service_Subscriptions/Service_Governance/Subscriber_Monthly_Usage_Reports.md diff --git a/docs/Access/index.md b/docs/Access/index.md new file mode 100644 index 000000000..f2e249131 --- /dev/null +++ b/docs/Access/index.md @@ -0,0 +1,7 @@ +--- +created_at: 2025-02-20 +hide: + - toc +--- + +ACCESS HOME PLACEHOLDER diff --git a/docs/Getting_Started/my-nesi-org-nz/.pages.yml b/docs/Access/my-nesi-org-nz/.pages.yml similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/.pages.yml rename to docs/Access/my-nesi-org-nz/.pages.yml diff --git a/docs/Getting_Started/my-nesi-org-nz/Logging_in_to_my-nesi-org-nz.md b/docs/Access/my-nesi-org-nz/Logging_in_to_my-nesi-org-nz.md similarity index 82% rename from docs/Getting_Started/my-nesi-org-nz/Logging_in_to_my-nesi-org-nz.md rename to docs/Access/my-nesi-org-nz/Logging_in_to_my-nesi-org-nz.md index 464e9285a..b627d8d00 100644 --- a/docs/Getting_Started/my-nesi-org-nz/Logging_in_to_my-nesi-org-nz.md +++ b/docs/Access/my-nesi-org-nz/Logging_in_to_my-nesi-org-nz.md @@ -11,30 +11,26 @@ zendesk_section_id: 360001059296 ## Login credentials We allow students, academics, alumni and researchers to securely login -and create a [NeSI account -profile](../../Getting_Started/Accounts-Projects_and_Allocations/Creating_a_NeSI_Account_Profile.md) +and create a [NeSI account profile](Creating_a_NeSI_Account_Profile.md) using the credentials granted by their home organisation via Tuakiri. 
### Tuakiri - federated identity and access management Most New Zealand universities and Crown Research Institutes are members -of the [Tuakiri authentication -federation](https://www.reannz.co.nz/products-and-services/tuakiri/join/), +of the [Tuakiri authentication federation](https://www.reannz.co.nz/products-and-services/tuakiri/join/), but many other institutions, including private sector organisations and most central and local government agencies, are not. -See also [Creating a NeSI Account -Profile](../../Getting_Started/Accounts-Projects_and_Allocations/Creating_a_NeSI_Account_Profile.md) +See also [Creating a NeSI Account Profile](Creating_a_NeSI_Account_Profile.md) ### Support for users outside the Tuakiri federation In case your organisation is not part of the Tuakiri federated identity -management service, a user can still [request a NeSI Account -profile.](https://my.nesi.org.nz/html/request_nesi_account) NeSI will +management service, a user can still +[request a NeSI Account profile.](https://my.nesi.org.nz/html/request_nesi_account) NeSI will (if approved) provision a so-called "virtual home account" on Tuakiri. 
-See also [Account Requests for non-Tuakiri -Members](../../General/NeSI_Policies/Account_Requests_for_non_Tuakiri_Members.md) +See also [Account Requests for non-Tuakiri Members](Account_Requests_for_non_Tuakiri_Members.md) ## Troubleshooting login issues diff --git a/docs/Getting_Started/my-nesi-org-nz/Managing_notification_preferences.md b/docs/Access/my-nesi-org-nz/Managing_notification_preferences.md similarity index 82% rename from docs/Getting_Started/my-nesi-org-nz/Managing_notification_preferences.md rename to docs/Access/my-nesi-org-nz/Managing_notification_preferences.md index d07f5a637..b2f0bfc84 100644 --- a/docs/Getting_Started/my-nesi-org-nz/Managing_notification_preferences.md +++ b/docs/Access/my-nesi-org-nz/Managing_notification_preferences.md @@ -27,8 +27,8 @@ Use the 'Manage' button provided to open the externally hosted preferences or the checkboxes for the NeSI Project-related notifications. -![2022-04-12\_16-46-56.png](../../assets/images/Managing_notification_preferences.png) +![2022-04-12\_16-46-56.png](Managing_notification_preferences.png) ### See also -Our support article on the NeSI [System status.](../../Getting_Started/Getting_Help/System_status.md) +Our support article on the NeSI [System status.](System_status.md) diff --git a/docs/Getting_Started/my-nesi-org-nz/Navigating_the_my-nesi-org-nz_web_interface.md b/docs/Access/my-nesi-org-nz/Navigating_the_my-nesi-org-nz_web_interface.md similarity index 90% rename from docs/Getting_Started/my-nesi-org-nz/Navigating_the_my-nesi-org-nz_web_interface.md rename to docs/Access/my-nesi-org-nz/Navigating_the_my-nesi-org-nz_web_interface.md index 6dc26a760..7ac3aa838 100644 --- a/docs/Getting_Started/my-nesi-org-nz/Navigating_the_my-nesi-org-nz_web_interface.md +++ b/docs/Access/my-nesi-org-nz/Navigating_the_my-nesi-org-nz_web_interface.md @@ -8,7 +8,7 @@ zendesk_article_id: 360003592875 zendesk_section_id: 360001059296 --- 
-![mceclip0.png](../../assets/images/Navigating_the_my-nesi-org-nz_web_interface.png) +![mceclip0.png](Navigating_the_my-nesi-org-nz_web_interface.png) ## Main navigation @@ -34,4 +34,4 @@ sidebar by reducing the visible content to icons only. ## Closing the session The 'user name menu' on the top right contains the option to logout and -close the session. \ No newline at end of file +close the session. diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/.pages.yml b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/.pages.yml similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/.pages.yml rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/.pages.yml diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-0-1.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-0-1.md similarity index 79% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-0-1.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-0-1.md index d94db9b43..5e235587f 100644 --- a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-0-1.md +++ b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-0-1.md @@ -16,11 +16,10 @@ search: ## New and Improved - An updated web application is introducing a - [navigation](../../../Getting_Started/my-nesi-org-nz/Navigating_the_my-nesi-org-nz_web_interface.md) + [navigation](Navigating_the_my-nesi-org-nz_web_interface.md) in the sidebar and links to important functions -- Improved [project application - form](../../../Getting_Started/my-nesi-org-nz/The_NeSI_Project_Request_Form.md) +- Improved [project application form](The_NeSI_Project_Request_Form.md) with automatic draft state so you can continue the 
application at a later stage without the need to re-enter details diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-0-3.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-0-3.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-0-3.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-0-3.md diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-1-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-1-0.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-1-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-1-0.md diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-10-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-10-0.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-10-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-10-0.md diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-11-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-11-0.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-11-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-11-0.md diff --git 
a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-12-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-12-0.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-12-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-12-0.md diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-13-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-13-0.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-13-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-13-0.md diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-14-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-14-0.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-14-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-14-0.md diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-15-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-15-0.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-15-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-15-0.md diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-16-0.md 
b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-16-0.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-16-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-16-0.md diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-17-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-17-0.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-17-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-17-0.md diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-18-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-18-0.md similarity index 85% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-18-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-18-0.md index 130fbea07..ff2a9b68e 100644 --- a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-18-0.md +++ b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-18-0.md @@ -16,8 +16,7 @@ search: ## New and Improved -- A link to [NeSI's privacy - policy](https://www.nesi.org.nz/about-us/security-privacy/privacy-policy) +- A link to [NeSI's privacy policy](https://www.nesi.org.nz/about-us/security-privacy/privacy-policy) has been added to the bottom of all pages of my.nesi environment - We've shifted from using Tuakiri's RapidConnect service to Tuakiri's OpenID Connect bridge to improve overall security of my.nesi's user @@ -37,8 +36,8 @@ search: 
allocation requests to no further than one year in the future. - Changed which system components from NeSI's [System Status page](https://status.nesi.org.nz) are default notifications emailed to users. Users can customise their system status email - notifications at any time. [Read more about that - here](../../../General/Announcements/Status_page_subscription_notification_changes.md). + notifications at any time. + [Read more about that here](Status_page_subscription_notification_changes.md). If you have any questions about any of the improvements or fixes, please {% include "partials/support_request.html" %}. diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-19-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-19-0.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-19-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-19-0.md diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-2-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-2-0.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-2-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-2-0.md diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-20-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-20-0.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-20-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-20-0.md 
diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-21-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-21-0.md similarity index 89% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-21-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-21-0.md index 157fab785..9b07e3f26 100644 --- a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-21-0.md +++ b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-21-0.md @@ -20,7 +20,7 @@ search: items under Accounts. - On the Project page and New Allocation Request page, tool tip text referring to - [nn\_corehour\_usage](../../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Checking_your_projects_usage_using_nn_corehour_usage.md) + [nn\_corehour\_usage](Checking_your_projects_usage_using_nn_corehour_usage.md) will appear when you hover over the Mahuika Compute Units information. 
diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-22-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-22-0.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-22-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-22-0.md diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-23-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-23-0.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-23-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-23-0.md diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-24-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-24-0.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-24-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-24-0.md diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-25-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-25-0.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-25-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-25-0.md diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-26-0.md 
b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-26-0.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-26-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-26-0.md diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-27-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-27-0.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-27-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-27-0.md diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-28-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-28-0.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-28-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-28-0.md diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-29-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-29-0.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-29-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-29-0.md diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-3-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-3-0.md similarity index 100% rename from 
docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-3-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-3-0.md diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-30-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-30-0.md similarity index 95% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-30-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-30-0.md index eaf816ba5..599179931 100644 --- a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-30-0.md +++ b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-30-0.md @@ -10,7 +10,7 @@ search: ## New and Improved - In the project details view, the NeSI OnDemand resources (NeSI OnDemand compute, NFS storage options) and the Freezer (long-term storage) resources are now visible. -![alt text](../../../assets/images/my.nesiNewResources.png) +![alt text](my.nesiNewResources.png) - Multiple changes have been made to the new allocation request page: - Long-term storage units are now listed in Terabytes (TB) instead of Gigabytes (GB). - You can now customise the name of your long-term storage in Freezer, if desired. This field is optional. If you choose not to use a custom name, we will automatically assign a default name related to your project id (eg. default_nesi9999). This naming field was added to enable easier searching and identification in the Freezer environment after the storage request has been created. 
diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-31-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-31-0.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-31-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-31-0.md diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-4-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-4-0.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-4-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-4-0.md diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-5-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-5-0.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-5-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-5-0.md diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-6-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-6-0.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-6-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-6-0.md diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-7-0.md 
b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-7-0.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-7-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-7-0.md diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-8-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-8-0.md similarity index 55% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-8-0.md rename to docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-8-0.md index 3a7ac227c..49cf4978f 100644 --- a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-8-0.md +++ b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-8-0.md @@ -15,10 +15,8 @@ search: ## New and Improved -- Improved [NeSI Notification - Preferences](../../../Getting_Started/my-nesi-org-nz/Managing_notification_preferences.md) +- Improved [NeSI Notification Preferences](Managing_notification_preferences.md) to be project-specific -- Improved [allocation renewal - requests](../../../Getting_Started/my-nesi-org-nz/Requesting_to_renew_an_allocation_via_my-nesi-org-nz.md) +- Improved [allocation renewal requests](Requesting_to_renew_an_allocation_via_my-nesi-org-nz.md) by providing more context diff --git a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-9-0.md b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-9-0.md similarity index 70% rename from docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-9-0.md rename to 
docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-9-0.md index c30bd6d62..2a6a38458 100644 --- a/docs/Getting_Started/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-9-0.md +++ b/docs/Access/my-nesi-org-nz/Release_Notes_my-nesi-org-nz/my-nesi-org-nz_release_notes_v2-9-0.md @@ -15,7 +15,6 @@ search: ## New and Improved -- Improved [allocation renewal - requests](../../../Getting_Started/my-nesi-org-nz/Requesting_to_renew_an_allocation_via_my-nesi-org-nz.md) default +- Improved [allocation renewal requests](Requesting_to_renew_an_allocation_via_my-nesi-org-nz.md) default organisation selection - Added a sub-section to list open allocation requests diff --git a/docs/Getting_Started/my-nesi-org-nz/Requesting_to_renew_an_allocation_via_my-nesi-org-nz.md b/docs/Access/my-nesi-org-nz/Requesting_to_renew_an_allocation_via_my-nesi-org-nz.md similarity index 75% rename from docs/Getting_Started/my-nesi-org-nz/Requesting_to_renew_an_allocation_via_my-nesi-org-nz.md rename to docs/Access/my-nesi-org-nz/Requesting_to_renew_an_allocation_via_my-nesi-org-nz.md index 70cbd94a0..76166db43 100644 --- a/docs/Getting_Started/my-nesi-org-nz/Requesting_to_renew_an_allocation_via_my-nesi-org-nz.md +++ b/docs/Access/my-nesi-org-nz/Requesting_to_renew_an_allocation_via_my-nesi-org-nz.md @@ -13,14 +13,14 @@ zendesk_section_id: 360001059296 1. Login to and select a project from the list. - ![my.nesi.png](../../assets/images/Requesting_to_renew_an_allocation_via_my-nesi-org-nz.png) + ![my.nesi.png](Requesting_to_renew_an_allocation_via_my-nesi-org-nz.png) 2. Click the Plus button icon 'action' next to the compute allocation line item  - ![my.nesi.png](../../assets/images/Requesting_to_renew_an_allocation_via_my-nesi-org-nz_0.png) + ![my.nesi.png](Requesting_to_renew_an_allocation_via_my-nesi-org-nz_0.png) 3. Verify the preset values and add a comment in case you update some. 
Finally, click 'Submit' - ![mceclip2.png](../../assets/images/Requesting_to_renew_an_allocation_via_my-nesi-org-nz_1.png) + ![mceclip2.png](Requesting_to_renew_an_allocation_via_my-nesi-org-nz_1.png) ### Can I request any allocation size? @@ -39,5 +39,5 @@ Please be aware that: - An allocation from an institution's entitlement is subject to approval by that institution. -See [Project Extensions and New Allocations on Existing Projects](../../Getting_Started/Accounts-Projects_and_Allocations/Project_Extensions_and_New_Allocations_on_Existing_Projects.md) -for more details. \ No newline at end of file +See [Project Extensions and New Allocations on Existing Projects](Project_Extensions_and_New_Allocations_on_Existing_Projects.md) +for more details. diff --git a/docs/Getting_Started/my-nesi-org-nz/The_NeSI_Project_Request_Form.md b/docs/Access/my-nesi-org-nz/The_NeSI_Project_Request_Form.md similarity index 89% rename from docs/Getting_Started/my-nesi-org-nz/The_NeSI_Project_Request_Form.md rename to docs/Access/my-nesi-org-nz/The_NeSI_Project_Request_Form.md index 43a1b7dab..a656332bd 100644 --- a/docs/Getting_Started/my-nesi-org-nz/The_NeSI_Project_Request_Form.md +++ b/docs/Access/my-nesi-org-nz/The_NeSI_Project_Request_Form.md @@ -8,7 +8,7 @@ zendesk_article_id: 360003648716 zendesk_section_id: 360001059296 --- -See [Applying for a NeSI project](../../Getting_Started/Accounts-Projects_and_Allocations/Applying_for_a_new_NeSI_project.md)  +See [Applying for a NeSI project](Applying_for_a_new_NeSI_project.md)  for how to access the form. ## Preparing a request to use NeSI resources @@ -21,7 +21,7 @@ below. [https://my.nesi.org.nz](https://my.nesi.org.nz/projects/apply) and login. Select "Apply for Access" from the sidebar navigation on the left. - ![mceclip1.png](../../assets/images/The_NeSI_Project_Request_Form.png) + ![mceclip1.png](The_NeSI_Project_Request_Form.png) 2. 
Choose from the following items: - If you are returning to continue work on a draft request you started earlier, choose the link based on the date/time or title @@ -45,4 +45,4 @@ save a draft. The request can only be successfully submitted once all mandatory data has been entered. The final section in the form 'Summary' will highlight missing data and allow you to navigate back to the relevant section. -  \ No newline at end of file +  diff --git a/docs/Getting_Started/my-nesi-org-nz/Tuakiri_Attribute_Validator.md b/docs/Access/my-nesi-org-nz/Tuakiri_Attribute_Validator.md similarity index 100% rename from docs/Getting_Started/my-nesi-org-nz/Tuakiri_Attribute_Validator.md rename to docs/Access/my-nesi-org-nz/Tuakiri_Attribute_Validator.md diff --git a/docs/General/Announcements/.pages.yml b/docs/Announcements/.pages.yml similarity index 100% rename from docs/General/Announcements/.pages.yml rename to docs/Announcements/.pages.yml diff --git a/docs/General/Announcements/Accessing_NeSI_Support_during_the_holiday_break.md b/docs/Announcements/Accessing_NeSI_Support_during_the_holiday_break.md similarity index 72% rename from docs/General/Announcements/Accessing_NeSI_Support_during_the_holiday_break.md rename to docs/Announcements/Accessing_NeSI_Support_during_the_holiday_break.md index c7fb58cae..63bdc8f29 100644 --- a/docs/General/Announcements/Accessing_NeSI_Support_during_the_holiday_break.md +++ b/docs/Announcements/Accessing_NeSI_Support_during_the_holiday_break.md @@ -21,8 +21,7 @@ will be online and available, but non-critical support requests will be responded to when the team is back on 06 January. Urgent / critical requests will be addressed on a best effort basis. Any -changes to system status will be reported via our [System Status -page](https://status.nesi.org.nz/ "https://status.nesi.org.nz/") and +changes to system status will be reported via our [System Status page](https://status.nesi.org.nz/) and alerts. 
A quick reminder of our main support channels as well as other sources @@ -31,16 +30,13 @@ of self-service support: - {% include "partials/support_request.html" %} Note: non-emergency requests will be addressed on or after 06 January 2025. -- [Sign up for NeSI system status - updates](../../Getting_Started/Getting_Help/System_status.md) for +- [Sign up for NeSI system status updates](System_status.md) for advance warning of any system updates or unplanned outages. -- [Consult our User - Documentation](https://www.docs.nesi.org.nz) pages +- [Consult our User Documentation](https://www.docs.nesi.org.nz) pages for instructions and guidelines for using the systems -- [Visit NeSI’s YouTube - channel](https://www.youtube.com/playlist?list=PLvbRzoDQPkuGMWazx5LPA6y8Ji6tyl0Sp "https://www.youtube.com/playlist?list=PLvbRzoDQPkuGMWazx5LPA6y8Ji6tyl0Sp") for +- [Visit NeSI’s YouTube channel](https://www.youtube.com/playlist?list=PLvbRzoDQPkuGMWazx5LPA6y8Ji6tyl0Sp) for introductory training webinars On behalf of the entire NeSI team, we wish you a safe and relaxing diff --git a/docs/General/Announcements/Early_access_opens_for_OnDemand.md b/docs/Announcements/Early_access_opens_for_OnDemand.md similarity index 100% rename from docs/General/Announcements/Early_access_opens_for_OnDemand.md rename to docs/Announcements/Early_access_opens_for_OnDemand.md diff --git a/docs/General/Announcements/Improved_data_management_and_efficient_use_of_NeSI_HPC_storage.md b/docs/Announcements/Improved_data_management_and_efficient_use_of_NeSI_HPC_storage.md similarity index 84% rename from docs/General/Announcements/Improved_data_management_and_efficient_use_of_NeSI_HPC_storage.md rename to docs/Announcements/Improved_data_management_and_efficient_use_of_NeSI_HPC_storage.md index e5c471a86..db4f2d41a 100644 --- a/docs/General/Announcements/Improved_data_management_and_efficient_use_of_NeSI_HPC_storage.md +++ b/docs/Announcements/Improved_data_management_and_efficient_use_of_NeSI_HPC_storage.md @@ 
-27,18 +27,17 @@ The NeSI project filesystem is becoming critically full, however it is currently storing a large amount of dormant data that has not been accessed for more than 12 months. We need your help to free up space on the project filesystem as soon as possible. Please review the data you -are currently storing in any  `/nesi/project/` directories and **delete +are currently storing in any `/nesi/project/` directories and **delete or relocate** any files that are no longer required for ongoing computational and/or analytics work on NeSI. We have started regular audits of data stored in project folders, using -the same format as our nobackup auto cleaning ([described -here](../../Storage/File_Systems_and_Quotas/Automatic_cleaning_of_nobackup_file_system.md)). +the same format as our [nobackup auto cleaning system](Automatic_cleaning_of_nobackup_file_system.md). See the file `/nesi/project//.policy.test/scan485/latest.summary.txt` for a summary of the number and size of files within each project that have not been accessed for more than 485 days (this is ~15 months, and -is the draft auto cleaning timeframe under consideration for the project +is the draft auto cleaning time frame under consideration for the project filesystem). If you need assistance with this, {% include "partials/support_request.html" %} and @@ -47,13 +46,13 @@ we’d be happy to help or answer questions. If you have data that may be used again on NeSI later, {% include "partials/support_request.html" %} and we will consider whether a -[Nearline](../../Storage/Nearline_long_term_storage/Nearline_Long_Term_Storage_Service.md) +[Nearline](Nearline_Long_Term_Storage_Service.md) storage allocation would be appropriate to manage this. ### 18 October 2021 We will begin a limited roll-out of a new feature to automatically -identify inactive files in  `/nesi/project/` directories and schedule +identify inactive files in `/nesi/project/` directories and schedule them for deletion. 
Generally, we will be looking to identify files that are inactive / untouched for more than 12 months. @@ -72,11 +71,11 @@ research project itself becomes inactive. ### January 2022 -Starting in January 2022, we will expand the `/nesi/project/` directory +Starting in January 2022, we will expand the `/nesi/project/` directory data management programme to include all active projects on NeSI. Additional Support documentation and user information sessions will be hosted prior to wider implementation, to provide advance notice of the -change and to answer any questions you may have around data lifecycle +change and to answer any questions you may have around data life cycle management. ## Frequently asked questions @@ -87,8 +86,8 @@ We want to avoid our online filesystems reaching critically full levels, as that impacts their performance and availability for users. We also want to ensure our active storage filesystems aren't being used to store inactive data. This new data management feature -for `/nesi/project/` directories will complement our existing programme -of [automatic cleaning of the /nobackup file system](../../Storage/File_Systems_and_Quotas/Automatic_cleaning_of_nobackup_file_system.md). +for `/nesi/project/` directories will complement our existing programme +of [automatic cleaning of the /nobackup file system](Automatic_cleaning_of_nobackup_file_system.md). ### Can I check how much storage I’m currently using on NeSI systems? @@ -104,7 +103,7 @@ and cached between updates. Perhaps. We regularly make read-only copies of the file system and save them for up to seven days. For more information, -[refer to our File Recovery page](../../Storage/Data_Recovery/File_Recovery.md). +[refer to our File Recovery page](File_Recovery.md). ### Where should I store my data on NeSI systems? @@ -129,8 +128,7 @@ situ. There are two tracked resources in the NeSI filesystem, *disk space* and *inodes (number of files)*. 
If you run into problems with -either of these, [refer to this Support page for more -information](../../General/FAQs/Ive_run_out_of_storage_space.md). +either of these, see [I've Run Out of Storage Space](Ive_run_out_of_storage_space.md). ### I have questions that aren’t covered here. Who can I talk to? diff --git a/docs/General/Announcements/New_capabilities_for_Machine_Learning_and_GPU_pricing_updates.md b/docs/Announcements/New_capabilities_for_Machine_Learning_and_GPU_pricing_updates.md similarity index 81% rename from docs/General/Announcements/New_capabilities_for_Machine_Learning_and_GPU_pricing_updates.md rename to docs/Announcements/New_capabilities_for_Machine_Learning_and_GPU_pricing_updates.md index 9e5415f91..d91c0837a 100644 --- a/docs/General/Announcements/New_capabilities_for_Machine_Learning_and_GPU_pricing_updates.md +++ b/docs/Announcements/New_capabilities_for_Machine_Learning_and_GPU_pricing_updates.md @@ -16,7 +16,7 @@ platform and some noteworthy changes to resource pricing as a result. ## New Graphics Processing Units (GPUs) We’ve installed eight NVIDIA A100 GPU cards into the -[Mahuika HPC system](../../Scientific_Computing/The_NeSI_High_Performance_Computers/Mahuika.md), +[Mahuika HPC system](Mahuika.md), providing a significant boost in computing performance and an environment particularly suited to machine learning workloads. Over the last few months we’ve worked directly with a group of beta tester @@ -24,7 +24,7 @@ researchers to ensure this new capability is fit-for-purpose and tuned to communities' specific software and tool requirements. These new A100s, alongside -[software optimised for data science](../FAQs/What_software_environments_on_NeSI_are_optimised_for_Machine_Learning_and_data_science.md)), +[software optimised for data science](What_software_environments_on_NeSI_are_optimised_for_Machine_Learning_and_data_science.md), are available to researchers using **machine learning** approaches. 
If this is you, {% include "partials/support_request.html" %} to discuss how these new resources could support your work. @@ -32,21 +32,18 @@ discuss how these new resources could support your work. ## Reduced pricing for P100s We’ve recently reviewed our -[pricing](../../Getting_Started/Accounts-Projects_and_Allocations/What_is_an_allocation.md) +[pricing](What_is_an_allocation.md) and reduced the price of our existing [P100](https://www.nvidia.com/en-us/data-center/tesla-p100/) GPUs to 7.0 compute units per device-hour. The P100 GPUs are available to any project with a Mahuika allocation so if you have an existing allocation on Mahuika, you can access the P100s right away. -If you need a larger or new allocation on Mahuika, you can [apply for -access](https://www.nesi.org.nz/services/applyforaccess) now, but +If you need a larger or new allocation on Mahuika, you can [apply for access](https://www.nesi.org.nz/services/applyforaccess) now, but requests will likely be considered as part of our next allocation call -window: [31 August - 01 -October](https://www.nesi.org.nz/services/high-performance-computing-and-analytics/guidelines/allocations-allocation-classes-review#window). +window: [31 August - 01 October](https://www.nesi.org.nz/services/high-performance-computing-and-analytics/guidelines/allocations-allocation-classes-review#window). -For more technical information about using GPUs on NeSI, [click -here](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/GPU_use_on_NeSI.md). +For more technical information about using GPUs on NeSI, see [GPU use on NeSI](GPU_use_on_NeSI.md). If you have questions about allocations or how to access the P100s, {% include "partials/support_request.html" %}. 
diff --git a/docs/General/Announcements/Preparing_to_move_data_to_NeSI_long_term_storage.md b/docs/Announcements/Preparing_to_move_data_to_NeSI_long_term_storage.md similarity index 82% rename from docs/General/Announcements/Preparing_to_move_data_to_NeSI_long_term_storage.md rename to docs/Announcements/Preparing_to_move_data_to_NeSI_long_term_storage.md index c2104f025..2fe31c6d4 100644 --- a/docs/General/Announcements/Preparing_to_move_data_to_NeSI_long_term_storage.md +++ b/docs/Announcements/Preparing_to_move_data_to_NeSI_long_term_storage.md @@ -25,8 +25,8 @@ Steps involved: ## Questions? -We have an [FAQ page](../FAQs/Common_questions_about_the_platform_refresh.md) to help answer common questions and -[weekly Online Office Hours](../../Getting_Started/Getting_Help/Weekly_Online_Office_Hours.md) with our support team, who are ready to answer or talk through any questions or issues you might have. You can also reach out anytime via email. +We have an [FAQ page](Common_questions_about_the_platform_refresh.md) to help answer common questions and +[weekly Online Office Hours](Weekly_Online_Office_Hours.md) with our support team, who are ready to answer or talk through any questions or issues you might have. You can also reach out anytime via email. {% include "partials/support_request.html" %} We are ready to work with you ensure this transition is a smooth process and the new storage platform delivers an improved experience. 
diff --git a/docs/General/Announcements/Preparing_your_code_for_use_on_NeSIs_new_HPC_platform.md b/docs/Announcements/Preparing_your_code_for_use_on_NeSIs_new_HPC_platform.md similarity index 92% rename from docs/General/Announcements/Preparing_your_code_for_use_on_NeSIs_new_HPC_platform.md rename to docs/Announcements/Preparing_your_code_for_use_on_NeSIs_new_HPC_platform.md index 6be2886fb..4e4544712 100644 --- a/docs/General/Announcements/Preparing_your_code_for_use_on_NeSIs_new_HPC_platform.md +++ b/docs/Announcements/Preparing_your_code_for_use_on_NeSIs_new_HPC_platform.md @@ -39,7 +39,7 @@ Below is a quick overview of some of the changes you need to be aware of when po ## Test your code on Mahuika The platform NeSI has selected to replace Mahuika is most similar to the -[Mahuika AMD Milan compute nodes](../../General/Announcements/Mahuikas_new_Milan_CPU_nodes_open_to_all_NeSI_users.md) than nodes on other partitions. +[Mahuika AMD Milan compute nodes](Mahuikas_new_Milan_CPU_nodes_open_to_all_NeSI_users.md) than nodes on other partitions. So, we'll be using the Milan nodes to validate any issues, mitigating risks of your subsequent migration to the new platform. Some projects on Māui will move to the new NeSI hardware. These projects have been notified and given a small allocation on Mahuika which can be used by the Māui users to validate the software they need is available (or can be built) on the AMD Milan nodes and works as expected. All members of the project can use this Mahuika allocation. @@ -76,9 +76,9 @@ has AMD Milan (Zen3) CPUs, while the rest of Mahuika has Intel Broadwell CPUs. 
If for any reason you want to use any of the other Mahuika partitions,see -[Mahuika Slurm Partitions](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Mahuika_Slurm_Partitions.md) for +[Mahuika Slurm Partitions](Mahuika_Slurm_Partitions.md) for an overview and -[Milan Compute Nodes](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Milan_Compute_Nodes.md) for +[Milan Compute Nodes](Milan_Compute_Nodes.md) for the differences between them and *milan*. #### Shared nodes @@ -139,12 +139,12 @@ or FFTW then you will be best off loading one of our EasyBuild "toolchain" environment modules such as: - `foss/2023a`  - GCC, FFTW, - [FlexiBLAS](../../Scientific_Computing/Supported_Applications/FlexiBLAS.md), + [FlexiBLAS](FlexiBLAS.md), OpenBLAS, OpenMPI - `intel/2022a`  - Intel compilers, Intel MKL with its FFTW wrappers, Intel MPI. -For more on this topic, please see [Compiling software on Mahuika](../../Scientific_Computing/HPC_Software_Environment/Compiling_software_on_Mahuika.md). +For more on this topic, please see [Compiling software on Mahuika](Compiling_software_on_Mahuika.md). Since an increasing proportion of NeSI CPUs are AMD ones, good performance of Intel's MKL library should not be assumed - other @@ -161,7 +161,7 @@ NeSI hardware will have AMD Zen4 CPUs, which will have AVX512. ## Questions? If you have any questions or need any help, {% include "partials/support_request.html" %} -or pop in to one of our [weekly Online Office Hours](../../Getting_Started/Getting_Help/Weekly_Online_Office_Hours.md) +or pop in to one of our [weekly Online Office Hours](Weekly_Online_Office_Hours.md) to chat with Support staff one-to-one. No question is too small - don't hesitate to reach out. 
diff --git a/docs/General/Announcements/Status_page_subscription_notification_changes.md b/docs/Announcements/Status_page_subscription_notification_changes.md similarity index 94% rename from docs/General/Announcements/Status_page_subscription_notification_changes.md rename to docs/Announcements/Status_page_subscription_notification_changes.md index c7124bda4..936d60e8a 100644 --- a/docs/General/Announcements/Status_page_subscription_notification_changes.md +++ b/docs/Announcements/Status_page_subscription_notification_changes.md @@ -19,7 +19,7 @@ Now, instead of automatically subscribing new users for all notifications, we wi - **Submit new HPC Jobs** - notices regarding status of login nodes, Slurm scheduler, or filesystem - **Jobs running on HPC** - notices regarding network issues, or status of Slurm scheduler or filesystem -- **Jupyter on NeSI** - notices regarding the status of our [Jupyter Service](../../Scientific_Computing/Interactive_computing_using_Jupyter/Jupyter_on_NeSI.md) for interactive computing +- **Jupyter on NeSI** - notices regarding the status of our [Jupyter Service](Jupyter_on_NeSI.md) for interactive computing - **HPC Storage** - notices regarding the status of storage resources on NeSI systems Effective Friday 20 October, we adjusted all existing and non-customised Status page subscriptions to match this shorter notification list. 
diff --git a/docs/General/Announcements/Visual_Studio_Code_Remote-Next_Release_Not_Supported.md b/docs/Announcements/Visual_Studio_Code_Remote-Next_Release_Not_Supported.md similarity index 100% rename from docs/General/Announcements/Visual_Studio_Code_Remote-Next_Release_Not_Supported.md rename to docs/Announcements/Visual_Studio_Code_Remote-Next_Release_Not_Supported.md diff --git a/docs/updates.md b/docs/Announcements/index.md similarity index 100% rename from docs/updates.md rename to docs/Announcements/index.md diff --git a/docs/General/Announcements/platform_refresh_updates.md b/docs/Announcements/platform_refresh_updates.md similarity index 100% rename from docs/General/Announcements/platform_refresh_updates.md rename to docs/Announcements/platform_refresh_updates.md diff --git a/docs/Capability_&_Skills/Consultancy.md b/docs/Capability_&_Skills/Consultancy.md new file mode 100644 index 000000000..ce49e8d05 --- /dev/null +++ b/docs/Capability_&_Skills/Consultancy.md @@ -0,0 +1,128 @@ +--- +created_at: '2019-02-07T21:55:45Z' +tags: +- help +vote_count: 1 +vote_sum: 1 +zendesk_article_id: 360000751916 +zendesk_section_id: 360000164635 +--- + +NeSI's Consultancy service provides scientific and HPC-focussed +computational and data science support to research projects across a +range of domains. + +## Need support with your research project? + +If you would like to learn more about NeSI's Consultancy service and how +you can work with NeSI's Research Software and Data Science Engineers on +a project, please {% include "partials/support_request.html" %} to set up an +initial meeting. We can discuss your needs and complete a Consultancy +application form together. + +Researchers from NeSI collaborator institutions (University of Auckland, +NIWA, University of Otago and Manaaki Whenua - Landcare Research) and +those with Merit projects can usually access consultancy at no cost to +themselves, based on their institution's or MBIE's investment into NeSI. 
+ +## What do we do? + +The NeSI team are available to help with any stage of your research +software development. We can get involved with designing and developing +your software from scratch, or assist with improving software you have +already written. + +The service is completely bespoke and tailored to your requirements. +Some examples of outcomes we could assist with (this list is general and +non-exhaustive): + +- Code development + - Design and develop research software from scratch + - Algorithmic improvements + - Translate Python/R/Matlab code to C/C++/Fortran for faster + execution + - Accelerate code by offloading computations to a GPU + - Develop visualisation and post-processing tools (GUIs, dashboards, etc) +- Performance improvement + - Code optimisation – profile and improve efficiency (speed and + memory), IO performance + - Parallelisation – software (OpenMP, MPI, etc.) and workflow + parallelisation +- Improve software sustainability (version control, testing, + continuous integration, etc) +- Data Science Engineering + - Optimise numerical performance of machine learning pipelines + - Conduct an Exploratory Data Analysis + - Assist with designing and fitting explanatory and predictive + models +- Anything else you can think of ;-) + +## What can you expect from us? + +During a consultancy project we aim to provide: + +- Expertise and advice +- An agreed timeline to develop or improve a solution (typical + projects are of the order of 1 day per week for up to 4 months but + this is determined on a case-by-case basis) +- Training, knowledge transfer and/or capability development +- A summary document outlining what has been achieved during the + project +- A case study published on our website after the project has been + completed, to showcase the work you are doing on NeSI + +## What is expected of you? + +Consultancy projects are intended to be a collaboration and thus some +input is required on your part. 
You should be willing to: + +- Contribute to a case study upon successful completion of the + consultancy project +- Complete a short survey to help us measure the impact of our service +- Attend regular meetings (usually via video conference) +- Invest time to answer questions, provide code and data as necessary + and make changes to your workflow if needed +- [Acknowledge](https://www.nesi.org.nz/services/high-performance-computing/guidelines/acknowledgement-and-publication) + NeSI in article and code publications that we have contributed to, + which could include co-authorship if our contribution is deemed + worthy +- Accept full ownership/maintenance of the work after the project + completes (NeSI's involvement in the project is limited to the + agreed timeline) + +## Previous projects + +Listed below are some examples of previous projects we have contributed +to: + +- [A quantum casino helps define atoms in the big chill](https://www.nesi.org.nz/case-studies/quantum-casino-helps-define-atoms-big-chill) +- [Using statistical models to help New Zealand prepare for large earthquakes](https://www.nesi.org.nz/case-studies/using-statistical-models-help-new-zealand-prepare-large-earthquakes) +- [Improving researchers' ability to access and analyse climate model data sets](https://www.nesi.org.nz/case-studies/improving-researchers-ability-access-and-analyse-climate-model-data-sets) +- [Speeding up the post-processing of a climate model data pipeline](https://www.nesi.org.nz/case-studies/speeding-post-processing-climate-model-data-pipeline) +- [Overcoming data processing overload in scientific web mapping software](https://www.nesi.org.nz/case-studies/overcoming-data-processing-overload-scientific-web-mapping-software) +- [Visualising ripple effects in riverbed sediment transport](https://www.nesi.org.nz/case-studies/visualising-ripple-effects-riverbed-sediment-transport) +- [New Zealand's first national river flow forecasting system for flooding 
resilience](https://www.nesi.org.nz/case-studies/new-zealand%E2%80%99s-first-national-river-flow-forecasting-system-flooding-resilience) +- [A fast model for predicting floods and storm damage](https://www.nesi.org.nz/case-studies/fast-model-predicting-floods-and-storm-damage) +- [How multithreading and vectorisation can speed up seismic simulations by 40%](https://www.nesi.org.nz/case-studies/how-multithreading-and-vectorisation-can-speed-seismic-simulations-40) +- [Machine learning for marine mammals](https://www.nesi.org.nz/case-studies/machine-learning-marine-mammals) +- [Parallel processing for ocean life](https://www.nesi.org.nz/case-studies/parallel-processing-ocean-life) +- [NeSI support helps keep NZ rivers healthy](https://www.nesi.org.nz/case-studies/nesi-support-helps-keep-nz-rivers-healthy) +- [Heating up nanowires with HPC](https://www.nesi.org.nz/case-studies/heating-nanowires-hpc) +- [Development of next generation weather and climate models](https://www.nesi.org.nz/case-studies/development-next-generation-weather-and-climate-models-heating) +- [Understanding the behaviours of light](https://www.nesi.org.nz/case-studies/understanding-behaviours-light) +- [Getting closer to more accurate climate predictions for New Zealand](https://www.nesi.org.nz/case-studies/getting-closer-more-accurate-climate-predictions-new-zealand) +- [Fractal analysis of brain signals for autism spectrum disorder](https://www.nesi.org.nz/case-studies/fractal-analysis-brain-signals-autism-spectrum-disorder) +- [Optimising tools used for genetic analysis](https://www.nesi.org.nz/case-studies/optimising-tools-used-genetic-analysis) +- [Investigating climate sensitivity](https://www.nesi.org.nz/case-studies/investigating-climate-sensitivity) +- [Tracking coastal precipitation systems in the tropics](https://www.nesi.org.nz/case-studies/tracking-coastal-precipitation-systems-tropics) +- [Powering global climate simulations](https://www.nesi.org.nz/case-studies/powering-global-climate-simulations) +- 
[Optimising tools used for genetic analysis](https://www.nesi.org.nz/case-studies/optimising-tools-used-genetic-analysis) +- [Investigating climate sensitivity](https://www.nesi.org.nz/case-studies/investigating-climate-sensitivity) +- [Improving earthquake forecasting methods](https://www.nesi.org.nz/case-studies/improving-earthquake-forecasting-methods) +- [Modernising models to diagnose and treat disease and injury](https://www.nesi.org.nz/case-studies/modernising-models-diagnose-and-treat-disease-and-injury) +- [Cataloguing NZ's earthquake activities](https://www.nesi.org.nz/case-studies/cataloguing-nz%E2%80%99s-earthquake-activities) +- [Finite element modelling of biological cells](https://www.nesi.org.nz/case-studies/finite-element-modelling-biological-cells) +- [Preparing New Zealand to adapt to climate change](https://www.nesi.org.nz/case-studies/preparing-new-zealand-adapt-climate-change) +- [Using GPUs to expand our understanding of the solar system](https://www.nesi.org.nz/case-studies/using-gpus-expand-our-understanding-solar-system) +- [Speeding up Basilisk with GPGPUs](https://www.nesi.org.nz/case-studies/speeding-basilisk-gpgpus) +- [Helping communities anticipate flood events](https://www.nesi.org.nz/case-studies/helping-communities-anticipate-flood-events) diff --git a/docs/Scientific_Computing/Training/Introduction_to_computing_on_the_NeSI_HPC.md b/docs/Capability_&_Skills/Introduction_to_computing_on_the_NeSI_HPC.md similarity index 100% rename from docs/Scientific_Computing/Training/Introduction_to_computing_on_the_NeSI_HPC.md rename to docs/Capability_&_Skills/Introduction_to_computing_on_the_NeSI_HPC.md diff --git a/docs/Scientific_Computing/Training/Introduction_to_computing_on_the_NeSI_HPC_YouTube_Recordings.md b/docs/Capability_&_Skills/Introduction_to_computing_on_the_NeSI_HPC_YouTube_Recordings.md similarity index 100% rename from docs/Scientific_Computing/Training/Introduction_to_computing_on_the_NeSI_HPC_YouTube_Recordings.md rename to 
docs/Capability_&_Skills/Introduction_to_computing_on_the_NeSI_HPC_YouTube_Recordings.md diff --git a/docs/Getting_Started/Getting_Help/Introductory_Material.md b/docs/Capability_&_Skills/Introductory_Material.md similarity index 96% rename from docs/Getting_Started/Getting_Help/Introductory_Material.md rename to docs/Capability_&_Skills/Introductory_Material.md index 3f259303a..0fa168b27 100644 --- a/docs/Getting_Started/Getting_Help/Introductory_Material.md +++ b/docs/Capability_&_Skills/Introductory_Material.md @@ -34,7 +34,7 @@ In addition to the material mentioned above, you can also seek help at our weekly "Office Hours" or by attending a Workshop. Office hours are drop-in sessions hosted by the NeSI Support Team where any and all questions are welcome. **Our Office Hours schedule** can be found -[here](../../Getting_Started/Getting_Help/Weekly_Online_Office_Hours.md). +[here](./Weekly_Online_Office_Hours.md). NeSI also participates in and organises [Workshops](https://www.nesi.org.nz/services/training) covering a range of topics. Many of these workshops - and particularly "Introduction to HPC using NeSI", are designed for new users. 
diff --git a/docs/Scientific_Computing/Training/Webinars.md b/docs/Capability_&_Skills/Webinars.md similarity index 100% rename from docs/Scientific_Computing/Training/Webinars.md rename to docs/Capability_&_Skills/Webinars.md diff --git a/docs/Getting_Started/Getting_Help/Weekly_Online_Office_Hours.md b/docs/Capability_&_Skills/Weekly_Online_Office_Hours.md similarity index 100% rename from docs/Getting_Started/Getting_Help/Weekly_Online_Office_Hours.md rename to docs/Capability_&_Skills/Weekly_Online_Office_Hours.md diff --git a/docs/Scientific_Computing/Training/Workshops.md b/docs/Capability_&_Skills/Workshops.md similarity index 100% rename from docs/Scientific_Computing/Training/Workshops.md rename to docs/Capability_&_Skills/Workshops.md diff --git a/docs/Capability_&_Skills/index.md b/docs/Capability_&_Skills/index.md new file mode 100644 index 000000000..d13a177fd --- /dev/null +++ b/docs/Capability_&_Skills/index.md @@ -0,0 +1,6 @@ +--- +created_at: 2025-02-04 +--- + + +TRAINING PLACEHOLDER diff --git a/docs/General/FAQs/.pages.yml b/docs/FAQs/.pages.yml similarity index 100% rename from docs/General/FAQs/.pages.yml rename to docs/FAQs/.pages.yml diff --git a/docs/General/FAQs/Can_I_change_my_time_zone_to_New_Zealand_time.md b/docs/FAQs/Can_I_change_my_time_zone_to_New_Zealand_time.md similarity index 91% rename from docs/General/FAQs/Can_I_change_my_time_zone_to_New_Zealand_time.md rename to docs/FAQs/Can_I_change_my_time_zone_to_New_Zealand_time.md index 361beee22..fb16151e1 100644 --- a/docs/General/FAQs/Can_I_change_my_time_zone_to_New_Zealand_time.md +++ b/docs/FAQs/Can_I_change_my_time_zone_to_New_Zealand_time.md @@ -31,7 +31,7 @@ latter but not the former: test -r ~/.bashrc && . ~/.bashrc ``` -Please see the article, [.bashrc or.bash profile?](../../General/FAQs/What_are_my-bashrc_and-bash_profile_for.md) +Please see the article, [What are my .bashrc and .bash_profile for](What_are_my-bashrc_and-bash_profile_for.md)" for more information. 
## What about cron jobs? diff --git a/docs/General/FAQs/Can_I_use_SSHFS_to_mount_the_cluster_filesystem_on_my_local_machine.md b/docs/FAQs/Can_I_use_SSHFS_to_mount_the_cluster_filesystem_on_my_local_machine.md similarity index 88% rename from docs/General/FAQs/Can_I_use_SSHFS_to_mount_the_cluster_filesystem_on_my_local_machine.md rename to docs/FAQs/Can_I_use_SSHFS_to_mount_the_cluster_filesystem_on_my_local_machine.md index a6641ab73..36bc665ae 100644 --- a/docs/General/FAQs/Can_I_use_SSHFS_to_mount_the_cluster_filesystem_on_my_local_machine.md +++ b/docs/FAQs/Can_I_use_SSHFS_to_mount_the_cluster_filesystem_on_my_local_machine.md @@ -11,14 +11,13 @@ zendesk_section_id: 360000039036 [SSHFS](https://github.com/libfuse/sshfs) allows you to mount a remote filesystem on your local machine. SSHFS relies on SSH underneath, so you should follow the "Recommended logon procedure" instructions -[here](../../Getting_Started/Accessing_the_HPCs/Choosing_and_Configuring_Software_for_Connecting_to_the_Clusters.md) to configure SSH +[here](Choosing_and_Configuring_Software_for_Connecting_to_the_Clusters.md) to configure SSH first. 
## Linux Use the following commands to mount your home directory from Mahuika on -your local machine (the same command will work for Māui, just replace -the names): +your local machine: ```sh # create a mount point and connect diff --git a/docs/Getting_Started/Cheat_Sheets/.pages.yml b/docs/FAQs/Cheat_Sheets/.pages.yml similarity index 100% rename from docs/Getting_Started/Cheat_Sheets/.pages.yml rename to docs/FAQs/Cheat_Sheets/.pages.yml diff --git a/docs/Getting_Started/Cheat_Sheets/Bash-Reference_Sheet.md b/docs/FAQs/Cheat_Sheets/Bash-Reference_Sheet.md similarity index 95% rename from docs/Getting_Started/Cheat_Sheets/Bash-Reference_Sheet.md rename to docs/FAQs/Cheat_Sheets/Bash-Reference_Sheet.md index a309dddd9..08c3d8480 100644 --- a/docs/Getting_Started/Cheat_Sheets/Bash-Reference_Sheet.md +++ b/docs/FAQs/Cheat_Sheets/Bash-Reference_Sheet.md @@ -11,8 +11,7 @@ zendesk_section_id: 360000278975 Regardless of the operating system of your personal computer you will need to know some basic Unix Shell commands since the HPC are Linux machines. If you do not have any experiencing using Unix Shell we would -advise going at least the first (3 parts) of the [Software Carpentry -Unix Shell lessons](http://swcarpentry.github.io/shell-novice/). +advise going at least the first (3 parts) of the [Software Carpentry Unix Shell lessons](http://swcarpentry.github.io/shell-novice/). | Command Description | Examples | Command | | ------------------- | ---------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------- | @@ -42,9 +41,9 @@ Unix Shell lessons](http://swcarpentry.github.io/shell-novice/). !!! tip Pressing the 'tab' key once will automatically complete the line if it is the only option. e.g.  
- ![complete1.gif](../../assets/images/Unix_Shell-Reference_Sheet.gif) + ![complete1.gif](Unix_Shell-Reference_Sheet.gif) If there are more than one possible completions, pressing tab again will show all those options. - ![complete2.gif](../../assets/images/Unix_Shell-Reference_Sheet_0.gif) + ![complete2.gif](Unix_Shell-Reference_Sheet_0.gif) Use of the tab key can help navigate the filesystem, spellcheck your - commands and save you time typing. \ No newline at end of file + commands and save you time typing. diff --git a/docs/Getting_Started/Cheat_Sheets/Git-Reference_Sheet.md b/docs/FAQs/Cheat_Sheets/Git-Reference_Sheet.md similarity index 93% rename from docs/Getting_Started/Cheat_Sheets/Git-Reference_Sheet.md rename to docs/FAQs/Cheat_Sheets/Git-Reference_Sheet.md index f564bd2d5..78b79cbac 100644 --- a/docs/Getting_Started/Cheat_Sheets/Git-Reference_Sheet.md +++ b/docs/FAQs/Cheat_Sheets/Git-Reference_Sheet.md @@ -18,7 +18,7 @@ developing, managing and distributing code. Full Git documentation can be found [here](https://git-scm.com/docs/git), or using `man git`. -![Git\_Diagram.svg](../../assets/images/Git-Reference_Sheet.svg) +![Git\_Diagram.svg](Git-Reference_Sheet.svg) ## Authentication @@ -31,8 +31,7 @@ need to authenticate yourself on the cluster. ### SSH Authentication (GitHub) -More information can be found in the [GitHub -documentation](https://docs.github.com/en/authentication/connecting-to-github-with-ssh/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent). +More information can be found in the [GitHub documentation](https://docs.github.com/en/authentication/connecting-to-github-with-ssh/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent). - On the NeSI cluster, run the command @@ -59,19 +58,19 @@ documentation](https://docs.github.com/en/authentication/connecting-to-github-wi - Now log in to your GitHub account. In the upper-right corner of any page, click your profile photo click **Settings**. 
- ![Settings icon in the user bar](../../assets/images/Git-Reference_Sheet.png) + ![Settings icon in the user bar](Git-Reference_Sheet.png) - In the "Access" section of the sidebar, click **SSH and GPG keys**. - Click **New SSH key** or **Add SSH key**. - ![SSH Key button](../../assets/images/Git-Reference_Sheet_0.png) + ![SSH Key button](Git-Reference_Sheet_0.png) - In the "Title" field, put "Mahuika" or "NeSI". - Paste your key into the "Key" field. - ![The key field](../../assets/images/Git-Reference_Sheet_1.png) + ![The key field](Git-Reference_Sheet_1.png) - Click **Add SSH key**. diff --git a/docs/Getting_Started/Cheat_Sheets/Slurm-Reference_Sheet.md b/docs/FAQs/Cheat_Sheets/Slurm-Reference_Sheet.md similarity index 90% rename from docs/Getting_Started/Cheat_Sheets/Slurm-Reference_Sheet.md rename to docs/FAQs/Cheat_Sheets/Slurm-Reference_Sheet.md index 0719f6288..951999b1f 100644 --- a/docs/Getting_Started/Cheat_Sheets/Slurm-Reference_Sheet.md +++ b/docs/FAQs/Cheat_Sheets/Slurm-Reference_Sheet.md @@ -12,7 +12,7 @@ zendesk_section_id: 360000278975 --- If you are unsure about using our job scheduler Slurm, more details can -be found on [Submitting_your_first_job](../../Getting_Started/Next_Steps/Submitting_your_first_job.md). +be found on [Submitting_your_first_job](Submitting_your_first_job.md). ## Slurm Commands @@ -50,7 +50,7 @@ an '=' sign e.g. `#SBATCH --account=nesi99999` or a space e.g. | `--account` | `#SBATCH --account=nesi99999` | The account your core hours will be 'charged' to. | | `--time` | `#SBATCH --time=DD-HH:MM:SS` | Job max walltime. | | `--mem` | `#SBATCH --mem=512MB` | Memory required per node. | -| `--partition` | `#SBATCH --partition=milan` | Specified job[partition](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Mahuika_Slurm_Partitions.md). | +| `--partition` | `#SBATCH --partition=milan` | Specified job[partition](Mahuika_Slurm_Partitions.md). 
| | `--output` | `#SBATCH --output=%j_output.out` | Path and name of standard output file. | | `--mail-user` | `#SBATCH --mail-user=user123@gmail.com` | Address to send mail notifications. | | `--mail-type` | `#SBATCH --mail-type=ALL` | Will send a mail notification at `BEGIN END FAIL`. | @@ -62,10 +62,10 @@ an '=' sign e.g. `#SBATCH --account=nesi99999` or a space e.g. | | | | | --------------------- | -------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | | `--nodes` | ``#SBATCH --nodes=2`` | Will request tasks be run across 2 nodes. | -| `--ntasks` | ``#SBATCH --ntasks=2 `` | Will start 2 [MPI](../../Getting_Started/Next_Steps/Parallel_Execution.md) tasks. | +| `--ntasks` | ``#SBATCH --ntasks=2 `` | Will start 2 [MPI](Parallel_Execution.md) tasks. | | `--ntasks-per-node` | `#SBATCH --ntasks-per-node=1` | Will start 1 task per requested node. | -| `--cpus-per-task` | `#SBATCH --cpus-per-task=10` | Will request 10 [*logical* CPUs](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Hyperthreading.md) per task. | -| `--mem-per-cpu` | `#SBATCH --mem-per-cpu=512MB` | Memory Per *logical* CPU. `--mem` Should be used if shared memory job. See [How do I request memory?](../../General/FAQs/How_do_I_request_memory.md) | +| `--cpus-per-task` | `#SBATCH --cpus-per-task=10` | Will request 10 [*logical* CPUs](Hyperthreading.md) per task. | +| `--mem-per-cpu` | `#SBATCH --mem-per-cpu=512MB` | Memory Per *logical* CPU. `--mem` Should be used if shared memory job. See [How do I request memory?](How_do_I_request_memory.md) | | --array | `#SBATCH --array=1-5` | Will submit job 5 times each with a different `$SLURM_ARRAY_TASK_ID` (1,2,3,4,5). | | | `#SBATCH --array=0-20:5` | Will submit job 5 times each with a different `$SLURM_ARRAY_TASK_ID` (0,5,10,15,20). | | | `#SBATCH --array=1-100%10` | Will submit 1 though to 100 jobs but no more than 10 at once. 
| @@ -75,9 +75,9 @@ an '=' sign e.g. `#SBATCH --account=nesi99999` or a space e.g. | | | | | -- | -- | -- | | `--qos` | `#SBATCH --qos=debug` | Adding this line gives your job a high priority. *Limited to one job at a time, max 15 minutes*. | -| `--profile` | `#SBATCH --profile=ALL` | Allows generation of a .h5 file containing job profile information. See [Slurm Native Profiling](../../Scientific_Computing/Profiling_and_Debugging/Slurm_Native_Profiling.md) | +| `--profile` | `#SBATCH --profile=ALL` | Allows generation of a .h5 file containing job profile information. See [Slurm Native Profiling](Slurm_Native_Profiling.md) | | `--dependency` | `#SBATCH --dependency=afterok:123456789` | Will only start after the job 123456789 has completed. | -| `--hint` | `#SBATCH --hint=nomultithread` | Disables [hyperthreading](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Hyperthreading.md), be aware that this will significantly change how your job is defined. | +| `--hint` | `#SBATCH --hint=nomultithread` | Disables [hyperthreading](Hyperthreading.md), be aware that this will significantly change how your job is defined. | !!! tip Many options have a short (`-`) and long (`--`) form e.g. 
diff --git a/docs/Getting_Started/Cheat_Sheets/tmux-Reference_sheet.md b/docs/FAQs/Cheat_Sheets/tmux-Reference_sheet.md similarity index 100% rename from docs/Getting_Started/Cheat_Sheets/tmux-Reference_sheet.md rename to docs/FAQs/Cheat_Sheets/tmux-Reference_sheet.md diff --git a/docs/General/FAQs/Common_questions_about_the_platform_refresh.md b/docs/FAQs/Common_questions_about_the_platform_refresh.md similarity index 100% rename from docs/General/FAQs/Common_questions_about_the_platform_refresh.md rename to docs/FAQs/Common_questions_about_the_platform_refresh.md diff --git a/docs/General/FAQs/Converting_from_Windows_style_to_UNIX_style_line_endings.md b/docs/FAQs/Converting_from_Windows_style_to_UNIX_style_line_endings.md similarity index 100% rename from docs/General/FAQs/Converting_from_Windows_style_to_UNIX_style_line_endings.md rename to docs/FAQs/Converting_from_Windows_style_to_UNIX_style_line_endings.md diff --git a/docs/General/FAQs/How_can_I_give_read_only_team_members_access_to_my_files.md b/docs/FAQs/How_can_I_give_read_only_team_members_access_to_my_files.md similarity index 95% rename from docs/General/FAQs/How_can_I_give_read_only_team_members_access_to_my_files.md rename to docs/FAQs/How_can_I_give_read_only_team_members_access_to_my_files.md index a4f3c780e..6936d9ab5 100644 --- a/docs/General/FAQs/How_can_I_give_read_only_team_members_access_to_my_files.md +++ b/docs/FAQs/How_can_I_give_read_only_team_members_access_to_my_files.md @@ -9,8 +9,7 @@ zendesk_section_id: 360000039036 --- !!! tip "See also" - [File permissions and - groups](../../Storage/File_Systems_and_Quotas/File_permissions_and_groups.md) + [File permissions and groups](File_permissions_and_groups.md) Not all projects have read-only groups created by default. 
If your project has a read-only group created after the project itself was diff --git a/docs/General/FAQs/How_can_I_let_my_fellow_project_team_members_read_or_write_my_files.md b/docs/FAQs/How_can_I_let_my_fellow_project_team_members_read_or_write_my_files.md similarity index 97% rename from docs/General/FAQs/How_can_I_let_my_fellow_project_team_members_read_or_write_my_files.md rename to docs/FAQs/How_can_I_let_my_fellow_project_team_members_read_or_write_my_files.md index 6073bf2ab..c536066d5 100644 --- a/docs/General/FAQs/How_can_I_let_my_fellow_project_team_members_read_or_write_my_files.md +++ b/docs/FAQs/How_can_I_let_my_fellow_project_team_members_read_or_write_my_files.md @@ -9,8 +9,7 @@ zendesk_section_id: 360000039036 --- !!! tip "See also" - [File permissions and - groups](../../Storage/File_Systems_and_Quotas/File_permissions_and_groups.md) + [File permissions and groups](File_permissions_and_groups.md) If you move or copy a file or directory from one project directory to another, or from somewhere within your home directory to somewhere diff --git a/docs/General/FAQs/How_can_I_see_how_busy_the_cluster_is.md b/docs/FAQs/How_can_I_see_how_busy_the_cluster_is.md similarity index 100% rename from docs/General/FAQs/How_can_I_see_how_busy_the_cluster_is.md rename to docs/FAQs/How_can_I_see_how_busy_the_cluster_is.md diff --git a/docs/General/FAQs/How_can_I_view_images_generated_on_the_cluster.md b/docs/FAQs/How_can_I_view_images_generated_on_the_cluster.md similarity index 80% rename from docs/General/FAQs/How_can_I_view_images_generated_on_the_cluster.md rename to docs/FAQs/How_can_I_view_images_generated_on_the_cluster.md index fe97f0c99..7fe5a1e2b 100644 --- a/docs/General/FAQs/How_can_I_view_images_generated_on_the_cluster.md +++ b/docs/FAQs/How_can_I_view_images_generated_on_the_cluster.md @@ -19,5 +19,4 @@ the cluster using the `display` command. 
For example, display myImage.png ``` -This requires a [working X-11 -server](../../Scientific_Computing/Terminal_Setup/X11_on_NeSI.md). +This requires a [working X11 connection](X11_on_NeSI.md). diff --git a/docs/General/FAQs/How_do_I_find_out_the_size_of_a_directory.md b/docs/FAQs/How_do_I_find_out_the_size_of_a_directory.md similarity index 100% rename from docs/General/FAQs/How_do_I_find_out_the_size_of_a_directory.md rename to docs/FAQs/How_do_I_find_out_the_size_of_a_directory.md diff --git a/docs/General/FAQs/How_do_I_fix_my_locale_and_language_settings.md b/docs/FAQs/How_do_I_fix_my_locale_and_language_settings.md similarity index 100% rename from docs/General/FAQs/How_do_I_fix_my_locale_and_language_settings.md rename to docs/FAQs/How_do_I_fix_my_locale_and_language_settings.md diff --git a/docs/General/FAQs/How_do_I_request_memory.md b/docs/FAQs/How_do_I_request_memory.md similarity index 97% rename from docs/General/FAQs/How_do_I_request_memory.md rename to docs/FAQs/How_do_I_request_memory.md index 5b75bbb25..ed1a00adb 100644 --- a/docs/General/FAQs/How_do_I_request_memory.md +++ b/docs/FAQs/How_do_I_request_memory.md @@ -9,7 +9,7 @@ zendesk_section_id: 360000039036 --- - `--mem`: Memory per node -- `--mem-per-cpu`: Memory per [logical CPU](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Hyperthreading.md) +- `--mem-per-cpu`: Memory per [logical CPU](Hyperthreading.md) In most circumstances, you should request memory using `--mem`. 
The exception is if you are running an MPI job that could be placed on more diff --git a/docs/General/FAQs/How_do_I_run_my_Python_Notebook_through_SLURM.md b/docs/FAQs/How_do_I_run_my_Python_Notebook_through_SLURM.md similarity index 93% rename from docs/General/FAQs/How_do_I_run_my_Python_Notebook_through_SLURM.md rename to docs/FAQs/How_do_I_run_my_Python_Notebook_through_SLURM.md index c9095562e..6d734d155 100644 --- a/docs/General/FAQs/How_do_I_run_my_Python_Notebook_through_SLURM.md +++ b/docs/FAQs/How_do_I_run_my_Python_Notebook_through_SLURM.md @@ -35,5 +35,5 @@ the file explorer in Jupyter from your downloads folder. This script can then be run as a regular python script as described in our -[Python](../../Scientific_Computing/Supported_Applications/Python.md) +[Python](Python.md) documentation. diff --git a/docs/Getting_Started/Getting_Help/Making_a_Helpful_Support_Request.md b/docs/FAQs/How_to_Make_a_Helpful_Support_Request.md similarity index 93% rename from docs/Getting_Started/Getting_Help/Making_a_Helpful_Support_Request.md rename to docs/FAQs/How_to_Make_a_Helpful_Support_Request.md index 864d4fc5e..fd57e58dd 100644 --- a/docs/Getting_Started/Getting_Help/Making_a_Helpful_Support_Request.md +++ b/docs/FAQs/How_to_Make_a_Helpful_Support_Request.md @@ -37,5 +37,4 @@ If your problem involves a SLURM job, please include: - directory/pathway to files. 
- When you last had a job succeed (and slurm jobid if applicable)  -[Further -Reading](https://hpc-uit.readthedocs.io/en/latest/help/writing-support-requests.html) +[Further Reading](https://hpc-uit.readthedocs.io/en/latest/help/writing-support-requests.html) diff --git a/docs/General/FAQs/How_to_replace_my_2FA_token.md b/docs/FAQs/How_to_replace_my_2FA_token.md similarity index 64% rename from docs/General/FAQs/How_to_replace_my_2FA_token.md rename to docs/FAQs/How_to_replace_my_2FA_token.md index 25e4ac00c..6ac652aa8 100644 --- a/docs/General/FAQs/How_to_replace_my_2FA_token.md +++ b/docs/FAQs/How_to_replace_my_2FA_token.md @@ -21,7 +21,7 @@ Log in to [my.nesi.org.nz](https://my.nesi.org.nz) and select the option Once you've created a token, you should see the 'Manage Two-Factor token' option. -![image2018-10-26\_15-36-10.png](../../assets/images/How_to_replace_my_2FA_token.png) +![image2018-10-26\_15-36-10.png](How_to_replace_my_2FA_token.png) ## Delete the Two-Factor token @@ -29,17 +29,16 @@ Select the option 'Manage Two-Factor token' under 'Account'. You should see the confirmation of the previous token. 
-![confirm](../../assets/images/How_to_replace_my_2FA_token_0.png) +![confirm](How_to_replace_my_2FA_token_0.png) After 'Delete Token' has been selected: -![delete](../../assets/images/How_to_replace_my_2FA_token_1.png) +![delete](How_to_replace_my_2FA_token_1.png) You will also receive an email confirmation: -![email](../../assets/images/How_to_replace_my_2FA_token_2.png) +![email](How_to_replace_my_2FA_token_2.png) ## Related content -[Setting Up Two-Factor -Authentication](../../Getting_Started/Accessing_the_HPCs/Setting_Up_Two_Factor_Authentication.md) +[Setting Up Two-Factor Authentication](Setting_Up_Two_Factor_Authentication.md) diff --git a/docs/General/FAQs/I_have_not_scanned_the_2FA_QR_code.md b/docs/FAQs/I_have_not_scanned_the_2FA_QR_code.md similarity index 89% rename from docs/General/FAQs/I_have_not_scanned_the_2FA_QR_code.md rename to docs/FAQs/I_have_not_scanned_the_2FA_QR_code.md index 5c2c210fe..9fa0c0799 100644 --- a/docs/General/FAQs/I_have_not_scanned_the_2FA_QR_code.md +++ b/docs/FAQs/I_have_not_scanned_the_2FA_QR_code.md @@ -23,5 +23,4 @@ the option 'Manage Two-Factor token' under 'Account'. ## Related content -[How to replace my 2FA -token](../../General/FAQs/How_to_replace_my_2FA_token.md) +[How to replace my 2FA token](How_to_replace_my_2FA_token.md) diff --git a/docs/General/FAQs/Ive_run_out_of_storage_space.md b/docs/FAQs/Ive_run_out_of_storage_space.md similarity index 95% rename from docs/General/FAQs/Ive_run_out_of_storage_space.md rename to docs/FAQs/Ive_run_out_of_storage_space.md index d09126a3d..2aef609c9 100644 --- a/docs/General/FAQs/Ive_run_out_of_storage_space.md +++ b/docs/FAQs/Ive_run_out_of_storage_space.md @@ -48,8 +48,7 @@ files as viable. Many files can be compressed into a single SquashFS archive. We have written a utility, `nn_archive_files`, to help with this process. 
-This utility can be run on Māui or Mahuika, but not, as yet, on -Māui-ancil; and it can submit the work as a Slurm job, which is +It can submit the work as a Slurm job, which is preferred. `nn_archive_files` can take, as trailing options, the same options as `mksquashfs`, including choice of compression algorithm; see `man mksquashfs` for more details. diff --git a/docs/General/FAQs/Login_Troubleshooting.md b/docs/FAQs/Login_Troubleshooting.md similarity index 88% rename from docs/General/FAQs/Login_Troubleshooting.md rename to docs/FAQs/Login_Troubleshooting.md index 74cb932a9..07bdae065 100644 --- a/docs/General/FAQs/Login_Troubleshooting.md +++ b/docs/FAQs/Login_Troubleshooting.md @@ -10,7 +10,7 @@ zendesk_section_id: 360000039036 !!! prerequisite - - Please make sure you have followed the recommended setup. See [Choosing and Configuring Software for Connecting to the Clusters](../../Getting_Started/Accessing_the_HPCs/Choosing_and_Configuring_Software_for_Connecting_to_the_Clusters.md) for more information. + - Please make sure you have followed the recommended setup. See [Choosing and Configuring Software for Connecting to the Clusters](Choosing_and_Configuring_Software_for_Connecting_to_the_Clusters.md) for more information. - Most terminals do not give an indication of how many characters have been typed when entering a password. - Paste is not usually bound to `ctrl` + `V` and will vary based on your method of access. @@ -50,15 +50,10 @@ ssh @lander.nesi.org.nz ssh login..nesi.org.nz ``` -**If this fails:** Are you logging in to the correct cluster? -Mahuika/Maui have separate access control, also Māui requires your -password input in a different format, see -[here](../../General/FAQs/Mahuika_Maui_Differences.md). - **If this succeeds**: -- If you are using a bash terminal, confirm your .ssh config is [set up correctly](../../Getting_Started/Accessing_the_HPCs/Choosing_and_Configuring_Software_for_Connecting_to_the_Clusters.md). 
-- If you are using a ssh client like *MobaXterm* or *WinSCP* make sure your session is [set up correctly](../../Getting_Started/Accessing_the_HPCs/Choosing_and_Configuring_Software_for_Connecting_to_the_Clusters.md). +- If you are using a bash terminal, confirm your .ssh config is [set up correctly](Choosing_and_Configuring_Software_for_Connecting_to_the_Clusters.md). +- If you are using a ssh client like *MobaXterm* or *WinSCP* make sure your session is [set up correctly](Choosing_and_Configuring_Software_for_Connecting_to_the_Clusters.md). ### Check you are a member of an active project @@ -86,7 +81,7 @@ contact the person or team responsible for supporting it. ### Ensure you're not reusing the same 6-digit code from your token -Login will fail if the same 6-digit code is used to access the Māui or +Login will fail if the same 6-digit code is used to access the Mahuika login node after it has been used to access the lander node, or for consecutive login attempts to any node. If in doubt, wait 30 seconds for a new token to be generated. @@ -178,4 +173,4 @@ Helpful things to include: institution while also keeping your position at your old institution? Might NeSI know about any of these changes? - What have you tried so far? -- Are you on the NIWA network, the NIWA VPN, or neither? \ No newline at end of file +- Are you on the NIWA network, the NIWA VPN, or neither? diff --git a/docs/General/FAQs/Mahuika_Maui_Differences.md b/docs/FAQs/Mahuika_Maui_Differences.md similarity index 84% rename from docs/General/FAQs/Mahuika_Maui_Differences.md rename to docs/FAQs/Mahuika_Maui_Differences.md index 5b53c621c..5d0c44165 100644 --- a/docs/General/FAQs/Mahuika_Maui_Differences.md +++ b/docs/FAQs/Mahuika_Maui_Differences.md @@ -75,14 +75,12 @@ run, but the limits on each machine is different. ### Mahuika -Currently, Mahuika has Intel Broadwell and [AMD Milan -CPUs](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Milan_Compute_Nodes.md). 
+Currently, Mahuika has Intel Broadwell and [AMD Milan CPUs](Milan_Compute_Nodes.md). To run on the faster AMD Milan CPUs you will need to specify -"--partition=milan" in your Slurm script. +`--partition=milan` in your Slurm script. -Mahuika is made up of several [partitions which have different resources -and different -limits](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Mahuika_Slurm_Partitions.md). +Mahuika is made up of several partitions which have different resources +and different limits, for more information see [Mahuika_Slurm_Partitions](Mahuika_Slurm_Partitions.md). A job can request up to 20,000 CPU core hours, running up to 3 weeks with up to 576 CPU cores (equivalent to eight full nodes). Furthermore, there are special nodes available with high memory (up to 6 TB) or GPUs. @@ -98,9 +96,7 @@ be shared with other jobs. ### Māui -Māui only has a [single partition to which NeSI users are permitted to -submit -work](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Maui_Slurm_Partitions.md). +Māui only has a [single partition to which NeSI users are permitted to submit work](Maui_Slurm_Partitions.md). For your job, you can request a maximum of 24 hours or a maximum of 240 nodes, however no job may request more than 1,200 Māui node-hours in total. (This means that if you request more than 50 nodes, your maximum @@ -109,8 +105,7 @@ in units of nodes, so the smallest possible job takes a whole node, and there can never be more than one job on a node at a time. Additionally, projects with valid allocations on Māui will also have -access to [Māui's ancillary -nodes,](../../Scientific_Computing/The_NeSI_High_Performance_Computers/Maui_Ancillary.md) +access to [Māui's ancillary nodes](Maui_Ancillary.md), where jobs requiring up to 768 GB of memory or jobs that require GPUs can be run. When submitting a job to the Māui ancillary nodes you may also request parts of nodes, rather than needing to use the entire node. 
diff --git a/docs/General/FAQs/Password_Expiry.md b/docs/FAQs/Password_Expiry.md similarity index 76% rename from docs/General/FAQs/Password_Expiry.md rename to docs/FAQs/Password_Expiry.md index 096ffe82c..43dbfad6c 100644 --- a/docs/General/FAQs/Password_Expiry.md +++ b/docs/FAQs/Password_Expiry.md @@ -11,13 +11,11 @@ zendesk_section_id: 360000039036 NeSI passwords expire after two years.  The prompt you will see when that happens is -```sl +```sh Password expired. Change your password now. First Factor (Current Password): Second Factor (optional): Login Password: ``` -however passwords can not be reset this way, instead you should [reset -your password via the My NeSI -Portal](../../Getting_Started/Accessing_the_HPCs/Setting_Up_and_Resetting_Your_Password.md). +however passwords can not be reset this way, instead you should [reset your password via the My NeSI Portal](Setting_Up_and_Resetting_Your_Password.md). diff --git a/docs/General/FAQs/Skylake_warning_message_on_Maui.md b/docs/FAQs/Skylake_warning_message_on_Maui.md similarity index 100% rename from docs/General/FAQs/Skylake_warning_message_on_Maui.md rename to docs/FAQs/Skylake_warning_message_on_Maui.md diff --git a/docs/General/FAQs/Two_Factor_Authentication_FAQ.md b/docs/FAQs/Two_Factor_Authentication_FAQ.md similarity index 90% rename from docs/General/FAQs/Two_Factor_Authentication_FAQ.md rename to docs/FAQs/Two_Factor_Authentication_FAQ.md index 625d8de54..f72abe5a5 100644 --- a/docs/General/FAQs/Two_Factor_Authentication_FAQ.md +++ b/docs/FAQs/Two_Factor_Authentication_FAQ.md @@ -25,7 +25,7 @@ mobile device. On that mobile device, you will then need to install Google Authenticator (or another QR code reader application that implements the Time-based One Time Password algorithm) on your mobile device. -See also [Setting Up Two-Factor Authentication](../../Getting_Started/Accessing_the_HPCs/Setting_Up_Two_Factor_Authentication.md). 
+See also [Setting Up Two-Factor Authentication](Setting_Up_Two_Factor_Authentication.md). ## Can I use an SSH key pair as an alternative second factor? @@ -50,7 +50,7 @@ existing accounts** then scan the QR code provided on the old device. ## How do I get a new Second Factor? -See article [here](../../General/FAQs/How_to_replace_my_2FA_token.md). +See article [here](How_to_replace_my_2FA_token.md). ## Can I use the same Second Factor again? diff --git a/docs/General/FAQs/What_are_my-bashrc_and-bash_profile_for.md b/docs/FAQs/What_are_my-bashrc_and-bash_profile_for.md similarity index 100% rename from docs/General/FAQs/What_are_my-bashrc_and-bash_profile_for.md rename to docs/FAQs/What_are_my-bashrc_and-bash_profile_for.md diff --git a/docs/General/FAQs/What_does_oom_kill_mean.md b/docs/FAQs/What_does_oom_kill_mean.md similarity index 100% rename from docs/General/FAQs/What_does_oom_kill_mean.md rename to docs/FAQs/What_does_oom_kill_mean.md diff --git a/docs/General/FAQs/What_is_a_core_file.md b/docs/FAQs/What_is_a_core_file.md similarity index 90% rename from docs/General/FAQs/What_is_a_core_file.md rename to docs/FAQs/What_is_a_core_file.md index ceb9fcf8f..9af5bde7b 100644 --- a/docs/General/FAQs/What_is_a_core_file.md +++ b/docs/FAQs/What_is_a_core_file.md @@ -18,7 +18,7 @@ called a 'core dump'. .core files are a record of the working memory at time of failure, and can be used for -[debugging](../../Scientific_Computing/Profiling_and_Debugging/Debugging.md). +[debugging](Debugging.md). MPI jobs will usually create a .core file for each task. 
As .core files are usually very large, you should delete the ones you
diff --git a/docs/General/FAQs/What_software_environments_on_NeSI_are_optimised_for_Machine_Learning_and_data_science.md b/docs/FAQs/What_software_environments_on_NeSI_are_optimised_for_Machine_Learning_and_data_science.md
similarity index 56%
rename from docs/General/FAQs/What_software_environments_on_NeSI_are_optimised_for_Machine_Learning_and_data_science.md
rename to docs/FAQs/What_software_environments_on_NeSI_are_optimised_for_Machine_Learning_and_data_science.md
index c9b043314..8ea0c40cb 100644
--- a/docs/General/FAQs/What_software_environments_on_NeSI_are_optimised_for_Machine_Learning_and_data_science.md
+++ b/docs/FAQs/What_software_environments_on_NeSI_are_optimised_for_Machine_Learning_and_data_science.md
@@ -16,37 +16,33 @@ use.
Examples of software environments on NeSI optimised for data science
include:

-- [R](../../Scientific_Computing/Supported_Applications/R.md) and [Python](../../Scientific_Computing/Supported_Applications/TensorFlow_on_GPUs.md) users
+- [R](R.md) and [Python](TensorFlow_on_GPUs.md) users
  can get right into using and exploring the several built-in packages
  or create custom code.
-- [Jupyter on NeSI
- ](../../Scientific_Computing/Interactive_computing_using_Jupyter/Jupyter_on_NeSI.md)is
+- [Jupyter on NeSI](Jupyter_on_NeSI.md) is
  particularly well suited to artificial intelligence and machine
- learning workloads. [R
- Studio](../../Scientific_Computing/Interactive_computing_using_Jupyter/RStudio_via_Jupyter_on_NeSI.md)
+ learning workloads. [RStudio](RStudio_via_Jupyter_on_NeSI.md)
  and/or Conda can be accessed via Jupyter.
- Commonly used data science environments and libraries such as - [Keras](../../Scientific_Computing/Supported_Applications/Keras.md), - [LambdaStack](../../Scientific_Computing/Supported_Applications/Lambda_Stack.md), - [Tensorflow](../../Scientific_Computing/Supported_Applications/TensorFlow_on_GPUs.md) + [Keras](Keras.md), + [LambdaStack](Lambda_Stack.md), + [Tensorflow](TensorFlow_on_GPUs.md) and [Conda](https://docs.conda.io/en/latest/) are available to create comprehensive workflows. For more information about available software and applications, you -can [browse our catalogue](../../Scientific_Computing/Supported_Applications/index.md). +can [browse our catalogue](index.md). As pictured in the screenshot below, you can type keywords into the catalogue's search field to browse by a specific software name or using more broad terms such as "machine learning". -![MachineLearningSoftwareEnvironments-May2021.png](../../assets/images/What_software_environments_on_NeSI_are_optimised_for_Machine_Learning_and_data_science.png) +![MachineLearningSoftwareEnvironments](What_software_environments_on_NeSI_are_optimised_for_Machine_Learning_and_data_science.png) For more information on NeSI's model and approach to application -support, refer to our [policy for the management of scientific -application -software](../../General/NeSI_Policies/NeSI_Application_Support_Model.md). +support, refer to our [policy for the management of scientific application software](NeSI_Application_Support_Model.md). If you need help installing your software or would like to discuss your software needs with us, {% include "partials/support_request.html" %}. 
diff --git a/docs/General/FAQs/Where_should_I_store_my_data_on_NeSI_systems.md b/docs/FAQs/Where_should_I_store_my_data_on_NeSI_systems.md similarity index 100% rename from docs/General/FAQs/Where_should_I_store_my_data_on_NeSI_systems.md rename to docs/FAQs/Where_should_I_store_my_data_on_NeSI_systems.md diff --git a/docs/General/FAQs/Why_am_I_seeing_Account_is_not_ready.md b/docs/FAQs/Why_am_I_seeing_Account_is_not_ready.md similarity index 89% rename from docs/General/FAQs/Why_am_I_seeing_Account_is_not_ready.md rename to docs/FAQs/Why_am_I_seeing_Account_is_not_ready.md index 6aa3b7a44..de64e3534 100644 --- a/docs/General/FAQs/Why_am_I_seeing_Account_is_not_ready.md +++ b/docs/FAQs/Why_am_I_seeing_Account_is_not_ready.md @@ -19,4 +19,4 @@ please email and wait for a member of our support team to confirm your account and group membership. -![mceclip0.png](../../assets/images/Why_am_I_seeing_Account_is_not_ready.png) +![mceclip0.png](Why_am_I_seeing_Account_is_not_ready.png) diff --git a/docs/General/FAQs/Why_cant_I_log_in_using_MobaXTerm.md b/docs/FAQs/Why_cant_I_log_in_using_MobaXTerm.md similarity index 88% rename from docs/General/FAQs/Why_cant_I_log_in_using_MobaXTerm.md rename to docs/FAQs/Why_cant_I_log_in_using_MobaXTerm.md index d882ed1e3..cba98e566 100644 --- a/docs/General/FAQs/Why_cant_I_log_in_using_MobaXTerm.md +++ b/docs/FAQs/Why_cant_I_log_in_using_MobaXTerm.md @@ -57,5 +57,5 @@ trying to use your old password from credential manager. 4. Restart MobaXterm 5. Try logging in again -More information about [how to log in to our HPC facilities](../../Getting_Started/Accessing_the_HPCs/Choosing_and_Configuring_Software_for_Connecting_to_the_Clusters.md), -and [login troubleshooting](../../General/FAQs/Login_Troubleshooting.md). +More information about [how to log in to our HPC facilities](Choosing_and_Configuring_Software_for_Connecting_to_the_Clusters.md), +and [login troubleshooting](Login_Troubleshooting.md). 
diff --git a/docs/General/FAQs/Why_does_my_program_crash.md b/docs/FAQs/Why_does_my_program_crash.md
similarity index 70%
rename from docs/General/FAQs/Why_does_my_program_crash.md
rename to docs/FAQs/Why_does_my_program_crash.md
index 6d8a2b4dc..b6a3d8f57 100644
--- a/docs/General/FAQs/Why_does_my_program_crash.md
+++ b/docs/FAQs/Why_does_my_program_crash.md
@@ -15,12 +15,12 @@ investigate.
### OOM

One common reason is a limited amount of memory. Then the application
-could crash with an [Out Of Memory exception](../../General/FAQs/What_does_oom_kill_mean.md).
+could crash with an [Out Of Memory exception](What_does_oom_kill_mean.md).

### Stack size

-On our XC system (Māui) the stack size is limited. If you application
-needs more resources on stack, if could result in strange unpredictable
+If your application
+needs more resources on stack, it could result in strange unpredictable
crashes. You could get for example an \`array index out of bounds\`
error, not directly pointing to the source of the issue. You can try to
unlimit stack size using \`ulimit -s unlimited\` in your submission
@@ -31,10 +31,9 @@ script.
Another common issue is an error in the code. For example an
application could (may to unexpected input and missing error handling)
call a division by 0. Debugger can help to find the source of the issue. On the
-NeSI systems are different debuggers available. For serial application
-the [Gnu debugger
-gdb](https://sourceware.org/gdb/download/onlinedocs/gdb/index.html) is
-available. Furthermore, the [ARM DDT
-debugger](https://developer.arm.com/docs/101136/latest/ddt/getting-started)
+NeSI systems, different debuggers are available. For serial application the
+[Gnu debugger gdb](https://sourceware.org/gdb/download/onlinedocs/gdb/index.html) is
+available.
Furthermore, the +[ARM DDT debugger](https://developer.arm.com/docs/101136/latest/ddt/getting-started) is available, which can handle, parallel, serial, applications, written in C/C++, Fortran, and Python (limited support). diff --git a/docs/General/FAQs/Why_is_my_job_taking_a_long_time_to_start.md b/docs/FAQs/Why_is_my_job_taking_a_long_time_to_start.md similarity index 95% rename from docs/General/FAQs/Why_is_my_job_taking_a_long_time_to_start.md rename to docs/FAQs/Why_is_my_job_taking_a_long_time_to_start.md index f14fe6cfb..0104a28c4 100644 --- a/docs/General/FAQs/Why_is_my_job_taking_a_long_time_to_start.md +++ b/docs/FAQs/Why_is_my_job_taking_a_long_time_to_start.md @@ -73,9 +73,9 @@ Priority and Resources at the same time. You can check the job's priority relative to other waiting jobs by means of the following command on a -[Mahuika](../../Scientific_Computing/The_NeSI_High_Performance_Computers/Mahuika.md) +[Mahuika](Mahuika.md) or -[Māui](../../Scientific_Computing/The_NeSI_High_Performance_Computers/Maui.md) +[Māui](Maui.md) login node (as appropriate): ```sh @@ -112,7 +112,7 @@ If, compared to other jobs in the queue, your job's priority (third column) and fair share score (fifth column) are both low, this usually means that your project team has recently been using through CPU core hours faster than expected. -See [Fair Share -- How jobs get prioritised](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Fair_Share_How_jobs_get_prioritised.md) for more +See [Fair Share -- How jobs get prioritised](Fair_Share_How_jobs_get_prioritised.md) for more information on Fair Share, how you can check your project's fair share score, and what you can do about a low project fair share score. diff --git a/docs/FORMAT.md b/docs/FORMAT.md index d00a4a36b..a0d6d26b6 100644 --- a/docs/FORMAT.md +++ b/docs/FORMAT.md @@ -305,7 +305,13 @@ May want to add formatting to this later. some code ``` -DON'T prefix a command with `$` (e.g. 
`$ ls` if this is something we want it should be added through formatting, not text. +DON'T prefix a command with `$` e.g. `$ ls` if this is something we want it should be added through formatting, not text. + +By default code blocks will have a 'copy content' button, this can be disabled by adding the class `.no-copy` e.g. + +
``` txt { .no-copy }
+something you won't need to copy
+```
### Inline @@ -320,7 +326,7 @@ Code should be used for any text that you want the user to copy exactly. Keyboard keys can be added using the `` tag. -Press ``ctrl`` + ``c`` to copy text from terminal. +Press ctrl + c to copy text from terminal. ```md Press ctrl + c to copy text from terminal. @@ -536,6 +542,68 @@ Leading and trailing `|` are optional. | Paragraph | Text | And more | ``` +## Grids + +Grids can be made by wrapping a list in `html` tags `
`. + +
+- ![](account-details.svg) __Card One__ + + --- + Card body one + + +- ![](compass.svg) __Card Two__ + + --- + + Card body two + +- ![](cog-transfer-outline.svg) __Card Three__ + + --- + + Card body three +
+ +```html +
+- ![](account-details.svg) __Card One__ + + --- + Card body one + + +- ![](compass.svg) __Card Two__ + + --- + + Card body two + +- ![](cog-transfer-outline.svg) __Card Three__ + + --- + + Card body three + +
+``` + +By default, cards will have a max of two columns, the classes `md-grid-three` and `md-grid-four` can be added to the main `
`, to increase this to three and four cards respectivly. + +If increasing number of cards per row consider [hiding the table of contents using `hide`](NEWPAGE.md#material-theme-parameters) to allow more room. + +The card format + +```md +- ![](image) __Title__ + + --- (horizontal rule) + Text +``` + +Is not part of the grid format itself, but should be used as a standard format for cards. + ## Macros Macros allow use of [Jinja filter syntax](https://jinja.palletsprojects.com/en/3.1.x/templates/) _inside the markdown files_ allowing for much more flexible templating. diff --git a/docs/General/.pages.yml b/docs/General/.pages.yml deleted file mode 100644 index 9c85eb253..000000000 --- a/docs/General/.pages.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -nav: -- Announcements -- FAQs -- NeSI_Policies -- Release_Notes diff --git a/docs/General/Announcements/Accessing_NeSI_Support_during_the_Easter_break.md b/docs/General/Announcements/Accessing_NeSI_Support_during_the_Easter_break.md deleted file mode 100644 index f335ffd87..000000000 --- a/docs/General/Announcements/Accessing_NeSI_Support_during_the_Easter_break.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -description: A page sharing the details of reduced support hours over Easter break -created_at: '2024-03-20T01:58:22Z' -hidden: false -position: 0 -tags: [] -title: Accessing NeSI Support during the Easter break -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 9308584352783 -zendesk_section_id: 200732737 -search: - boost: 0.1 ---- - -During the Easter break, [NeSI platform -services](https://status.nesi.org.nz/) will be online and available, but -urgent / critical requests received between 5:00 pm Thursday 28 March -and 9:00 am Wednesday 03 April will be addressed on a best effort -basis. - -Below is a quick reminder of our main support channels as well as other -sources of self-service support: - -- Changes to system status are reported via our [System Status - page](https://status.nesi.org.nz/ "https://status.nesi.org.nz/"). 
- You can also subscribe for notifications of system updates and - unplanned outages sent straight to your inbox. [Sign up - here.](../../Getting_Started/Getting_Help/System_status.md) - -- [Consult our User - Documentation](https://www.docs.nesi.org.nz) pages - for instructions and guidelines for using the systems. - -- [Visit NeSI’s YouTube - channel](https://www.youtube.com/playlist?list=PLvbRzoDQPkuGMWazx5LPA6y8Ji6tyl0Sp "https://www.youtube.com/playlist?list=PLvbRzoDQPkuGMWazx5LPA6y8Ji6tyl0Sp") for - introductory training webinars. - -- {% include "partials/support_request.html" %} (Note: - non-emergency requests will be addressed on or after 03 April) - -On behalf of the entire NeSI team, we wish you a safe and relaxing -break. diff --git a/docs/General/Announcements/Improvements_to_Fair_Share_job_prioritisation_on_Maui.md b/docs/General/Announcements/Improvements_to_Fair_Share_job_prioritisation_on_Maui.md deleted file mode 100644 index 0465589b9..000000000 --- a/docs/General/Announcements/Improvements_to_Fair_Share_job_prioritisation_on_Maui.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -created_at: '2020-09-04T02:01:07Z' -tags: [] -title: "Improvements to Fair Share job prioritisation on M\u0101ui" -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 360001829555 -zendesk_section_id: 200732737 -search: - boost: 0.1 ---- - -*On Thursday 3 September 2020, NeSI updated the way we prioritise jobs -on the Māui HPC platform.* - -## Background - -Since the start of the year, we have been using Slurm's Fair Tree -algorithm on Māui (*not yet on Mahuika*) to prioritise jobs. This -provides a hierarchical structure to Slurm's account management, with -the hierarchy representing shares of a total cluster under Slurm's -control. This enables control of higher level or aggregate account -considerations, such as ensuring a group of projects within a research -programme or institution are ensured access to their share of a cluster. 
- -Under our Fair Tree implementation, each of [NeSI's four collaborating -institutions](https://www.nesi.org.nz/about-us) is assigned a percentage -share of Māui, alongside a percentage share for MBIE's Merit allocations -(including Postgraduate and Proposal Development allocations), and the -remainder as a share to allocations coming from subscriptions. - -These six shares, or what we in NeSI call national pools, are then -ranked in order, starting with the pool that has been using at the -lowest rate compared to its allocated percentage share. *See [this -page](https://slurm.schedmd.com/fair_tree.html) (off site) for more -details about Slurm's Fair Tree algorithm.* - -Previously, we had given each pool a hard-coded share of Māui use. These -hard-coded shares did not reflect ongoing rounds of allocations given to -projects, and so some researchers were suffering from deprioritised -jobs. These jobs ended up delayed in the queue, sometimes excessively. - -## What has changed? - -We have now recalculated the shares for each pool to take into account -the following: - -- The investments into HPC platforms by the various collaborating - institutions and by MBIE; -- The capacity of each HPC platform; -- The split of requested time (allocations) by project teams between - the Māui and Mahuika HPC platforms, both overall and within each - institution's pool. - -Under this scheme, any job's priority is affected by the behaviour of -other workload within the same project team, but also other project -teams drawing on the same pool. In particular, even if your project team -has been under-using compared to your allocation, your jobs may still be -held up if: - -- Other project teams at your institution (within your pool) have been - over-using compared to their allocations, or -- Your institution has approved project allocations totalling more - time than it is entitled to within its pool's share. - -## What will I notice? 
- -If your institution or pool's ranking has not changed, nothing much will -immediately change for you. - -However, if your institution or pool's assigned share of the machine has -increased, it will become easier to move up the priority rankings, at -least in the short term. - -Conversely, if your institution or pool's assigned share of the machine -has decreased, it will become easier to move down the rankings. This -change is one you are more likely to notice over time. - -Whenever your institution or pool's ranking changes, whether because of -usage or because we adjust the assigned shares based on ongoing rounds -of allocations, your job priorities will alter almost immediately. -Moving up the rankings will increase your job priorities. Moving down -the rankings will decrease your job priorities. - -## What other changes are NeSI planning on making? - -We are looking at introducing Fair Tree on Mahuika as well, though not -on Māui ancillary nodes. We will announce this change well ahead of any -planned introduction. - -We will also adjust the assigned Fair Tree shares on Māui routinely so -we don't diverge from allocations across HPC platforms again. 
diff --git a/docs/General/Announcements/Mahuika-Core_Dumps_generation_now_disabled_as_default.md b/docs/General/Announcements/Mahuika-Core_Dumps_generation_now_disabled_as_default.md deleted file mode 100644 index 7a6303e03..000000000 --- a/docs/General/Announcements/Mahuika-Core_Dumps_generation_now_disabled_as_default.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -created_at: '2022-07-11T23:23:04Z' -status: -tags: -- mahuika -- .core -- corefile -- coredump -title: 'Mahuika: Core Dumps generation now disabled as default' -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 5126681349903 -zendesk_section_id: 200732737 -search: - boost: 0.1 ---- - -A Slurm configuration change has been made on Mahuika so that the  -maximum size of [core file](../FAQs/What_is_a_core_file.md) that -can be generated inside a job now defaults to `0` bytes rather -than `unlimited`. - -You can reenable core dumps with `ulimit -c unlimited` . diff --git a/docs/General/Announcements/Mahuikas_new_Milan_CPU_nodes_open_to_all_NeSI_users.md b/docs/General/Announcements/Mahuikas_new_Milan_CPU_nodes_open_to_all_NeSI_users.md deleted file mode 100644 index bb868c337..000000000 --- a/docs/General/Announcements/Mahuikas_new_Milan_CPU_nodes_open_to_all_NeSI_users.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -created_at: '2023-03-30T02:23:48Z' -tags: [] -title: "Mahuika's new Milan CPU nodes open to all NeSI users" -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 6686934564239 -zendesk_section_id: 200732737 -search: - boost: 0.1 ---- - -Following a successful early access programme, Mahuika’s newest CPU -nodes are now available for use by any projects that have a Mahuika -allocation on NeSI's HPC Platform. - -The production launch of these new nodes is an exciting milestone in -NeSI’s strategy to lower the carbon footprint and continually improve -the performance and fit-for-purpose design of our platforms to meet your -research needs. 
- -## What’s new - -- faster, more powerful computing, enabled by AMD 3rd Gen EPYC Milan - architecture - -- specialised high-memory capabilities, allowing rapid simultaneous - processing - -- improved energy efficiency - these nodes are 2.5 times more power - efficient than Mahuika’s original Broadwell nodes - -How to access - -- Visit our Support portal for [instructions to get - started](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Milan_Compute_Nodes.md) - and details of how the Milan nodes differ from Mahuika’s original - Broadwell nodes - -## Learn more - -- [Watch this webinar](https://youtu.be/IWRZLl__uhg) sharing a quick - overview of the new resources and some tips for making the most of - the nodes. - -- Bring questions to our [weekly Online Office - Hours](../../Getting_Started/Getting_Help/Weekly_Online_Office_Hours.md) - -- {% include "partials/support_request.html" %} - any time - -If you have feedback on the new nodes or suggestions for improving your -experience getting started with or using any of our systems, please [get -in touch {% include "partials/support_request.html" %}. diff --git a/docs/General/Announcements/Maui_upgrade_is_complete.md b/docs/General/Announcements/Maui_upgrade_is_complete.md deleted file mode 100644 index fdd4f88de..000000000 --- a/docs/General/Announcements/Maui_upgrade_is_complete.md +++ /dev/null @@ -1,221 +0,0 @@ ---- -created_at: '2023-03-09T02:46:57Z' -tags: [] -title: "M\u0101ui upgrade is complete" -vote_count: 1 -vote_sum: 1 -zendesk_article_id: 6546340907919 -zendesk_section_id: 200732737 -search: - boost: 0.1 ---- - -The recent upgrade of the Māui is now complete. The operating system, -libraries, and software stack have been upgraded and rebuilt, improving -performance and stability and enabling new capabilities. 
- -If you encounter any issues, have any questions about the upgrade, need -help with getting your software working on the upgraded system, or have -a suggestion for our documentation, please {% include "partials/support_request.html" %}. We are committed to -providing you with the best computing resources possible and will do our -best to assist you. - -## Why - -This upgrade brings Māui's operating environment up to the latest -supported release available for Cray's XC50 supercomputing platforms, -with performance, reliability, and security benefits. This includes more -up-to-date tooling and libraries with associated features and -performance benefits. This work also enables further upgrades to NeSI's -shared HPC storage system. - -## Impact - -Please be aware that this is a major upgrade to Māui’s operating -environment which may impact the compatibility of software compiled with -the current toolchains and libraries, as such users should expect to -need to test existing applications post-upgrade and in some cases -(especially where the application is leveraging software modules on -Māui) rebuilding will be required. Users of applications maintained as -software modules in the NeSI software stack can expect NeSI to provide -rebuilt and/or updated versions of these applications (though this will -be an ongoing effort post-upgrade). - -The following information will help your transition from the pre-upgrade -Māui environment to the post-upgrade one:  - -- The three main toolchains (CrayCCE, CrayGNU and CrayIntel) have all - been updated to release 23.02 (CrayCCE and CrayGNU) and 23.02-19 - (CrayIntel). **The previously installed versions are no longer - available**. -- Consequently, nearly all of the previously provided **environment - modules have been replaced by new versions**. You can use the - *module avail* command to see what versions of those software - packages are now available. 
If your batch scripts load exact module - versions, they will need updating. -- The few jobs in the Slurm queue at the start of the upgrade process - have been placed in a “user hold” state. You have the choice of - cancelling them with *scancel <jobid>* or releasing them with - *scontrol release <jobid>*. -- Be aware that if you have jobs submitted that rely on any software - built before the upgrade, there is a good chance that this software - will not run. **We recommend rebuilding any binaries you maintain** - before running jobs that utilise those binaries. -- Note that Māui login does not require adding a second factor to the - password when authenticating on the Māui login node after the first - successful login attempt. That is, if you have successfully logged - in using <first factor><second factor> format, no second - factor part will be required later on. - -We have also updated our support documentation for Māui to reflect the -changes, so please review it before starting any new projects.  - -## Software Changes - -Software built on Māui may not work without recompilation after the -upgrade. See the tables below for more detail regarding version changes. -If you have any particular concerns about the impact on your work, -please {% include "partials/support_request.html" %}. - -The table below outlines the known and expected Cray component changes: - -
----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

CLE -Components

Version -pre-upgrade

-

19.04

Version -post-upgrade

-

23.02

Cray Developer -Toolkit

19.04

23.02

Cray Compiling -Environment

CCE 8.7.10

CCE 15.0.1

Cray Message -Passing Toolkit

MPT 7.7.6

-

PMI 5.0.14

-

GA 5.3.0.10

-

Cray OpenSHMEMX 8.0.1

MPT 7.7.20

-

PMI 5.0.17

-

Cray OpenSHMEMX 9.1.2

Cray Debugging -Support Tools

ATP 2.13

-

CCDB 3.0.4

-

CTI 2.15.5

-

Gdb4hpc 3.0.10

-

STAT 3.0.1.3

-

Valgrind4hpc 1.0.0

ATP 3.14.13

-

CCDB 4.12.13

-

CTI 2.17.2

-

Gdb4hpc 4.14.3

-

STAT 4.11.13

-

Valgrind4hpc 2.12.11

Cray Performance -Measurement & Analysis Tools –CPMAT (1)

Perftools 7.0.6

-

PAPI 5.6.0.6

Perftools 23.02.0

-

PAPI 7.0.0.1

Cray Scientific -and Math Libraries -CSML

LibSci 19.02.1

-

LibSci_ACC 18.12.1 (CLE 6)

-

PETSc 3.9.3.0

-

Trilinos 12.12.1.1

-

TPSL 18.06.1

-

FFTW 2.1.5.9

-

FFTW 3.3.8.2

Petsc 3.14.5.0

-

TPSL 20.03.2

-

Trilinos 12.18.1.1

Cray Environment -Setup and Compiling support -CENV

craype-installer1.24.5

-

craypkg-gen1.3.7

-

craype 2.5.18

-

cray-modules 3.2.11.1

-

cray-mpich-compat1.0.0-8 (patch)

-

cdt-prgenv 6.0.5

craypkg-gen 1.3.26

-

craype 2.7.15

Third party -products

HDF5 1.10.2.0

-

NetCDF 4.6.1.3

-

parallel-NetCDF 1.8.1.4

-

iobuf 2.0.8

-

java jdk 1.8.0_51 (CLE 6)

-

GCC 7.3.0

-

GCC 8.3.0

-

cray-python 2.7.15.3 & 3.6.5.3 (CLE 6)

-

cray-R 3.4.2

HDF5 1.12.2.3

-

NetCDF 4.9.0.3

-

Parallel-NetCDF 1.12.3.3

-

iobuf 2.0.10

-

GCC 10.3.0

-

GCC 12.1.0

-

cray-python 3.9.13.2

-

cray-R 4.2.1.1

Third Party -Licensed Products

PGI 18.10 (CLE 6 only)

-

TotalView 2018.3.8

-

Forge 19.0.3.1

Forge 21.0.3

-

Totalview 2021.2.14

- -[S-2529: XC Series Cray Programming Environment User's -Guide](https://support.hpe.com/hpesc/public/docDisplay?docLocale=en_US&docId=a00113984en_us) - -[S-2559: XC Series Software Installation and Configuration Guide (CLE -7.0.UP04 Rev -E)](https://support.hpe.com/hpesc/public/docDisplay?docLocale=en_US&docId=sd00002132en_us) - -Reference: - -[HPE Cray Programming Environment 21.09 for Cray XC (x86) -Systems](https://support.hpe.com/hpesc/public/docDisplay?docLocale=en_US&docId=a00118188en_us) - -[Cray XC (x86) Programming Environments -19.04](https://support.hpe.com/hpesc/public/docDisplay?docId=a00114073en_us&docLocale=en_US) - -[Applications supported by NeSIteam](../../Scientific_Computing/Supported_Applications/index.md) diff --git a/docs/General/Announcements/NeSI_Support_is_changing_tools.md b/docs/General/Announcements/NeSI_Support_is_changing_tools.md deleted file mode 100644 index 0aa5bc18a..000000000 --- a/docs/General/Announcements/NeSI_Support_is_changing_tools.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -created_at: 2024-05-20 -description: -tags: [] -search: - boost: 0.1 ---- - -From the 29th of May, NeSI's Support team will be using a new support desk platform to accept, track, and solve inquiries and issues sent to [support@nesi.org.nz](mailto:support@nesi.org.nz). The change is part of an evolution of our tools to better support researchers using NeSI's compute platforms and data services. - -## How this impacts you - -Emailing [support@nesi.org.nz](mailto:support@nesi.org.nz) is the most common way to connect with our Support team. You can ask us questions, let us know of issues or challenges you're having with systems or services, and action tasks related to your account and allocation(s). -The process of contacting our Support team won't change much (see below for more details), but behind the scenes, the new ticketing system will allow us to more effectively respond to your requests for help and suggestions for service improvements. 
- -## What is changing - -* Replies to support tickets will come from 'support@cloud.nesi.org.nz'. -* We are reviewing the value of having a separate portal where you can view your past tickets, open tickets or raise new tickets. -Tell us what you think using this [form](https://docs.google.com/forms/d/e/1FAIpQLSdvR-0kJxunSiKUYNtHsG6l7Ne9Q5KPeunCVJiSbMuTvGcS8A/viewform) or by sending an email to [support@nesi.org.nz](mailto:support@nesi.org.nz). - -## What stays the same - -* Requests for support or questions about NeSI platforms and services can still be sent by email to [support@nesi.org.nz](mailto:support@nesi.org.nz). These will raise new support tickets for response from a member of our Support Team. -* All your current tickets will stay open. Any requests you currently have in the queue will be migrated over to the new support desk platform and solved from there. - -## Documentation Changes - -Our support documentation is now hosted at [docs.nesi.org.nz](https://docs.nesi.org.nz). -We made the shift to improve maintainability, openness, and collaboration around our support documentation. We shared more details [in this announcement](https://docs.nesi.org.nz/General/Announcements/Upcoming_changes_to_NeSI_documentation/). -We would love to hear your feedback on the new documentation pages. Let us know your thoughts [via this form](https://docs.google.com/forms/d/e/1FAIpQLSdBNPmOEy-SqUmktZaoaMXs2VO31W3DaAh6Py_lNf1Td2VBfA/viewform) or by emailing [support@nesi.org.nz](mailto:support@nesi.org.nz) - -Thank you for your patience while we make these changes. We're working to ensure responses to support requests are not overly delayed during the switchover. In general, we strive to reply to support requests within one business day of receiving a message. 
- -If you have any questions at any time, send an email to [support@nesi.org.nz](mailto:support@nesi.org.nz) or pop into our [online Weekly Office Hours](https://docs.nesi.org.nz/Getting_Started/Getting_Help/Weekly_Online_Office_Hours/) to chat one-on-one with a member of our Support team. diff --git a/docs/General/Announcements/Slurm_upgrade_to_version_21-8.md b/docs/General/Announcements/Slurm_upgrade_to_version_21-8.md deleted file mode 100644 index 412f7a8f0..000000000 --- a/docs/General/Announcements/Slurm_upgrade_to_version_21-8.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -created_at: '2022-03-22T02:16:17Z' -tags: -- general -title: Slurm upgrade to version 21.8 -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 4544913401231 -zendesk_section_id: 200732737 -search: - boost: 0.1 ---- - -- Added `--me` option, equivalent to` --user=$USER`. -- Added "pendingtime" as a option for --Format. -- Put sorted start times of "N/A" or 0 at the end of the list. - - - -- Add time specification: "now-" (i.e. subtract from the present) -- AllocGres and ReqGres were removed. Alloc/ReqTres should be used - instead.  - - - -- MAGNETIC flag on reservations. Reservations the user doesn't have to - even request. -- The LicensesUsed line has been removed from `scontrol show config` . - Please use updated `scontrol show licenses` command as an - alternative. - - - --  `--threads-per-core` now influences task layout/binding, not just - allocation. -- `--gpus-per-node` can be used instead of `--gres=GPU` -- `--hint=nomultithread` can now be replaced - with `--threads-per-core=1` -- The inconsistent terminology and environment variable naming for - Heterogeneous Job ("HetJob") support has been tidied up. -- The correct term for these jobs are "HetJobs", references to - "PackJob"   have been corrected. -- The correct term for the separate constituent jobs are - "components",   references to "packs" have been corrected. 
- - - -- Added support for an "Interactive Step", designed to be used with - salloc to launch a terminal on an allocated compute node - automatically. Enable by setting "use\_interactive\_step" as part of - LaunchParameters. - - - --  By default, a step started with srun will be granted exclusive (or - non- overlapping) access to the resources assigned to that step. No - other parallel step will be allowed to run on the same resources at - the same time. This replaces one facet of the '--exclusive' option's - behavior, but does not imply the '--exact' option described below. - To get the previous default behavior - which allowed parallel steps - to share all resources - use the new srun '--overlap' option. -- In conjunction to this non-overlapping step allocation behavior - being the new default, there is an additional new option for step - management '--exact', which will allow a step access to only those - resources requested by the step. This is the second half of the - '--exclusive' behavior. Otherwise, by default all non-gres resources - on each node in the allocation will be used by the step, making it - so no other parallel step will have access to those resources unless - both steps have specified '--overlap'. - - - -- New command which permits crontab-compatible job scripts to be - defined. These scripts will recur automatically (at most) on the - intervals described. 
diff --git a/docs/General/Announcements/University_of_Auckland_ANSYS_users.md b/docs/General/Announcements/University_of_Auckland_ANSYS_users.md deleted file mode 100644 index 6c0ff4fca..000000000 --- a/docs/General/Announcements/University_of_Auckland_ANSYS_users.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -created_at: '2021-04-03T22:28:54Z' -tags: [] -title: University of Auckland - ANSYS users -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 360003984776 -zendesk_section_id: 200732737 -search: - boost: 0.1 ---- - - -On 01/04/2021 afternoon, there was a change to the University ANSYS -licences; you may find that your jobs fail with a licence error. - -The following command should resolve the issue (where `-revn 202` is -replaced with the version you use). - -``` sl -module load ANSYS/2020R2 -ansysli_util -revn 202 -deleteuserprefs -``` - -The effect this will have on all of the ANSYS products is yet to be -determined, so please {% include "partials/support_request.html" %} if you encounter problems. diff --git a/docs/General/Announcements/Upcoming_changes_to_NeSI_documentation.md b/docs/General/Announcements/Upcoming_changes_to_NeSI_documentation.md deleted file mode 100644 index 4a2b512b5..000000000 --- a/docs/General/Announcements/Upcoming_changes_to_NeSI_documentation.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -created_at: '2024-03-25T01:25:49Z' -hidden: false -position: 0 -status: new -tags: -- announcement -title: Upcoming changes to NeSI documentation -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 9367039527823 -zendesk_section_id: 200732737 -search: - boost: 0.1 ---- - -Over the next few months NeSI will be making the shift to a new -framework for our support documentation. - -The content you know and love will be unchanged, but things will look a -bit different. - -We also have a new domain, -[docs.nesi.org.nz](https://www.docs.nesi.org.nz/?utm_source=announcement) -where you can browse the 'new' docs now. 
- -[support.nesi.org.nz](https://support.nesi.org.nz) will continue -displaying the 'old' docs for a bit longer while we ensure everything is -working as it should. - -## Why? - -**Maintainability:** Due to the large number of pages hosted, keeping -information up to date requires a lot of time and effort. The new system -will make this easier. - -**Openness:** We are moving from a proprietary closed source content -management system to a community maintained open source one based on -[mkdocs](https://www.mkdocs.org/). - -**Collaboration:** Our new docs are publicly hosted on GitHub, meaning -anyone can view, copy, and suggest changes to the source material. This -will help ensure our documentation is more accessible and responsive to -community needs. - -**Pretty:** This one is subjective, but we think the docs have never -looked better. - -## What will happen to old links? - -[support.nesi.org.nz](https://support.nesi.org.nz) will not be going -anywhere, ***any links you have saved will continue to work.*** - -## What do I need to do? 
- -*Nothing at all.* - -If you like trying new things, you can see our new docs at -[docs.nesi.org.nz](https://www.docs.nesi.org.nz/?utm_source=announcement) - -We would love to hear your feedback via -[form](https://docs.google.com/forms/d/e/1FAIpQLSdBNPmOEy-SqUmktZaoaMXs2VO31W3DaAh6Py_lNf1Td2VBfA/viewform?usp=sf_link) -or {% include "partials/support_request.html" %} diff --git a/docs/General/Announcements/Upcoming_webinar-Tips_for_making_the_most_of_Mahuikas_new_Milan_nodes.md b/docs/General/Announcements/Upcoming_webinar-Tips_for_making_the_most_of_Mahuikas_new_Milan_nodes.md deleted file mode 100644 index 35c99b438..000000000 --- a/docs/General/Announcements/Upcoming_webinar-Tips_for_making_the_most_of_Mahuikas_new_Milan_nodes.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -created_at: '2023-03-28T22:23:54Z' -tags: [] -title: "Upcoming webinar: Tips for making the most of Mahuika\u2019s new Milan nodes" -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 6678260710031 -zendesk_section_id: 200732737 -search: - boost: 0.1 ---- - - -In late 2022, the Mahuika cluster was expanded to allow a wider range of -research communities to adopt HPC approaches and build digital skills -within their research teams. - -Join us on Thursday 30 March for a short webinar sharing some practical -tips and tricks for making the most of these new resources: - -**Making the most of Mahuika's new Milan nodes -Thursday 30 March** -**11:30 am - 12:00 pm** -**[Click here to -RSVP](https://www.eventbrite.co.nz/e/webinar-making-the-most-of-mahuikas-new-milan-nodes-registration-557428302057)** - -*Background:* -Following a successful early access programme, Mahuika’s newest CPU -nodes are now available for use by any projects that have a Mahuika -allocation on NeSI's HPC Platform. 
The production launch of these new -nodes is an exciting milestone in NeSI’s strategy to lower the carbon -footprint and continually improve the performance and fit-for-purpose -design of our platforms to meet your research needs. - -*What’s new* - -- faster, more powerful computing, enabled by AMD 3rd Gen EPYC Milan - architecture - -- specialised high-memory capabilities, allowing rapid simultaneous - processing - -- improved energy efficiency - these nodes are 2.5 times more power - efficient than Mahuika’s original Broadwell nodes - -Come along to [this -webinar](https://www.eventbrite.co.nz/e/webinar-making-the-most-of-mahuikas-new-milan-nodes-registration-557428302057) -to learn more and to ask questions about how your research project can -use these powerful resources. - -***About the speaker*** - -Alexander Pletzer is a Research Software Engineer working for NeSI at -NIWA. Alex helps researchers run better and faster on NeSI platforms. - -***More Information*** - -If you're unable to join us for this session but have questions about -the Milan nodes or would like more information, come along to one of our -[weekly Online Office -Hours](../../Getting_Started/Getting_Help/Weekly_Online_Office_Hours.md) -or email anytime.  
diff --git a/docs/General/Announcements/Visual_Studio_Code_Remote-Latest_Version_Not_Supported_UPDATE.md b/docs/General/Announcements/Visual_Studio_Code_Remote-Latest_Version_Not_Supported_UPDATE.md deleted file mode 100644 index 1ed2a8591..000000000 --- a/docs/General/Announcements/Visual_Studio_Code_Remote-Latest_Version_Not_Supported_UPDATE.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -created_at: '2024-02-07T20:23:09Z' -hidden: false -position: 1 -status: new -tags: -- announcement -title: 'Visual Studio Code Remote: Latest Version Not Supported (UPDATE)' -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 8974326930319 -zendesk_section_id: 200732737 -search: - boost: 0.1 ---- - -The latest version of Visual Studio Code (1.86.0) released in January -2024 requires a later version of GLIBC than is currently available on -the NeSI login nodes. - -For the moment please roll back to the [previous release -(1.8.5)](https://code.visualstudio.com/updates/v1_85). - -You will also have to roll back the 'Remote - SSH' plugin. This can be -done by selecting the plugin in the Extension Marketplace, clicking on -the 'Uninstall' drop down and choosing 'Install another version'. - -## Update: 09/02/2024 - -Due to the amount of [feedback on the glibc -change](https://github.com/microsoft/vscode/issues/204658) the VSCode -team have said that **future versions will allow you to connect with a -warning instead.** - -![2024-02-09 14.42.46.png](../../assets/images/vscodeos-not-supported.png) - -You can get the fix in a [pre-release build -(1.86.1)](https://github.com/microsoft/vscode/releases/tag/1.86.1), or -wait for the next stable release in March. 
diff --git a/docs/General/Release_Notes/index.md b/docs/General/Release_Notes/index.md deleted file mode 100644 index d0908b597..000000000 --- a/docs/General/Release_Notes/index.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -created_at: '2021-02-23T19:52:34Z' -tags: [] -vote_count: 0 -vote_sum: 0 -title: Release Notes -zendesk_article_id: 360003507115 -zendesk_section_id: 360000437436 ---- - -NeSI publishes release notes for applications, 3rd party applications -and NeSI services. This section will function as a directory to find all -published release note articles with the label 'releasenote'. - -## NeSI applications - -You can find published release notes for NeSI applications in the -context of the structure of our documentation. -Product context > release notes section > versioned release note - -Example: Release Notes Long-Term Storage can -be located under Storage, Long-Term Storage - -## 3rd party applications - -3rd party applications listed under [Supported Applications](../../Scientific_Computing/Supported_Applications/index.md) -have child pages with details about the available versions on NeSI, and -a reference to the vendor release notes or documentation. - -## NeSI services - -Jupyter on NeSI is a recent example of a service composed of multiple -components and dependencies that NeSI maintains. diff --git a/docs/Getting_Started/.pages.yml b/docs/Getting_Started/.pages.yml deleted file mode 100644 index 0c149a581..000000000 --- a/docs/Getting_Started/.pages.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -nav: - - Accounts, Projects and Allocations : Accounts-Projects_and_Allocations - - Accessing_the_HPCs - - Next_Steps - - Getting_Help - - Cheat_Sheets - - ... 
- - my.nesi.org.nz: my-nesi-org-nz \ No newline at end of file diff --git a/docs/Getting_Started/Accessing_the_HPCs/.pages.yml b/docs/Getting_Started/Accessing_the_HPCs/.pages.yml deleted file mode 100644 index a2df11ddf..000000000 --- a/docs/Getting_Started/Accessing_the_HPCs/.pages.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -nav: - - Setting_Up_and_Resetting_Your_Password.md - - Setting_Up_Two_Factor_Authentication.md - - Choosing_and_Configuring_Software_for_Connecting_to_the_Clusters.md - - X_Forwarding_using_the_Ubuntu_Terminal_on_Windows.md - - Port_Forwarding.md - - ... diff --git a/docs/Getting_Started/Accessing_the_HPCs/X_Forwarding_using_the_Ubuntu_Terminal_on_Windows.md b/docs/Getting_Started/Accessing_the_HPCs/X_Forwarding_using_the_Ubuntu_Terminal_on_Windows.md deleted file mode 100644 index a037935e5..000000000 --- a/docs/Getting_Started/Accessing_the_HPCs/X_Forwarding_using_the_Ubuntu_Terminal_on_Windows.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -created_at: '2021-10-04T19:55:45Z' -tags: -- x11 -- x forwarding -- x-forwarding -title: X-Forwarding using the Ubuntu Terminal on Windows -vote_count: 10 -vote_sum: -6 -zendesk_article_id: 4407442866703 -zendesk_section_id: 360000034315 ---- - - -1. [Download and install Xming from here](https://sourceforge.net/projects/xming/). Don't install an SSH - client when prompted during the installation, if you are prompted - for Firewall permissions after installing Xming close the window - without allowing any Firewall permissions. -2. Open your Ubuntu terminal and install x11-apps with the command: - `sudo apt install x11-apps -y`. -3. Restart your terminal, start your Xming (there should be a desktop - icon after installing it). You should now be able to X-Forward - displays from the HPC when you log in (assuming you have completed - the - [terminal setup instructions found here](../../Scientific_Computing/Terminal_Setup/Standard_Terminal_Setup.md)). 
diff --git a/docs/Getting_Started/Getting_Help/.pages.yml b/docs/Getting_Started/Getting_Help/.pages.yml deleted file mode 100644 index 704d239ad..000000000 --- a/docs/Getting_Started/Getting_Help/.pages.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -nav: - - NeSI_wide_area_network_connectivity.md - - Weekly_Online_Office_Hours.md - - Consultancy.md - - Job_efficiency_review.md - - System_status.md - - Introductory_Material.md \ No newline at end of file diff --git a/docs/Getting_Started/Getting_Help/Consultancy.md b/docs/Getting_Started/Getting_Help/Consultancy.md deleted file mode 100644 index 78e7441f9..000000000 --- a/docs/Getting_Started/Getting_Help/Consultancy.md +++ /dev/null @@ -1,163 +0,0 @@ ---- -created_at: '2019-02-07T21:55:45Z' -tags: -- help -vote_count: 1 -vote_sum: 1 -zendesk_article_id: 360000751916 -zendesk_section_id: 360000164635 ---- - -NeSI's Consultancy service provides scientific and HPC-focussed -computational and data science support to research projects across a -range of domains. - -## Need support with your research project? - -If you would like to learn more about NeSI's Consultancy service and how -you can work with NeSI's Research Software and Data Science Engineers on -a project, please {% include "partials/support_request.html" %} to set up an -initial meeting. We can discuss your needs and complete a Consultancy -application form together. - -Researchers from NeSI collaborator institutions (University of Auckland, -NIWA, University of Otago and Manaaki Whenua - Landcare Research) and -those with Merit projects can usually access consultancy at no cost to -themselves, based on their institution's or MBIE's investment into NeSI. - -## What do we do? - -The NeSI team are available to help with any stage of your research -software development. We can get involved with designing and developing -your software from scratch, or assist with improving software you have -already written. 
- -The service is completely bespoke and tailored to your requirements. -Some examples of outcomes we could assist with (this list is general and -non-exhaustive): - -- Code development - - Design and develop research software from scratch - - Algorithmic improvements - - Translate Python/R/Matlab code to C/C++/Fortran for faster - execution - - Accelerate code by offloading computations to a GPU - - Develop visualisation and post-processing tools (GUIs, dashboards, etc) -- Performance improvement - - Code optimisation – profile and improve efficiency (speed and - memory), IO performance - - Parallelisation – software (OpenMP, MPI, etc.) and workflow - parallelisation -- Improve software sustainability (version control, testing, - continuous integration, etc) -- Data Science Engineering - - Optimise numerical performance of machine learning pipelines - - Conduct an Exploratory Data Analysis - - Assist with designing and fitting explanatory and predictive - models -- Anything else you can think of ;-) - -## What can you expect from us? - -During a consultancy project we aim to provide: - -- Expertise and advice -- An agreed timeline to develop or improve a solution (typical - projects are of the order of 1 day per week for up to 4 months but - this is determined on a case-by-case basis) -- Training, knowledge transfer and/or capability development -- A summary document outlining what has been achieved during the - project -- A case study published on our website after the project has been - completed, to showcase the work you are doing on NeSI - -## What is expected of you? - -Consultancy projects are intended to be a collaboration and thus some -input is required on your part. 
You should be willing to: - -- Contribute to a case study upon successful completion of the - consultancy project -- Complete a short survey to help us measure the impact of our service -- Attend regular meetings (usually via video conference) -- Invest time to answer questions, provide code and data as necessary - and make changes to your workflow if needed -- [Acknowledge](https://www.nesi.org.nz/services/high-performance-computing/guidelines/acknowledgement-and-publication) - NeSI in article and code publications that we have contributed to, - which could include co-authorship if our contribution is deemed - worthy -- Accept full ownership/maintenance of the work after the project - completes (NeSI's involvement in the project is limited to the - agreed timeline) - -## Previous projects - -Listed below are some examples of previous projects we have contributed -to: - -- [A quantum casino helps define atoms in the big - chill](https://www.nesi.org.nz/case-studies/quantum-casino-helps-define-atoms-big-chill) -- [Using statistical models to help New Zealand prepare for large - earthquakes](https://www.nesi.org.nz/case-studies/using-statistical-models-help-new-zealand-prepare-large-earthquakes) -- [Improving researchers' ability to access and analyse climate model - data - sets](https://www.nesi.org.nz/case-studies/improving-researchers-ability-access-and-analyse-climate-model-data-sets) -- [Speeding up the post-processing of a climate model data - pipeline](https://www.nesi.org.nz/case-studies/speeding-post-processing-climate-model-data-pipeline) -- [Overcoming data processing overload in scientific web mapping - software](https://www.nesi.org.nz/case-studies/overcoming-data-processing-overload-scientific-web-mapping-software) -- [Visualising ripple effects in riverbed sediment - transport](https://www.nesi.org.nz/case-studies/visualising-ripple-effects-riverbed-sediment-transport) -- [New Zealand's first national river flow forecasting system for - flooding - 
resilience](https://www.nesi.org.nz/case-studies/new-zealand%E2%80%99s-first-national-river-flow-forecasting-system-flooding-resilience) -- [A fast model for predicting floods and storm - damage](https://www.nesi.org.nz/case-studies/fast-model-predicting-floods-and-storm-damage) -- [How multithreading and vectorisation can speed up seismic - simulations by - 40%](https://www.nesi.org.nz/case-studies/how-multithreading-and-vectorisation-can-speed-seismic-simulations-40) -- [Machine learning for marine - mammals](https://www.nesi.org.nz/case-studies/machine-learning-marine-mammals) -- [Parallel processing for ocean - life](https://www.nesi.org.nz/case-studies/parallel-processing-ocean-life) -- [NeSI support helps keep NZ rivers - healthy](https://www.nesi.org.nz/case-studies/nesi-support-helps-keep-nz-rivers-healthy) -- [Heating up nanowires with - HPC](https://www.nesi.org.nz/case-studies/heating-nanowires-hpc) -- [The development of next generation weather and climate models is - heating - up](https://www.nesi.org.nz/case-studies/development-next-generation-weather-and-climate-models-heating) -- [Understanding the behaviours of - light](https://www.nesi.org.nz/case-studies/understanding-behaviours-light) -- [Getting closer to more accurate climate predictions for New - Zealand](https://www.nesi.org.nz/case-studies/getting-closer-more-accurate-climate-predictions-new-zealand) -- [Fractal analysis of brain signals for autism spectrum - disorder](https://www.nesi.org.nz/case-studies/fractal-analysis-brain-signals-autism-spectrum-disorder) -- [Optimising tools used for genetic - analysis](https://www.nesi.org.nz/case-studies/optimising-tools-used-genetic-analysis) -- [Investigating climate - sensitivity](https://www.nesi.org.nz/case-studies/optimising-tools-used-genetic-analysis) -- [Tracking coastal precipitation systems in the - tropics](https://www.nesi.org.nz/case-studies/tracking-coastal-precipitation-systems-tropics) -- [Powering global climate - 
simulations](https://www.nesi.org.nz/case-studies/powering-global-climate-simulations) -- [Optimising tools used for genetic - analysis](https://www.nesi.org.nz/case-studies/optimising-tools-used-genetic-analysis) -- [Investigating climate - sensitivity](https://www.nesi.org.nz/case-studies/investigating-climate-sensitivity) -- [Improving earthquake forecasting - methods](https://www.nesi.org.nz/case-studies/improving-earthquake-forecasting-methods) -- [Modernising models to diagnose and treat disease and - injury](https://www.nesi.org.nz/case-studies/modernising-models-diagnose-and-treat-disease-and-injury) -- [Cataloguing NZ's earthquake - activities](https://www.nesi.org.nz/case-studies/cataloguing-nz%E2%80%99s-earthquake-activities) -- [Finite element modelling of biological - cells](https://www.nesi.org.nz/case-studies/finite-element-modelling-biological-cells) -- [Preparing New Zealand to adapt to climate - change](https://www.nesi.org.nz/case-studies/preparing-new-zealand-adapt-climate-change) -- [Using GPUs to expand our understanding of the solar - system](https://www.nesi.org.nz/case-studies/using-gpus-expand-our-understanding-solar-system) -- [Speeding up Basilisk with - GPGPUs](https://www.nesi.org.nz/case-studies/speeding-basilisk-gpgpus) -- [Helping communities anticipate flood - events](https://www.nesi.org.nz/case-studies/helping-communities-anticipate-flood-events) diff --git a/docs/Getting_Started/Next_Steps/.pages.yml b/docs/Getting_Started/Next_Steps/.pages.yml deleted file mode 100644 index c0dbb3681..000000000 --- a/docs/Getting_Started/Next_Steps/.pages.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -nav: - - Moving_files_to_and_from_the_cluster.md - - Submitting_your_first_job.md - - Parallel_Execution.md - - Finding_Job_Efficiency.md - - ... 
\ No newline at end of file diff --git a/docs/Getting_Started/Next_Steps/The_HPC_environment.md b/docs/Getting_Started/Next_Steps/The_HPC_environment.md deleted file mode 100644 index 9299a758e..000000000 --- a/docs/Getting_Started/Next_Steps/The_HPC_environment.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -created_at: '2019-08-16T01:22:03Z' -tags: [] -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 360001113076 -zendesk_section_id: 360000189716 ---- - -## Environment Modules - -Modules are a convenient  way to provide access to applications  on the cluster. -They prepare the environment you need to run an application. - -For a full list of module commands run `man module` or visit the [lmod documentation](https://lmod.readthedocs.io/en/latest/010_user.html). - -| Command | Description | -|-------------------------------|---------------------------------------------------------------| -| `module spider` | Lists all available modules. (only Mahuika) | -| `module spider [module name]` | Searches available modules for \[module name\] (only Mahuika) | -| `module show [module name]` | Shows information about \[module name\] | -| `module load [module name]` | Loads \[module name\] | -| `module list [module name]` | Lists currently loaded modules. | diff --git a/docs/High_Performance_Computing/.pages.yml b/docs/High_Performance_Computing/.pages.yml new file mode 100644 index 000000000..9369590e3 --- /dev/null +++ b/docs/High_Performance_Computing/.pages.yml @@ -0,0 +1,9 @@ +nav: + - index.md + - Mahuika_Cluster + - Data_Management + - Software + - Batch_Computing + - Open_OnDemand + - Parallel_Computing + - ... diff --git a/docs/High_Performance_Computing/Batch_Computing/.pages.yml b/docs/High_Performance_Computing/Batch_Computing/.pages.yml new file mode 100644 index 000000000..58088b568 --- /dev/null +++ b/docs/High_Performance_Computing/Batch_Computing/.pages.yml @@ -0,0 +1,2 @@ +nav: + - ... 
diff --git a/docs/Getting_Started/Next_Steps/Finding_Job_Efficiency.md b/docs/High_Performance_Computing/Batch_Computing/Finding_Job_Efficiency.md similarity index 94% rename from docs/Getting_Started/Next_Steps/Finding_Job_Efficiency.md rename to docs/High_Performance_Computing/Batch_Computing/Finding_Job_Efficiency.md index 3d8ac11e2..e954c7381 100644 --- a/docs/Getting_Started/Next_Steps/Finding_Job_Efficiency.md +++ b/docs/High_Performance_Computing/Batch_Computing/Finding_Job_Efficiency.md @@ -131,7 +131,7 @@ the compute node where it it running. If 'nodelist' is not one of the fields in the output of your `sacct` or `squeue` commands you can find the node a job is running on using the command; `squeue -h -o %N -j ` The node will look something -like `wbn123` on Mahuika or `nid00123` on Māui +like `wbn123` on Mahuika. !!! Note If your job is using MPI it may be running on multiple nodes @@ -159,7 +159,7 @@ parent process). Processes in green can be ignored -![how\_to\_read\_htop.png](../../assets/images/Finding_Job_Efficiency.png) +![how\_to\_read\_htop.png](../Mahuika_Cluster/Next_Steps/Finding_Job_Efficiency.png) **RES** - Current memory being used (same thing as 'RSS' from sacct) @@ -185,16 +185,16 @@ time* the CPUs are in use. This is not enough to get a picture of overall job efficiency, as required CPU time *may vary by number of CPU*s. -The only way to get the full context, is to compare walltime performance between jobs at different scale. See [Job Scaling](../../Getting_Started/Next_Steps/Job_Scaling_Ascertaining_job_dimensions.md) for more details. +The only way to get the full context is to compare walltime performance between jobs at different scale. See [Job Scaling](../Parallel_Computing/Job_Scaling_Ascertaining_job_dimensions.md) for more details. 
### Example -![qdyn\_eff.png](../../assets/images/Finding_Job_Efficiency_0.png) +![qdyn\_eff.png](../Mahuika_Cluster/Next_Steps/Finding_Job_Efficiency_0.png) From the above plot of CPU efficiency, you might decide a 5% reduction of CPU efficiency is acceptable and scale your job up to 18 CPU cores . -![qdyn\_walltime.png](../../assets/images/Finding_Job_Efficiency_1.png) +![qdyn\_walltime.png](../Mahuika_Cluster/Next_Steps/Finding_Job_Efficiency_1.png) However, when looking at a plot of walltime it becomes apparent that performance gains per CPU added drop significantly after 4 CPUs, and in diff --git a/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/GPU_use_on_NeSI.md b/docs/High_Performance_Computing/Batch_Computing/GPU_use_on_NeSI.md similarity index 81% rename from docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/GPU_use_on_NeSI.md rename to docs/High_Performance_Computing/Batch_Computing/GPU_use_on_NeSI.md index 18a49d380..97e7db3ea 100644 --- a/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/GPU_use_on_NeSI.md +++ b/docs/High_Performance_Computing/Batch_Computing/GPU_use_on_NeSI.md @@ -12,23 +12,22 @@ please have a look at the dedicated pages listed at the end of this page. !!! warning - An overview of available GPU cards is available in the [Available GPUs on NeSI](../../Scientific_Computing/The_NeSI_High_Performance_Computers/Available_GPUs_on_NeSI.md) + An overview of available GPU cards is available in the [Available GPUs on NeSI](Available_GPUs_on_NeSI.md) support page. Details about GPU cards for each system and usage limits are in the - [Mahuika Slurm Partitions](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Mahuika_Slurm_Partitions.md) - and [Māui\_Ancil (CS500) Slurm Partitions](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Maui_Slurm_Partitions.md) + [Mahuika Slurm Partitions](Mahuika_Slurm_Partitions.md) support pages. 
Details about pricing in terms of compute units can be found in the - [What is an allocation?](../../Getting_Started/Accounts-Projects_and_Allocations/What_is_an_allocation.md) + [What is an allocation?](What_is_an_allocation.md) page. !!! note Recall, memory associated with the GPUs is the VRAM, and is a separate resource from the RAM requested by Slurm. The memory values listed below are VRAM values. For available RAM on the GPU nodes, please see - [Mahuika Slurm Partitions](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Mahuika_Slurm_Partitions.md). + [Mahuika Slurm Partitions](Mahuika_Slurm_Partitions.md). ## Request GPU resources using Slurm -To request a GPU for your [Slurm job](../../Getting_Started/Next_Steps/Submitting_your_first_job.md), add +To request a GPU for your [Slurm job](Submitting_your_first_job.md), add the following option at the beginning of your submission script: ```sl @@ -98,7 +97,7 @@ cases: #SBATCH --gpus-per-node=A100:1 ``` - *These GPUs are on Milan nodes, check the [dedicated support page](../Running_Jobs_on_Maui_and_Mahuika/Milan_Compute_Nodes.md) + *These GPUs are on Milan nodes, check the [dedicated support page](Milan_Compute_Nodes.md) for more information.* - 4 A100 (80GB & NVLink) GPU on Mahuika @@ -108,7 +107,7 @@ cases: #SBATCH --gpus-per-node=A100:4 ``` - *These GPUs are on Milan nodes, check the [dedicated support page](../Running_Jobs_on_Maui_and_Mahuika/Milan_Compute_Nodes.md) + *These GPUs are on Milan nodes, check the [dedicated support page](Milan_Compute_Nodes.md) for more information.* *You cannot ask for more than 4 A100 (80GB) GPUs per node on @@ -126,7 +125,7 @@ cases: regular Mahuika node (A100 40GB GPU) or on a Milan node (A100 80GB GPU).* -You can also use the `--gpus-per-node`option in [Slurm interactive sessions](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Slurm_Interactive_Sessions.md), +You can also use the `--gpus-per-node` option in [Slurm interactive 
sessions](Slurm_Interactive_Sessions.md), with the `srun` and `salloc` commands. For example: ``` sh @@ -156,7 +155,7 @@ duration of 30 minutes. ## Load CUDA and cuDNN modules To use an Nvidia GPU card with your application, you need to load the -driver and the CUDA toolkit via the [environment modules](../HPC_Software_Environment/Finding_Software.md) +driver and the CUDA toolkit via the [environment modules](Finding_Software.md) mechanism: ``` sh @@ -172,9 +171,6 @@ module spider CUDA Please{% include "partials/support_request.html" %} if you need a version not available on the platform. -!!! note - On Māui Ancillary Nodes, use `module avail CUDA` to list available - versions. The CUDA module also provides access to additional command line tools: @@ -326,8 +322,8 @@ graphical interface. !!! warning The `nsys-ui` and `ncu-ui` tools require access to a display server, either via - [X11](../../Scientific_Computing/Terminal_Setup/X11_on_NeSI.md) or a - [Virtual Desktop](../../Scientific_Computing/Interactive_computing_using_Jupyter/Virtual_Desktop_via_Jupyter_on_NeSI.md). + [X11](X11_on_NeSI.md) or a + [Virtual Desktop](Virtual_Desktop_via_Jupyter_on_NeSI.md). You also need to load the `PyQt` module beforehand: ```sh @@ -341,14 +337,14 @@ graphical interface. 
The following pages provide additional information for supported applications: -- [ABAQUS](../../Scientific_Computing/Supported_Applications/ABAQUS.md#examples) -- [GROMACS](../../Scientific_Computing/Supported_Applications/GROMACS.md#nvidia-gpu-container) -- [Lambda Stack](../../Scientific_Computing/Supported_Applications/Lambda_Stack.md) -- [Matlab](../../Scientific_Computing/Supported_Applications/MATLAB.md#using-gpus) -- [TensorFlow on GPUs](../../Scientific_Computing/Supported_Applications/TensorFlow_on_GPUs.md) +- [ABAQUS](../Supported_Applications/ABAQUS.md#examples) +- [GROMACS](../Supported_Applications/GROMACS.md#nvidia-gpu-container) +- [Lambda Stack](Lambda_Stack.md) +- [Matlab](../Supported_Applications/MATLAB.md#using-gpus) +- [TensorFlow on GPUs](TensorFlow_on_GPUs.md) And programming toolkits: -- [Offloading to GPU with OpenMP](../../Scientific_Computing/HPC_Software_Environment/Offloading_to_GPU_with_OpenMP.md) -- [Offloading to GPU with OpenACC using the Cray compiler](../HPC_Software_Environment/Offloading_to_GPU_with_OpenACC.md) -- [NVIDIA GPU Containers](../../Scientific_Computing/HPC_Software_Environment/NVIDIA_GPU_Containers.md) +- [Offloading to GPU with OpenMP](Offloading_to_GPU_with_OpenMP.md) +- [Offloading to GPU with OpenACC using the Cray compiler](Offloading_to_GPU_with_OpenACC.md) +- [NVIDIA GPU Containers](NVIDIA_GPU_Containers.md) diff --git a/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Job_Checkpointing.md b/docs/High_Performance_Computing/Batch_Computing/Job_Checkpointing.md similarity index 100% rename from docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Job_Checkpointing.md rename to docs/High_Performance_Computing/Batch_Computing/Job_Checkpointing.md diff --git a/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Milan_Compute_Nodes.md b/docs/High_Performance_Computing/Batch_Computing/Milan_Compute_Nodes.md similarity index 93% rename from 
docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Milan_Compute_Nodes.md rename to docs/High_Performance_Computing/Batch_Computing/Milan_Compute_Nodes.md index bf9c53a6e..78974f35b 100644 --- a/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Milan_Compute_Nodes.md +++ b/docs/High_Performance_Computing/Batch_Computing/Milan_Compute_Nodes.md @@ -2,10 +2,7 @@ created_at: '2023-02-09T01:30:43Z' tags: [] title: Milan Compute Nodes -vote_count: 1 -vote_sum: 1 -zendesk_article_id: 6367209795471 -zendesk_section_id: 360000030876 +status: deprecated ---   ## How to access @@ -26,7 +23,6 @@ Alternatively, the same effect can be achieved by specifying in a Slurm script: #SBATCH --partition=milan ``` - ## Hardware Each node has two AMD Milan CPUs, each with 8 "chiplets" of 8 cores and @@ -123,9 +119,7 @@ to try it: module load AOCC ``` -For more information on AOCC compiler suite please, visit [AMD -Optimizing C/C++ and Fortran Compilers -(AOCC)](https://developer.amd.com/amd-aocc/) +For more information on AOCC compiler suite please, visit [AMD Optimizing C/C++ and Fortran Compilers (AOCC)](https://developer.amd.com/amd-aocc/) ## Network @@ -143,5 +137,5 @@ configuration is expected to be addressed in the future. Don't hesitate to {% include "partials/support_request.html" %}. No question is too big or small. We are available for Zoom sessions or -[Weekly Online Office Hours](../../Getting_Started/Getting_Help/Weekly_Online_Office_Hours.md) +[Weekly Online Office Hours](Weekly_Online_Office_Hours.md) if it's easier to discuss your question in a call rather than via email. 
diff --git a/docs/Scientific_Computing/HPC_Software_Environment/Per_job_temporary_directories.md b/docs/High_Performance_Computing/Batch_Computing/Per_job_temporary_directories.md similarity index 100% rename from docs/Scientific_Computing/HPC_Software_Environment/Per_job_temporary_directories.md rename to docs/High_Performance_Computing/Batch_Computing/Per_job_temporary_directories.md diff --git a/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Checking_your_projects_usage_using_nn_corehour_usage.md b/docs/High_Performance_Computing/Batch_Computing/Project_Accounting/Checking_your_projects_usage_using_nn_corehour_usage.md similarity index 100% rename from docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Checking_your_projects_usage_using_nn_corehour_usage.md rename to docs/High_Performance_Computing/Batch_Computing/Project_Accounting/Checking_your_projects_usage_using_nn_corehour_usage.md diff --git a/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Fair_Share.md b/docs/High_Performance_Computing/Batch_Computing/Project_Accounting/Fair_Share.md similarity index 98% rename from docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Fair_Share.md rename to docs/High_Performance_Computing/Batch_Computing/Project_Accounting/Fair_Share.md index fc49d1361..0e0d1aa91 100644 --- a/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Fair_Share.md +++ b/docs/High_Performance_Computing/Batch_Computing/Project_Accounting/Fair_Share.md @@ -19,7 +19,7 @@ Your *Fair Share score* is a number between **0** and **1**. Projects with a **larger** Fair Share score receive a **higher priority** in the queue. -A project is given an [allocation of compute units](../../Getting_Started/Accounts-Projects_and_Allocations/What_is_an_allocation.md) +A project is given an [allocation of compute units](../What_is_an_allocation.md) over a given **period**. 
An institution also has a percentage **Fair Share entitlement** of each machine's deliverable capacity over that same period. diff --git a/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Fair_Share_How_jobs_get_prioritised.md b/docs/High_Performance_Computing/Batch_Computing/Project_Accounting/Fair_Share_How_jobs_get_prioritised.md similarity index 97% rename from docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Fair_Share_How_jobs_get_prioritised.md rename to docs/High_Performance_Computing/Batch_Computing/Project_Accounting/Fair_Share_How_jobs_get_prioritised.md index f95e96aa3..c47ae25c5 100644 --- a/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Fair_Share_How_jobs_get_prioritised.md +++ b/docs/High_Performance_Computing/Batch_Computing/Project_Accounting/Fair_Share_How_jobs_get_prioritised.md @@ -19,7 +19,7 @@ Your *Fair Share score* is a number between **0** and **1**. Projects with a **larger** Fair Share score receive a **higher priority** in the queue. -A project is given an [**allocation** of compute units](../../Getting_Started/Accounts-Projects_and_Allocations/What_is_an_allocation.md) +A project is given an [**allocation** of compute units](../What_is_an_allocation.md) over a given **period**. An institution also has a percentage **Fair Share entitlement** of each machine's deliverable capacity over that same period. @@ -155,7 +155,7 @@ request for projects that expect to use the cluster heavily on average, can predict when they will need their heaviest use with a high degree of confidence, and give us plenty of notice. -For full details on Slurm's Fair share mechanism, please see [this +For full details on Slurm's Fair share mechanism, please see [this page](https://slurm.schedmd.com/priority_multifactor.html#fairshare) (offsite). 
diff --git a/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Job_prioritisation.md b/docs/High_Performance_Computing/Batch_Computing/Project_Accounting/Job_prioritisation.md similarity index 92% rename from docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Job_prioritisation.md rename to docs/High_Performance_Computing/Batch_Computing/Project_Accounting/Job_prioritisation.md index 4811e8bfb..4644f8c08 100644 --- a/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Job_prioritisation.md +++ b/docs/High_Performance_Computing/Batch_Computing/Project_Accounting/Job_prioritisation.md @@ -31,7 +31,7 @@ jobs, but is limited to one small job per user at a time: no more than Job priority decreases whenever the project uses more core-hours than expected, across all partitions. -This [Fair Share](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Fair_Share.md) +This [Fair Share](Fair_Share.md) policy means that projects that have consumed many CPU core hours in the recent past compared to their expected rate of use (either by submitting and running many jobs, or by submitting and running large jobs) will @@ -85,8 +85,8 @@ they get requeued after a node failure. Cluster and partition-specific limits can sometimes prevent jobs from starting regardless of their priority score.  
For details see the pages -on [Mahuika](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Mahuika_Slurm_Partitions.md) or -[Māui.](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Maui_Slurm_Partitions.md) +on [Mahuika](../Mahuika_Slurm_Partitions.md) or +[Māui.](../Maui_Slurm_Partitions.md) ## Backfill diff --git a/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/SLURM-Best_Practice.md b/docs/High_Performance_Computing/Batch_Computing/SLURM-Best_Practice.md similarity index 93% rename from docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/SLURM-Best_Practice.md rename to docs/High_Performance_Computing/Batch_Computing/SLURM-Best_Practice.md index fe5dca161..1573a8367 100644 --- a/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/SLURM-Best_Practice.md +++ b/docs/High_Performance_Computing/Batch_Computing/SLURM-Best_Practice.md @@ -43,7 +43,7 @@ etc). ### Memory (RAM) If you request more memory (RAM) than you need for your job, it -[will wait longer in the queue and will be more expensive when it runs](../../General/FAQs/Why_is_my_job_taking_a_long_time_to_start.md). +[will wait longer in the queue and will be more expensive when it runs](Why_is_my_job_taking_a_long_time_to_start.md). On the other hand, if you don't request enough memory, the job may be killed for attempting to exceed its allocated memory limits. @@ -52,7 +52,7 @@ your program will need at peak memory usage. We also recommend using `--mem` instead of `--mem-per-cpu` in most cases. There are a few kinds of jobs for which `--mem-per-cpu` is more -suitable. See [our article on how to request memory](../../General/FAQs/How_do_I_request_memory.md) +suitable. See [our article on how to request memory](How_do_I_request_memory.md) for more information. 
## Parallelism @@ -76,7 +76,7 @@ job array in a single command) A low fairshare score will affect your jobs priority in the queue, learn more about how to effectively use your allocation -[here](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Fair_Share_How_jobs_get_prioritised.md). +[here](Project_Accounting/Fair_Share_How_jobs_get_prioritised.md). ## Cross machine submission diff --git a/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Slurm_Interactive_Sessions.md b/docs/High_Performance_Computing/Batch_Computing/Slurm_Interactive_Sessions.md similarity index 98% rename from docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Slurm_Interactive_Sessions.md rename to docs/High_Performance_Computing/Batch_Computing/Slurm_Interactive_Sessions.md index f69e65dc2..4a1aa407b 100644 --- a/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Slurm_Interactive_Sessions.md +++ b/docs/High_Performance_Computing/Batch_Computing/Slurm_Interactive_Sessions.md @@ -14,7 +14,7 @@ you to use them interactively as you would the login node. There are two main commands that can be used to make a session, `srun` and `salloc`, both of which use most of the same options available to `sbatch` (see -[our Slurm Reference Sheet](../../Getting_Started/Cheat_Sheets/Slurm-Reference_Sheet.md)). +[our Slurm Reference Sheet](Slurm-Reference_Sheet.md)). !!! warning An interactive session will, once it starts, use the entire requested @@ -207,8 +207,7 @@ scontrol update jobid=12345678 StartTime=now ### Other changes using `scontrol` There are many other changes you can make by means of `scontrol`. For -further information, please see [the `scontrol` -documentation](https://slurm.schedmd.com/scontrol.html). +further information, please see [the `scontrol` documentation](https://slurm.schedmd.com/scontrol.html). 
## Modifying multiple interactive sessions at once diff --git a/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Mahuika_Slurm_Partitions.md b/docs/High_Performance_Computing/Batch_Computing/Slurm_Partitions.md similarity index 93% rename from docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Mahuika_Slurm_Partitions.md rename to docs/High_Performance_Computing/Batch_Computing/Slurm_Partitions.md index 16b914b11..cfb65b2ba 100644 --- a/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Mahuika_Slurm_Partitions.md +++ b/docs/High_Performance_Computing/Batch_Computing/Slurm_Partitions.md @@ -23,7 +23,7 @@ they undertake to do so with job arrays. ## Partitions -A partition can be specified via the appropriate [sbatch option](../../Getting_Started/Cheat_Sheets/Slurm-Reference_Sheet.md), +A partition can be specified via the appropriate [sbatch option](Slurm-Reference_Sheet.md), e.g.: ``` sl @@ -90,7 +90,7 @@ sbatch: `bigmem` is not the most appropriate partition for this job, which would 1850 MB 460 GB 2560 -Jobs using Milan Nodes +Jobs using Milan Nodes 8 256 @@ -170,7 +170,7 @@ below for more info. 460 GB 64 Part of -Milan Nodes. See below. +Milan Nodes. See below. @@ -216,7 +216,7 @@ To request A100 GPUs, use instead: #SBATCH --gpus-per-node=A100:1 ``` -See [GPU use on NeSI](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/GPU_use_on_NeSI.md) +See [GPU use on NeSI](GPU_use_on_NeSI.md) for more details about Slurm and CUDA settings. ### Limits on GPU Jobs @@ -242,7 +242,7 @@ connected via - Explicitly specify the partition to access them, with `--partition=hgx`. -- Hosting nodes are Milan nodes. Check the [dedicated support page](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Milan_Compute_Nodes.md) +- Hosting nodes are Milan nodes. Check the [dedicated support page](Milan_Compute_Nodes.md) for more information about the Milan nodes' differences from Mahuika's Broadwell nodes. 
diff --git a/docs/Getting_Started/Next_Steps/Submitting_your_first_job.md b/docs/High_Performance_Computing/Batch_Computing/Submitting_your_first_job.md similarity index 66% rename from docs/Getting_Started/Next_Steps/Submitting_your_first_job.md rename to docs/High_Performance_Computing/Batch_Computing/Submitting_your_first_job.md index d7acc43e4..aa0bd515b 100644 --- a/docs/Getting_Started/Next_Steps/Submitting_your_first_job.md +++ b/docs/High_Performance_Computing/Batch_Computing/Submitting_your_first_job.md @@ -9,19 +9,6 @@ zendesk_article_id: 360000684396 zendesk_section_id: 360000189716 --- -## Environment Modules - -Modules are a convenient way to provide access to applications on the cluster. They prepare the environment you need to run an application. - -For a full list of module commands run `man module`. - -| Command | Description | -| ---------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `module spider [ ]` | List all modules whose names, including version strings, contain ``. If the `` argument is not supplied, list all available modules. (only on Mahuika) | -| `module show ` | Show the contents of the module given by ``. If only the module name (e.g. `Python`) is given, show the default module of that name. If both name and version are given, show that particular version module. | -| `module load ` | Load the module (name and version) given by ``. If no version is given, load the default version. | -| `module list [ ]` | List all currently loaded modules whose names, including version strings, contain ``. If the `` argument is not supplied, list all currently loaded modules. | - ## Slurm Jobs on Mahuika and Māui are submitted in the form of a *batch script* containing the code you want to run and a header of information needed by our job scheduler *Slurm*. 
diff --git a/docs/High_Performance_Computing/Batch_Computing/index.md b/docs/High_Performance_Computing/Batch_Computing/index.md new file mode 100644 index 000000000..42fc7230e --- /dev/null +++ b/docs/High_Performance_Computing/Batch_Computing/index.md @@ -0,0 +1,11 @@ +--- +created_at: '2018-05-01T23:29:39Z' +tags: +- hpc +- info +title: Batch Computing +hide: + - toc +--- + +Batch computing is... diff --git a/docs/Storage/.pages.yml b/docs/High_Performance_Computing/Data_Management/.pages.yml similarity index 81% rename from docs/Storage/.pages.yml rename to docs/High_Performance_Computing/Data_Management/.pages.yml index d5a84a90e..1e2097e73 100644 --- a/docs/Storage/.pages.yml +++ b/docs/High_Performance_Computing/Data_Management/.pages.yml @@ -1,6 +1,5 @@ nav: - File_Systems_and_Quotas - Data_Recovery - - Data_Transfer_Services - Nearline long-term storage: Nearline_long_term_storage - ... diff --git a/docs/Storage/Data_Recovery/.pages.yml b/docs/High_Performance_Computing/Data_Management/Data_Recovery/.pages.yml similarity index 100% rename from docs/Storage/Data_Recovery/.pages.yml rename to docs/High_Performance_Computing/Data_Management/Data_Recovery/.pages.yml diff --git a/docs/Storage/Data_Recovery/File_Recovery.md b/docs/High_Performance_Computing/Data_Management/Data_Recovery/File_Recovery.md similarity index 100% rename from docs/Storage/Data_Recovery/File_Recovery.md rename to docs/High_Performance_Computing/Data_Management/Data_Recovery/File_Recovery.md diff --git a/docs/High_Performance_Computing/Data_Management/File_Systems_and_Quotas/.pages.yml b/docs/High_Performance_Computing/Data_Management/File_Systems_and_Quotas/.pages.yml new file mode 100644 index 000000000..8ae5ab46e --- /dev/null +++ b/docs/High_Performance_Computing/Data_Management/File_Systems_and_Quotas/.pages.yml @@ -0,0 +1,3 @@ +nav: + - index.md + - ... 
diff --git a/docs/Storage/File_Systems_and_Quotas/Automatic_cleaning_of_nobackup_file_system.md b/docs/High_Performance_Computing/Data_Management/File_Systems_and_Quotas/Automatic_cleaning_of_nobackup_file_system.md similarity index 100% rename from docs/Storage/File_Systems_and_Quotas/Automatic_cleaning_of_nobackup_file_system.md rename to docs/High_Performance_Computing/Data_Management/File_Systems_and_Quotas/Automatic_cleaning_of_nobackup_file_system.md diff --git a/docs/Storage/File_Systems_and_Quotas/Data_Compression.md b/docs/High_Performance_Computing/Data_Management/File_Systems_and_Quotas/Data_Compression.md similarity index 100% rename from docs/Storage/File_Systems_and_Quotas/Data_Compression.md rename to docs/High_Performance_Computing/Data_Management/File_Systems_and_Quotas/Data_Compression.md diff --git a/docs/Storage/File_Systems_and_Quotas/File_permissions_and_groups.md b/docs/High_Performance_Computing/Data_Management/File_Systems_and_Quotas/File_permissions_and_groups.md similarity index 92% rename from docs/Storage/File_Systems_and_Quotas/File_permissions_and_groups.md rename to docs/High_Performance_Computing/Data_Management/File_Systems_and_Quotas/File_permissions_and_groups.md index b7c5953de..60d054adc 100644 --- a/docs/Storage/File_Systems_and_Quotas/File_permissions_and_groups.md +++ b/docs/High_Performance_Computing/Data_Management/File_Systems_and_Quotas/File_permissions_and_groups.md @@ -89,9 +89,8 @@ project group. set, meaning that files and subdirectories later created within that directory will inherit neither the group nor the setgid bit. You probably don't want this to happen. 
For instructions on how to - prevent it, please see our article: [How can I let my fellow project - team members read or write my - files?](../../General/FAQs/How_can_I_let_my_fellow_project_team_members_read_or_write_my_files.md) + prevent it, please see our article: + [How can I let my fellow project team members read or write my files?](How_can_I_let_my_fellow_project_team_members_read_or_write_my_files.md) By default, the world, i.e. people not in the project team, have no privileges in respect of a project directory, with certain exceptions. @@ -146,6 +145,6 @@ If we agree to set up a special-purpose directory for you, we will discuss and a suitable permissions model. !!! prerequisite "See also" - - [How can I let my fellow project team members read or write my files?](../../General/FAQs/How_can_I_let_my_fellow_project_team_members_read_or_write_my_files.md) - - [How can I give read-only team members access to my files?](../../General/FAQs/How_can_I_give_read_only_team_members_access_to_my_files.md) - - [NeSI file systems and quotas](../../Storage/File_Systems_and_Quotas/NeSI_File_Systems_and_Quotas.md) + - [How can I let my fellow project team members read or write my files?](How_can_I_let_my_fellow_project_team_members_read_or_write_my_files.md) + - [How can I give read-only team members access to my files?](How_can_I_give_read_only_team_members_access_to_my_files.md) + - [NeSI file systems and quotas](index.md) diff --git a/docs/Storage/File_Systems_and_Quotas/I-O_Performance_Considerations.md b/docs/High_Performance_Computing/Data_Management/File_Systems_and_Quotas/I-O_Performance_Considerations.md similarity index 86% rename from docs/Storage/File_Systems_and_Quotas/I-O_Performance_Considerations.md rename to docs/High_Performance_Computing/Data_Management/File_Systems_and_Quotas/I-O_Performance_Considerations.md index 9826bccaa..316867555 100644 --- a/docs/Storage/File_Systems_and_Quotas/I-O_Performance_Considerations.md +++ 
b/docs/High_Performance_Computing/Data_Management/File_Systems_and_Quotas/I-O_Performance_Considerations.md @@ -17,14 +17,14 @@ Scale clients*, and those that employ *Cray’s DVS* *solution*. Applications that make heavy demands on metadata services and or have high levels of small I/O activity should generally not be run on -[Māui](../../Scientific_Computing/The_NeSI_High_Performance_Computers/Maui.md) (the Cray +[Māui](Maui.md) (the Cray XC50). ## Nodes which access storage via native Spectrum Scale Clients -All [Mauhika](../../Scientific_Computing/The_NeSI_High_Performance_Computers/Mahuika.md) -HPC Cluster, [Mahuika Ancillary](../../Scientific_Computing/The_NeSI_High_Performance_Computers/Mahuika.md), -[Māui Ancillary](../../Scientific_Computing/The_NeSI_High_Performance_Computers/Maui_Ancillary.md) and +All [Mahuika](Mahuika.md) +HPC Cluster, [Mahuika Ancillary](Mahuika.md), +[Māui Ancillary](Maui_Ancillary.md) and Māui login (aka build) nodes have native Spectrum Scale clients installed and provide high performance access to storage: @@ -42,7 +42,7 @@ as DVS (Data Virtualisation Service), to expose the Spectrum Scale file systems to XC compute nodes. DVS adds an additional layer of hardware and software between the XC compute nodes and storage (see Figure). - ![cray\_xc50.jpg](../../assets/images/I-O_Performance_Considerations.jpg) + ![cray\_xc50.jpg](I-O_Performance_Considerations.jpg) Figure 1: Cray XC50 DVS architecture. @@ -96,4 +96,4 @@ to decompress the data after use. However, testing has shown that there can be an impact on job performance due to I/O. You can find out more about tests and results regarding performance of transparent data compression on the NeSI platforms on our -[Data Compression support page](../../Storage/File_Systems_and_Quotas/Data_Compression.md). +[Data Compression support page](Data_Compression.md). 
diff --git a/docs/Storage/File_Systems_and_Quotas/Moving_to_the_new_filesystem.md b/docs/High_Performance_Computing/Data_Management/File_Systems_and_Quotas/Moving_to_the_new_filesystem.md similarity index 100% rename from docs/Storage/File_Systems_and_Quotas/Moving_to_the_new_filesystem.md rename to docs/High_Performance_Computing/Data_Management/File_Systems_and_Quotas/Moving_to_the_new_filesystem.md diff --git a/docs/Storage/File_Systems_and_Quotas/NeSI_File_Systems_and_Quotas.md b/docs/High_Performance_Computing/Data_Management/File_Systems_and_Quotas/index.md similarity index 91% rename from docs/Storage/File_Systems_and_Quotas/NeSI_File_Systems_and_Quotas.md rename to docs/High_Performance_Computing/Data_Management/File_Systems_and_Quotas/index.md index f182e380e..7424aecec 100644 --- a/docs/Storage/File_Systems_and_Quotas/NeSI_File_Systems_and_Quotas.md +++ b/docs/High_Performance_Computing/Data_Management/File_Systems_and_Quotas/index.md @@ -6,7 +6,7 @@ tags: - storage - maui - quota -title: NeSI File Systems and Quotas +title: File Systems and Quotas vote_count: 4 vote_sum: 4 zendesk_article_id: 360000177256 @@ -17,8 +17,8 @@ zendesk_section_id: 360000033936 We have recently started rolling out compression of inactive data on the NeSI Project filesystem. Please see the [documentation below](#transparent-file-data-compression) to learn more about how this works and what data will be compressed. -[Māui](../../Scientific_Computing/The_NeSI_High_Performance_Computers/Maui.md) and -[Mahuika](../../Scientific_Computing/The_NeSI_High_Performance_Computers/Mahuika.md), along +[Māui](Maui.md) and +[Mahuika](Mahuika.md), along with all the ancillary nodes, share access to the same IBM Storage Scale file systems. Storage Scale was previously known as Spectrum Scale, and before that as GPFS, or General Parallel File System - we'll generally @@ -34,7 +34,7 @@ command: The values for `nn_storage_quota` are updated approximately every hour and cached between updates. 
-![neSI\_filetree.svg](../../assets/images/NeSI_File_Systems_and_Quotas.svg) +![neSI\_filetree.svg](NeSI_File_Systems_and_Quotas.svg) ## File System Specifications @@ -98,7 +98,7 @@ This file system is accessible from login, compute and ancillary nodes. Users should **not** run jobs from this filesystem. All home directories are backed up daily, both via the Spectrum Protect backup system, which retains the last 10 versions of all files for up to 90 days, and via -[Scale snapshots](../Data_Recovery/File_Recovery.md). +[Scale snapshots](File_Recovery.md). No cleaning policy will be applied to your home directory as long as your My NeSI account is active and you are a member of at least one active project. @@ -112,15 +112,14 @@ cleaning policy is applied. It provides storage space for datasets, shared code or configuration scripts that need to be accessed by users within a project, and -[potentially by other projects](../File_Systems_and_Quotas/File_permissions_and_groups.md). +[potentially by other projects](File_permissions_and_groups.md). Read and write performance increases using larger files, therefore you should consider archiving small files with the `nn_archive_files` utility, or a similar archiving package such as `tar` . Each NeSI project receives quota allocations for `/nesi/project/`, based on the requirements you tell us -about in your [application for a new NeSI -project](https://my.nesi.org.nz/html/request_project), and separately +about in your [application for a new NeSI project](https://my.nesi.org.nz/html/request_project), and separately covering disk space and number of files. ### /nesi/nobackup @@ -136,12 +135,12 @@ apply per-project quotas to both disk space and number of files on this file system. 
The default per-project quotas are as described in the above table; if you require more temporary (scratch) space for your project than the default quota allows for, you can discuss your -requirements with us during [the project application process](../../General/NeSI_Policies/How_we_review_applications.md), +requirements with us during [the project application process](How_we_review_applications.md), or {% include "partials/support_request.html" %} at any time. To ensure this file system remains fit-for-purpose, we have a regular cleaning policy as described in -[Automatic cleaning of nobackup filesystem](../../Storage/File_Systems_and_Quotas/Automatic_cleaning_of_nobackup_file_system.md). +[Automatic cleaning of nobackup filesystem](Automatic_cleaning_of_nobackup_file_system.md). Do not use the `touch` command or an equivalent to prevent the cleaning policy from removing unused files, because this behaviour would deprive @@ -166,12 +165,12 @@ an Automatic Tape Library (ATL). Files will remain on `/nesi/nearline` temporarily, typically for hours to days, before being moved to tape. A catalogue of files on tape will remain on the disk for quick access. -See [more information about the nearline service](../../Storage/Nearline_long_term_storage/Nearline_Long_Term_Storage_Service.md). +See [more information about the nearline service](Nearline_Long_Term_Storage_Service.md). ## Snapshots If you have accidentally deleted data you can recover it from -a [snapshot](../Data_Recovery/File_Recovery.md). +a [snapshot](File_Recovery.md). Snapshots are taken daily of `home/` and `project` directories If you cannot find it in a snapshot, please ask us to recover it for you by {% include "partials/support_request.html" %} @@ -213,7 +212,7 @@ though this is mitigated by space and bandwidth savings. 
Transparent file data compression can be controlled and applied by users via file attributes, you can find out more about using this method on -our [Data Compression support page](../../Storage/File_Systems_and_Quotas/Data_Compression.md). +our [Data Compression support page](Data_Compression.md). File data compression can also be automatically applied by administrators through the Scale policy engine. We leverage this latter feature to regularly identify and compress inactive data on the `/nesi/project` @@ -228,7 +227,6 @@ cold data. We may decrease this in future. Additionally, we only automatically compress files in the range of 4kB - 10GB in size. Files larger than this can be compressed by user interaction - see the instructions for the `mmchattr` command on -the [Data Compression support -page](../../Storage/File_Systems_and_Quotas/Data_Compression.md). Also +the [Data Compression support page](Data_Compression.md). Also note that the Scale filesystem will only store compressed blocks when the compression space saving is >=10%. 
diff --git a/docs/Storage/Data_Transfer_Services/.pages.yml b/docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/.pages.yml similarity index 100% rename from docs/Storage/Data_Transfer_Services/.pages.yml rename to docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/.pages.yml diff --git a/docs/Storage/Data_Transfer_Services/Data_Transfer_using_Globus_V5.md b/docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Data_Transfer_using_Globus_V5.md similarity index 83% rename from docs/Storage/Data_Transfer_Services/Data_Transfer_using_Globus_V5.md rename to docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Data_Transfer_using_Globus_V5.md index 22b1facbd..f11d62355 100644 --- a/docs/Storage/Data_Transfer_Services/Data_Transfer_using_Globus_V5.md +++ b/docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Data_Transfer_using_Globus_V5.md @@ -16,15 +16,14 @@ data between the NeSI Wellington DTN V5 and your personal workstation endpoint, or an endpoint from your institution. With Globus, high data transfer rates are achievable. This service allows data to be accessible to any person who has a Globus account. The newest -implementation (v5) provides [extra features and some key differences -from the previous setup](https://docs.globus.org/globus-connect-server/). +implementation (v5) provides [extra features and some key differences from the previous setup](https://docs.globus.org/globus-connect-server/). To use Globus on NeSI platforms, you need: 1. A Globus account (see - [Initial Globus Sign-Up and Globus ID](../../Storage/Data_Transfer_Services/Initial_Globus_Sign_Up-and_your_Globus_Identities.md)) + [Initial Globus Sign-Up and Globus ID](Initial_Globus_Sign_Up-and_your_Globus_Identities.md)) 2. 
An active NeSI account (see - [Creating a NeSI Account](../../Getting_Started/Accounts-Projects_and_Allocations/Creating_a_NeSI_Account_Profile.md)) + [Creating a NeSI Account](Creating_a_NeSI_Account_Profile.md)) 3. Access privileges on the non-NeSI Globus endpoint/collection you plan on transferring data from or to. This other endpoint/collection could be a personal one on your workstation, or it could be managed @@ -98,7 +97,7 @@ endpoint "NeSI Wellington DTN V5" from the list, and you will be asked to authenticate your access to the endpoint. Click Continue to the next step. -![mceclip0.png](../../assets/images/Data_Transfer_using_Globus_V5.png) +![mceclip0.png](Data_Transfer_using_Globus_V5.png) You can choose either of **<username>@wlg-dtn-oidc.nesi.org.nz** or NeSI Wellington OIDC Server (wlg-dtn-oidc.nesi.org.nz), they are all @@ -106,7 +105,7 @@ linked to the same website. If this is your first time login, you may ask to *bind* your primary identity to the OIDC login, you need to allow that. -![mceclip1.png](../../assets/images/Data_Transfer_using_Globus_V6.png) +![mceclip1.png](Data_Transfer_using_Globus_V6.png) The NeSI Wellington DTN V5 endpoint is protected by a second factor authentication (2FA-same as accessing NeSI clusters).  In the @@ -117,7 +116,7 @@ authentication (2FA-same as accessing NeSI clusters).  In the not*** use any additional characters or spaces between your password and the token number.) -![mceclip0.png](../../assets/images/Data_Transfer_using_Globus_V7.png) +![mceclip0.png](Data_Transfer_using_Globus_V7.png) After the login, you will navigate to the default root(display as "/") path, then you could change the path to @@ -129,29 +128,29 @@ path, then you could change the path to \(3\) project sub-directories of ***/nesi/nobackup/<project\_code>***  - see -[Globus Paths,Permissions, Storage Allocation](../../Storage/Data_Transfer_Services/Globus_V5_Paths-Permissions-Storage_Allocation.md). 
+[Globus Paths,Permissions, Storage Allocation](Globus_V5_Paths-Permissions-Storage_Allocation.md). Navigate to your selected directory. e.g. the `nobackup` filesystem `/nesi/nobackup/` and select the two-endpoint panel for transfer. -![mceclip3.png](../../assets/images/Data_Transfer_using_Globus_V8.png) +![mceclip3.png](Data_Transfer_using_Globus_V8.png) Select the target endpoint and authenticate. When you have activated endpoints in both transfer windows, you can start transferring files between them. -![mceclip4.png](../../assets/images/Data_Transfer_using_Globus_V9.png) +![mceclip4.png](Data_Transfer_using_Globus_V9.png) Select files you wish to transfer and select the corresponding "Start" button: -![mceclip5.png](../../assets/images/Data_Transfer_using_Globus_V10.png) +![mceclip5.png](Data_Transfer_using_Globus_V10.png) To find other NeSI endpoints, type in "nesi#": -![filemanage\_nesi.png](../../assets/images/Data_Transfer_using_Globus_V11.png) +![filemanage\_nesi.png](Data_Transfer_using_Globus_V11.png) ## In brief @@ -161,29 +160,28 @@ To find other NeSI endpoints, type in "nesi#": - If this is your first time, you will need to create a Globus account. - Open the two-endpoint panel - ![two_endpoint.png](../../assets/images/Data_Transfer_using_Globus_V12.png){: style="height:2em;"} located + ![two_endpoint.png](Data_Transfer_using_Globus_V12.png){: style="height:2em;"} located on the top-right of the *File Manager* page. - Select the Endpoints you wish to move files between (start typing "nesi#" to see the list of NeSI DTNs to select from). - [Authenticate](../../Storage/Data_Transfer_Services/Globus_V5_endpoint_activation.md) + [Authenticate](Globus_V5_endpoint_activation.md) at both endpoints. - At Globus.org the endpoint **defaults to `/home/` path** (represented by `~`) on Mahuika or Māui. We do not recommend uploading data to your home directory, as home directories are very small. 
Instead, navigate to an appropriate project directory under /nobackup (see - [Globus Paths, Permissions, Storage Allocation](../../Storage/Data_Transfer_Services/Globus_V5_Paths-Permissions-Storage_Allocation.md)). + [Globus Paths, Permissions, Storage Allocation](Globus_V5_Paths-Permissions-Storage_Allocation.md)). - Transfer the files by clicking the appropriate - ![start.png](../../assets/images/Data_Transfer_using_Globus_V13.png){: style="height:1em;"} button + ![start.png](Data_Transfer_using_Globus_V13.png){: style="height:1em;"} button depending on the direction of the transfer. - Check your email for confirmation about the job completion report. ## Transferring data using a personal endpoint To transfer files into/out of your laptop, desktop computer or any other -system you control, configure it as a [Globus Personal -Endpoint](https://www.globus.org/globus-connect-personal) (see -[Personal Globus Endpoint Configuration](../../Storage/Data_Transfer_Services/Personal_Globus_Endpoint_Configuration.md) +system you control, configure it as a [Globus Personal Endpoint](https://www.globus.org/globus-connect-personal) (see +[Personal Globus Endpoint Configuration](Personal_Globus_Endpoint_Configuration.md) for transfers between personal endpoints). 
## File sharing diff --git a/docs/Storage/Data_Transfer_Services/Data_transfer_between_NeSI_and_a_PC_without_NeSI_two_factor_authentication.md b/docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Data_transfer_between_NeSI_and_a_PC_without_NeSI_two_factor_authentication.md similarity index 78% rename from docs/Storage/Data_Transfer_Services/Data_transfer_between_NeSI_and_a_PC_without_NeSI_two_factor_authentication.md rename to docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Data_transfer_between_NeSI_and_a_PC_without_NeSI_two_factor_authentication.md index 3b8e3a784..e4443d423 100644 --- a/docs/Storage/Data_Transfer_Services/Data_transfer_between_NeSI_and_a_PC_without_NeSI_two_factor_authentication.md +++ b/docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Data_transfer_between_NeSI_and_a_PC_without_NeSI_two_factor_authentication.md @@ -11,11 +11,10 @@ zendesk_section_id: 360000040596 This article shows how to transfer potentially large amounts of data between NeSI and your personal computer, without requiring 2FA (two-factor authentication) each time you initiate the transfer.  This -is particularly useful in the context of automated, or [scripted data -transfers](../../Storage/Data_Transfer_Services/Syncing_files_between_NeSI_and_another_computer_with_globus_automate.md). +is particularly useful in the context of automated, or [scripted data transfers](Syncing_files_between_NeSI_and_another_computer_with_globus_automate.md). The approach is based on using -[Globus](../../Storage/Data_Transfer_Services/Data_Transfer_using_Globus_V5.md) +[Globus](Data_Transfer_using_Globus_V5.md) and a guest collection on the source side. **Globus** allows you to copy and synchronise files between NeSI's platforms and other computers, including your personal computer. @@ -24,8 +23,7 @@ A ***collection*** is a directory whose content can be shared. 
A ***guest collection*** allows you to share data without having to type in your credentials each time your transfer files. -See this [support -page](../../Storage/Data_Transfer_Services/Data_Transfer_using_Globus_V5.md) +See this [Data Transfer using Globus V5](Data_Transfer_using_Globus_V5.md) on how to set up Globus. Here, we assume you have an account on NeSI and have registered and created an account on Globus. @@ -44,15 +42,14 @@ have registered and created an account on Globus. - You should now see your new guest collection at -![mceclip0.png](../../assets/images/Data_transfer_between_NeSI_and_a_PC_without_NeSI_two_factor_authentication.png) +![mceclip0.png](Data_transfer_between_NeSI_and_a_PC_without_NeSI_two_factor_authentication.png) ## Step 2: Download and install Globus Connect Personal On your personal computer, download "Globus Connect Personal" from . Versions exist for Mac, Windows and Linux. Follow the instructions to install and set up the -software. Also see our support page about [Personal Globus Endpoint -Configuration](../../Storage/Data_Transfer_Services/Personal_Globus_Endpoint_Configuration.md). +software. Also see our support page about [Personal Globus Endpoint Configuration](Personal_Globus_Endpoint_Configuration.md). ## Step 3: Share a directory on your personal computer @@ -66,7 +63,7 @@ Note: By default your entire home directory will be exposed. It is good practice to only share specific directories. You can remove your home directory by highlighting it and clicking on the "-" sign. -![mceclip1.png](../../assets/images/Data_transfer_between_NeSI_and_a_PC_without_NeSI_two_factor_authentication_0.png) +![mceclip1.png](Data_transfer_between_NeSI_and_a_PC_without_NeSI_two_factor_authentication_0.png) ## Step 4: Test a file transfer @@ -78,4 +75,4 @@ directory by highlighting it and clicking on the "-" sign. be seen in the picture below. 
- Click on the files you want to transfer and press "Start" -![mceclip3.png](../../assets/images/Data_transfer_between_NeSI_and_a_PC_without_NeSI_two_factor_authentication_1.png) +![mceclip3.png](Data_transfer_between_NeSI_and_a_PC_without_NeSI_two_factor_authentication_1.png) diff --git a/docs/Storage/Data_Transfer_Services/Download_and_share_CMIP6_data_for_NIWA_researchers.md b/docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Download_and_share_CMIP6_data_for_NIWA_researchers.md similarity index 90% rename from docs/Storage/Data_Transfer_Services/Download_and_share_CMIP6_data_for_NIWA_researchers.md rename to docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Download_and_share_CMIP6_data_for_NIWA_researchers.md index 1f86b0cab..698953c80 100644 --- a/docs/Storage/Data_Transfer_Services/Download_and_share_CMIP6_data_for_NIWA_researchers.md +++ b/docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Download_and_share_CMIP6_data_for_NIWA_researchers.md @@ -8,10 +8,8 @@ zendesk_article_id: 360001287235 zendesk_section_id: 360000040596 --- -The [Coupled Model Intercomparison -Project](https://www.wcrp-climate.org/wgcm-cmip), which began in 1995 -under the auspices of the [World Climate Research Programme -(WCRP)](https://www.wcrp-climate.org/about-wcrp/wcrp-overview), is now +The [Coupled Model Intercomparison Project](https://www.wcrp-climate.org/wgcm-cmip), which began in 1995 +under the auspices of the [World Climate Research Programme (WCRP)](https://www.wcrp-climate.org/about-wcrp/wcrp-overview), is now in its sixth phase (CMIP6). CMIP6 orchestrates somewhat independent model intercomparison activities and their experiments, which have adopted a common infrastructure for collecting, organising, and @@ -26,7 +24,7 @@ hence will also be accessible to your collaborators. 
The instructions are geared towards members of the `niwa02916` group - {% include "partials/support_request.html" %} if you are a NIWA employee and want to become part of this group. Other NeSI users may want to -read [this](../../Scientific_Computing/Supported_Applications/Synda.md), +read [this](Synda.md), which explains how to install the Synda tool. Once installed, you can then type similar commands to the ones below to test your configuration. @@ -40,7 +38,7 @@ source /nesi/project/niwa02916/synda_env.sh This will load the Anaconda3 environment and set the `ST_HOME` variable. You should also now be able to invoke -[Synda](../../Scientific_Computing/Supported_Applications/Synda.md) +[Synda](Synda.md) commands, a tool that can be used to synchronise CMIP data with Earth System Grid Federation archives. A full list of options can be obtained with diff --git a/docs/Storage/Data_Transfer_Services/Globus_Quick_Start_Guide.md b/docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Globus_Quick_Start_Guide.md similarity index 84% rename from docs/Storage/Data_Transfer_Services/Globus_Quick_Start_Guide.md rename to docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Globus_Quick_Start_Guide.md index 960bfca93..8928a4aef 100644 --- a/docs/Storage/Data_Transfer_Services/Globus_Quick_Start_Guide.md +++ b/docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Globus_Quick_Start_Guide.md @@ -33,11 +33,10 @@ up using one of the available options on the page. Please note that the organisation is not listed, please sign in (sign up) using any of the other methods. -![Globus\_login.png](../../assets/images/Globus_Quick_Start_Guide.png) +![Globus\_login.png](Globus_Quick_Start_Guide.png) For more detailed instructions please see [Initial Globus Sign-Up, and -your Globus -Identities](../../Storage/Data_Transfer_Services/Initial_Globus_Sign_Up-and_your_Globus_Identities.md). 
+your Globus Identities](Initial_Globus_Sign_Up-and_your_Globus_Identities.md). ## Globus Endpoint Activation @@ -53,9 +52,8 @@ copying to and from. Please note that the NeSI `project` directory is read only, and `nobackup` is read and write. A list of some Institutional endpoints can be found here: -[National-Data-Transfer-Platform](../../Storage/Data_Transfer_Services/National_Data_Transfer_Platform.md). -You can also set up your own [personal -endpoint](../../Storage/Data_Transfer_Services/Personal_Globus_Endpoint_Configuration.md) +[National-Data-Transfer-Platform](National_Data_Transfer_Platform.md). +You can also set up your own [personal endpoint](Personal_Globus_Endpoint_Configuration.md) to transfer data to or from your personal computer, however, administrative access to your computer is required @@ -72,7 +70,7 @@ bar on the left. **do not** save your password on "*Browser settings*" as it will change every time due to the 2nd factor requirement. -![NeSI_Globus_Authenticate.png](../../assets/images/Globus_Quick_Start_Guide_0.png) +![NeSI_Globus_Authenticate.png](Globus_Quick_Start_Guide_0.png) ## Transferring Data @@ -82,7 +80,7 @@ initiate the transfer, select one of the two directional arrows. In the image below, the 'config' folder is being transferred from the location on the right, to the location on the left. -![Globus_transfer_data.png](../../assets/images/Globus_Quick_Start_Guide_1.png) +![Globus_transfer_data.png](Globus_Quick_Start_Guide_1.png) To see the progress of the transfer, please click 'Activity' on the left hand menu bar. 
diff --git a/docs/Storage/Data_Transfer_Services/Globus_V5_Paths-Permissions-Storage_Allocation.md b/docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Globus_V5_Paths-Permissions-Storage_Allocation.md similarity index 93% rename from docs/Storage/Data_Transfer_Services/Globus_V5_Paths-Permissions-Storage_Allocation.md rename to docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Globus_V5_Paths-Permissions-Storage_Allocation.md index 9ec124b65..6cd20e104 100644 --- a/docs/Storage/Data_Transfer_Services/Globus_V5_Paths-Permissions-Storage_Allocation.md +++ b/docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Globus_V5_Paths-Permissions-Storage_Allocation.md @@ -14,7 +14,7 @@ If you point Globus File Manager to an endpoint collection where you have an account/access, it will open a single panel pointing to the root path directory, displayed as '`/home/`'. -![mceclip0.png](../../assets/images/Globus_V5_Paths-Permissions-Storage_Allocation.png) +![mceclip0.png](Globus_V5_Paths-Permissions-Storage_Allocation.png) ###  On NeSI's Māui/Mahuika clusters this means @@ -25,7 +25,7 @@ path directory, displayed as '`/home/`'. | `/nesi/project/` | yes | `/nesi/project/` | yes | **read only** access | For more information about NeSI filesystem, check -[NeSI_File_Systems_and_Quotas](../../Storage/File_Systems_and_Quotas/NeSI_File_Systems_and_Quotas.md). +[NeSI_File_Systems_and_Quotas](NeSI_File_Systems_and_Quotas.md). 
## Performing Globus transfers to/from Māui/Mahuika diff --git a/docs/Storage/Data_Transfer_Services/Globus_V5_endpoint_activation.md b/docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Globus_V5_endpoint_activation.md similarity index 89% rename from docs/Storage/Data_Transfer_Services/Globus_V5_endpoint_activation.md rename to docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Globus_V5_endpoint_activation.md index 3ddf6294b..9fd97c648 100644 --- a/docs/Storage/Data_Transfer_Services/Globus_V5_endpoint_activation.md +++ b/docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Globus_V5_endpoint_activation.md @@ -13,7 +13,7 @@ zendesk_section_id: 360000040596 When you select an endpoint to transfer data to/from, you may be asked to authenticate with that endpoint: -![mceclip0.png](../../assets/images/Globus_V5_endpoint_activation.png) +![mceclip0.png](Globus_V5_endpoint_activation.png) Transfers are only possible once you have supplied credentials that authenticate your access to the endpoint. This process is known as "activating the endpoint".  The endpoint remains active for 24 hours.   @@ -27,7 +27,7 @@ authentication (2FA-same as accessing NeSI clusters).  In the not*** use any additional characters or spaces between your password and the token number.) 
- ![mceclip0.png](../../assets/images/Globus_V5_endpoint_activation_0.png) + ![mceclip0.png](Globus_V5_endpoint_activation_0.png) Check the status of your endpoints at [https://www.globus.org/app/console/endpoints](https://www.globus.org/app/console/endpoints) diff --git a/docs/Storage/Data_Transfer_Services/Initial_Globus_Sign_Up-and_your_Globus_Identities.md b/docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Initial_Globus_Sign_Up-and_your_Globus_Identities.md similarity index 88% rename from docs/Storage/Data_Transfer_Services/Initial_Globus_Sign_Up-and_your_Globus_Identities.md rename to docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Initial_Globus_Sign_Up-and_your_Globus_Identities.md index 6a254d307..9bfb0dc5b 100644 --- a/docs/Storage/Data_Transfer_Services/Initial_Globus_Sign_Up-and_your_Globus_Identities.md +++ b/docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Initial_Globus_Sign_Up-and_your_Globus_Identities.md @@ -17,7 +17,7 @@ Google or GlobusID. you can also use any of the available methods - this then becomes your primary identity in Globus. - ![Globus\_login.png](../../assets/images/Initial_Globus_Sign_Up-and_your_Globus_Identities.png) + ![Globus\_login.png](Initial_Globus_Sign_Up-and_your_Globus_Identities.png) 2. Link other Globus identities to your primary identity @@ -33,7 +33,7 @@ Google or GlobusID. If you have other identities in Globus (for example, a GlobusID), [link them to your Google ID account](https://docs.globus.org/how-to/link-to-existing/). - ![identities.png](../../assets/images/Initial_Globus_Sign_Up-and_your_Globus_Identities_0.png) + ![identities.png](Initial_Globus_Sign_Up-and_your_Globus_Identities_0.png) !!! 
warning If you had a Globus account before February 2016, that account ID is now diff --git a/docs/Storage/Data_Transfer_Services/National_Data_Transfer_Platform.md b/docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/National_Data_Transfer_Platform.md similarity index 100% rename from docs/Storage/Data_Transfer_Services/National_Data_Transfer_Platform.md rename to docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/National_Data_Transfer_Platform.md diff --git a/docs/Storage/Data_Transfer_Services/Personal_Globus_Endpoint_Configuration.md b/docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Personal_Globus_Endpoint_Configuration.md similarity index 83% rename from docs/Storage/Data_Transfer_Services/Personal_Globus_Endpoint_Configuration.md rename to docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Personal_Globus_Endpoint_Configuration.md index 58c41841e..9e7400867 100644 --- a/docs/Storage/Data_Transfer_Services/Personal_Globus_Endpoint_Configuration.md +++ b/docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Personal_Globus_Endpoint_Configuration.md @@ -18,11 +18,10 @@ install a personal Globus endpoint on your computer (OS-specific instructions). Once your personal endpoint is created and you have activated it, check -Globus's [Endpoints administered by -you](https://app.globus.org/endpoints?scope=administered-by-me) to see +Globus's [Endpoints administered by you](https://app.globus.org/endpoints?scope=administered-by-me) to see whether your endpoint shows up as active. -![mceclip0.png](../../assets/images/Personal_Globus_Endpoint_Configuration.png) +![mceclip0.png](Personal_Globus_Endpoint_Configuration.png) ## Personal Endpoint file-transfer and sharing @@ -34,10 +33,10 @@ Globus Plus is a part of NeSI's Globus subscription. To join Globus Plus, you must become a member of the Globus sponsor group *New Zealand eScience Infrastructure*. 
-Check if your account already has this membership by viewing the [Globus -Plus](https://app.globus.org/account/plus) tab under your Account: +Check if your account already has this membership by viewing the +[Globus Plus](https://app.globus.org/account/plus) tab under your Account: -![mceclip2.png](../../assets/images/Personal_Globus_Endpoint_Configuration_0.png) +![Personal_Globus_Endpoint_Configuration_0](Personal_Globus_Endpoint_Configuration_0.png) If you do not see an entry for *New Zealand eScience Infrastructure* on this page, then: diff --git a/docs/Storage/Data_Transfer_Services/Re_creating_Shared_Collections_and_Bookmarks_in_Globus_V5.md b/docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Re_creating_Shared_Collections_and_Bookmarks_in_Globus_V5.md similarity index 61% rename from docs/Storage/Data_Transfer_Services/Re_creating_Shared_Collections_and_Bookmarks_in_Globus_V5.md rename to docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Re_creating_Shared_Collections_and_Bookmarks_in_Globus_V5.md index 5fdc0a954..a2132f272 100644 --- a/docs/Storage/Data_Transfer_Services/Re_creating_Shared_Collections_and_Bookmarks_in_Globus_V5.md +++ b/docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Re_creating_Shared_Collections_and_Bookmarks_in_Globus_V5.md @@ -23,23 +23,22 @@ In summary: 1. To re-create existing Collections, select *Share* and *Create Guest Collection - ![globus14.jpg](../../assets/images/Re_creating_Shared_Collections_and_Bookmarks_in_Globus_V5.jpg) + ![globus14.jpg](Re_creating_Shared_Collections_and_Bookmarks_in_Globus_V5.jpg) -2. Enter the [file - path](../../Storage/Data_Transfer_Services/Globus_V5_Paths-Permissions-Storage_Allocation.md) +2. Enter the [file path](Globus_V5_Paths-Permissions-Storage_Allocation.md) of the directory to be shared. 
- ![globus10.jpg](../../assets/images/Re_creating_Shared_Collections_and_Bookmarks_in_Globus_V6.jpg) + ![globus10.jpg](Re_creating_Shared_Collections_and_Bookmarks_in_Globus_V6.jpg) This can also be copied from your existing Shared Collection on *NeSI Wellington DTN - ![globus07.jpg](../../assets/images/Re_creating_Shared_Collections_and_Bookmarks_in_Globus_V7.jpg) + ![globus07.jpg](Re_creating_Shared_Collections_and_Bookmarks_in_Globus_V7.jpg) 3. Add Permissions for an individual or a Group (existing, or create a new group) - ![globus11.jpg](../../assets/images/Re_creating_Shared_Collections_and_Bookmarks_in_Globus_V8.jpg) + ![globus11.jpg](Re_creating_Shared_Collections_and_Bookmarks_in_Globus_V8.jpg) 4. Users you share with will receive an email notification containing a link to the new *Guest Collection*. @@ -49,9 +48,9 @@ In summary: 1. Create bookmarks to **NeSI Wellington DTN V5** and new Guest Collections - ![globus13.jpg](../../assets/images/Re_creating_Shared_Collections_and_Bookmarks_in_Globus_V9.jpg) + ![globus13.jpg](Re_creating_Shared_Collections_and_Bookmarks_in_Globus_V9.jpg) 2. Bookmarks to *NeSI Wellington DTN* and Shared Collections on *NeSI Wellington DTN* should be deleted. 
-![globus12.jpg](../../assets/images/Re_creating_Shared_Collections_and_Bookmarks_in_Globus_V10.jpg) +![globus12.jpg](Re_creating_Shared_Collections_and_Bookmarks_in_Globus_V10.jpg) diff --git a/docs/Storage/Data_Transfer_Services/Syncing_files_between_NeSI_and_another_computer_with_globus_automate.md b/docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Syncing_files_between_NeSI_and_another_computer_with_globus_automate.md similarity index 90% rename from docs/Storage/Data_Transfer_Services/Syncing_files_between_NeSI_and_another_computer_with_globus_automate.md rename to docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Syncing_files_between_NeSI_and_another_computer_with_globus_automate.md index a6bb667ef..6bb82f152 100644 --- a/docs/Storage/Data_Transfer_Services/Syncing_files_between_NeSI_and_another_computer_with_globus_automate.md +++ b/docs/High_Performance_Computing/Data_Management/Globus_Transfer_Service/Syncing_files_between_NeSI_and_another_computer_with_globus_automate.md @@ -25,7 +25,7 @@ We'll assume that you have a NeSI account, you have registered at [https://globus.org](https://globus.org), and have created a guest collections on NeSI and a private mapped collection on the destination computer (follow the instructions at -[Data_transfer_between_NeSI_and_a_PC_without_NeSI_two_factor_authentication](../../Storage/Data_Transfer_Services/Data_transfer_between_NeSI_and_a_PC_without_NeSI_two_factor_authentication.md)). +[Data_transfer_between_NeSI_and_a_PC_without_NeSI_two_factor_authentication](Data_transfer_between_NeSI_and_a_PC_without_NeSI_two_factor_authentication.md)). A guest collection is directory whose content is shared via Globus. ## Step 1: Write a JSON file describing the transfer @@ -71,8 +71,7 @@ where `SYNC_LEVEL=0` will transfer new files that do not exist on destination. Leaving this setting out will overwrite all the files on destination. 
- [See how other sync\_level settings can be used to update data in - the destination directory based on modification time and checksums](https://docs.globus.org/api/transfer/task_submit/#transfer_specific_fields). + [See how other sync level settings can be used to update data in the destination directory based on modification time and checksums](https://docs.globus.org/api/transfer/task_submit/#transfer_specific_fields). ## Step 2: Initiate the transfer diff --git a/docs/Getting_Started/Next_Steps/Moving_files_to_and_from_the_cluster.md b/docs/High_Performance_Computing/Data_Management/Moving_files_to_and_from_the_cluster.md similarity index 75% rename from docs/Getting_Started/Next_Steps/Moving_files_to_and_from_the_cluster.md rename to docs/High_Performance_Computing/Data_Management/Moving_files_to_and_from_the_cluster.md index 50c937ae5..59d7052f0 100644 --- a/docs/Getting_Started/Next_Steps/Moving_files_to_and_from_the_cluster.md +++ b/docs/High_Performance_Computing/Data_Management/Moving_files_to_and_from_the_cluster.md @@ -15,33 +15,33 @@ vote_sum: 3 --- !!! prerequisite - Have an [active account and project.](../Accounts-Projects_and_Allocations/Creating_a_NeSI_Account_Profile.md) + Have an [active account and project.](../Mahuika_Cluster/Next_Steps/Creating_a_NeSI_Account_Profile.md) -Find more information on [the NeSI Filesystem](../../Storage/File_Systems_and_Quotas/NeSI_File_Systems_and_Quotas.md). +Find more information on [the NeSI Filesystem](../Mahuika_Cluster/Next_Steps/NeSI_File_Systems_and_Quotas.md). ## Using the Jupyter interface -The [Jupyter interface](../../Scientific_Computing/Interactive_computing_using_Jupyter/Jupyter_on_NeSI.md) +The [Jupyter interface](../Mahuika_Cluster/Next_Steps/Jupyter_on_NeSI.md) useful for running code on NeSI requiring only a web browser; the instructions are same whether your are connecting from a Windows, Mac or a Linux computer. 
To upload a file, click on the -![up arrow](../../assets/images/Moving_files_to_and_from_the_cluster.png) +![up arrow](../Mahuika_Cluster/Next_Steps/Moving_files_to_and_from_the_cluster.png) button, near the top left and generally under the Run button. To download a file, navigate the file browser on the left and right-click on the file to see the menu below, -![right click menu](../../assets/images/Moving_files_to_and_from_the_cluster_0.png) +![right click menu](../Mahuika_Cluster/Next_Steps/Moving_files_to_and_from_the_cluster_0.png) The Download button is at the bottom. ## Standard Terminal !!! prerequisite - Have SSH setup as described in [Standard Terminal Setup](../../Scientific_Computing/Terminal_Setup/Standard_Terminal_Setup.md) + Have SSH setup as described in [Standard Terminal Setup](../Mahuika_Cluster/Next_Steps/Standard_Terminal_Setup.md) In a local terminal the following commands can be used to: @@ -59,7 +59,7 @@ scp mahuika: !!! note - This will only work if you have set up aliases as described in - [Terminal Setup](../../Scientific_Computing/Terminal_Setup/Standard_Terminal_Setup.md). + [Terminal Setup](../Mahuika_Cluster/Next_Steps/Standard_Terminal_Setup.md). - As the terms 'maui' and 'mahuika' are defined locally, the above commands *only works when using a local terminal* (i.e. not on Mahuika). - If you are using Windows subsystem, the root paths are different @@ -75,7 +75,7 @@ your password. ## File Managers !!! 
prerequisite - Have SSH setup as described in [Standard Terminal Setup](../../Scientific_Computing/Terminal_Setup/Standard_Terminal_Setup.md) + Have SSH setup as described in [Standard Terminal Setup](Standard_Terminal_Setup.md) Most file managers can be used to connect to a remote directory simply by typing in the address bar provided your have an active connection to @@ -88,32 +88,32 @@ This **does not** work for File Explorer (Windows default) This **does not** work for Finder (Mac default) -![files](../../assets/images/Moving_files_to_and_from_the_cluster_1.png) +![files](../Mahuika_Cluster/Next_Steps/Moving_files_to_and_from_the_cluster_1.png) If your default file manager does not support mounting over SFTP, see -[Can I use SSHFS to mount the cluster filesystem on my local machine?](../../General/FAQs/Can_I_use_SSHFS_to_mount_the_cluster_filesystem_on_my_local_machine.md). +[Can I use SSHFS to mount the cluster filesystem on my local machine?](../Mahuika_Cluster/Next_Steps/Can_I_use_SSHFS_to_mount_the_cluster_filesystem_on_my_local_machine.md). ## MobaXterm !!! prerequisite - [MobaXterm Setup Windows](../../Scientific_Computing/Terminal_Setup/MobaXterm_Setup_Windows.md) + [MobaXterm Setup Windows](../Mahuika_Cluster/Next_Steps/MobaXterm_Setup_Windows.md) Clicking the "*Scp*" tab (located on the left-hand side of the window) opens up a graphical user interface that can be used for basic file operations. You can drag and drop files in the file explorer or use the up and down arrows on the toolbar to upload and download files. 
-![moba terminal](../../assets/images/Moving_files_to_and_from_the_cluster_2.png) +![moba terminal](../Mahuika_Cluster/Next_Steps/Moving_files_to_and_from_the_cluster_2.png) You may also transfer files as described under 'Standard Terminal' (provided -[Windows_Subsystem_for_Linux](../../Scientific_Computing/Terminal_Setup/Windows_Subsystem_for_Linux_WSL.md) +[Windows_Subsystem_for_Linux](../Mahuika_Cluster/Next_Steps/Windows_Subsystem_for_Linux_WSL.md) is enabled). ## WinSCP !!! prerequisite - [WinSCP-PuTTY Setup Windows](../../Scientific_Computing/Terminal_Setup/WinSCP-PuTTY_Setup_Windows.md) + [WinSCP-PuTTY Setup Windows](../Mahuika_Cluster/Next_Steps/WinSCP-PuTTY_Setup_Windows.md) As WinSCP uses multiple tunnels for file transfer you will be required to authenticate again on your first file operation of the session. The @@ -125,7 +125,7 @@ authentication. Globus is available for those with large amounts of data, security concerns, or connection consistency issues. You can find more details in -[Data_Transfer_using_Globus_V5](../../Storage/Data_Transfer_Services/Data_Transfer_using_Globus_V5.md). +[Data_Transfer_using_Globus_V5](../Mahuika_Cluster/Next_Steps/Data_Transfer_using_Globus_V5.md). ## Rclone diff --git a/docs/Storage/Nearline_long_term_storage/Nearline_Long_Term_Storage_Service.md b/docs/High_Performance_Computing/Data_Management/Nearline_long_term_storage/Nearline_Long_Term_Storage_Service.md similarity index 97% rename from docs/Storage/Nearline_long_term_storage/Nearline_Long_Term_Storage_Service.md rename to docs/High_Performance_Computing/Data_Management/Nearline_long_term_storage/Nearline_Long_Term_Storage_Service.md index d47b91a9d..300eda9cd 100644 --- a/docs/Storage/Nearline_long_term_storage/Nearline_Long_Term_Storage_Service.md +++ b/docs/High_Performance_Computing/Data_Management/Nearline_long_term_storage/Nearline_Long_Term_Storage_Service.md @@ -117,8 +117,8 @@ modified times, sizes and file paths. 
`nlcompare` is particularly useful if you want to compare a directory on Nearline to a corresponding directory in `/nesi/project` or -`/nesi/nobackup`. See [Verifying uploads to Nearline -storage](../../Storage/Nearline_long_term_storage/Verifying_uploads_to_Nearline_storage.md) +`/nesi/nobackup`. See +[Verifying uploads to Nearline storage](Verifying_uploads_to_Nearline_storage.md) for more information on how to do a comparison and verification. If the contents of the Nearline directory and the corresponding local @@ -258,8 +258,8 @@ As a good practice: directory once there is a copy of it on Nearline. - Before deleting any data from your project or nobackup directory that has been uploaded to Nearline, please consider whether you - require [verification of the - transfer](../../Storage/Nearline_long_term_storage/Verifying_uploads_to_Nearline_storage.md). + require + [verification of the transfer](Verifying_uploads_to_Nearline_storage.md). We recommend that you do at least a basic verification of all transfers. @@ -271,8 +271,7 @@ version of data from nobackup or project: (on `/nesi/nearline`). To look at one directory on `/nesi/nearline` at a time, use `nlls`; if you need to compare a large number of files across a range of directories, or for more - thorough verification (e.g. checksums), read [this - article](../../Storage/Nearline_long_term_storage/Verifying_uploads_to_Nearline_storage.md) + thorough verification (e.g. checksums), read [Verifying_uploads_to_Nearline_storage](Verifying_uploads_to_Nearline_storage.md) or {% include "partials/support_request.html" %}. 2. Once you know which files you need to update (i.e. only files whose Nearline version is out of date), remove the old files on Nearline @@ -300,7 +299,7 @@ exist on Nearline but are no longer on project or nobackup: Data can be retrieved from Nearline using then `nlget` command. 
The syntax is: -``` sh +```sh nlget [ --nowait ] { | } ``` @@ -461,7 +460,7 @@ will be merged in the Nearline file system. Further, when retrieving data from Nearline, keep in mind that the directory structure up to your projectID will be retrieved: -![librarian\_get\_put.jpeg](../../assets/images/Nearline_Long_Term_Storage_Service.png) +![librarian\_get\_put.jpeg](Nearline_Long_Term_Storage_Service.png) ## Underlying mechanism diff --git a/docs/Storage/Nearline_long_term_storage/Preparing_small_files_for_migration_to_Nearline_storage.md b/docs/High_Performance_Computing/Data_Management/Nearline_long_term_storage/Preparing_small_files_for_migration_to_Nearline_storage.md similarity index 100% rename from docs/Storage/Nearline_long_term_storage/Preparing_small_files_for_migration_to_Nearline_storage.md rename to docs/High_Performance_Computing/Data_Management/Nearline_long_term_storage/Preparing_small_files_for_migration_to_Nearline_storage.md diff --git a/docs/Storage/Nearline_long_term_storage/Release_Notes_Nearline/.pages.yml b/docs/High_Performance_Computing/Data_Management/Nearline_long_term_storage/Release_Notes_Nearline/.pages.yml similarity index 100% rename from docs/Storage/Nearline_long_term_storage/Release_Notes_Nearline/.pages.yml rename to docs/High_Performance_Computing/Data_Management/Nearline_long_term_storage/Release_Notes_Nearline/.pages.yml diff --git a/docs/Storage/Nearline_long_term_storage/Release_Notes_Nearline/Long_Term_Storage_Nearline_release_notes_v1-1-0-14.md b/docs/High_Performance_Computing/Data_Management/Nearline_long_term_storage/Release_Notes_Nearline/Long_Term_Storage_Nearline_release_notes_v1-1-0-14.md similarity index 100% rename from docs/Storage/Nearline_long_term_storage/Release_Notes_Nearline/Long_Term_Storage_Nearline_release_notes_v1-1-0-14.md rename to docs/High_Performance_Computing/Data_Management/Nearline_long_term_storage/Release_Notes_Nearline/Long_Term_Storage_Nearline_release_notes_v1-1-0-14.md diff --git 
a/docs/Storage/Nearline_long_term_storage/Release_Notes_Nearline/Long_Term_Storage_Nearline_release_notes_v1-1-0-21.md b/docs/High_Performance_Computing/Data_Management/Nearline_long_term_storage/Release_Notes_Nearline/Long_Term_Storage_Nearline_release_notes_v1-1-0-21.md similarity index 100% rename from docs/Storage/Nearline_long_term_storage/Release_Notes_Nearline/Long_Term_Storage_Nearline_release_notes_v1-1-0-21.md rename to docs/High_Performance_Computing/Data_Management/Nearline_long_term_storage/Release_Notes_Nearline/Long_Term_Storage_Nearline_release_notes_v1-1-0-21.md diff --git a/docs/Storage/Nearline_long_term_storage/Release_Notes_Nearline/Long_Term_Storage_Nearline_release_notes_v1-1-0-22.md b/docs/High_Performance_Computing/Data_Management/Nearline_long_term_storage/Release_Notes_Nearline/Long_Term_Storage_Nearline_release_notes_v1-1-0-22.md similarity index 100% rename from docs/Storage/Nearline_long_term_storage/Release_Notes_Nearline/Long_Term_Storage_Nearline_release_notes_v1-1-0-22.md rename to docs/High_Performance_Computing/Data_Management/Nearline_long_term_storage/Release_Notes_Nearline/Long_Term_Storage_Nearline_release_notes_v1-1-0-22.md diff --git a/docs/Storage/Nearline_long_term_storage/Release_Notes_Nearline/Long_term_Storage_Nearline_release_notes_v1-1-0-18.md b/docs/High_Performance_Computing/Data_Management/Nearline_long_term_storage/Release_Notes_Nearline/Long_term_Storage_Nearline_release_notes_v1-1-0-18.md similarity index 100% rename from docs/Storage/Nearline_long_term_storage/Release_Notes_Nearline/Long_term_Storage_Nearline_release_notes_v1-1-0-18.md rename to docs/High_Performance_Computing/Data_Management/Nearline_long_term_storage/Release_Notes_Nearline/Long_term_Storage_Nearline_release_notes_v1-1-0-18.md diff --git a/docs/Storage/Nearline_long_term_storage/Release_Notes_Nearline/Long_term_Storage_Nearline_release_notes_v1-1-0-19.md 
b/docs/High_Performance_Computing/Data_Management/Nearline_long_term_storage/Release_Notes_Nearline/Long_term_Storage_Nearline_release_notes_v1-1-0-19.md similarity index 100% rename from docs/Storage/Nearline_long_term_storage/Release_Notes_Nearline/Long_term_Storage_Nearline_release_notes_v1-1-0-19.md rename to docs/High_Performance_Computing/Data_Management/Nearline_long_term_storage/Release_Notes_Nearline/Long_term_Storage_Nearline_release_notes_v1-1-0-19.md diff --git a/docs/Storage/Nearline_long_term_storage/Release_Notes_Nearline/Long_term_Storage_Nearline_release_notes_v1-1-0-20.md b/docs/High_Performance_Computing/Data_Management/Nearline_long_term_storage/Release_Notes_Nearline/Long_term_Storage_Nearline_release_notes_v1-1-0-20.md similarity index 100% rename from docs/Storage/Nearline_long_term_storage/Release_Notes_Nearline/Long_term_Storage_Nearline_release_notes_v1-1-0-20.md rename to docs/High_Performance_Computing/Data_Management/Nearline_long_term_storage/Release_Notes_Nearline/Long_term_Storage_Nearline_release_notes_v1-1-0-20.md diff --git a/docs/Storage/Nearline_long_term_storage/Verifying_uploads_to_Nearline_storage.md b/docs/High_Performance_Computing/Data_Management/Nearline_long_term_storage/Verifying_uploads_to_Nearline_storage.md similarity index 97% rename from docs/Storage/Nearline_long_term_storage/Verifying_uploads_to_Nearline_storage.md rename to docs/High_Performance_Computing/Data_Management/Nearline_long_term_storage/Verifying_uploads_to_Nearline_storage.md index 4641e431c..de4386930 100644 --- a/docs/Storage/Nearline_long_term_storage/Verifying_uploads_to_Nearline_storage.md +++ b/docs/High_Performance_Computing/Data_Management/Nearline_long_term_storage/Verifying_uploads_to_Nearline_storage.md @@ -9,8 +9,7 @@ zendesk_section_id: 360000042255 --- -Our [Long-Term Storage -Service](../../Storage/Nearline_long_term_storage/Nearline_Long_Term_Storage_Service.md) +Our [Long-Term Storage Service](Nearline_Long_Term_Storage_Service.md) is 
currently in an Early Access phase, and we encourage researchers using the service to verify their data before deleting it from the project directory (persistent storage) or nobackup directory (temporary diff --git a/docs/High_Performance_Computing/Data_Management/index.md b/docs/High_Performance_Computing/Data_Management/index.md new file mode 100644 index 000000000..ca1096d89 --- /dev/null +++ b/docs/High_Performance_Computing/Data_Management/index.md @@ -0,0 +1,23 @@ +--- +created_at: '2018-05-01T23:29:39Z' +tags: +- hpc +- info +title: Data Management +hide: + - toc +--- + +
+ +- ![](../../assets/icons/material/account-details.svg) [__File Systems And Quotas__](File_Systems_and_Quotas/) + + --- + Learn the different types of storage available on NeSI + +- ![](../../assets/icons/material/account-details.svg) [__File Transfer__](Moving_files_to_and_from_the_cluster.md) + + --- + Learn how to move files to and from the cluster. + +
diff --git a/docs/High_Performance_Computing/Mahuika_Cluster/.pages.yml b/docs/High_Performance_Computing/Mahuika_Cluster/.pages.yml new file mode 100644 index 000000000..e87f84f2e --- /dev/null +++ b/docs/High_Performance_Computing/Mahuika_Cluster/.pages.yml @@ -0,0 +1,4 @@ +--- +nav: + - Terminal_Setup + - ... diff --git a/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Checksums.md b/docs/High_Performance_Computing/Mahuika_Cluster/Checksums.md similarity index 100% rename from docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Checksums.md rename to docs/High_Performance_Computing/Mahuika_Cluster/Checksums.md diff --git a/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Hyperthreading.md b/docs/High_Performance_Computing/Mahuika_Cluster/Hyperthreading.md similarity index 98% rename from docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Hyperthreading.md rename to docs/High_Performance_Computing/Mahuika_Cluster/Hyperthreading.md index 370b716ad..d47937eac 100644 --- a/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Hyperthreading.md +++ b/docs/High_Performance_Computing/Mahuika_Cluster/Hyperthreading.md @@ -38,7 +38,7 @@ once your job starts you will have twice the number of CPUs as `ntasks`. If you set `--cpus-per-task=n`, Slurm will request `n` logical CPUs per task, i.e., will set `n` threads for the job. Your code must be capable of running Hyperthreaded (for example using -[OpenMP](../../Scientific_Computing/HPC_Software_Environment/OpenMP_settings.md)) +[OpenMP](../Batch_Computing/OpenMP_settings.md)) if `--cpus-per-task > 1`. Setting `--hint=nomultithread` with `srun` or `sbatch` causes Slurm to @@ -190,8 +190,7 @@ considered a bonus. CPU, not per the number of threads or tasks.  For non-MPI jobs, or for MPI jobs that request the same number of tasks on every node, we recommend to specify `--mem` (i.e. memory per node) instead. 
See - [How to request memory - (RAM)](../../General/FAQs/How_do_I_request_memory.md) for more + [How to request memory (RAM)](../Batch_Computing/How_do_I_request_memory.md) for more information. - Non-MPI jobs which specify `--cpus-per-task` and use **srun** should also set `--ntasks=1`, otherwise the program will be run twice in diff --git a/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/NetCDF-HDF5_file_locking.md b/docs/High_Performance_Computing/Mahuika_Cluster/NetCDF-HDF5_file_locking.md similarity index 100% rename from docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/NetCDF-HDF5_file_locking.md rename to docs/High_Performance_Computing/Mahuika_Cluster/NetCDF-HDF5_file_locking.md diff --git a/docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/.pages.yml b/docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/.pages.yml new file mode 100644 index 000000000..1f68deb6d --- /dev/null +++ b/docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/.pages.yml @@ -0,0 +1,4 @@ +nav: + - SSH_Config_Setup.md + - ... + - X11_on_NeSI.md diff --git a/docs/Scientific_Computing/Terminal_Setup/Git_Bash_Windows.md b/docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/Git_Bash_Windows.md similarity index 82% rename from docs/Scientific_Computing/Terminal_Setup/Git_Bash_Windows.md rename to docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/Git_Bash_Windows.md index 1b759358d..bb85d7045 100644 --- a/docs/Scientific_Computing/Terminal_Setup/Git_Bash_Windows.md +++ b/docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/Git_Bash_Windows.md @@ -14,8 +14,8 @@ zendesk_section_id: 360000189696 --- !!! 
prerequisite - - Have a [NeSI account.](../../Getting_Started/Accounts-Projects_and_Allocations/Creating_a_NeSI_Account_Profile.md)) - - Be a member of an [active project.](../../Getting_Started/Accounts-Projects_and_Allocations/Applying_to_join_an_existing_NeSI_project.md) + - Have a [NeSI account.](Creating_a_NeSI_Account_Profile.md) + - Be a member of an [active project.](Applying_to_join_an_existing_NeSI_project.md) ## First time setup @@ -86,4 +86,4 @@ credentials every time you open a new terminal or try to move a file.* scp mahuika:~/ ``` -(For more info visit [data transfer](../../Getting_Started/Next_Steps/Moving_files_to_and_from_the_cluster.md). +(For more info visit [data transfer](Moving_files_to_and_from_the_cluster.md).) diff --git a/docs/Scientific_Computing/Terminal_Setup/MobaXterm_Setup_Windows.md b/docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/MobaXterm_Setup_Windows.md similarity index 82% rename from docs/Scientific_Computing/Terminal_Setup/MobaXterm_Setup_Windows.md rename to docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/MobaXterm_Setup_Windows.md index 2d91c12a2..9fc02bdc6 100644 --- a/docs/Scientific_Computing/Terminal_Setup/MobaXterm_Setup_Windows.md +++ b/docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/MobaXterm_Setup_Windows.md @@ -12,9 +12,9 @@ zendesk_section_id: 360000189696 --- !!! 
prerequisite - - Have an [active account and project.](../../Getting_Started/Accounts-Projects_and_Allocations/Creating_a_NeSI_Account_Profile.md) - - Set up your [Linux Password.](../../Getting_Started/Accessing_the_HPCs/Setting_Up_and_Resetting_Your_Password.md) - - Set up Second [Factor Authentication.](../../Getting_Started/Accessing_the_HPCs/Setting_Up_Two_Factor_Authentication.md) + - Have an [active account and project.](Creating_a_NeSI_Account_Profile.md) + - Set up your [Linux Password.](Setting_Up_and_Resetting_Your_Password.md) + - Set up Second [Factor Authentication.](Setting_Up_Two_Factor_Authentication.md) - Windows operating system. Setting up MobaXterm as shown below will allow you to connect to the @@ -43,9 +43,9 @@ transfer GUI. field, as well as your NeSI username in the Username field for the gateway SSH server then select OK to close the window. - ![mceclip4.png](../../assets/images/MobaXterm_Setup_Windows.png) + ![mceclip4.png](MobaXterm_Setup_Windows.png) - ![mceclip5.png](../../assets/images/MobaXterm_Setup_Windows_0.png) + ![mceclip5.png](MobaXterm_Setup_Windows_0.png) 6. Click 'OK' on the open window, usually this will start a new session immediately. *See usage below.* @@ -56,7 +56,7 @@ transfer GUI. This can be resolved by clicking "OK" each time you are prompted then logging in as normal once you are prompted for your `First Factor:` or `Password:`. - See [Login Troubleshooting](../../General/FAQs/Login_Troubleshooting.md) for more + See [Login Troubleshooting](Login_Troubleshooting.md) for more details ## Usage @@ -64,7 +64,7 @@ transfer GUI. You will see your saved session in the left hand panel under 'Sessions'. Double click to start. -![mceclip6.png](../../assets/images/MobaXterm_Setup_Windows_1.png) +![mceclip6.png](MobaXterm_Setup_Windows_1.png) You will be prompted by dialogue box. @@ -127,8 +127,8 @@ Two steps to try: `mahuika` as well). 
I recommend removing all of it and restart MobaXterm before the next login attempt -Then setup a new session [according to the support doc instructions](./MobaXterm_Setup_Windows.md) +Then setup a new session [according to the support doc instructions](MobaXterm_Setup_Windows.md) as before. !!! prerequisite "What Next?" - - [Moving files to/from a cluster.](../../Getting_Started/Next_Steps/Moving_files_to_and_from_the_cluster.md) + - [Moving files to/from a cluster.](Moving_files_to_and_from_the_cluster.md) diff --git a/docs/Getting_Started/Accessing_the_HPCs/Port_Forwarding.md b/docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/Port_Forwarding.md similarity index 88% rename from docs/Getting_Started/Accessing_the_HPCs/Port_Forwarding.md rename to docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/Port_Forwarding.md index d16bac5e0..2cb035154 100644 --- a/docs/Getting_Started/Accessing_the_HPCs/Port_Forwarding.md +++ b/docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/Port_Forwarding.md @@ -5,7 +5,7 @@ title: Port Forwarding --- !!! prerequisite - Have your [connection to the NeSI cluster](../../Scientific_Computing/Terminal_Setup/Standard_Terminal_Setup.md) configured + Have your [connection to the NeSI cluster](../Connecting/Standard_Terminal_Setup.md) configured Some applications only accept connections from internal ports (i.e a port on the same local network), if you are running one such application @@ -23,12 +23,12 @@ to `127.0.0.1`. The alias `localhost` can also be used in most cases. **Host Alias:** An alias for the socket of your main connection to the cluster, `mahuika` or `maui` if you have set up your ssh config file as -described in [Standard Terminal Setup](../../Scientific_Computing/Terminal_Setup/Standard_Terminal_Setup.md). +described in [Standard Terminal Setup](../Connecting/Standard_Terminal_Setup.md). **Remote Port:** The port number you will use on the remote machine (in this case the NeSI cluster) !!! 
note - The following examples use aliases as set up in [standard terminal setup](../../Scientific_Computing/Terminal_Setup/Standard_Terminal_Setup.md). + The following examples use aliases as set up in [standard terminal setup](../Connecting/Standard_Terminal_Setup.md). This allows the forwarding from your local machine to the NeSI cluster, without having to re-tunnel through the lander node. @@ -66,7 +66,7 @@ procedure. If you are using port forwarding on a regular basis, and don't want the hassle of opening a new tunnel every time, you can include a port -forwarding line in your ssh config file ~/.ssh/config on your local +forwarding line in your ssh config file `~/.ssh/config` on your local machine. Under the alias for the cluster you want to connect to add the following @@ -120,12 +120,12 @@ method described above. This is the recommended method. You can tell if MobaXterm is using WSL as it will appear in the banner when starting a new terminal session. -![mceclip0.png](../../assets/images/Port_Forwarding.png) +![mceclip0.png](../Connecting/Port_Forwarding.png) You can also set up port forwarding using the MobaXterm tunnelling interface. -![mceclip1.png](../../assets/images/Port_Forwarding_0.png) +![mceclip1.png](../Connecting/Port_Forwarding_0.png) You will need to create **two** tunnels. One from lander to mahuika. And another from mahuika to itself. (This is what using an alias in the @@ -133,7 +133,7 @@ first two examples allows us to avoid). The two tunnels should look like this. -![mobakey.png](../../assets/images/Port_Forwarding_1.png) +![mobakey.png](../Connecting/Port_Forwarding_1.png) local port remote port @@ -203,5 +203,5 @@ ssh -Nf -R 6676:localhost:6676 ${SLURM_SUBMIT_HOST} !!! tip "What Next?" - Using - [JupyterLab](../../Scientific_Computing/Supported_Applications/JupyterLab.md) on the cluster. - - [Paraview](../../Scientific_Computing/Supported_Applications/ParaView.md) + [JupyterLab](../Connecting/JupyterLab.md) on the cluster. 
+ - [Paraview](../Connecting/ParaView.md) diff --git a/docs/Scientific_Computing/Terminal_Setup/Standard_Terminal_Setup.md b/docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/SSH_Config_Setup.md similarity index 65% rename from docs/Scientific_Computing/Terminal_Setup/Standard_Terminal_Setup.md rename to docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/SSH_Config_Setup.md index a704cb504..7a523d6b5 100644 --- a/docs/Scientific_Computing/Terminal_Setup/Standard_Terminal_Setup.md +++ b/docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/SSH_Config_Setup.md @@ -7,13 +7,13 @@ description: How to setup your ssh config file in order to connect to the NeSI c --- !!! prerequisite - - Have an [active account and project.](../../Getting_Started/Accounts-Projects_and_Allocations/Creating_a_NeSI_Account_Profile.md) - - Set up your [Linux Password.](../../Getting_Started/Accessing_the_HPCs/Setting_Up_and_Resetting_Your_Password.md) - - Set up [Second Factor Authentication.](../../Getting_Started/Accessing_the_HPCs/Setting_Up_Two_Factor_Authentication.md) + - Have an [active account and project.](Creating_a_NeSI_Account_Profile.md) + - Set up your [Linux Password.](Setting_Up_and_Resetting_Your_Password.md) + - Set up [Second Factor Authentication.](Setting_Up_Two_Factor_Authentication.md) - Have one of: - Built in Linux/Mac terminal - - [Windows Subsystem for Linux](../../Scientific_Computing/Terminal_Setup/Windows_Subsystem_for_Linux_WSL.md) - - [VSCode](../../Scientific_Computing/Terminal_Setup/VSCode.md) + - [Windows Subsystem for Linux](Windows_Subsystem_for_Linux_WSL.md) + - [VSCode](VSCode.md) ## First time setup @@ -66,5 +66,5 @@ ssh mahuika ``` !!! prerequisite "What Next?" - - [Moving files to/from a cluster.](../../Getting_Started/Next_Steps/Moving_files_to_and_from_the_cluster.md) - - Setting up an [X-Server](../../Scientific_Computing/Terminal_Setup/X11_on_NeSI.md) (optional). 
+ - [Moving files to/from a cluster.](Moving_files_to_and_from_the_cluster.md) + - Setting up an [X-Server](X11_on_NeSI.md) (optional). diff --git a/docs/Scientific_Computing/Terminal_Setup/VSCode.md b/docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/VSCode.md similarity index 91% rename from docs/Scientific_Computing/Terminal_Setup/VSCode.md rename to docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/VSCode.md index 240a54163..ca2df761a 100644 --- a/docs/Scientific_Computing/Terminal_Setup/VSCode.md +++ b/docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/VSCode.md @@ -1,7 +1,11 @@ --- created_at: 2024-08-05 description: How to set up Visual Studio Code to access the NeSI cluster -tags: [ide, code, visual studio code, vscode] +tags: + - ide + - code + - visual studio code + - vscode --- 'Visual Studio Code' (not to be confused with 'Visual Studio') or 'VSCode', is a popular editor/IDE with many useful extensions. @@ -13,13 +17,13 @@ The 'Remote' extension allows you to connect to a remote computer (like NeSI). 1. Make sure you have set up an `~/.ssh/config` file as described in [Terminal Setup](Standard_Terminal_Setup.md). 2. In VSCode, open the 'Extensions' Tab, search `remote` and make sure you have 'Remote - SSH' and 'Remote Explorer' by Microsoft, installed. - ![vscode remote extension](../../assets/images/vscode-remote.png) + ![vscode remote extension](vscode-remote.png) === "Windows" 1. In VSCode, open the 'Extensions' Tab, search `remote` and make sure you have 'Remote - SSH' and 'Remote Explorer' by Microsoft installed. - ![vscode remote extension](../../assets/images/vscode-remote.png) + ![vscode remote extension](vscode-remote.png) 2. Open the 'Remote Explorer' Tab, then click on the 'Open SSH Config file' (gear symbol). If you are prompted to create a file, the first option is fine. - ![vscode remote explorer](../../assets/images/vscode-remote-windows.png) + ![vscode remote explorer](vscode-remote-windows.png) 3. 
Set up your SSH Config file as described in Step 2 of [Terminal Setup](Standard_Terminal_Setup.md#first-time-setup). 4. Remove or comment out the `Control Path` line under `Host *`. !!! warning @@ -31,7 +35,7 @@ The 'Remote' extension allows you to connect to a remote computer (like NeSI). 1. Set up WSL as described in [Windows Subsystem for Linux (WSL)](Windows_Subsystem_for_Linux_WSL.md). 2. In VSCode, open the 'Extensions' Tab, search `remote` and make sure you have 'Remote - SSH' and 'Remote Explorer' by Microsoft installed. - ![vscode remote extension](../../assets/images/vscode-remote.png) + ![vscode remote extension](vscode-remote.png) 3. Still in the 'Extensions' Tab, search `wsl` and make sure you have 'WSL' by Microsoft installed. 4. In `C:\Users\` create a file named `ssh.bat` with the following contents. ```bat @@ -58,7 +62,7 @@ The 'Remote' extension allows you to connect to a remote computer (like NeSI). Under the 'Remote Explorer' Tab on the left, you should now see the NeSI machines (as well as any other machines configured in your `~/.ssh/config` file) -![vscode explorer](../../assets/images/vscode-explorer.png) +![vscode explorer](vscode-explorer.png) Clicking on these will open a connection to that machine, you will then be prompted for your password and second factor, as per usual. @@ -72,7 +76,7 @@ Clicking on these will open a connection to that machine, you will then be promp You may find that VSCode is not utilising your preferred versions of software (e.g. when debugging or linting your Python code). -As the NeSI cluster utilises [Environment Modules](../../Getting_Started/Next_Steps/Submitting_your_first_job.md#environment-modules), changing the executable used is not just a matter of changing the path in VSCode configuration, as the libraries required will not be loaded. 
+As the NeSI cluster utilises [Environment Modules](../../Mahuika_Cluster/Next_Steps/Submitting_your_first_job.md#environment-modules), changing the executable used is not just a matter of changing the path in VSCode configuration, as the libraries required will not be loaded. The only way to make sure that VSCode has access to a suitable environment, is to load the required modules in your `~/.bashrc` diff --git a/docs/Scientific_Computing/Terminal_Setup/WinSCP-PuTTY_Setup_Windows.md b/docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/WinSCP-PuTTY_Setup_Windows.md similarity index 69% rename from docs/Scientific_Computing/Terminal_Setup/WinSCP-PuTTY_Setup_Windows.md rename to docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/WinSCP-PuTTY_Setup_Windows.md index d7973bb92..c370fdb3d 100644 --- a/docs/Scientific_Computing/Terminal_Setup/WinSCP-PuTTY_Setup_Windows.md +++ b/docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/WinSCP-PuTTY_Setup_Windows.md @@ -9,9 +9,9 @@ zendesk_section_id: 360000189696 --- !!! prerequisite - - Have an [active account and project.](../../Getting_Started/Accounts-Projects_and_Allocations/Creating_a_NeSI_Account_Profile.md) - - Set up your [NeSI account password.](../../Getting_Started/Accessing_the_HPCs/Setting_Up_and_Resetting_Your_Password.md) - - Set up Second [Factor Authentication.](../../Getting_Started/Accessing_the_HPCs/Setting_Up_Two_Factor_Authentication.md) + - Have an [active account and project.](Creating_a_NeSI_Account_Profile.md) + - Set up your [NeSI account password.](Setting_Up_and_Resetting_Your_Password.md) + - Set up Second [Factor Authentication.](Setting_Up_Two_Factor_Authentication.md) - Be using the Windows operating system. WinSCP is an SCP client for windows implementing the SSH protocol from @@ -20,7 +20,7 @@ PuTTY. 1. [Download WinSCP](https://winscp.net/eng/download.php). 2. 
Upon startup: - ![WinSCP1.png](../../assets/images/WinSCP-PuTTY_Setup_Windows.png) + ![WinSCP1.png](WinSCP-PuTTY_Setup_Windows.png) 3. Add a *New Site* and set: @@ -34,11 +34,11 @@ PuTTY. better than SCP. Feel free to try both and see which works best for you. - ![WinSCP2.png](../../assets/images/WinSCP-PuTTY_Setup_Windows_0.png) + ![WinSCP2.png](WinSCP-PuTTY_Setup_Windows_0.png) 4. Open Advanced Settings. - ![WinSCP3.png](../../assets/images/WinSCP-PuTTY_Setup_Windows_1.png) + ![WinSCP3.png](WinSCP-PuTTY_Setup_Windows_1.png) 5. Navigate to *Connection & Tunnel* and set: - Enable "Connect through SSH tunnel". @@ -56,12 +56,12 @@ recommend you use the PuTTY terminal instead. 2. In WinSCP open 'Tools > Preferences' - ![WinSCP2-5.png](../../assets/images/WinSCP-PuTTY_Setup_Windows_2.png) + ![WinSCP2-5.png](WinSCP-PuTTY_Setup_Windows_2.png) 3. Under *Integration > Applications* enable *Remember session password and pass it to PuTTY* - ![WinSCP4.png](../../assets/images/WinSCP-PuTTY_Setup_Windows_3.png) + ![WinSCP4.png](WinSCP-PuTTY_Setup_Windows_3.png) ## Setup for Xming (Optional) @@ -75,7 +75,7 @@ SSH Client' is selected). 2\. Under *Integration > Applications* and add -X after PuTTY/Terminal client path. -*![WinSCP6.png](../../assets/images/WinSCP-PuTTY_Setup_Windows_4.png)* +*![WinSCP6.png](WinSCP-PuTTY_Setup_Windows_4.png)* 3\. Restart your session. !!! prerequisite Important @@ -87,24 +87,24 @@ PuTTY/Terminal client path. Files can be dragged, dropped and modified in the WinSCP GUI just like in any windows file system. -![WinSCP5.png](../../assets/images/WinSCP-PuTTY_Setup_Windows_5.png) +![WinSCP5.png](WinSCP-PuTTY_Setup_Windows_5.png) -![putTerm.png](../../assets/images/WinSCP-PuTTY_Setup_Windows_6.png) Will +![putTerm.png](WinSCP-PuTTY_Setup_Windows_6.png) Will open a **PuTTY terminal**. Assuming you followed the steps setting up PuTTY, this should automatically enter your details. 
-![winTerm.png](../../assets/images/WinSCP-PuTTY_Setup_Windows_7.png) Will +![winTerm.png](WinSCP-PuTTY_Setup_Windows_7.png) Will open the default **WinSCP terminal**. While the functionality is identical to any other terminal the interface is slightly abstracted, with a separate window for input and command history drop-down. -![winAdd.png](../../assets/images/WinSCP-PuTTY_Setup_Windows_8.png) Type +![winAdd.png](WinSCP-PuTTY_Setup_Windows_8.png) Type here to **change directory**. The GUI doesn't follow your current terminal directory like MobaXterm so must be changed manually. (Recommend making this larger as the default is hard to type in). -![winBook.png](../../assets/images/WinSCP-PuTTY_Setup_Windows_9.png) **Bookmark** +![winBook.png](WinSCP-PuTTY_Setup_Windows_9.png) **Bookmark** current directory. ### Troubleshooting @@ -116,13 +116,13 @@ Occasionally this can lead to an excessive number of prompts. Limiting number of tunnels will reduce the number of times you are prompted. 1. Open settings - ![winscp\_settings.png](../../assets/images/WinSCP-PuTTY_Setup_Windows_10.png) + ![winscp\_settings.png](WinSCP-PuTTY_Setup_Windows_10.png) 2. Under 'Transfer' -> 'Background', set the 'Maximal number of transfers at the same time' to '1' and un-tick 'Use multiple connections for a single transfer'. -![winscp\_Settings2.png](../../assets/images/WinSCP-PuTTY_Setup_Windows_11.png) +![winscp\_Settings2.png](WinSCP-PuTTY_Setup_Windows_11.png) !!! warning As WinSCP uses multiple tunnels for file transfer you will be required @@ -131,8 +131,7 @@ for a single transfer'. with login authentication. !!! prerequisite "What Next?" - - [Moving files to/from a - cluster.](../../Getting_Started/Next_Steps/Moving_files_to_and_from_the_cluster.md) + - [Moving files to/from a cluster.](Moving_files_to_and_from_the_cluster.md) - Setting up - an [X-Server](../../Scientific_Computing/Terminal_Setup/X11_on_NeSI.md) + an [X-Server](X11_on_NeSI.md) (optional). 
diff --git a/docs/Scientific_Computing/Terminal_Setup/Windows_Subsystem_for_Linux_WSL.md b/docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/Windows_Subsystem_for_Linux_WSL.md similarity index 79% rename from docs/Scientific_Computing/Terminal_Setup/Windows_Subsystem_for_Linux_WSL.md rename to docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/Windows_Subsystem_for_Linux_WSL.md index ac464e1cf..fe23a0134 100644 --- a/docs/Scientific_Computing/Terminal_Setup/Windows_Subsystem_for_Linux_WSL.md +++ b/docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/Windows_Subsystem_for_Linux_WSL.md @@ -28,9 +28,9 @@ WSL is enabled by default on later versions of Windows 10. ## Enabling WSL 1. Open 'Turn Windows features on or off' - ![WSL1.png](../../assets/images/Windows_Subsystem_for_Linux_WSL.png) + ![WSL1.png](Windows_Subsystem_for_Linux_WSL.png) 2. Scroll down and tick the 'Windows Subsystem for Linux' option. - ![WSL2.png](../../assets/images/Windows_Subsystem_for_Linux_WSL_0.png) + ![WSL2.png](Windows_Subsystem_for_Linux_WSL_0.png) And click OK @@ -47,16 +47,16 @@ Distributions can be obtained through the Microsoft Store, or using command line latest version of the Ubuntu LTS it should look something like 'Ubuntu 20.04 LTS' , though you may find a later version. - ![MS store](../../assets/images/Ubuntu_LTS_terminal_Windows.png) - ![MS store](../../assets/images/Ubuntu_LTS_terminal_Windows_0.png) + ![MS store](Ubuntu_LTS_terminal_Windows.png) + ![MS store](Ubuntu_LTS_terminal_Windows_0.png) - Close the “Add your Microsoft account.. dialogue box as you do not need an account for the installation.You may have to click “Install” for a second time (If the above dialogue box reappears, close as before and download/install will begin). 
- ![MS store](../../assets/images/Ubuntu_LTS_terminal_Windows_1.png) - ![MS store](../../assets/images/Ubuntu_LTS_terminal_Windows_2.png) + ![MS store](Ubuntu_LTS_terminal_Windows_1.png) + ![MS store](Ubuntu_LTS_terminal_Windows_2.png) === "Using Command Line" - Open 'Windows Power Shell' and type @@ -68,12 +68,12 @@ Distributions can be obtained through the Microsoft Store, or using command line and press Enter. This can be anything you want, although we reccomend using the same as your Windows username. - ![ubuntu1.png](../../assets/images/Ubuntu_LTS_terminal_Windows_3.png) + ![ubuntu1.png](Ubuntu_LTS_terminal_Windows_3.png) - Now, type in a new password for the username you picked and press Enter (this password can be anything you want, although you shouldn't need to enter it again). Then retype the password to confirm and press Enter. - ![ubuntu2.png](../../assets/images/Ubuntu_LTS_terminal_Windows_4.png) + ![ubuntu2.png](Ubuntu_LTS_terminal_Windows_4.png) ## Creating a Symlink (optional) @@ -87,4 +87,4 @@ ln -s /mnt/c/Users/YourWindowsUsername/ WinFS ``` !!! prerequisite What "Next?" - - Set up your [SSH config file](../../Scientific_Computing/Terminal_Setup/Standard_Terminal_Setup.md). + - Set up your [SSH config file](SSH_Config_Setup.md). diff --git a/docs/Scientific_Computing/Terminal_Setup/X11_on_NeSI.md b/docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/X11_on_NeSI.md similarity index 81% rename from docs/Scientific_Computing/Terminal_Setup/X11_on_NeSI.md rename to docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/X11_on_NeSI.md index 2447c5507..dae74aee5 100644 --- a/docs/Scientific_Computing/Terminal_Setup/X11_on_NeSI.md +++ b/docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/X11_on_NeSI.md @@ -9,7 +9,7 @@ zendesk_section_id: 360000189696 !!! 
prerequisite - Have working - [terminal](../../Getting_Started/Accessing_the_HPCs/Choosing_and_Configuring_Software_for_Connecting_to_the_Clusters.md) + [terminal](Choosing_and_Configuring_Software_for_Connecting_to_the_Clusters.md) set up. X-11 is a protocol for rendering graphical user interfaces (GUIs) that @@ -27,16 +27,17 @@ Download links for X-servers can be found below. | ---------------- | ------------------------------------------------- | | MacOS | [Xquartz](https://www.xquartz.org/) | | Linux | [Xorg](https://www.x.org/wiki/Releases/Download/) | +| Linux (Debian) | `sudo apt install x11-apps -y` | | Windows | [Xming](https://sourceforge.net/projects/xming/) | Make sure you have launched the server and it is running in the -background, look for this ![mceclip0.png](../../assets/images/X11_on_NeSI.png) symbol in your taskbar. +background, look for this ![mceclip0.png](X11_on_NeSI.png) symbol in your taskbar. !!! note MobaXterm has a build in X server, no setup required. By default the server is started alongside MobaXterm. You can check it's status in the top left hand corner - (![xon.png](../../assets/images/X11_on_NeSI_0.png)=on, ![off.png](../../assets/images/X11_on_NeSI_1.png)=off). + (![xon.png](X11_on_NeSI_0.png)=on, ![off.png](X11_on_NeSI_1.png)=off). ## X-Forwarding @@ -57,12 +58,12 @@ ssh -Y login.nesi.org.nz ### MobaXterm - Under 'session settings' for your connection make sure the X-11 +Under 'session settings' for your connection make sure the X-11 forwarding box is checked. -![x11moba.png](../../assets/images/X11_on_NeSI_2.png) +![x11moba.png](X11_on_NeSI_2.png) -If the ![mceclip0.png](../../assets/images/X11_on_NeSI_3.png) button in +If the ![mceclip0.png](X11_on_NeSI_3.png) button in the top right corner of your window is coloured, the X-server should be running. 
diff --git a/docs/Getting_Started/Accessing_the_HPCs/Choosing_and_Configuring_Software_for_Connecting_to_the_Clusters.md b/docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/index.md similarity index 58% rename from docs/Getting_Started/Accessing_the_HPCs/Choosing_and_Configuring_Software_for_Connecting_to_the_Clusters.md rename to docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/index.md index da2f75314..9e1936672 100644 --- a/docs/Getting_Started/Accessing_the_HPCs/Choosing_and_Configuring_Software_for_Connecting_to_the_Clusters.md +++ b/docs/High_Performance_Computing/Mahuika_Cluster/Terminal_Setup/index.md @@ -5,37 +5,22 @@ tags: - mobaxterm - gitbash - login -vote_count: 1 -vote_sum: 1 -zendesk_article_id: 360001016335 -zendesk_section_id: 360000034315 +title: Terminal Setup --- !!! prerequisite - - Have an [active account and project](../../Getting_Started/Accounts-Projects_and_Allocations/Creating_a_NeSI_Account_Profile.md). - - Set up your [NeSI Account Password](../../Getting_Started/Accessing_the_HPCs/Setting_Up_and_Resetting_Your_Password.md). - - Set up [Two-Factor Authentication](../../Getting_Started/Accessing_the_HPCs/Setting_Up_Two_Factor_Authentication.md). + - Have an [active account and project](../Connecting/Creating_a_NeSI_Account_Profile.md). + - Set up your [NeSI Account Password](../Connecting/Setting_Up_and_Resetting_Your_Password.md). + - Set up [Two-Factor Authentication](../Connecting/Setting_Up_Two_Factor_Authentication.md). Before you can start submitting work you will need some way of connecting to the NeSI clusters. -This is done by establishing an SSH (Secure SHell) connection, giving -you access to a command line interface (bash) on the cluster. In order -to set up such a connection, you will need a suitable Terminal (or -equivalent application). The correct option for you depends on your -operating system and level of experience. 
+This page will cover establishing an SSH (Secure SHell) connection, giving +you access to a command line interface (bash) on the cluster. -## Web Browser - -### JupyterHub - - JupyterHub is a service providing access to Jupyter Notebooks on - NeSI. A terminal similar to the other setups describe below can be - accessed through the Jupyter Launcher. - -!!! prerequisite "What next?" - - More info on [Jupyter Terminal](../../Scientific_Computing/Interactive_computing_using_Jupyter/Jupyter_on_NeSI.md#jupyter-terminal) - - Visit [jupyter.nesi.org.nz](https://jupyter.nesi.org.nz/hub/). +In order to set up such a connection, you will need a suitable Terminal program. +The best option for you depends on your operating system and level of experience. ## Linux or Mac OS @@ -46,14 +31,17 @@ installed, usually called, "Terminal." To find it, simply search for "terminal". Congratulations! You are ready to move to the next step. -!!! prerequisite "What next?" - Setting up your [Default Terminal](../../Scientific_Computing/Terminal_Setup/Standard_Terminal_Setup.md) +!!! tip "What next?" + Set up your [SSH Config](SSH_Config_Setup.md) ### VSCode The inbuilt 'remotes' plugin allows connecting to remote hosts. -If you have set up your `~/.ssh/config` as described in [Standard_Terminal_Setup](../../Scientific_Computing/Terminal_Setup/Standard_Terminal_Setup.md), -VSCode will detect this and show configured hosts in the 'Remote Explorer' Tab. +If you have set up your `~/.ssh/config` as described in [SSH Config Setup](SSH_Config_Setup.md), +VSCode will detect this and show configured hosts in the 'Remote Explorer' Tab. See [VSCode](VSCode.md#setup) for more info. + +!!! tip "What next?" + - [VSCode](VSCode.md#setup) ## Windows @@ -76,33 +64,23 @@ different options, listed in order of preference. !!! tip "What next?" 
- Enabling - [WSL](../../Scientific_Computing/Terminal_Setup/Windows_Subsystem_for_Linux_WSL.md) - - Setting up the [Ubuntu Terminal](../../Scientific_Computing/Terminal_Setup/Windows_Subsystem_for_Linux_WSL.md) - - Setting up - [X-Forwarding](../../Getting_Started/Accessing_the_HPCs/X_Forwarding_using_the_Ubuntu_Terminal_on_Windows.md) + [WSL](Windows_Subsystem_for_Linux_WSL.md) ### VSCode VSCode can be used with WSL or without. +!!! tip "What next?" + - [VSCode](VSCode.md#setup) + ### MobaXterm In addition to being a terminal emulator, MobaXterm also includes several useful features like multiplexing, X11 forwarding and a file transfer GUI. - MobaXterm can be downloaded from - [mobaxterm.mobatek.net](https://mobaxterm.mobatek.net/download-home-edition.html). - The portable edition will allow you to use MobaXterm without needing - administrator privileges, however it introduces several bugs so we - *highly* recommend using the installer edition if you have - administrator privileges on your workstation or if your - institution's IT team supports MobaXTerm. !!! tip "What next?" - - Setting up - [MobaXterm](../../Scientific_Computing/Terminal_Setup/MobaXterm_Setup_Windows.md) - -### Using a Virtual Machine + - Setting up Connecting In order to avoid the problems of using a Windows environment, it may be advisable to install a Linux Virtual machine. This may be @@ -132,7 +110,7 @@ for new users. !!! tip "What next?" - Setting up - [WinSCP](../../Scientific_Computing/Terminal_Setup/WinSCP-PuTTY_Setup_Windows.md) + [WinSCP](WinSCP-PuTTY_Setup_Windows.md) ### Git Bash @@ -145,12 +123,15 @@ your password, but lacks many of the features of MobaXterm or a native Unix-Like terminal. Therefore we do not recommend it as your primary terminal. +!!! tip "What next?" 
+ Set up your [SSH Config](SSH_Config_Setup.md) + ### Windows PowerShell All Windows computers have PowerShell installed, however it will only be useful to you if Windows Subsystem for Linux (WSL) is also enabled, instructions can be found at -[Windows_Subsystem_for_Linux_WSL](../../Scientific_Computing/Terminal_Setup/Windows_Subsystem_for_Linux_WSL.md). +[Windows_Subsystem_for_Linux_WSL](Windows_Subsystem_for_Linux_WSL.md). Like Git Bash, PowerShell is perfectly adequate for testing your login or setting up your password, but lacks many of the features of diff --git a/docs/High_Performance_Computing/Mahuika_Cluster/index.md b/docs/High_Performance_Computing/Mahuika_Cluster/index.md new file mode 100644 index 000000000..ac37a4025 --- /dev/null +++ b/docs/High_Performance_Computing/Mahuika_Cluster/index.md @@ -0,0 +1,52 @@ +--- +created_at: '2018-05-01T23:29:39Z' +tags: +- hpc +- info +title: Mahuika Cluster +hide: + - toc +--- + +Mahuika is NeSI's High Performance Computing Cluster. + +## Getting Started + +
+ +- ![](../../assets/icons/material/account-details.svg) __NeSI Accounts__ + + --- + + You will need a __NeSI account__, and be a __member of an active project__ before you can access the NeSI HPC. + + - [Creating a NeSI Account](Creating_a_NeSI_Account_Profile.md) + - [Applying For a New NeSI Project](Applying_for_a_new_NeSI_project.md) + - [Applying to Join a NeSI Project](Applying_to_join_an_existing_NeSI_project.md) + +- ![](../../assets/icons/material/compass.svg) __Cluster Access__ + + --- + Once you have your account sorted, learn how to connect to the cluster. + + - [Connect Via Open OnDemand](../Open_OnDemand/index.md) - Best for new users. Or using [OnDemand Apps](). + - [Connect With SSH](Terminal_Setup/index.md) - For those familiar with command line, and accessing machines remotely. + +
+ +## Hardware + +| | | +| --- | --- | +| __Login nodes__ | 72 cores in 2× Broadwell (E5-2695v4, 2.1 GHz, dual socket 18 cores per socket) nodes | +| __Compute nodes__ | 8,136 cores in 226 × Broadwell (E5-2695v4, 2.1 GHz, dual socket 18 cores per socket) nodes;
7,552 cores in 64 HPE Apollo 2000 XL225n nodes ([AMD EPYC Milan 7713](https://www.amd.com/en/products/cpu/amd-epyc-7713)) the Milan partition | +| __Compute nodes (reserved for NeSI Cloud)
__ | 288 cores in 8 × Broadwell (E5-2695v4, 2.1 GHz, dual socket 18 cores per socket) nodes | +| __GPUs__ | 9 NVIDIA Tesla P100 PCIe 12GB cards (1 node with 1 GPU, 4 nodes with 2 GPUs)

7 NVIDIA A100 PCIe 40GB cards (3 nodes with 1 GPU, 2 nodes with 2 GPUs)

7 A100-1g.5gb instances (1 NVIDIA A100 PCIe 40GB card divided into [7 MIG GPU slices](https://www.nvidia.com/en-us/technologies/multi-instance-gpu/) with 5GB memory each)

4 NVIDIA HGX A100 (4 GPUs per board with 80GB memory each, 16 A100 GPUs in total)

4 NVIDIA A40 with 48GB memory each (2 nodes with 2 GPUs, but capacity for 6 additional GPUs already in place)| +| __Hyperthreading__ | Enabled (accordingly, SLURM will see ~31,500 cores) | +| __Theoretical Peak Performance__ | 308.6 TFLOPs | +| __Memory capacity per compute node__ | 128 GB | +| __Memory capacity per login (build) node__ | 512 GB | +| __Total System memory__ | 84.0 TB | +| __Interconnect__ | FDR (54.5Gb/s) InfiniBand to EDR (100Gb/s) Core fabric. 3.97:1 Fat-tree topology | +| __Workload Manager__ | Slurm (Multi-Cluster) | +| __Operating System__ | CentOS 7.4 & Rocky 8.5 on Milan | diff --git a/docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/.pages.yml b/docs/High_Performance_Computing/Open_OnDemand/.pages.yml similarity index 100% rename from docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/.pages.yml rename to docs/High_Performance_Computing/Open_OnDemand/.pages.yml diff --git a/docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/Release_Notes/index.md b/docs/High_Performance_Computing/Open_OnDemand/Release_Notes/index.md similarity index 100% rename from docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/Release_Notes/index.md rename to docs/High_Performance_Computing/Open_OnDemand/Release_Notes/index.md diff --git a/docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/changes_from_jupyter_on_nesi.md b/docs/High_Performance_Computing/Open_OnDemand/changes_from_jupyter_on_nesi.md similarity index 100% rename from docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/changes_from_jupyter_on_nesi.md rename to docs/High_Performance_Computing/Open_OnDemand/changes_from_jupyter_on_nesi.md diff --git a/docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/how_to_guide.md b/docs/High_Performance_Computing/Open_OnDemand/how_to_guide.md similarity index 100% rename from docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/how_to_guide.md 
rename to docs/High_Performance_Computing/Open_OnDemand/how_to_guide.md diff --git a/docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/index.md b/docs/High_Performance_Computing/Open_OnDemand/index.md similarity index 66% rename from docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/index.md rename to docs/High_Performance_Computing/Open_OnDemand/index.md index bbc99f7c8..abb901fe2 100644 --- a/docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/index.md +++ b/docs/High_Performance_Computing/Open_OnDemand/index.md @@ -3,7 +3,7 @@ !!! warning NeSI OnDemand is in development and accessible to early access users only. - If you are interested in helping us test it please [contact us](mailto:support@nesi.org.nz). + If you are interested in helping us test it please {% include "partials/support_request.html" %}. ## Overview @@ -22,15 +22,15 @@ For more information see the [How-to guide](how_to_guide.md). A number of interactive applications can be accessed through NeSI OnDemand, including: -- [JupyterLab](interactive_apps/JupyterLab/index.md) -- [RStudio](interactive_apps/RStudio.md) -- [MATLAB](interactive_apps/MATLAB.md) - currently under development, let us know if this is of interest -- [Code server](interactive_apps/code_server.md) - currently under development, let us know if this is of interest -- [Virtual desktop](interactive_apps/virtual_desktop.md) - currently under development, let us know if this is of interest +- [JupyterLab](index.md) +- [RStudio](RStudio.md) +- [MATLAB](MATLAB.md) - currently under development, let us know if this is of interest +- [Code server](code_server.md) - currently under development, let us know if this is of interest +- [Virtual desktop](virtual_desktop.md) - currently under development, let us know if this is of interest ## Release notes -Release notes can be found [here](Release_Notes/index.md) and the main differences +Release notes can be found at the main differences compared 
to the current Jupyter on NeSI service are described [here](changes_from_jupyter_on_nesi.md). ## Acknowledgements diff --git a/docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/interactive_apps/.pages.yml b/docs/High_Performance_Computing/Open_OnDemand/interactive_apps/.pages.yml similarity index 100% rename from docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/interactive_apps/.pages.yml rename to docs/High_Performance_Computing/Open_OnDemand/interactive_apps/.pages.yml diff --git a/docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/interactive_apps/JupyterLab/.pages.yml b/docs/High_Performance_Computing/Open_OnDemand/interactive_apps/JupyterLab/.pages.yml similarity index 100% rename from docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/interactive_apps/JupyterLab/.pages.yml rename to docs/High_Performance_Computing/Open_OnDemand/interactive_apps/JupyterLab/.pages.yml diff --git a/docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/interactive_apps/JupyterLab/Jupyter_kernels_Manual_management.md b/docs/High_Performance_Computing/Open_OnDemand/interactive_apps/JupyterLab/Jupyter_kernels_Manual_management.md similarity index 94% rename from docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/interactive_apps/JupyterLab/Jupyter_kernels_Manual_management.md rename to docs/High_Performance_Computing/Open_OnDemand/interactive_apps/JupyterLab/Jupyter_kernels_Manual_management.md index 48c6e4c60..abfc6c073 100644 --- a/docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/interactive_apps/JupyterLab/Jupyter_kernels_Manual_management.md +++ b/docs/High_Performance_Computing/Open_OnDemand/interactive_apps/JupyterLab/Jupyter_kernels_Manual_management.md @@ -21,14 +21,14 @@ Python and R kernels by default, which can be selected from the Launcher. 
Many packages are preinstalled in our default Python and R environments and these can be extended further as described on the -[Python](../../../../Scientific_Computing/Supported_Applications/Python.md) and -[R](../../../../Scientific_Computing/Supported_Applications/R.md) support +[Python](Python.md) and +[R](R.md) support pages. ## Adding a custom Python kernel !!! note "see also" - See the [Jupyter kernels - Tool-assisted management](./Jupyter_kernels_Tool_assisted_management.md) + See the [Jupyter kernels - Tool-assisted management](Jupyter_kernels_Tool_assisted_management.md) page for the **preferred** way to register kernels, which uses the `nesi-add-kernel` command line tool to automate most of these manual steps. @@ -211,7 +211,7 @@ Launcher as "Shared Virtual Env". ## Custom kernel in a Singularity container An example showing setting up a custom kernel running in a Singularity -container can be found on our [Lambda Stack](../../../../Scientific_Computing/Supported_Applications/Lambda_Stack.md#lambda-stack-via-jupyter) +container can be found on our [Lambda Stack](../../../Supported_Applications/Lambda_Stack.md#lambda-stack-via-jupyter) support page. 
## Adding a custom R kernel diff --git a/docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/interactive_apps/JupyterLab/Jupyter_kernels_Tool_assisted_management.md b/docs/High_Performance_Computing/Open_OnDemand/interactive_apps/JupyterLab/Jupyter_kernels_Tool_assisted_management.md similarity index 98% rename from docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/interactive_apps/JupyterLab/Jupyter_kernels_Tool_assisted_management.md rename to docs/High_Performance_Computing/Open_OnDemand/interactive_apps/JupyterLab/Jupyter_kernels_Tool_assisted_management.md index a33e0ace1..77793aacc 100644 --- a/docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/interactive_apps/JupyterLab/Jupyter_kernels_Tool_assisted_management.md +++ b/docs/High_Performance_Computing/Open_OnDemand/interactive_apps/JupyterLab/Jupyter_kernels_Tool_assisted_management.md @@ -14,7 +14,7 @@ notebooks. For example, you may want to load a specific environment module in your kernel or use a Conda environment. To register a Jupyter kernel, you can follow the steps highlighted in -the [Jupyter kernels - Manual management](./Jupyter_kernels_Manual_management.md) +the [Jupyter kernels - Manual management](Jupyter_kernels_Manual_management.md) or use the `nesi-add-kernel` tool provided within the [Jupyter on NeSI service](https://jupyter.nesi.org.nz). This page details the latter option, which we recommend. 
diff --git a/docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/interactive_apps/JupyterLab/index.md b/docs/High_Performance_Computing/Open_OnDemand/interactive_apps/JupyterLab/index.md similarity index 94% rename from docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/interactive_apps/JupyterLab/index.md rename to docs/High_Performance_Computing/Open_OnDemand/interactive_apps/JupyterLab/index.md index 8ef7cb13a..d0cc89454 100644 --- a/docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/interactive_apps/JupyterLab/index.md +++ b/docs/High_Performance_Computing/Open_OnDemand/interactive_apps/JupyterLab/index.md @@ -12,7 +12,7 @@ Jupyter allows you to create notebooks that contain live code, equations, visualisations and explanatory text. There are many uses for Jupyter, including data cleaning, analytics and visualisation, machine learning, numerical simulation, managing -[Slurm job submissions](../../../../Getting_Started/Next_Steps/Submitting_your_first_job.md) +[Slurm job submissions](Submitting_your_first_job.md) and workflows and much more. ## Accessing Jupyter on NeSI @@ -42,8 +42,8 @@ NeSI provides some default Python and R kernels that are available to all users of environment modules. It's also possible to create additional kernels that are visible only to you (they can optionally be made visible to other members of a specific NeSI project that you belong to). 
See: -- [Jupyter kernels - Tool-assisted management](./Jupyter_kernels_Tool_assisted_management.md) (recommended) -- [Jupyter kernels - Manual management](./Jupyter_kernels_Manual_management.md) +- [Jupyter kernels - Tool-assisted management](Jupyter_kernels_Tool_assisted_management.md) (recommended) +- [Jupyter kernels - Manual management](Jupyter_kernels_Manual_management.md) ### Jupyter terminal diff --git a/docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/interactive_apps/MATLAB.md b/docs/High_Performance_Computing/Open_OnDemand/interactive_apps/MATLAB.md similarity index 68% rename from docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/interactive_apps/MATLAB.md rename to docs/High_Performance_Computing/Open_OnDemand/interactive_apps/MATLAB.md index 98fb9c959..a365e7692 100644 --- a/docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/interactive_apps/MATLAB.md +++ b/docs/High_Performance_Computing/Open_OnDemand/interactive_apps/MATLAB.md @@ -7,4 +7,4 @@ The MATLAB app is currently being developed. -The docs for MATLAB via Jupyter are [here](../../Interactive_computing_using_Jupyter/MATLAB_via_Jupyter_on_NeSI.md). +The docs for MATLAB via Jupyter are [here](MATLAB_via_Jupyter_on_NeSI.md). diff --git a/docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/interactive_apps/RStudio.md b/docs/High_Performance_Computing/Open_OnDemand/interactive_apps/RStudio.md similarity index 89% rename from docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/interactive_apps/RStudio.md rename to docs/High_Performance_Computing/Open_OnDemand/interactive_apps/RStudio.md index 7f7a5a840..ec242b3f7 100644 --- a/docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/interactive_apps/RStudio.md +++ b/docs/High_Performance_Computing/Open_OnDemand/interactive_apps/RStudio.md @@ -6,7 +6,7 @@ If you are interested in helping us test it please [contact us](mailto:support@nesi.org.nz). 
## Logging in -![UPDATE WITH PROJECT](../../../assets/images/RStudio_via_OOD_on_NeSI_0.png){width=35%} ![](../../../assets/images/RStudio_via_OOD_on_NeSI_1.png){fig.align="right" width=62%} +![UPDATE WITH PROJECT](RStudio_via_OOD_on_NeSI_0.png){width=35%} ![](RStudio_via_OOD_on_NeSI_1.png){fig.align="right" width=62%} ## Settings Recommendation to set *Save Workspace to Never* to avoid saving large files to the workspace. This can be done by going to `Tools` -> `Global Options` -> `General` and setting the `Save workspace to .RData on exit` to `Never`. This will prevent the workspace from being unable to load due to not enough memory in the selected session. @@ -18,7 +18,7 @@ The current R modules on NeSI OnDemand do not support the default graphics devic This can be done by going to `Tools` -> `Global Options` -> `Graphics` and switch `Default` to `AGG`. This will allow the plots to be displayed in the RStudio interface. You do not need to restart the RStudio session for this to take effect. -![](../../../assets/images/RStudio_via_OOD_on_NeSI_2.png) +![](RStudio_via_OOD_on_NeSI_2.png) Modules from 4.4 onwards will have this issue fixed. 
diff --git a/docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/interactive_apps/code_server.md b/docs/High_Performance_Computing/Open_OnDemand/interactive_apps/code_server.md similarity index 100% rename from docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/interactive_apps/code_server.md rename to docs/High_Performance_Computing/Open_OnDemand/interactive_apps/code_server.md diff --git a/docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/interactive_apps/virtual_desktop.md b/docs/High_Performance_Computing/Open_OnDemand/interactive_apps/virtual_desktop.md similarity index 67% rename from docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/interactive_apps/virtual_desktop.md rename to docs/High_Performance_Computing/Open_OnDemand/interactive_apps/virtual_desktop.md index 0d5695b05..c3bb0c182 100644 --- a/docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/interactive_apps/virtual_desktop.md +++ b/docs/High_Performance_Computing/Open_OnDemand/interactive_apps/virtual_desktop.md @@ -7,4 +7,4 @@ The Virtual Desktop app is currently being developed. -The docs for Virtual desktop via Jupyter are [here](../../Interactive_computing_using_Jupyter/Virtual_Desktop_via_Jupyter_on_NeSI.md). +The docs for Virtual desktop via Jupyter are [here](Virtual_Desktop_via_Jupyter_on_NeSI.md). 
diff --git a/docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/known_issues/index.md b/docs/High_Performance_Computing/Open_OnDemand/known_issues/index.md similarity index 100% rename from docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/known_issues/index.md rename to docs/High_Performance_Computing/Open_OnDemand/known_issues/index.md diff --git a/docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/known_issues/restart_web_server.png b/docs/High_Performance_Computing/Open_OnDemand/known_issues/restart_web_server.png similarity index 100% rename from docs/Scientific_Computing/Interactive_computing_with_NeSI_OnDemand/known_issues/restart_web_server.png rename to docs/High_Performance_Computing/Open_OnDemand/known_issues/restart_web_server.png diff --git a/docs/High_Performance_Computing/Parallel_Computing/.index.md b/docs/High_Performance_Computing/Parallel_Computing/.index.md new file mode 100644 index 000000000..46b029aff --- /dev/null +++ b/docs/High_Performance_Computing/Parallel_Computing/.index.md @@ -0,0 +1,15 @@ +--- +created_at: 2025-02-21 +--- + +Many scientific software applications are written to take advantage of multiple CPUs in some way. But often this must be specifically requested by the user at the time they run the program, rather than happening automatically. + +The are three types of parallel execution we will cover are [Multi-Threading](#multi-threading), [Distributed (MPI)](#mpi) and [Job Arrays](#job-arrays). + +!!! note + Whenever Slurm mentions CPUs it is referring to *logical* CPU's (**2** *logical* CPU's = **1** *physical* core). + - `--cpus-per-task=4` will give you 4 *logical* cores. + - `--mem-per-cpu=512MB` will give 512 MB of RAM per *logical* core. + - If `--hint=nomultithread` is used then `--cpus-per-task` will now refer to physical cores, but `--mem-per-cpu=512MB` still refers to logical cores. 
+ +See [our article on hyperthreading](../Mahuika_Cluster/Next_Steps/Hyperthreading.md) for more information. diff --git a/docs/Scientific_Computing/HPC_Software_Environment/Configuring_Dask_MPI_jobs.md b/docs/High_Performance_Computing/Parallel_Computing/Configuring_Dask_MPI_jobs.md similarity index 94% rename from docs/Scientific_Computing/HPC_Software_Environment/Configuring_Dask_MPI_jobs.md rename to docs/High_Performance_Computing/Parallel_Computing/Configuring_Dask_MPI_jobs.md index d9bdae323..07461b146 100644 --- a/docs/Scientific_Computing/HPC_Software_Environment/Configuring_Dask_MPI_jobs.md +++ b/docs/High_Performance_Computing/Parallel_Computing/Configuring_Dask_MPI_jobs.md @@ -39,7 +39,7 @@ MPI. While some of the MPI distributions should be compatible with each other, it is advisable to use the same MPI distribution as the host HPC -system for reliability. The Mahuika and Māui Ancil clusters use Intel +system for reliability. The Mahuika cluster uses Intel MPI. ## Using Dask-MPI on Mahuika @@ -51,7 +51,7 @@ available on Mahuika that come with the mpi4py package, e.g. module load Python/3.9.9-gimkl-2020a ``` -## Installing Dask-MPI with Conda on Mahuika and Māui Ancil +## Installing Dask-MPI with Conda on Mahuika Load an Anaconda3 or Miniconda3 module and use the following commands to install mpi4py with the Intel MPI distribution *before* installing the @@ -79,14 +79,14 @@ dependencies: ``` !!! info "See also" See the - [Miniconda3](../../Scientific_Computing/Supported_Applications/Miniconda3.md) + [Miniconda3](Miniconda3.md) page for more information on how to create and manage Miniconda environments on NeSI. ## Configuring Slurm At runtime, Slurm will launch a number of Python processes as requested -in the [Slurm configuration script](../../Getting_Started/Cheat_Sheets/Slurm-Reference_Sheet.md). +in the [Slurm configuration script](Slurm-Reference_Sheet.md). Each process is given an ID (or "rank") starting at rank 0. 
Dask-MPI then assigns different roles to the different ranks: @@ -98,7 +98,7 @@ then assigns different roles to the different ranks: This implies that **Dask-MPI jobs must be launched on at least 3 MPI ranks!** Ranks 0 and 1 often perform much less work than the other ranks, it can therefore be beneficial to use -[Hyperthreading](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Hyperthreading.md) +[Hyperthreading](Hyperthreading.md) to place these two ranks onto a single physical core. Ensure that activating hyperthreading does not slow down the worker ranks by running a short test workload with and without hyperthreading. @@ -263,8 +263,7 @@ Conda environment inside the container. !!! note Tips You can build this container on NeSI, using the Mahuika Extension - nodes, following the instructions from the [dedicated support - page](../../Scientific_Computing/HPC_Software_Environment/Build_an_Apptainer_container_on_a_Milan_compute_node.md). + nodes, following the instructions from the [dedicated support page](Build_an_Apptainer_container_on_a_Milan_compute_node.md). ### Slurm configuration diff --git a/docs/Getting_Started/Next_Steps/Parallel_Execution.md b/docs/High_Performance_Computing/Parallel_Computing/Job_Arrays.md similarity index 52% rename from docs/Getting_Started/Next_Steps/Parallel_Execution.md rename to docs/High_Performance_Computing/Parallel_Computing/Job_Arrays.md index 8e268bf1d..1b2db3203 100644 --- a/docs/Getting_Started/Next_Steps/Parallel_Execution.md +++ b/docs/High_Performance_Computing/Parallel_Computing/Job_Arrays.md @@ -1,95 +1,13 @@ --- -created_at: '2019-01-10T03:02:11Z' -tags: [] -vote_count: 7 -vote_sum: 5 -zendesk_article_id: 360000690275 -zendesk_section_id: 360000189716 +created_at: 2025-02-21 +description: --- -Many scientific software applications are written to take advantage of multiple CPUs in some way. 
But often this must be specifically requested by the user at the time they run the program, rather than happening automatically. - -The are three types of parallel execution we will cover are [Multi-Threading](#multi-threading), [Distributed (MPI)](#mpi) and [Job Arrays](#job-arrays). - -!!! note - Whenever Slurm mentions CPUs it is referring to *logical* CPU's (**2** *logical* CPU's = **1** *physical* core). - - `--cpus-per-task=4` will give you 4 *logical* cores. - - `--mem-per-cpu=512MB` will give 512 MB of RAM per *logical* core. - - If `--hint=nomultithread` is used then `--cpus-per-task` will now refer to physical cores, but `--mem-per-cpu=512MB` still refers to logical cores. - -See [our article on hyperthreading](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Hyperthreading.md) for more information. - -## Multi-threading - -Multi-threading is a method of parallelisation whereby the initial single thread of a process forks into a number of parallel threads, generally *via* a library such as OpenMP (Open MultiProcessing), TBB (Threading Building Blocks), or pthread (POSIX threads). - -![serial](../../assets/images/parallel_execution_serial.png) - -![parallel](../../assets/images/Parallel_Execution.png) -Multi-threading involves dividing the process into multiple 'threads' which can be run across multiple cores. - -Multi-threading is limited in that it requires shared memory, so all CPU cores used must be on the same node. However, because all the CPUs share the same memory environment things only need to be loaded into memory once, meaning that memory requirements will usually not increase proportionally to the number of CPUs. - -Example script: - -``` sl -#!/bin/bash -e -#SBATCH --job-name=MultithreadingTest # job name (shows up in the queue) -#SBATCH --time=00:01:00 # Walltime (HH:MM:SS) -#SBATCH --mem=2048MB # memory in MB -#SBATCH --cpus-per-task=4 # 2 physical cores per task. 
- -taskset -c -p $$ #Prints which CPUs it can use -``` - -The expected output being - -```txt -pid 13538's current affinity list: 7,9,43,45 -``` - -## MPI - -MPI stands for *Message Passing Interface*, and is a communication protocol used to achieve distributed parallel computation. - -Similar in some ways to multi-threading, MPI does not have the limitation of requiring shared memory and thus can be used across multiple nodes, but has higher communication and memory overheads. - -For MPI jobs you need to set `--ntasks` to a value larger than 1, or if you want all nodes to run the same number of tasks, set `--ntasks-per-node` and `--nodes` instead. - -MPI programs require a launcher to start the *ntasks* processes on multiple CPUs, which may belong to different nodes. -On Slurm systems like ours, the preferred launcher is `srun` rather than `mpi-run`. - -Since the distribution of tasks across different nodes may be unpredictable, `--mem-per-cpu` should be used instead of `--mem`. - -``` sl -#!/bin/bash -e -#SBATCH --job-name=MPIJob # job name (shows up in the queue) -#SBATCH --time=00:01:00 # Walltime (HH:MM:SS) -#SBATCH --mem-per-cpu=512MB # memory/cpu in MB (half the actual required memory) -#SBATCH --cpus-per-task=4 # 2 Physical cores per task. -#SBATCH --ntasks=2 # number of tasks (e.g. MPI) - -srun pwd # Prints working directory -``` - -The expected output being - -```txt -/home/user001/demo -/home/user001/demo -``` - -!!! warning - For non-MPI programs, either set `--ntasks=1` or do not use `srun` at all. - Using `srun` in conjunction with `--cpus-per-task=1` will cause `--ntasks` to default to 2. - -## Job Arrays - Job arrays are best used for tasks that are completely independent, such as parameter sweeps, permutation analysis or simulation, that could be executed in any order and don't have to run at the same time. This kind of work is often described as *embarrassingly parallel*. 
An embarrassingly parallel problem is one that requires no communication or dependency between the tasks (unlike distributed computing problems that need communication between tasks). -A job array will submit the same script repeatedly over a designated index using the SBATCH command `#SBATCH --array` +A job array will submit the same script repeatedly over a designated index using the SBATCH command `#SBATCH --array` For example, the following code: @@ -239,19 +157,19 @@ If your program makes use of a working directory make sure you set it e.g. ```bash mkdir .tmp/run_${SLURM_ARRAY_TASK_ID} # Create new directory -export TMPDIR=.tmp/run_${SLURM_ARRAY_TASK_ID}  # Set TMPDIR to point there +export TMPDIR=.tmp/run_${SLURM_ARRAY_TASK_ID} # Set TMPDIR to point there ``` If you have no control over the name/path of an output used by a program, this can be resolved in a similar manner. ```bash mkdir run_${SLURM_ARRAY_TASK_ID} # Create new directory -cd run_${SLURM_ARRAY_TASK_ID}        # CD to new directory +cd run_${SLURM_ARRAY_TASK_ID} # CD to new directory bash job.sh mv output.log ../outputs/output_${SLURM_ARRAY_TASK_ID}.log # Move and rename output -rm -r ../run_${SLURM_ARRAY_TASK_ID}                          # Clear directory +rm -r ../run_${SLURM_ARRAY_TASK_ID} # Clear directory ``` The Slurm documentation on job arrays can be found [here](https://slurm.schedmd.com/job_array.html). 
diff --git a/docs/Getting_Started/Next_Steps/Job_Scaling_Ascertaining_job_dimensions.md b/docs/High_Performance_Computing/Parallel_Computing/Job_Scaling_Ascertaining_job_dimensions.md similarity index 88% rename from docs/Getting_Started/Next_Steps/Job_Scaling_Ascertaining_job_dimensions.md rename to docs/High_Performance_Computing/Parallel_Computing/Job_Scaling_Ascertaining_job_dimensions.md index fb99632c1..45114effe 100644 --- a/docs/Getting_Started/Next_Steps/Job_Scaling_Ascertaining_job_dimensions.md +++ b/docs/High_Performance_Computing/Parallel_Computing/Job_Scaling_Ascertaining_job_dimensions.md @@ -34,7 +34,7 @@ ascertain how much of each of these resources you will need. Asking for too little or too much, however, can both cause problems: your jobs will be at increased risk of taking a long time in the queue or failing, and -your project's [fair share score](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Fair_Share.md) +your project's [fair share score](../Mahuika_Cluster/Next_Steps/Fair_Share.md) is likely to suffer. Your project's fair share score will be reduced in view of compute time spent regardless of whether you obtain a result or @@ -46,7 +46,7 @@ not. | Memory | The job may wait in the queue for longer. Your fair share score will fall more than necessary. | Your job will fail, probably with an 'OUT OF MEMORY' error, segmentation fault or bus error. This may not happen immediately. | | Wall time | The job may wait in the queue for longer than necessary | The job will run out of time and get killed. | -***See [What is an allocation?](../../Getting_Started/Accounts-Projects_and_Allocations/What_is_an_allocation.md) for more details on how each resource effects your compute usage.*** +***See [What is an allocation?](../Mahuika_Cluster/Next_Steps/What_is_an_allocation.md) for more details on how each resource affects your compute usage.*** It is therefore important to try and make your jobs resource requests reasonably accurate. 
In this article we will discuss how you can scale @@ -77,5 +77,5 @@ will not have waited for hours or days in the queue beforehand. !!! example - - [Multithreading Scaling](../../Getting_Started/Next_Steps/Multithreading_Scaling_Example.md) - - [MPI Scaling](../../Getting_Started/Next_Steps/MPI_Scaling_Example.md) + - [Multithreading Scaling](Multithreading_Scaling_Example.md) + - [MPI Scaling](MPI_Scaling_Example.md) diff --git a/docs/High_Performance_Computing/Parallel_Computing/MPI.md b/docs/High_Performance_Computing/Parallel_Computing/MPI.md new file mode 100644 index 000000000..86ada8043 --- /dev/null +++ b/docs/High_Performance_Computing/Parallel_Computing/MPI.md @@ -0,0 +1,39 @@ +--- +created_at: 2025-02-21 +--- + + + + +MPI stands for *Message Passing Interface*, and is a communication protocol used to achieve distributed parallel computation. + +Similar in some ways to multi-threading, MPI does not have the limitation of requiring shared memory and thus can be used across multiple nodes, but has higher communication and memory overheads. + +For MPI jobs you need to set `--ntasks` to a value larger than 1, or if you want all nodes to run the same number of tasks, set `--ntasks-per-node` and `--nodes` instead. + +MPI programs require a launcher to start the *ntasks* processes on multiple CPUs, which may belong to different nodes. +On Slurm systems like ours, the preferred launcher is `srun` rather than `mpi-run`. + +Since the distribution of tasks across different nodes may be unpredictable, `--mem-per-cpu` should be used instead of `--mem`. + +``` sl +#!/bin/bash -e +#SBATCH --job-name=MPIJob # job name (shows up in the queue) +#SBATCH --time=00:01:00 # Walltime (HH:MM:SS) +#SBATCH --mem-per-cpu=512MB # memory/cpu in MB (half the actual required memory) +#SBATCH --cpus-per-task=4 # 2 Physical cores per task. +#SBATCH --ntasks=2 # number of tasks (e.g. 
MPI) + +srun pwd # Prints working directory +``` + +The expected output being + +```txt +/home/user001/demo +/home/user001/demo +``` + +!!! warning + For non-MPI programs, either set `--ntasks=1` or do not use `srun` at all. + Using `srun` in conjunction with `--cpus-per-task=1` will cause `--ntasks` to default to 2. diff --git a/docs/Getting_Started/Next_Steps/MPI_Scaling_Example.md b/docs/High_Performance_Computing/Parallel_Computing/MPI_Scaling_Example.md similarity index 97% rename from docs/Getting_Started/Next_Steps/MPI_Scaling_Example.md rename to docs/High_Performance_Computing/Parallel_Computing/MPI_Scaling_Example.md index ce6ff12be..fc01d917b 100644 --- a/docs/Getting_Started/Next_Steps/MPI_Scaling_Example.md +++ b/docs/High_Performance_Computing/Parallel_Computing/MPI_Scaling_Example.md @@ -174,7 +174,7 @@ Let's run our Slurm script with sbatch and look at our output from Our job performed 5,000 seeds using 2 physical CPU cores (each MPI task will always receive 2 logical CPUs which is equal to 1 physical CPUs. For a more in depth explanation about logical and physical CPU cores see -our [Hyperthreading article](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Hyperthreading.md)) +our [Hyperthreading article](../Mahuika_Cluster/Next_Steps/Hyperthreading.md)) and a maximum memory of 166,744KB (0.16 GB). In total, the job ran for 18 minutes and 51 seconds. @@ -213,7 +213,7 @@ results: 6054939.0 python 00:06:51 01:18:37 6 174028K COMPLETED ``` -![MPIscalingMem.png](../../assets/images/MPI_Scaling_Example.png) +![MPIscalingMem.png](../Mahuika_Cluster/Next_Steps/MPI_Scaling_Example.png) First, looking at the plot (we used R here, but feel free to use excel or whatever your preferred plotting software) of memory usage per task @@ -237,7 +237,7 @@ memory usage of all your jobs. Looking at the memory usage for an 8 CPU job, it looks like an 8 CPU has a maximum memory requirement of 0.18 GB. 
-![MPIscalingSeeds.png](../../assets/images/MPI_Scaling_Example_0.png){ width=47% } ![MPIscalingSeedsLog.png](../../assets/images/MPI_Scaling_Example_1.png){ width=47% } +![MPIscalingSeeds.png](../Mahuika_Cluster/Next_Steps/MPI_Scaling_Example_0.png){ width=47% } ![MPIscalingSeedsLog.png](../Mahuika_Cluster/Next_Steps/MPI_Scaling_Example_1.png){ width=47% } The two above plots show the number of CPUs vs time and the Log2 of the CPUs vs time. @@ -295,7 +295,7 @@ increasing as we add more seeds, but the maximum memory per CPU doesn't seem to change much. Let's try plotting this data to help us better understand what is happening: -![MPIseedsvtime.png](../../assets/images/MPI_Scaling_Example_2.png) +![MPIseedsvtime.png](../Mahuika_Cluster/Next_Steps/MPI_Scaling_Example_2.png) This confirms our assumption of wall-time scaling linearly with number of iterations. Since our 5,000 seed job to 7 minutes and 41 seconds we diff --git a/docs/High_Performance_Computing/Parallel_Computing/Multithreading.md b/docs/High_Performance_Computing/Parallel_Computing/Multithreading.md new file mode 100644 index 000000000..65f022b2a --- /dev/null +++ b/docs/High_Performance_Computing/Parallel_Computing/Multithreading.md @@ -0,0 +1,31 @@ +--- +created_at: 2025-02-21 +description: +--- + +Multi-threading is a method of parallelisation whereby the initial single thread of a process forks into a number of parallel threads, generally *via* a library such as OpenMP (Open MultiProcessing), TBB (Threading Building Blocks), or pthread (POSIX threads). + +![serial](../Mahuika_Cluster/Next_Steps/parallel_execution_serial.png) + +![parallel](../Mahuika_Cluster/Next_Steps/Parallel_Execution.png) +Multi-threading involves dividing the process into multiple 'threads' which can be run across multiple cores. + +Multi-threading is limited in that it requires shared memory, so all CPU cores used must be on the same node. 
However, because all the CPUs share the same memory environment things only need to be loaded into memory once, meaning that memory requirements will usually not increase proportionally to the number of CPUs. + +Example script: + +``` sl +#!/bin/bash -e +#SBATCH --job-name=MultithreadingTest # job name (shows up in the queue) +#SBATCH --time=00:01:00 # Walltime (HH:MM:SS) +#SBATCH --mem=2048MB # memory in MB +#SBATCH --cpus-per-task=4 # 2 physical cores per task. + +taskset -c -p $$ #Prints which CPUs it can use +``` + +The expected output being + +```txt +pid 13538's current affinity list: 7,9,43,45 +``` diff --git a/docs/Getting_Started/Next_Steps/Multithreading_Scaling_Example.md b/docs/High_Performance_Computing/Parallel_Computing/Multithreading_Scaling_Example.md similarity index 96% rename from docs/Getting_Started/Next_Steps/Multithreading_Scaling_Example.md rename to docs/High_Performance_Computing/Parallel_Computing/Multithreading_Scaling_Example.md index a82359307..28b9a50a1 100644 --- a/docs/Getting_Started/Next_Steps/Multithreading_Scaling_Example.md +++ b/docs/High_Performance_Computing/Parallel_Computing/Multithreading_Scaling_Example.md @@ -131,7 +131,7 @@ seem to change much. Let's try plotting this data (we used R here, but feel free to use excel or whatever your preferred plotting software) to help us better understand what is happening: -![Plot1](../../assets/images/Multithreading_Scaling_Example.png){ width=47% } ![Plot2](../../assets/images/Multithreading_Scaling_Example_0.png){ width=47% } +![Plot1](../Mahuika_Cluster/Next_Steps/Multithreading_Scaling_Example.png){ width=47% } ![Plot2](../Mahuika_Cluster/Next_Steps/Multithreading_Scaling_Example_0.png){ width=47% } This confirms our assumption of wall-time scaling linearly with number of iterations. However, peak memory usage appears unchanged. 
@@ -183,7 +183,7 @@ our script with 2, 4, 6, 8, 10, 12, 14 and 16 CPUs and plot the results using `s 3106181.0 Rscript 00:00:59 11:59.998 16 1205991K COMPLETED ``` -![TvC-MT.png](../../assets/images/Multithreading_Scaling_Example_1.png){ width=47% } ![TvL2C-MT.png](../../assets/images/Multithreading_Scaling_Example_2.png){ width=47% } +![TvC-MT.png](../Mahuika_Cluster/Next_Steps/Multithreading_Scaling_Example_1.png){ width=47% } ![TvL2C-MT.png](../Mahuika_Cluster/Next_Steps/Multithreading_Scaling_Example_2.png){ width=47% } The two above plots show the number of CPUs vs time and the Log2 of the CPUs vs time. The reason we have both is that it can often be easier to @@ -205,7 +205,7 @@ small. We could try running our script with more than 16 CPU cores, however, in the case of this script we start to have a pretty significant drop in marginal speed-up after eight CPU cores. -![](../../assets/images/Multithreading_Scaling_Example_3.png) +![](../Mahuika_Cluster/Next_Steps/Multithreading_Scaling_Example_3.png) Looking at our jobs' memory use, we can see that as we increase the number of CPUs taken by a job, the job's memory requirements increase @@ -274,4 +274,4 @@ memory as we may otherwise have run out. about 20% more wall time and memory than you think you are going to need to minimise the chance of your jobs failing due to a lack of resources. Your project's fair share score considers the time actually used by the - job, not the time requested by the job. \ No newline at end of file + job, not the time requested by the job. 
diff --git a/docs/Scientific_Computing/HPC_Software_Environment/NVIDIA_GPU_Containers.md b/docs/High_Performance_Computing/Parallel_Computing/NVIDIA_GPU_Containers.md similarity index 94% rename from docs/Scientific_Computing/HPC_Software_Environment/NVIDIA_GPU_Containers.md rename to docs/High_Performance_Computing/Parallel_Computing/NVIDIA_GPU_Containers.md index 0ba139dba..f262e9ea7 100644 --- a/docs/Scientific_Computing/HPC_Software_Environment/NVIDIA_GPU_Containers.md +++ b/docs/High_Performance_Computing/Parallel_Computing/NVIDIA_GPU_Containers.md @@ -46,8 +46,7 @@ running the NAMD image on NeSI, based on the NVIDIA instructions directly, which does not require root access: !!! note - Please do refer [Build Environment - Variables](../../Scientific_Computing/Supported_Applications/Singularity.md#build-environment-variables) + Please do refer [Build Environment Variables](Singularity.md#build-environment-variables) prior to running the following `pull` command. ```sh diff --git a/docs/Scientific_Computing/HPC_Software_Environment/Offloading_to_GPU_with_OpenACC.md b/docs/High_Performance_Computing/Parallel_Computing/Offloading_to_GPU_with_OpenACC.md similarity index 92% rename from docs/Scientific_Computing/HPC_Software_Environment/Offloading_to_GPU_with_OpenACC.md rename to docs/High_Performance_Computing/Parallel_Computing/Offloading_to_GPU_with_OpenACC.md index 05f91f0cf..880d24e9a 100644 --- a/docs/Scientific_Computing/HPC_Software_Environment/Offloading_to_GPU_with_OpenACC.md +++ b/docs/High_Performance_Computing/Parallel_Computing/Offloading_to_GPU_with_OpenACC.md @@ -9,8 +9,7 @@ zendesk_section_id: 360000040056 --- Many codes can be accelerated significantly by offloading computations -to a GPU. Some NeSI [Mahuika nodes have GPUs attached to -them](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/GPU_use_on_NeSI.md). +to a GPU. Some NeSI [Mahuika nodes have GPUs attached to them](GPU_use_on_NeSI.md). 
If you want your code to run faster, if you're developing your own code or if you have access to the source code and you feel comfortable editing the code, read on. @@ -117,5 +116,5 @@ time srun --ntasks=1 --cpus-per-task=1 --gpus-per-node=P100:1 ./totalAccGpu | total | 7.6 | | totalAccGpu | 0.41 | -Check out [this page](../../Scientific_Computing/HPC_Software_Environment/Offloading_to_GPU_with_OpenMP.md) +Check out [this page](Offloading_to_GPU_with_OpenMP.md) to find out how you can offload computations to a GPU using OpenMP. diff --git a/docs/Scientific_Computing/HPC_Software_Environment/Offloading_to_GPU_with_OpenMP.md b/docs/High_Performance_Computing/Parallel_Computing/Offloading_to_GPU_with_OpenMP.md similarity index 100% rename from docs/Scientific_Computing/HPC_Software_Environment/Offloading_to_GPU_with_OpenMP.md rename to docs/High_Performance_Computing/Parallel_Computing/Offloading_to_GPU_with_OpenMP.md diff --git a/docs/Scientific_Computing/HPC_Software_Environment/OpenMP_settings.md b/docs/High_Performance_Computing/Parallel_Computing/OpenMP_settings.md similarity index 85% rename from docs/Scientific_Computing/HPC_Software_Environment/OpenMP_settings.md rename to docs/High_Performance_Computing/Parallel_Computing/OpenMP_settings.md index dc2118d5c..1ded8c00c 100644 --- a/docs/Scientific_Computing/HPC_Software_Environment/OpenMP_settings.md +++ b/docs/High_Performance_Computing/Parallel_Computing/OpenMP_settings.md @@ -12,10 +12,9 @@ zendesk_section_id: 360000040056 programming interface that lets you write parallel programs on shared memory platforms. In a parallel section, OpenMP code can create multiple threads that run on separate cores, executing their shares of the total -workload concurrently. OpenMP is suited for the Mahuika and Māui HPCs as -each platform has 36 and 40 physical cores per node respectively.  Each +workload concurrently. 
Each physical core can handle up to two threads in parallel using -[Hyperthreading](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Hyperthreading.md). +[Hyperthreading](Hyperthreading.md). Therefore you can run up to 72 threads on Mahuika and 80 threads on Māui The environment variable that controls the number of threads is @@ -36,8 +35,7 @@ export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK ``` in your Slurm script - although this can sometimes be more complicated, -e.g., with [TensorFlow on -CPUs](../../Scientific_Computing/Supported_Applications/TensorFlow_on_CPUs.md). +e.g., with [TensorFlow on CPUs](TensorFlow_on_CPUs.md). On Mahuika, you will be charged for the number of physical cores that you requested - the second logical core on a physical core is free, @@ -48,9 +46,8 @@ In order to achieve good and consistent parallel scaling, additional settings may be required. This is particularly true on Mahuika whose nodes are shared between different Slurm jobs. Following are some settings that can help improve scaling and/or make your timings more -consistent, additional information can be found in our article [Thread -Placement and Thread -Affinity](../../Scientific_Computing/HPC_Software_Environment/Thread_Placement_and_Thread_Affinity.md). +consistent, additional information can be found in our article +[Thread Placement and Thread Affinity](Thread_Placement_and_Thread_Affinity.md). 1. `--hint=nomultithread`. Set this in conjunction with srun or sbatch to tell Slurm that you don't want to use hyperthreads. Your program will @@ -65,12 +62,11 @@ generally advisable to pin the threads to avoid delays caused by thread migration. 3. `OMP_PLACES`. Set this to "cores" if you want to pin the threads to -physical cores, or to "threads" if you want to use hyperthreading.  +physical cores, or to "threads" if you want to use hyperthreading. The effect of each setting is illustrated below. 
In this experiment we measured the execution time twice of the finite difference -code [upwindCxx -numCells 256 -numSteps -10.](https://github.com/pletzer/fidibench) The code was built with the +code [upwindCxx -numCells 256 -numSteps 10.](https://github.com/pletzer/fidibench) The code was built with the gimpi/2018b toolchain on Mahuika. diff --git a/docs/Scientific_Computing/HPC_Software_Environment/Run_an_executable_under_Apptainer_in_parallel.md b/docs/High_Performance_Computing/Parallel_Computing/Run_an_executable_under_Apptainer_in_parallel.md similarity index 100% rename from docs/Scientific_Computing/HPC_Software_Environment/Run_an_executable_under_Apptainer_in_parallel.md rename to docs/High_Performance_Computing/Parallel_Computing/Run_an_executable_under_Apptainer_in_parallel.md diff --git a/docs/Scientific_Computing/HPC_Software_Environment/Run_an_executable_under_Apptainer_on_gpu.md b/docs/High_Performance_Computing/Parallel_Computing/Run_an_executable_under_Apptainer_on_gpu.md similarity index 100% rename from docs/Scientific_Computing/HPC_Software_Environment/Run_an_executable_under_Apptainer_on_gpu.md rename to docs/High_Performance_Computing/Parallel_Computing/Run_an_executable_under_Apptainer_on_gpu.md diff --git a/docs/Scientific_Computing/HPC_Software_Environment/Thread_Placement_and_Thread_Affinity.md b/docs/High_Performance_Computing/Parallel_Computing/Thread_Placement_and_Thread_Affinity.md similarity index 96% rename from docs/Scientific_Computing/HPC_Software_Environment/Thread_Placement_and_Thread_Affinity.md rename to docs/High_Performance_Computing/Parallel_Computing/Thread_Placement_and_Thread_Affinity.md index fab6fd8e9..5401883ba 100644 --- a/docs/Scientific_Computing/HPC_Software_Environment/Thread_Placement_and_Thread_Affinity.md +++ b/docs/High_Performance_Computing/Parallel_Computing/Thread_Placement_and_Thread_Affinity.md @@ -10,8 +10,8 @@ zendesk_section_id: 360000040056 Multithreading with OpenMP and other threading libraries is 
an important way to parallelise scientific software for faster execution (see our -article on [Parallel -Execution](../../Getting_Started/Next_Steps/Parallel_Execution.md) for +article on +[Parallel Execution](Parallel_Execution.md) for an introduction). Care needs to be taken when running multiple threads on the HPC to achieve best performance - getting it wrong can easily increase compute times by tens of percents, sometimes even more. This is @@ -37,7 +37,7 @@ performance, as a socket connects the processor to its RAM and other processors. A processor in each socket consists of multiple physical cores, and each physical core is split into two logical cores using a technology called -[Hyperthreading](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Hyperthreading.md)). +[Hyperthreading](Hyperthreading.md). A processor also includes caches - a [cache](https://en.wikipedia.org/wiki/CPU_cache) is very fast memory @@ -51,7 +51,7 @@ cores (our current HPCs have 18 to 20 cores). Each core can also be further divided into two logical cores (or hyperthreads, as mentioned before). -![NodeSocketCore.png](../../assets/images/Thread_Placement_and_Thread_Affinity.png) +![NodeSocketCore.png](Thread_Placement_and_Thread_Affinity.png) It is very important to note the following: @@ -119,7 +119,7 @@ int main() } ``` -On Mahuika or Māui Ancil, compile the program using the commands +On Mahuika, compile the program using the commands ``` sh module load intel/2018b @@ -328,8 +328,7 @@ OMP: Info #247: KMP_AFFINITY: pid 180198 tid 180202 thread 3 bound to OS proc se [...] ``` -Please refer to the [Intel -documentation](https://software.intel.com/en-us/cpp-compiler-developer-guide-and-reference-thread-affinity-interface-linux-and-windows) +Please refer to the [Intel documentation](https://software.intel.com/en-us/cpp-compiler-developer-guide-and-reference-thread-affinity-interface-linux-and-windows) for further information on "KMP\_AFFINITY". 
## Tips @@ -358,4 +357,4 @@ thread affinity by choosing: export KMP_AFFINITY=granularity=fine,compact,0,0 ``` -You can now try out other configurations and compare runtimes. \ No newline at end of file +You can now try out other configurations and compare runtimes. diff --git a/docs/High_Performance_Computing/Software/.pages.yml b/docs/High_Performance_Computing/Software/.pages.yml new file mode 100644 index 000000000..f3c036dd6 --- /dev/null +++ b/docs/High_Performance_Computing/Software/.pages.yml @@ -0,0 +1,5 @@ +nav: + - Software_Catalouge + - Building_Software + - Profiling_and_Debugging + - ... diff --git a/docs/High_Performance_Computing/Software/Building_Software/.pages.yml b/docs/High_Performance_Computing/Software/Building_Software/.pages.yml new file mode 100644 index 000000000..58088b568 --- /dev/null +++ b/docs/High_Performance_Computing/Software/Building_Software/.pages.yml @@ -0,0 +1,2 @@ +nav: + - ... diff --git a/docs/Scientific_Computing/HPC_Software_Environment/Build_an_Apptainer_container_on_a_Milan_compute_node.md b/docs/High_Performance_Computing/Software/Building_Software/Build_an_Apptainer_container_on_a_Milan_compute_node.md similarity index 91% rename from docs/Scientific_Computing/HPC_Software_Environment/Build_an_Apptainer_container_on_a_Milan_compute_node.md rename to docs/High_Performance_Computing/Software/Building_Software/Build_an_Apptainer_container_on_a_Milan_compute_node.md index 357c61a52..d1946aa31 100644 --- a/docs/Scientific_Computing/HPC_Software_Environment/Build_an_Apptainer_container_on_a_Milan_compute_node.md +++ b/docs/High_Performance_Computing/Software/Building_Software/Build_an_Apptainer_container_on_a_Milan_compute_node.md @@ -9,17 +9,15 @@ zendesk_section_id: 360000040056 --- This article describes a technique to build -[Apptainer](https://apptainer.org/) containers using [Milan compute -nodes](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Milan_Compute_Nodes.md), +[Apptainer](https://apptainer.org/) 
containers using [Milan compute nodes](Milan_Compute_Nodes.md), via a Slurm job. You can also build -[Singularity](../../Scientific_Computing/Supported_Applications/Singularity.md) +[Singularity](Singularity.md) container using this technique. ## Building container via Slurm The new Milan compute nodes can be used to build Apptainer containers -using the [fakeroot -feature](https://apptainer.org/docs/user/main/fakeroot.html). This +using the [fakeroot feature](https://apptainer.org/docs/user/main/fakeroot.html). This functionality is only available on these nodes at the moment due to their operating system version. @@ -74,8 +72,7 @@ Option `--force` will rebuild *my_container.sif* even if it already is in the directory. More information about how to submit a Slurm job is available in the -[Submitting your first -job](../../Getting_Started/Next_Steps/Submitting_your_first_job.md) +[Submitting your first job](Submitting_your_first_job.md) support page. !!! info "Build environment variables" diff --git a/docs/Scientific_Computing/HPC_Software_Environment/Compiling_software_on_Mahuika.md b/docs/High_Performance_Computing/Software/Building_Software/Compiling_software.md similarity index 99% rename from docs/Scientific_Computing/HPC_Software_Environment/Compiling_software_on_Mahuika.md rename to docs/High_Performance_Computing/Software/Building_Software/Compiling_software.md index cc63ad030..a06429abe 100644 --- a/docs/Scientific_Computing/HPC_Software_Environment/Compiling_software_on_Mahuika.md +++ b/docs/High_Performance_Computing/Software/Building_Software/Compiling_software.md @@ -1,11 +1,7 @@ --- created_at: '2018-07-12T03:48:47Z' tags: [] -title: "Compiling software: Mahuika" -vote_count: 1 -vote_sum: 1 -zendesk_article_id: 360000329015 -zendesk_section_id: 360000040056 +title: "Compiling software" --- ## Where to build diff --git a/docs/High_Performance_Computing/Software/Building_Software/Creating_Your_Own_Modules.md 
b/docs/High_Performance_Computing/Software/Building_Software/Creating_Your_Own_Modules.md new file mode 100644 index 000000000..1b845d825 --- /dev/null +++ b/docs/High_Performance_Computing/Software/Building_Software/Creating_Your_Own_Modules.md @@ -0,0 +1,41 @@ +--- +created_at: 2025-02-21 +tags: + - lmod + - modules +--- + +You can create personalised module environments, which can load modules +and set up environment variables. For example, you could define a +module in a project directory +`/nesi/project//modulefiles/ProdXY` as the following: + +In the first lines, we can set conflicts with other modules (here named +ProdABC). Then we load some dependency modules and provide some +description. The additional lines depend on your requirements for the +module. With *set* you can define internal variables (within this module +file). The command *setenv* defines an environment variable. And +*prepend-path* and *append-path* extend an environment variable at the +front or end. + +There are common environment variables like: + +- *PATH* for providing executables, +- *LD\_LIBRARY\_PATH* for self created libraries, +- *PYTHONPATH* for providing Python modules, +- *CONDA\_ENVS\_PATH* for providing Conda environments, +- etc. + +And others which are very application specific. + +To use the module (or all in that directory and sub-directories) we need +to register that directory to the module environment. This can be done +by setting the following environment variable: + +by adding that line to your `$HOME/.bashrc` you will have the modules +always available. + +The module then can be loaded by: + +These modules can easily be shared with collaborators. They just need to +specify the last two steps. 
diff --git a/docs/High_Performance_Computing/Software/Building_Software/Installing_Third_Party_applications.md b/docs/High_Performance_Computing/Software/Building_Software/Installing_Third_Party_applications.md new file mode 100644 index 000000000..a491c2ddd --- /dev/null +++ b/docs/High_Performance_Computing/Software/Building_Software/Installing_Third_Party_applications.md @@ -0,0 +1,30 @@ +--- +created_at: '2018-09-24T01:51:32Z' +tags: [] +title: Building_Software +vote_count: 3 +vote_sum: 3 +--- + +Installation instructions vary from application to application. In any +case we suggest reading the provided installation instructions. +Nevertheless, the following should give you an impression of which steps +you usually need to consider: + +- Change into a desired source code directory. We suggest to use + `/nesi/nobackup/` or `/nesi/project/` +- download the source code. This could be done via a repository + checkout (`git clone `) or + via downloading a tarball (`wget `). Unpack the + tarball using `tar xf `. Change into source + directory. 
+- load compiler module and modules for additional libraries + (`module load gimkl FFTW`) +- run the configure with appropriate options + `./configure --prefix= --use-fftw=$EBROOTFFTW  `(options + can be listed using `./configure --help`) +- In other applications you need to adjust the provided `Makefile` to + reflect compiler, and library options (see below) +- compile code (`make`) +- install the binaries and libraries into the specified directory + (`make install`) diff --git a/docs/Scientific_Computing/Profiling_and_Debugging/.pages.yml b/docs/High_Performance_Computing/Software/Profiling_and_Debugging/.pages.yml similarity index 100% rename from docs/Scientific_Computing/Profiling_and_Debugging/.pages.yml rename to docs/High_Performance_Computing/Software/Profiling_and_Debugging/.pages.yml diff --git a/docs/Scientific_Computing/Profiling_and_Debugging/Debugging.md b/docs/High_Performance_Computing/Software/Profiling_and_Debugging/Debugging.md similarity index 77% rename from docs/Scientific_Computing/Profiling_and_Debugging/Debugging.md rename to docs/High_Performance_Computing/Software/Profiling_and_Debugging/Debugging.md index 46351f56f..a337863ac 100644 --- a/docs/Scientific_Computing/Profiling_and_Debugging/Debugging.md +++ b/docs/High_Performance_Computing/Software/Profiling_and_Debugging/Debugging.md @@ -83,7 +83,7 @@ information are provided into the application stdout and a HTML file is created. Thus this could also be a handy alternative for print statements without touching the code. -![DDT-offline-example.PNG](../../assets/images/Debugging.PNG) +![DDT-offline-example.png](Debugging.png) [See full example page here.](https://mand35.github.io/NeSI_docu_ext/ddt_sample.html) @@ -108,7 +108,7 @@ launching an application with DDT (RUN). In the RUN menu the different settings for the executable need to be specified. 
-![RUN\_ddt.PNG](../../assets/images/Debugging_0.PNG) +![RUN\_ddt.png](Debugging_0.png) Beside Application location and name, we need to specify arguments, working directory, MPI and OpenMP settings. If we have no interactive @@ -122,7 +122,7 @@ Variables section you can load necessary modules. After submitting the task, DDT launches the application (wait for the workload manager if necessary) and opens the following window. -![DDT\_overview.PNG](../../assets/images/Debugging_1.PNG) +![DDT\_overview.png](Debugging_1.png) In the top part the processes and threads can be selected. The application is paused at the initialization phase, giving the user the @@ -130,36 +130,3 @@ opportunity to set break/watch points, and define the type execution (in/over/out of functions or just until next break point). For more detailed information see the [DDT manual](https://developer.arm.com/docs/101136/latest/ddt) -## ATP (Cray Abnormal Termination Processing) - -!!! warning - This tool is only available on Māui. - -Abnormal Termination Processing (ATP) is a system that monitors Cray XC -System (Maui) user applications, and should an application take a system -trap, ATP preforms analysis on the dying application. All of the stack -backtraces of the application processes are gathered into a merged -stack back trace tree and written to disk as the file `atpMergedBT.dot`. -The stack back trace for the first process to die is sent to stderr as is -the number of the signal that caused the death. If the core file size -limit (`RLIMIT_CORE`) is non-zero, a heuristically selected set of -processes dump their core. - -An example output looks like: - -```out -Application 427046 is crashing. ATP analysis proceeding... 
- -ATP Stack walkback for Rank 0 starting: - _start@start.S:118 - __libc_start_main@libc-start.c:289 - main@fail.c:65 - m_routine@fail.c:38 - calculation@fail.c:31 - do_task@fail.c:25 -ATP Stack walkback for Rank 0 done -Process died with signal 8: 'Floating point exception' -Forcing core dumps of ranks 0, 1 -View application merged backtrace tree with: stat-view atpMergedBT.dot -You may need to: module load stat -``` diff --git a/docs/Scientific_Computing/Profiling_and_Debugging/Profiler-ARM_MAP.md b/docs/High_Performance_Computing/Software/Profiling_and_Debugging/Profiler-ARM_MAP.md similarity index 94% rename from docs/Scientific_Computing/Profiling_and_Debugging/Profiler-ARM_MAP.md rename to docs/High_Performance_Computing/Software/Profiling_and_Debugging/Profiler-ARM_MAP.md index 06a4830c3..163fb556b 100644 --- a/docs/Scientific_Computing/Profiling_and_Debugging/Profiler-ARM_MAP.md +++ b/docs/High_Performance_Computing/Software/Profiling_and_Debugging/Profiler-ARM_MAP.md @@ -83,7 +83,7 @@ map ``` Then click on “PROFILE”. -![MAP\_profile\_python.PNG](../../assets/images/Profiler-ARM_MAP.png) +![MAP\_profile\_python.png](Profiler-ARM_MAP.png) In the profile menu we need to specify the *executable/application* (in this case `python`), the arguments (here `scatter.py` and any additional @@ -102,7 +102,7 @@ profile information. By default the profile window is divided into the following three main sections (click on picture to enlarge). -![example-map-scatter](../../assets/images/Profiler-ARM_MAP_0.png) +![example-map-scatter](Profiler-ARM_MAP_0.png) On top, various metrics can be selected in the “Metrics” menu. In the middle part, a source code navigator connects line by line source code @@ -126,8 +126,6 @@ using the *Metrics* Menu*.* As an example, “CPU instructions” presents the usage of different instruction sets during the program run time. 
-[![example-map-scatter\_CPU](../../assets/images/Profiler-ARM_MAP_1.png)](https://nesi.github.io/perf-training/python-scatter/images/ARM_MAP_scatter_mpi_CPU.png) - The lower part can also be used to check the *application output* or show statistics on basis of *files* or *functions*. diff --git a/docs/Scientific_Computing/Profiling_and_Debugging/Profiler-VTune.md b/docs/High_Performance_Computing/Software/Profiling_and_Debugging/Profiler-VTune.md similarity index 100% rename from docs/Scientific_Computing/Profiling_and_Debugging/Profiler-VTune.md rename to docs/High_Performance_Computing/Software/Profiling_and_Debugging/Profiler-VTune.md diff --git a/docs/Scientific_Computing/Profiling_and_Debugging/Slurm_Native_Profiling.md b/docs/High_Performance_Computing/Software/Profiling_and_Debugging/Slurm_Native_Profiling.md similarity index 100% rename from docs/Scientific_Computing/Profiling_and_Debugging/Slurm_Native_Profiling.md rename to docs/High_Performance_Computing/Software/Profiling_and_Debugging/Slurm_Native_Profiling.md diff --git a/docs/Scientific_Computing/Supported_Applications/ABAQUS.md b/docs/High_Performance_Computing/Software/Software_Catalouge/ABAQUS.md similarity index 92% rename from docs/Scientific_Computing/Supported_Applications/ABAQUS.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/ABAQUS.md index efaa8fe6f..32e82b652 100644 --- a/docs/Scientific_Computing/Supported_Applications/ABAQUS.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/ABAQUS.md @@ -45,7 +45,7 @@ parameter `academic=TEACHING` or `academic=RESEARCH` in a relevant intuitive formula ⌊ 5 x N0.422 where `N` is number of CPUs. -[Hyperthreading](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Hyperthreading.md) +[Hyperthreading](Hyperthreading.md) can provide significant speedup to your computations, however hyperthreaded CPUs will use twice the number of licence tokens. 
It may be worth adding `#SBATCH --hint nomultithread` to your slurm script if @@ -76,7 +76,7 @@ Not all solvers are compatible with all types of parallelisation. === "Serial" For when only one CPU is required, generally as part of - a [job array](../../Getting_Started/Next_Steps/Parallel_Execution.md#job-arrays) + a [job array](../../Mahuika_Cluster/Next_Steps/Parallel_Execution.md#job-arrays) ```sl #!/bin/bash -e @@ -183,14 +183,12 @@ source code. Extra compiler options can be set in your local `abaqus_v6.env` [file](#environment-file). The default compile commands are for `imkl`, other compilers can be -loaded with `module load`, you may have to change the [compile -commands](../../Scientific_Computing/HPC_Software_Environment/Compiling_software_on_Mahuika.md) +loaded with `module load`, you may have to change the [compile commands](Compiling_software_on_Mahuika.md) in your local `.env` file. ## Environment file -The [ABAQUS environment -file](http://media.3ds.com/support/simulia/public/v613/installation-and-licensing-guides/books/sgb/default.htm?startat=ch04s01.html) contains +The [ABAQUS environmentfile](http://media.3ds.com/support/simulia/public/v613/installation-and-licensing-guides/books/sgb/default.htm?startat=ch04s01.html) contains a number of parameters that define how the your job will run, some of these you may with to change. @@ -219,7 +217,7 @@ rm "abaqus_v6.env" ## Performance -![ABAQUS\_speedup\_SharedVMPI.png](../../assets/images/ABAQUS.png) +![ABAQUS\_speedup\_SharedVMPI.png](ABAQUS.png) *Note: Hyperthreading off, testing done on small mechanical FEA model. Results highly model dependant. 
Do your own tests.* diff --git a/docs/Scientific_Computing/Supported_Applications/ANSYS.md b/docs/High_Performance_Computing/Software/Software_Catalouge/ANSYS.md similarity index 98% rename from docs/Scientific_Computing/Supported_Applications/ANSYS.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/ANSYS.md index fd7f9a018..0b35a6b83 100644 --- a/docs/Scientific_Computing/Supported_Applications/ANSYS.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/ANSYS.md @@ -146,8 +146,7 @@ the use of variables in what might otherwise be a fixed input. ## Fluent -[Some great documentation on journal -files](https://docs.hpc.shef.ac.uk/en/latest/referenceinfo/ANSYS/fluent/writing-fluent-journal-files.html) +[Some great documentation on journal files](https://docs.hpc.shef.ac.uk/en/latest/referenceinfo/ANSYS/fluent/writing-fluent-journal-files.html) `fluent -help` for a list of commands. @@ -215,8 +214,7 @@ Must have one of these flags. While it will always be more time and resource efficient using a slurm script as shown above, there are occasions where the GUI is required. If you only require a few CPUs for a short while you may run the fluent on -the login node, otherwise use of an [slurm interactive -session](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Slurm_Interactive_Sessions.md) +the login node, otherwise use of an [slurm interactive session](Slurm_Interactive_Sessions.md) is recommended. For example. @@ -570,7 +568,7 @@ the command `fensapiceGUI` from within your FENSAP project directory. 1. Launch the run and select the desired number of (physical) CPUs. 2. Open the 'configure' panel. - ![FENSAP gui](../../assets/images/ANSYS.png) + ![FENSAP gui](ANSYS.png) 3. Under 'Additional mpirun parameters' add your inline SLURM options. You should include at least. @@ -585,7 +583,7 @@ number of (physical) CPUs. Note: All these parameters will be applied to each individual step. 4. Start the job. 
You can track progress under the 'log' tab. - ![FENSAP GUI](../../assets/images/ANSYS_0.png) + ![FENSAP GUI](ANSYS_0.png) You may close your session and the job will continue to run on the compute nodes. You will be able to view the running job at any time by @@ -624,8 +622,7 @@ Progress can be tracked through the GUI as usual. ## ANSYS-Electromagnetic -ANSYS-EM jobs can be submitted through a slurm script or by [interactive -session](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Slurm_Interactive_Sessions.md). +ANSYS-EM jobs can be submitted through a slurm script or by [interactive session](Slurm_Interactive_Sessions.md). ### RSM diff --git a/docs/Scientific_Computing/Supported_Applications/AlphaFold.md b/docs/High_Performance_Computing/Software/Software_Catalouge/AlphaFold.md similarity index 94% rename from docs/Scientific_Computing/Supported_Applications/AlphaFold.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/AlphaFold.md index 20638b588..ff77316b6 100644 --- a/docs/Scientific_Computing/Supported_Applications/AlphaFold.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/AlphaFold.md @@ -17,9 +17,7 @@ zendesk_section_id: 360000040076 !!! prerequisite Tips An extended version of AlphaFold2 on NeSI Mahuika cluster which - contains additional information such as visualisation of AlphaFold - outputs, etc [can be found - here](https://nesi.github.io/alphafold2-on-mahuika/) + contains additional information such as [visualisation of AlphaFold outputs, etc](https://nesi.github.io/alphafold2-on-mahuika/) ## Description @@ -30,10 +28,10 @@ as AlphaFold throughout the rest of this document. Any publication that discloses findings arising from using this source code or the model parameters -should [cite](https://github.com/deepmind/alphafold#citing-this-work) the [AlphaFold -paper](https://doi.org/10.1038/s41586-021-03819-2). 
Please also refer to -the [Supplementary -Information](https://static-content.springer.com/esm/art%3A10.1038%2Fs41586-021-03819-2/MediaObjects/41586_2021_3819_MOESM1_ESM.pdf) for +should [cite](https://github.com/deepmind/alphafold#citing-this-work) the  +[AlphaFold paper](https://doi.org/10.1038/s41586-021-03819-2). +Please also refer to the +[Supplementary Information](https://static-content.springer.com/esm/art%3A10.1038%2Fs41586-021-03819-2/MediaObjects/41586_2021_3819_MOESM1_ESM.pdf) for a detailed description of the method. Home page is at @@ -191,8 +189,7 @@ run_alphafold.py \ If you would like to use a version prior to 2.3.2, It can be done via the Singularity containers. -We prepared a Singularity container image based on the [official -Dockerfile](https://hub.docker.com/r/catgumag/alphafold) with some +We prepared a Singularity container image based on the [official Dockerfile](https://hub.docker.com/r/catgumag/alphafold) with some modifications. Image (.*simg*) and the corresponding definition file (*.def*) are stored in `/opt/nesi/containers/AlphaFold/` @@ -286,8 +283,7 @@ singularity exec --nv /opt/nesi/containers/AlphaFold/alphafold_2.2.0.simg python were identical. Therefore, the above example was set to former via `P100:1` 3. The `--nv` flag enables GPU support. -4. `--pwd /app/alphafold` is to workaround this [existing - issue](https://github.com/deepmind/alphafold/issues/32) +4. 
`--pwd /app/alphafold` is to workaround this [existing issue](https://github.com/deepmind/alphafold/issues/32) ### AlphaFold2 : Initial Release ( this version does not support `multimer`) diff --git a/docs/Scientific_Computing/Supported_Applications/BLAST.md b/docs/High_Performance_Computing/Software/Software_Catalouge/BLAST.md similarity index 100% rename from docs/Scientific_Computing/Supported_Applications/BLAST.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/BLAST.md diff --git a/docs/Scientific_Computing/Supported_Applications/BRAKER.md b/docs/High_Performance_Computing/Software/Software_Catalouge/BRAKER.md similarity index 98% rename from docs/Scientific_Computing/Supported_Applications/BRAKER.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/BRAKER.md index 0681703c2..3d06da270 100644 --- a/docs/Scientific_Computing/Supported_Applications/BRAKER.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/BRAKER.md @@ -59,7 +59,7 @@ Artistic License !!! info "prerequisite Obtain GeneMark-ES/ET Academic License" GeneMark-ES/ET which is one of the dependencies for BRAKER requires an individual academic license  (this is free). This can be obtained as below - Download URL - - ![genemark\_es\_license.png](../../assets/images/BRAKER.png) + - ![genemark\_es\_license.png](BRAKER.png) - Downloaded filename will be in the format of `gm_key_64.gz` - Decompress this file with `gunzip gm_key_64.gz`  and move it to home directory as  a **hidden** file under the filename `.gm_key` .i.e. 
`~/.gm_key` diff --git a/docs/Scientific_Computing/Supported_Applications/CESM.md b/docs/High_Performance_Computing/Software/Software_Catalouge/CESM.md similarity index 98% rename from docs/Scientific_Computing/Supported_Applications/CESM.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/CESM.md index ab3fc26cd..962765017 100644 --- a/docs/Scientific_Computing/Supported_Applications/CESM.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/CESM.md @@ -175,8 +175,7 @@ Make sure you still have the environment variable set with your project code: export PROJECT_CODE= ``` -Here we will run the test described in the CESM [quick start -guide](https://escomp.github.io/CESM/release-cesm2/quickstart.html). The +Here we will run the test described in the CESM [quick start guide](https://escomp.github.io/CESM/release-cesm2/quickstart.html). The following are basic instructions to create and run the case, see the above link for more information. diff --git a/docs/Scientific_Computing/Supported_Applications/COMSOL.md b/docs/High_Performance_Computing/Software/Software_Catalouge/COMSOL.md similarity index 93% rename from docs/Scientific_Computing/Supported_Applications/COMSOL.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/COMSOL.md index 849dd1318..551469c65 100644 --- a/docs/Scientific_Computing/Supported_Applications/COMSOL.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/COMSOL.md @@ -7,10 +7,6 @@ tags: - cfd - fea description: Running COMSOL multiphysics on the NeSI cluster. -vote_count: 1 -vote_sum: 1 -zendesk_article_id: 360000871556 -zendesk_section_id: 360000040076 --- {% set app_name = page.title | trim %} @@ -51,6 +47,7 @@ distribution. #!/bin/bash -e #SBATCH --job-name COMSOL-serial + #SBATCH --account nesi99991 #SBATCH --licenses comsol@uoa_foe #SBATCH --time 00:05:00 # Walltime #SBATCH --mem 1512 # total mem @@ -63,7 +60,9 @@ distribution. 
```sl #!/bin/bash -e + #SBATCH --job-name COMSOL-shared + #SBATCH --account nesi99991 #SBATCH --licenses comsol@uoa_foe #SBATCH --time 00:05:00 # Walltime #SBATCH --cpus-per-task 8 @@ -78,6 +77,7 @@ distribution. #!/bin/bash -e #SBATCH --job-name COMSOL-distributed + #SBATCH --account nesi99991 #SBATCH --licenses comsol@uoa_foe #SBATCH --time 00:05:00 # Walltime #SBATCH --ntasks 8 @@ -91,7 +91,9 @@ distribution. ```sl #!/bin/bash -e + #SBATCH --job-name COMSOL-hybrid + #SBATCH --account nesi99991 #SBATCH --licenses comsol@uoa_foe #SBATCH --time 00:05:00 # Walltime #SBATCH --ntasks 4 @@ -105,9 +107,10 @@ distribution. === "LiveLink" ```sl - #!/bin/bash -e + #SBATCH --job-name COMSOL-livelink + #SBATCH --account nesi99991 #SBATCH --licenses comsol@uoa_foe #SBATCH --time 00:05:00 #SBATCH --cpus-per-task 16 @@ -128,7 +131,7 @@ distribution. ## Interactive Use -Providing you have [set up X11](../../Scientific_Computing/Terminal_Setup/X11_on_NeSI.md), you can +Providing you have [set up X11](X11_on_NeSI.md), you can open the COMSOL GUI by running the command `comsol`. Large jobs should not be run on the login node. @@ -160,12 +163,11 @@ Multithreading will benefit jobs using less than 8 CPUs, but is not recommended on larger jobs. *Performance is highly depended on the model used. The above should only be used as a rough guide.* -![Speedup](../../assets/images/speedup_smoothed.png) +![Speedup](speedup_smoothed.png) -## Tmpdir +## TmpDir If you find yourself receiving the error 'Disk quota exceeded', yet `nn_storage_quota` shows plenty of room in your filesystem, you may be running out of tmpdir. -This can be fixed by using the `--tmpdir` flag in the comsol command line, e.g. `comsol --tmpdir /nesi/nobackup/nesi99991/comsoltmp`, or by exporting `TMPDIR` before running the command, e.g. `export TMPDIR=/nesi/nobackup/nesi99991/comsoltmp`. +This can be fixed by using the `--tmpdir` flag in the COMSOL command line, e.g. 
`comsol --tmpdir /nesi/nobackup/nesi99991/comsoltmp`, or by exporting `TMPDIR` before running the command, e.g. `export TMPDIR=/nesi/nobackup/nesi99991/comsoltmp`. You may also want to set this at the Java level with `export _JAVA_OPTIONS=-Djava.io.tmpdir=/nesi/nobackup/nesi99991/comsoltmp` - diff --git a/docs/Scientific_Computing/Supported_Applications/Clair3.md b/docs/High_Performance_Computing/Software/Software_Catalouge/Clair3.md similarity index 100% rename from docs/Scientific_Computing/Supported_Applications/Clair3.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/Clair3.md diff --git a/docs/Scientific_Computing/Supported_Applications/Cylc.md b/docs/High_Performance_Computing/Software/Software_Catalouge/Cylc.md similarity index 96% rename from docs/Scientific_Computing/Supported_Applications/Cylc.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/Cylc.md index 69435b164..7fafe603a 100644 --- a/docs/Scientific_Computing/Supported_Applications/Cylc.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/Cylc.md @@ -29,8 +29,7 @@ See the NeSI  [Snakemake](https://snakemake-on-nesi.sschmeier.com/) page for another, possible choice. In this article, we show how you can create a simple workflow and run it -on NeSI's platform. Consult the [Cylc -documentation](https://cylc.github.io/documentation/) for more elaborate +on NeSI's platform. Consult the [Cylc documentation](https://cylc.github.io/documentation/) for more elaborate examples, including some with a cycling (repeated) graph pattern. One of the strengths of Cylc is that simple workflows can be executed simply while allowing for very complex workflows, with thousands of tasks, @@ -197,17 +196,17 @@ $ cylc graph simple ``` which will generate a png file, generally in the /tmp directory with a -name like /tmp/tmpzq3bjktw.PNG. Take note of the name of the png file. +name like /tmp/tmpzq3bjktw.png. Take note of the name of the png file. 
To visualise the file you can type ``` sh -$ display  /tmp/tmpzq3bjktw.PNG # ADJUST the file name +$ display  /tmp/tmpzq3bjktw.png # ADJUST the file name ``` Here, we see that our workflow "simple" has a "taskC", which waits for "taskA" and "taskB" to complete, -![simple.png](../../assets/images/Cylc.png) +![simple.png](Cylc.png) The "1" indicates that this workflow graph is executed only once. @@ -249,8 +248,7 @@ $ ssh -N -L PORT:localhost:PORT HOST ``` where **PORT** is a valid port number and **HOST** can be Māui or -mahuika. See the [NeSI -page](../../Getting_Started/Accessing_the_HPCs/Port_Forwarding.md) for +mahuika. See the [Port Forwarding](Port_Forwarding.md) for the range of allowed ports (currently 1024-49151). Choose any number in this range but make sure your port number is fairly unique to avoid clashing with other users. Option -N is optional: it opens the diff --git a/docs/Scientific_Computing/Supported_Applications/Delft3D.md b/docs/High_Performance_Computing/Software/Software_Catalouge/Delft3D.md similarity index 96% rename from docs/Scientific_Computing/Supported_Applications/Delft3D.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/Delft3D.md index 347fc07e6..d5bb103eb 100644 --- a/docs/Scientific_Computing/Supported_Applications/Delft3D.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/Delft3D.md @@ -16,7 +16,7 @@ tags: === "Serial" - For when only one CPU is required, generally as part of a [job array](../../Getting_Started/Next_Steps/Parallel_Execution.md#job-arrays). + For when only one CPU is required, generally as part of a [job array](Parallel_Execution.md#job-arrays). 
```sl #!/bin/bash -e diff --git a/docs/Scientific_Computing/Supported_Applications/Dorado.md b/docs/High_Performance_Computing/Software/Software_Catalouge/Dorado.md similarity index 100% rename from docs/Scientific_Computing/Supported_Applications/Dorado.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/Dorado.md diff --git a/docs/Scientific_Computing/Supported_Applications/FDS.md b/docs/High_Performance_Computing/Software/Software_Catalouge/FDS.md similarity index 88% rename from docs/Scientific_Computing/Supported_Applications/FDS.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/FDS.md index 919ebea2f..38ae124ea 100644 --- a/docs/Scientific_Computing/Supported_Applications/FDS.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/FDS.md @@ -26,9 +26,9 @@ General documentation can be found [here](https://github.com/firemodels/fds/releases/download/FDS6.7.1/FDS_User_Guide.pdf). FDS can utilise both -[MPI](../../Getting_Started/Next_Steps/Parallel_Execution.md#mpi) +[MPI](../../Mahuika_Cluster/Next_Steps/Parallel_Execution.md#mpi) and -[OpenMP](../../Getting_Started/Next_Steps/Parallel_Execution.md#multi-threading) +[OpenMP](../../Mahuika_Cluster/Next_Steps/Parallel_Execution.md#multi-threading) ## Example Script @@ -64,8 +64,8 @@ srun fds ${input} ### Scaling with MPI -![FDS scaling distrubuted mem](../../assets/images/FDS.png) +![FDS scaling distrubuted mem](FDS.png) ### Scaling with oMP -![FDS scaling shared mem](../../assets/images/FDS_0.png) +![FDS scaling shared mem](FDS_0.png) diff --git a/docs/Scientific_Computing/Supported_Applications/FlexiBLAS.md b/docs/High_Performance_Computing/Software/Software_Catalouge/FlexiBLAS.md similarity index 100% rename from docs/Scientific_Computing/Supported_Applications/FlexiBLAS.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/FlexiBLAS.md diff --git a/docs/Scientific_Computing/Supported_Applications/GATK.md 
b/docs/High_Performance_Computing/Software/Software_Catalouge/GATK.md similarity index 96% rename from docs/Scientific_Computing/Supported_Applications/GATK.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/GATK.md index 3198bda47..354c41390 100644 --- a/docs/Scientific_Computing/Supported_Applications/GATK.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/GATK.md @@ -15,8 +15,7 @@ zendesk_section_id: 360000040076 {% include "partials/app_header.html" %} [//]: <> (APPS PAGE BOILERPLATE END) -The Genome Analysis Toolkit (GATK), developed at the [Broad -Institute](http://www.broadinstitute.org/), provides a wide variety of +The Genome Analysis Toolkit (GATK), developed at the [Broad Institute](http://www.broadinstitute.org/), provides a wide variety of tools focusing primarily on variant discovery and genotyping. It is regarded as the industry standard for identifying SNPS and indels in germline DNA and RNAseq data. diff --git a/docs/Scientific_Computing/Supported_Applications/GROMACS.md b/docs/High_Performance_Computing/Software/Software_Catalouge/GROMACS.md similarity index 96% rename from docs/Scientific_Computing/Supported_Applications/GROMACS.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/GROMACS.md index 3ace136bb..3e0850d76 100644 --- a/docs/Scientific_Computing/Supported_Applications/GROMACS.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/GROMACS.md @@ -25,9 +25,7 @@ but since GROMACS is extremely fast at calculating the nonbonded interactions (that usually dominate simulations) many groups are also using it for research on non-biological systems, e.g. polymers. -GROMACS is available to anyone at no cost under the terms of [the GNU -Lesser General Public -Licence](http://www.gnu.org/licenses/lgpl-2.1.html). Gromacs is a joint +GROMACS is available to anyone at no cost under the terms of [the GNU Lesser General Public Licence](http://www.gnu.org/licenses/lgpl-2.1.html). 
Gromacs is a joint effort, with contributions from developers around the world: users agree to acknowledge use of GROMACS in any reports or publications of results obtained with the Software. @@ -164,7 +162,7 @@ that is the number of vCPUs per node. NVIDIA has a GPU accelerated version of GROMACS in its NGC container registry (more details about NGC -[here](../../Scientific_Computing/HPC_Software_Environment/NVIDIA_GPU_Containers.md)). +[here](NVIDIA_GPU_Containers.md)). We have pulled a version of their container and stored it at this location (you can also pull your own version if you wish): */opt/nesi/containers/nvidia/gromacs-2020\_2.sif*. We have also provided @@ -175,4 +173,4 @@ an example submission script that calls the Singularity image here: [GROMACS Homepage](http://www.gromacs.org/) -[GROMACS Manual](http://www.gromacs.org/Documentation/Manual) \ No newline at end of file +[GROMACS Manual](http://www.gromacs.org/Documentation/Manual) diff --git a/docs/Scientific_Computing/Supported_Applications/Gaussian.md b/docs/High_Performance_Computing/Software/Software_Catalouge/Gaussian.md similarity index 100% rename from docs/Scientific_Computing/Supported_Applications/Gaussian.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/Gaussian.md diff --git a/docs/Scientific_Computing/Supported_Applications/Java.md b/docs/High_Performance_Computing/Software/Software_Catalouge/Java.md similarity index 100% rename from docs/Scientific_Computing/Supported_Applications/Java.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/Java.md diff --git a/docs/Scientific_Computing/Supported_Applications/Julia.md b/docs/High_Performance_Computing/Software/Software_Catalouge/Julia.md similarity index 95% rename from docs/Scientific_Computing/Supported_Applications/Julia.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/Julia.md index e93c77e1f..491cacd5c 100644 --- 
a/docs/Scientific_Computing/Supported_Applications/Julia.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/Julia.md @@ -191,10 +191,8 @@ $ export JULIA_LOAD_PATH="/nesi/project/nesi12345/julia:${JULIA_LOAD_PATH}" ## Profiling Julia code -In addition to the Julia Profile module (see the [official -documentation](https://docs.julialang.org/en/v1/manual/profile/)), it is -also possible to profile Julia code with [external -profilers](https://docs.julialang.org/en/v1/manual/profile/#External-Profiling-1). +In addition to the Julia Profile module (see the [official documentation](https://docs.julialang.org/en/v1/manual/profile/)), it is +also possible to profile Julia code with [external profilers](https://docs.julialang.org/en/v1/manual/profile/#External-Profiling-1). On Mahuika we have installed "-VTune" variants of Julia, which are built from source with support for profiling using Intel VTune. VTune is a nice tool for profiling parallel code (e.g. code making use of threading @@ -236,5 +234,4 @@ have X11 forwarding enabled: amplxe-gui --path-to-open ``` - Additional information about VTune can be found in the [User -Guide](https://software.intel.com/en-us/vtune-amplifier-help). + Additional information about VTune can be found in the [User Guide](https://software.intel.com/en-us/vtune-amplifier-help). diff --git a/docs/Scientific_Computing/Supported_Applications/Keras.md b/docs/High_Performance_Computing/Software/Software_Catalouge/Keras.md similarity index 94% rename from docs/Scientific_Computing/Supported_Applications/Keras.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/Keras.md index f90c18b66..726ad3739 100644 --- a/docs/Scientific_Computing/Supported_Applications/Keras.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/Keras.md @@ -10,8 +10,8 @@ zendesk_section_id: 360000040076 Keras is a modular and extendable API for building neural networks in Python. Keras is included with TensorFlow. 
Note that there are -[CPU and](../../Scientific_Computing/Supported_Applications/TensorFlow_on_CPUs.md) -[GPU versions](../../Scientific_Computing/Supported_Applications/TensorFlow_on_GPUs.md) of +[CPU and](TensorFlow_on_CPUs.md) +[GPU versions](TensorFlow_on_GPUs.md) of TensorFlow, here we'll use TensorFlow 1.10 for GPUs, which is available as an environment module. @@ -50,7 +50,7 @@ An example of image is test/img49.jpg. display test/img49.jpg ``` -![img49.jpg](../../assets/images/Keras.jpg) +![img49.jpg](Keras.jpg) which shows five, partially overlapping dots. Note that along with the images, a comma separated values (csv) file (e.g. train/train.csv) @@ -125,7 +125,7 @@ same directory as classify.py. This file contains the predictions for the first 50 test images, which will vary for each training but the result will look like: -![someResults.png](../../assets/images/Keras.png) +![someResults.png](Keras.png) (The purple images have no dots.) With each image the number of dots is displayed as well as the value inferred by the model in parentheses. The diff --git a/docs/Scientific_Computing/Supported_Applications/Lambda_Stack.md b/docs/High_Performance_Computing/Software/Software_Catalouge/Lambda_Stack.md similarity index 97% rename from docs/Scientific_Computing/Supported_Applications/Lambda_Stack.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/Lambda_Stack.md index 277aeebbb..92ab16e15 100644 --- a/docs/Scientific_Computing/Supported_Applications/Lambda_Stack.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/Lambda_Stack.md @@ -10,8 +10,7 @@ zendesk_section_id: 360000040076 ## Introduction -[Lambda -Stack](https://lambdalabs.com/lambda-stack-deep-learning-software) is an +[Lambda Stack](https://lambdalabs.com/lambda-stack-deep-learning-software) is an AI software stack from Lambda containing PyTorch, TensorFlow, CUDA, cuDNN and more. 
On NeSI you can run Lambda Stack via [Singularity](https://sylabs.io/guides/3.7/user-guide/) (based on the @@ -21,7 +20,7 @@ have provided some prebuilt Singularity images (under */opt/nesi/containers/lambda-stack/*) or you can build your own (see the guide below). In the following sections, we will show you how to run Lambda Stack in a Slurm job or interactively via -[JupyterLab](../../Scientific_Computing/Interactive_computing_using_Jupyter/Jupyter_on_NeSI.md). +[JupyterLab](Jupyter_on_NeSI.md). You can list the available Lambda Stack version on NeSI by running: @@ -123,7 +122,7 @@ ${SINGULARITY} echo "Hello World" The following steps will create a custom Lambda Stack kernel that can be accessed via NeSI's Jupyter service (based on the instructions at -[Jupyter_on_NeSI](../../Scientific_Computing/Interactive_computing_using_Jupyter/Jupyter_kernels_Tool_assisted_management.md)). +[Jupyter_on_NeSI](Jupyter_kernels_Tool_assisted_management.md)). First, we need to create a kernel definition and wrapper that will launch the Singularity image. Run the following commands on the Mahuika diff --git a/docs/Scientific_Computing/Supported_Applications/MAKER.md b/docs/High_Performance_Computing/Software/Software_Catalouge/MAKER.md similarity index 100% rename from docs/Scientific_Computing/Supported_Applications/MAKER.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/MAKER.md diff --git a/docs/Scientific_Computing/Supported_Applications/MATLAB.md b/docs/High_Performance_Computing/Software/Software_Catalouge/MATLAB.md similarity index 95% rename from docs/Scientific_Computing/Supported_Applications/MATLAB.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/MATLAB.md index c05a3af57..112fee9eb 100644 --- a/docs/Scientific_Computing/Supported_Applications/MATLAB.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/MATLAB.md @@ -80,7 +80,7 @@ utilise more than a 4-8 CPUs this way. !!! 
tip If your code is explicitly parallel at a high level it is preferable to use - [SLURM job arrays](../../Getting_Started/Next_Steps/Parallel_Execution.md) + [SLURM job arrays](Parallel_Execution.md) as there is less computational overhead and the multiple smaller jobs will queue faster and therefore improve your throughput. @@ -176,7 +176,7 @@ CUDA modules and select the appropriate one. For example, for MATLAB R2021a, use `module load CUDA/11.0.2` before launching MATLAB. If you want to know more about how to access the different type of -available GPUs on NeSI, check the [GPU use on NeSI](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/GPU_use_on_NeSI.md) +available GPUs on NeSI, check the [GPU use on NeSI](GPU_use_on_NeSI.md) support page. !!! tip "Support for A100 GPUs" @@ -186,7 +186,7 @@ support page. !!! tip "GPU cost" A GPU device-hour costs more than a core-hour, depending on the type - of GPU. You can find a comparison table in our [What is an allocation?](../../Getting_Started/Accounts-Projects_and_Allocations/What_is_an_allocation.md) + of GPU. You can find a comparison table in our [What is an allocation?](What_is_an_allocation.md) support page. ### GPU Example @@ -222,8 +222,7 @@ specify it with ### mexopencv -mexopencv is [mex wrapper MATLAB wrapper for the openCV -library.](https://github.com/kyamagu/mexopencv) +mexopencv is [mex wrapper MATLAB wrapper for the openCV library.](https://github.com/kyamagu/mexopencv) Some of the internal MATLAB libraries clash with those used by OpenCV, to avoid problems cause by this @@ -245,7 +244,7 @@ Fortunately MATLAB lets programmers extend their scripts with C/C++ or Fortran, which is referred to as [mexing](https://au.mathworks.com/help/matlab/ref/mex.html). -more info about [compiling software on NeSI](../../Scientific_Computing/HPC_Software_Environment/Compiling_software_on_Mahuika.md). +more info about [compiling software on NeSI](Compiling_software_on_Mahuika.md). 
### Writing mex functions diff --git a/docs/Scientific_Computing/Supported_Applications/Miniconda3.md b/docs/High_Performance_Computing/Software/Software_Catalouge/Miniconda3.md similarity index 87% rename from docs/Scientific_Computing/Supported_Applications/Miniconda3.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/Miniconda3.md index 9478c327c..c539e8a92 100644 --- a/docs/Scientific_Computing/Supported_Applications/Miniconda3.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/Miniconda3.md @@ -24,17 +24,9 @@ packages and no curation by the NeSI team. !!! note "Alternatives" - If you want a more reproducible and isolated environment, we - recommend using the [Singularity - containers](../../Scientific_Computing/Supported_Applications/Singularity.md). + recommend using the [Singularity containers](Singularity.md). - If you only need access to Python and standard numerical libraries - (numpy, scipy, matplotlib, etc.), you can use the [Python - environment - module](../../Scientific_Computing/Supported_Applications/Python.md). - -!!! tip "Māui Ancillary Nodes" - On Māui Ancillary Nodes, you can also use the `Anaconda3` module, - which provides a default environment pre-installed with a set of - numerical libraries (numpy, scipy, matplotlib, etc.). + (numpy, scipy, matplotlib, etc.), you can use the [Python environment module](Python.md). ## Module loading and conda environments isolation @@ -80,8 +72,7 @@ Here are the explanations for each line of this snippet: ## Prevent conda from using /home storage Conda environments and the conda packages cache can take a lot of -storage space. By default, Conda use [/home -storage](../../Storage/File_Systems_and_Quotas/NeSI_File_Systems_and_Quotas.md), +storage space. By default, Conda use [`/home` storage](NeSI_File_Systems_and_Quotas.md), which is restricted to 20GB on NeSI. Here are some techniques to avoid running out of space when using Conda. 
@@ -96,8 +87,8 @@ where `` should be replace with your project code. This setting is saved in your `~/.condarc` configuration file. !!! prerequisite Note Your package cache will be subject to the nobackup autodelete process - (details available in the [Nobackup - autodelete](../../Storage/File_Systems_and_Quotas/Automatic_cleaning_of_nobackup_file_system.md) + (details available in the + [Nobackup autodelete](Automatic_cleaning_of_nobackup_file_system.md) support page). The package cache folder is for temporary storage so it is safe if files within the cache folder are removed. diff --git a/docs/Scientific_Computing/Supported_Applications/Molpro.md b/docs/High_Performance_Computing/Software/Software_Catalouge/Molpro.md similarity index 96% rename from docs/Scientific_Computing/Supported_Applications/Molpro.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/Molpro.md index b1e371181..adfb70f2d 100644 --- a/docs/Scientific_Computing/Supported_Applications/Molpro.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/Molpro.md @@ -35,8 +35,7 @@ permit cluster use. If you are unsure whether you are eligible to access Molpro or any particular version of it on a NeSI cluster, please speak to your supervisor, or the person with responsibility for your institution or -department's software procurement. Alternatively, you can contact [our -support desk](mailto:support@.nesi.org.nz). +department's software procurement. Alternatively, you can {% include "partials/support_request.html" %}. ### Licence tokens @@ -72,8 +71,7 @@ you will need to update the key file manually from time to time. If you are provided with a Molpro licence key file but cannot read the file or access the directory in which it resides due to UNIX -permissions, please email [the NeSI support -desk](mailto:support@nesi.org.nz). +permissions, {% include "partials/support_request.html" %}. 
## Example Slurm script diff --git a/docs/Scientific_Computing/Supported_Applications/NWChem.md b/docs/High_Performance_Computing/Software/Software_Catalouge/NWChem.md similarity index 96% rename from docs/Scientific_Computing/Supported_Applications/NWChem.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/NWChem.md index 4dc8c2132..66bde7677 100644 --- a/docs/Scientific_Computing/Supported_Applications/NWChem.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/NWChem.md @@ -27,8 +27,7 @@ relativity. The NWChem home page is at . NWChem is available to anyone as open source software at no cost under -the terms of the [Educational Community Licence, version -2.0](http://opensource.org/licenses/ecl2.php). +the terms of the [Educational Community Licence, version 2.0](http://opensource.org/licenses/ecl2.php). ## Example Slurm script diff --git a/docs/Scientific_Computing/Supported_Applications/ORCA.md b/docs/High_Performance_Computing/Software/Software_Catalouge/ORCA.md similarity index 96% rename from docs/Scientific_Computing/Supported_Applications/ORCA.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/ORCA.md index d4c2fa1e7..7ed1deac6 100644 --- a/docs/Scientific_Computing/Supported_Applications/ORCA.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/ORCA.md @@ -30,8 +30,7 @@ at [https://orcaforum.kofo.mpg.de](https://orcaforum.kofo.mpg.de) ## Licensing requirements ORCA is released as precompiled binaries at no cost, pursuant to a -closed-source licence.  Users are advised that the terms of [the ORCA -licence](https://orcaforum.kofo.mpg.de/app.php/dlext/?view=detail&df_id=41) +closed-source licence.  Users are advised that the terms of [the ORCA licence](https://orcaforum.kofo.mpg.de/app.php/dlext/?view=detail&df_id=41) allow its use in the course of academic research only, and that each research group is expected to register with the ORCA developers. 
If you have any questions regarding your eligibility to access ORCA or any @@ -98,4 +97,4 @@ To restart from an existing GBW file, you should do the following: For more information about restarting from an older GBW file, including how to restart from GBW files produced using earlier versions of ORCA, -please consult the ORCA manual. \ No newline at end of file +please consult the ORCA manual. diff --git a/docs/Scientific_Computing/Supported_Applications/OpenFOAM.md b/docs/High_Performance_Computing/Software/Software_Catalouge/OpenFOAM.md similarity index 98% rename from docs/Scientific_Computing/Supported_Applications/OpenFOAM.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/OpenFOAM.md index 95b4f21ea..33a4c370f 100644 --- a/docs/Scientific_Computing/Supported_Applications/OpenFOAM.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/OpenFOAM.md @@ -70,7 +70,7 @@ reconstructPar -latestTime #Collect OpenFOAM generates a large number of files during run-time. In addition to the I/O load there is also the danger of using up available -[inodes](../../Storage/File_Systems_and_Quotas/NeSI_File_Systems_and_Quotas.md). +[inodes](NeSI_File_Systems_and_Quotas.md). **Filesystems in excess of their allocation will cause any job trying to write there to crash.** @@ -157,7 +157,7 @@ Generally your custom solver will be stored in a git repo. Make sure you have the same version as the OpenFOAM you plan to use, this may require changing branch. 
-![git\_releases.png](../../assets/images/OpenFOAM.png) +![git\_releases.png](OpenFOAM.png) #### Release diff --git a/docs/Scientific_Computing/Supported_Applications/OpenSees.md b/docs/High_Performance_Computing/Software/Software_Catalouge/OpenSees.md similarity index 100% rename from docs/Scientific_Computing/Supported_Applications/OpenSees.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/OpenSees.md diff --git a/docs/Scientific_Computing/Supported_Applications/ParaView.md b/docs/High_Performance_Computing/Software/Software_Catalouge/ParaView.md similarity index 94% rename from docs/Scientific_Computing/Supported_Applications/ParaView.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/ParaView.md index f104900c8..f2a8baacd 100644 --- a/docs/Scientific_Computing/Supported_Applications/ParaView.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/ParaView.md @@ -58,13 +58,13 @@ setup: - Launch the ParaView GUI on your local machine and go to "File > Connect" or click - the ![paraview.png](../../assets/images/ParaView.png) button. + the ![paraview.png](ParaView.png) button. - Click on "Add Server", choose server type "Client / Server", host "localhost" (as we will be using the SSH tunnel), and port "11111", then click on "Configure" . 
-- ![paraview.png](../../assets/images/ParaView_0.png) +- ![paraview.png](ParaView_0.png) - Select the new server and click on "Connect" diff --git a/docs/Scientific_Computing/Supported_Applications/Python.md b/docs/High_Performance_Computing/Software/Software_Catalouge/Python.md similarity index 100% rename from docs/Scientific_Computing/Supported_Applications/Python.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/Python.md diff --git a/docs/Scientific_Computing/Supported_Applications/R.md b/docs/High_Performance_Computing/Software/Software_Catalouge/R.md similarity index 98% rename from docs/Scientific_Computing/Supported_Applications/R.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/R.md index 345b3a600..a086c6074 100644 --- a/docs/Scientific_Computing/Supported_Applications/R.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/R.md @@ -27,8 +27,7 @@ to participation in that activity. ## Licence -R is made available at no cost under the terms of version 2 of the [GNU -General Public Licence](https://www.r-project.org/COPYING). +R is made available at no cost under the terms of version 2 of the [GNU General Public Licence](https://www.r-project.org/COPYING). ## NeSI Customisations @@ -233,8 +232,7 @@ png(filename="plot.png") This statement instructs R to export all future graphical output to a PNG file named `plot.png`, until a different device driver is selected. -For more information about graphical device drivers, please see [the R -documentation](https://cran.r-project.org/doc/manuals/R-intro.html#Device-drivers). +For more information about graphical device drivers, please see [the R documentation](https://cran.r-project.org/doc/manuals/R-intro.html#Device-drivers). 
## Dealing with packages diff --git a/docs/Scientific_Computing/Supported_Applications/RAxML.md b/docs/High_Performance_Computing/Software/Software_Catalouge/RAxML.md similarity index 93% rename from docs/Scientific_Computing/Supported_Applications/RAxML.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/RAxML.md index bb2a8ceb1..853e7f907 100644 --- a/docs/Scientific_Computing/Supported_Applications/RAxML.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/RAxML.md @@ -50,8 +50,7 @@ srun raxmlHPC-PTHREADS-AVX -T $SLURM_CPUS_PER_TASK -m GTRCAT -s aln.fasta -n tre ## Documentation -`raxmlHPC-AVX -help` and the [RAxML -manual](https://github.com/stamatak/standard-RAxML/tree/master/manual). +`raxmlHPC-AVX -help` and the [RAxML manual](https://github.com/stamatak/standard-RAxML/tree/master/manual). ## Parallel Versions @@ -86,4 +85,4 @@ The "AVX" executables use the AVX SIMD instructions, while the "SSE3" executables use the older and slower Intel SIMD (Single Instruction Multiple Data) instructions, which can be anywhere from 10% to 30% slower. There should be no need to use an SSE3 executable, unless you -find that an AVX executable doesn't work for any reason. \ No newline at end of file +find that an AVX executable doesn't work for any reason. 
diff --git a/docs/Scientific_Computing/Supported_Applications/Relion.md b/docs/High_Performance_Computing/Software/Software_Catalouge/Relion.md similarity index 100% rename from docs/Scientific_Computing/Supported_Applications/Relion.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/Relion.md diff --git a/docs/Scientific_Computing/Supported_Applications/Singularity.md b/docs/High_Performance_Computing/Software/Software_Catalouge/Singularity.md similarity index 88% rename from docs/Scientific_Computing/Supported_Applications/Singularity.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/Singularity.md index 01c043c95..8b55f8b68 100644 --- a/docs/Scientific_Computing/Supported_Applications/Singularity.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/Singularity.md @@ -34,15 +34,13 @@ enabling simple portability and supporting reproducibility of scientific results. Unlike a virtual machine, a running *container instance* shares the host -operating system's kernel, relying heavily on [Linux -namespaces](https://en.wikipedia.org/wiki/Linux_namespaces) (kernel +operating system's kernel, relying heavily on [Linux namespaces](https://en.wikipedia.org/wiki/Linux_namespaces) (kernel partitioning and isolation features for previously global Linux system resources). Resources and data outside of the container can be mapped into the container to achieve integration, for example, Singularity makes it simple to expose GPUs to the container and to access input/output files & directories mounted on the host (such as those on -[shared -filesystems](../../Storage/File_Systems_and_Quotas/NeSI_File_Systems_and_Quotas.md)). +[shared filesystems](NeSI_File_Systems_and_Quotas.md)). Contrary to other containerisation tools such as Docker, Singularity removes the need for elevated privileges ("root access", e.g., via the @@ -65,21 +63,18 @@ supported Singularity version. 
## Building a new container For more general information on building containers please see the -[Singularity -Documentation](https://sylabs.io/guides/3.0/user-guide/build_a_container.html).  +[Singularity Documentation](https://sylabs.io/guides/3.0/user-guide/build_a_container.html).  As building a container requires root privileges in general, this cannot -be done directly on any NeSI nodes. You will need to copy a [Singularity -Image Format (SIF)](https://github.com/sylabs/sif) to the cluster from +be done directly on any NeSI nodes. You will need to copy a [Singularity Image Format (SIF)](https://github.com/sylabs/sif) to the cluster from on a local Linux machine or the cloud. Alternatively you can make use of a remote build service (currently only the [syslabs](https://cloud.sylabs.io/builder) builder is available). However, it is possible to build *some* containers directly on NeSI, using the Milan compute nodes and [Apptainer](https://apptainer.org/). -Specific instructions are provided in a dedicated support page [Build an -Apptainer container on a Milan compute -node](../../Scientific_Computing/HPC_Software_Environment/Build_an_Apptainer_container_on_a_Milan_compute_node.md). +Specific instructions are provided in a dedicated support page +[Build an Apptainer container on a Milan compute node](Build_an_Apptainer_container_on_a_Milan_compute_node.md). Please note **this may fail** to build some containers and encourage you to contact us at if you encounter an issue. 
@@ -127,8 +122,8 @@ filesystem by: - Copying the image file from your local computer with basic file transfer tools - please refer to our documentation on - [Moving files to/from the cluster](../../Getting_Started/Next_Steps/Moving_files_to_and_from_the_cluster.md) - and [Data Transfer using Globus](../../Storage/Data_Transfer_Services/Data_Transfer_using_Globus_V5.md)(if you have a large container) for details + [Moving files to/from the cluster](Moving_files_to_and_from_the_cluster.md) + and [Data Transfer using Globus](Data_Transfer_using_Globus_V5.md)(if you have a large container) for details - Downloading the container from an online repository To download a container, use commands such as @@ -156,8 +151,7 @@ singularity pull ubuntu.sif docker://ubuntu ``` Access to private containers that needs registration is also supported, -as detailed in the [Singularity -documentation](https://sylabs.io/guides/master/user-guide/singularity_and_docker.html). +as detailed in the [Singularity documentation](https://sylabs.io/guides/master/user-guide/singularity_and_docker.html). If you are building your own containers, you can also use Docker containers as basis for a Singularity image, by specifying it in the @@ -171,11 +165,9 @@ From: ubuntu:latest # intallation instructions go here ``` -## Running a container on Mahuika or Māui Ancil +## Running a container on Mahuika -Singularity is not currently available on the Māui XC50 supercomputer. - -Singularity containers can easily be run on Mahuika or Māui Ancil once +Singularity containers can easily be run on Mahuika once they are uploaded to a NeSI filesystem. 
Load the Singularity module first by running the command @@ -255,8 +247,7 @@ export SINGULARITY_BIND="/nesi/project//inputdata:/var/inputdat ### Accessing a GPU -If your Slurm job has requested access to an NVIDIA GPU (see [GPU use on -NeSI](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/GPU_use_on_NeSI.md) +If your Slurm job has requested access to an NVIDIA GPU (see [GPU use on NeSI](GPU_use_on_NeSI.md) to learn how to request a GPU), a singularity container can transparently access it using the `--nv` flag: @@ -308,8 +299,7 @@ Note that the output directory "outputdata" in the HPC file system is automatically suffixed with the Slurm job ID in the above example, but it is always available under the same path "/var/outputdata" from within the container. This makes it easy to run multiple containers in separate -Slurm jobs. Please refer to our [SLURM: Reference -Sheet](../../Getting_Started/Cheat_Sheets/Slurm-Reference_Sheet.md) for +Slurm jobs. Please refer to our [SLURM: Reference Sheet](Slurm-Reference_Sheet.md) for further details on using Slurm. ## Tips & Tricks diff --git a/docs/Scientific_Computing/Supported_Applications/Supernova.md b/docs/High_Performance_Computing/Software/Software_Catalouge/Supernova.md similarity index 96% rename from docs/Scientific_Computing/Supported_Applications/Supernova.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/Supernova.md index 39a844a3a..9682462c6 100644 --- a/docs/Scientific_Computing/Supported_Applications/Supernova.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/Supernova.md @@ -128,7 +128,7 @@ takes the following general form `ssh -L :: -N ` - <d> An integer -- <server> see: [Standard Terminal Setup](../../Scientific_Computing/Terminal_Setup/Standard_Terminal_Setup.md) +- <server> see: [Standard Terminal Setup](Standard_Terminal_Setup.md) When details are added to the general form from the specifics in the snippet above, the following could be run.. 
@@ -149,7 +149,7 @@ take <d> and <auth> from the code snippet above.. http://localhost:9999/?auth=Bx2ccMZmJxaIfRNBOZ_XO_mQd1njNGL3rZry_eNI1yU ``` -![Screen\_Shot\_2019-01-28\_at\_2.17.29\_PM.png](../../assets/images/Supernova.png) +![Screen\_Shot\_2019-01-28\_at\_2.17.29\_PM.png](Supernova.png) ## Things to watch out for diff --git a/docs/Scientific_Computing/Supported_Applications/Synda.md b/docs/High_Performance_Computing/Software/Software_Catalouge/Synda.md similarity index 100% rename from docs/Scientific_Computing/Supported_Applications/Synda.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/Synda.md diff --git a/docs/Scientific_Computing/Supported_Applications/TensorFlow_on_CPUs.md b/docs/High_Performance_Computing/Software/Software_Catalouge/TensorFlow_on_CPUs.md similarity index 86% rename from docs/Scientific_Computing/Supported_Applications/TensorFlow_on_CPUs.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/TensorFlow_on_CPUs.md index 66d1dbdaf..3f07ad7f8 100644 --- a/docs/Scientific_Computing/Supported_Applications/TensorFlow_on_CPUs.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/TensorFlow_on_CPUs.md @@ -15,7 +15,7 @@ zendesk_section_id: 360000040076 TensorFlow is a popular software library for machine learning applications, see our -[TensorFlow](../../Scientific_Computing/Supported_Applications/TensorFlow_on_GPUs.md) +[TensorFlow](TensorFlow_on_GPUs.md) article for further information. It is often used with GPUs, as runtimes of the computationally demanding training and inference steps are often shorter compared to multicore CPUs. However, running TensorFlow on CPUs @@ -42,8 +42,7 @@ application - they are only intended as an example. ## Choosing the right Python package It is very important to choose the right TensorFlow package for optimal -performance. 
Intel provide [optimised TensorFlow -packages](https://software.intel.com/en-us/articles/intel-optimization-for-tensorflow-installation-guide) +performance. Intel provide [optimised TensorFlow packages](https://software.intel.com/en-us/articles/intel-optimization-for-tensorflow-installation-guide) with [Intel oneDNN](https://github.com/oneapi-src/oneDNN) (previously called MKL-DNN) support for the conda package manager. It is not recommended to build your own package, unless you need a specific @@ -59,14 +58,6 @@ conda create -p /nesi/project//conda_envs/tf_cpu tensorflow-mkl source activate /nesi/project//conda_envs/tf_cpu ``` -To install TensorFlow on Māui Ancil, run - -``` sh -module load Anaconda3 -conda create -p /nesi/project//conda_envs/tf_cpu tensorflow-mkl -source activate /nesi/project//conda_envs/tf_cpu -``` - Conda will create a new environment in your project directory with an optimised CPU version of TensorFlow. You can choose a specific version as well using the syntax "tensorflow-mkl==x.y.z". @@ -112,8 +103,7 @@ srun python my_tensorflow_program.py ``` If you are unsure about setting up the memory and runtime parameters, -have a look at our article [Ascertaining job -dimensions](../../Getting_Started/Next_Steps/Job_Scaling_Ascertaining_job_dimensions.md). +have a look at our article [Ascertaining job dimensions](Job_Scaling_Ascertaining_job_dimensions.md). Please also read the section on operator parallelisation below before you choose a number of CPUs. @@ -121,10 +111,8 @@ Environment variables "KMP\_BLOCKTIME" and "KMP\_AFFINITY" configure threading behaviour of the Intel oneDNN library. While these settings should work well for a lot of applications, it is worth trying out different setups (e.g., longer blocktimes) and compare runtimes. 
Please -see our article on [Thread Placement and Thread -Affinity](../../Scientific_Computing/HPC_Software_Environment/Thread_Placement_and_Thread_Affinity.md) -as well as this [Intel -article](https://software.intel.com/en-us/articles/tensorflow-optimizations-on-modern-intel-architecture) +see our article on [Thread Placement and Thread Affinity](Thread_Placement_and_Thread_Affinity.md) +as well as this [Intel article](https://software.intel.com/en-us/articles/tensorflow-optimizations-on-modern-intel-architecture) for further information and tips for improving peformance on CPUs. ## Setting up operator parallelisation in TensorFlow 1.x diff --git a/docs/Scientific_Computing/Supported_Applications/TensorFlow_on_GPUs.md b/docs/High_Performance_Computing/Software/Software_Catalouge/TensorFlow_on_GPUs.md similarity index 88% rename from docs/Scientific_Computing/Supported_Applications/TensorFlow_on_GPUs.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/TensorFlow_on_GPUs.md index d4a43e59c..8f88bd738 100644 --- a/docs/Scientific_Computing/Supported_Applications/TensorFlow_on_GPUs.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/TensorFlow_on_GPUs.md @@ -25,19 +25,16 @@ running TensorFlow with GPU support. !!! tip "See also" - To request GPU resources using `--gpus-per-node` option of Slurm, - see the [GPU use on - NeSI](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/GPU_use_on_NeSI.md) + see the [GPU use on NeSI](GPU_use_on_NeSI.md) documentation page. - To run TensorFlow on CPUs instead, have a look at our article - [TensorFlow on - CPUs](TensorFlow_on_CPUs.md) + [TensorFlow on CPUs](TensorFlow_on_CPUs.md) for tips on how to configure TensorFlow and Slurm for optimal performance. 
## Use NeSI modules -TensorFlow is available on Mahuika as an [environment -module](../../Getting_Started/Next_Steps/The_HPC_environment.md) +TensorFlow is available on Mahuika as an [environment module](The_HPC_environment.md) ``` sh module load TensorFlow/2.4.1-gimkl-2020a-Python-3.8.2 @@ -132,8 +129,7 @@ pip install tensorflow==2.5.0 ``` To use TensorFlow on GPUs, you also need to load cuDNN/CUDA modules with -the proper versions. See the official documentation about [tested -configurations](https://www.tensorflow.org/install/source#gpu) for +the proper versions. See the official documentation about [tested configurations](https://www.tensorflow.org/install/source#gpu) for compatibilities. For example, Tensorflow 2.5.0 requires you to load the `cuDNN/8.1.1.33-CUDA-11.2.0` module: @@ -151,17 +147,6 @@ module spider cuDNN Please contact us at [support@nesi.org.nz](mailto:support@nesi.org.nz) if you need a version not available on the platform. -!!! note "Māui Ancillary Nodes" - - Load the Anaconda3 module instead of Miniconda3 to manipulate - conda environments: - ``` sl - module load Anaconda3/2020.02-GCC-7.1.0 - ``` - - Use `module avail` to list available versions of modules, e.g. - ``` sl - module avail cuDNN - ``` - Additionnally, depending your version of TensorFlow, you may need to take into consideration the following: @@ -190,24 +175,21 @@ take into consideration the following: You can use containers to run your application on the NeSI platform. We provide support for -[Singularity](../../Scientific_Computing/Supported_Applications/Singularity.md) +[Singularity](Singularity.md) containers, that can be run by users without requiring additional privileges. Note that Docker containers can be converted into Singularity containers. -For TensorFlow, we recommend using the [official container provided by -NVIDIA](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow). 
+For TensorFlow, we recommend using the [official container provided by NVIDIA](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow). More information about using Singularity with GPU enabled containers is -available on the [NVIDIA GPU -Containers](../../Scientific_Computing/HPC_Software_Environment/NVIDIA_GPU_Containers.md) +available on the [NVIDIA GPU Containers](NVIDIA_GPU_Containers.md) support page. ## Specific versions for A100 Here are the recommended options to run TensorFlow on the A100 GPUs: -- If you use TensorFlow 1, use the TF1 [container provided by - NVIDIA](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow), +- If you use TensorFlow 1, use the TF1 [container provided by NVIDIA](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow), which comes with a version of TensorFlow 1.15 compiled specifically to support the A100 GPUs (Ampere architecture). Other official Python packages won't support the A100, triggering various crashes diff --git a/docs/Scientific_Computing/Supported_Applications/Trinity.md b/docs/High_Performance_Computing/Software/Software_Catalouge/Trinity.md similarity index 97% rename from docs/Scientific_Computing/Supported_Applications/Trinity.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/Trinity.md index adb53566d..ed088b3c8 100644 --- a/docs/Scientific_Computing/Supported_Applications/Trinity.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/Trinity.md @@ -15,9 +15,7 @@ zendesk_section_id: 360000040076 {% include "partials/app_header.html" %} [//]: <> (APPS PAGE BOILERPLATE END) -Trinity, developed at the [Broad -Institute](http://www.broadinstitute.org/) and the [Hebrew University of -Jerusalem](http://www.cs.huji.ac.il/), performs _de novo_ reconstruction +Trinity, developed at the [Broad Institute](http://www.broadinstitute.org/) and the [Hebrew University of Jerusalem](http://www.cs.huji.ac.il/), performs _de novo_ reconstruction of transcriptomes from 
RNA-seq data. It combines three independent software modules: Inchworm, Chrysalis, and Butterfly, applied sequentially to process large volumes of RNA-seq reads. Trinity @@ -43,8 +41,7 @@ data, compared to running both phases in one multithreaded job (see the ### File system considerations -You should run Trinity within your [nobackup project -directory](../../Storage/File_Systems_and_Quotas/NeSI_File_Systems_and_Quotas.md), +You should run Trinity within your [nobackup project directory](NeSI_File_Systems_and_Quotas.md), which has no limit on disk space usage but does have a file count quota. Trinity creates a large number of files, particularly in the "read\_partitions" directory, thus it is important that you {% include "partials/support_request.html" %} before running Trinity on NeSI, as we diff --git a/docs/Scientific_Computing/Supported_Applications/TurboVNC.md b/docs/High_Performance_Computing/Software/Software_Catalouge/TurboVNC.md similarity index 98% rename from docs/Scientific_Computing/Supported_Applications/TurboVNC.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/TurboVNC.md index a61115044..15e8c1024 100644 --- a/docs/Scientific_Computing/Supported_Applications/TurboVNC.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/TurboVNC.md @@ -186,7 +186,7 @@ will be 5902; and so on. As an alternative to steps 1 and 2, if using MobaXTerm in Windows, set up and then start port forwarding connections to look like this: - ![2020-02-10\_TurboVNC\_MobaXTerm\_ssh\_tunnel\_setup.png](../../assets/images/TurboVNC.png) + ![2020-02-10\_TurboVNC\_MobaXTerm\_ssh\_tunnel\_setup.png](TurboVNC.png) - The tunnel through the lander node must be started before the tunnel through localhost can be started. @@ -262,4 +262,4 @@ display number is 2. 
If you have several Xvnc processes open on that host, you can kill those you don't want to keep by means of the `kill` command, or alternatively -by using the `vncserver` command described above. \ No newline at end of file +by using the `vncserver` command described above. diff --git a/docs/Scientific_Computing/Supported_Applications/VASP.md b/docs/High_Performance_Computing/Software/Software_Catalouge/VASP.md similarity index 95% rename from docs/Scientific_Computing/Supported_Applications/VASP.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/VASP.md index b1ec19630..961517340 100644 --- a/docs/Scientific_Computing/Supported_Applications/VASP.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/VASP.md @@ -144,7 +144,7 @@ team {% include "partials/support_request.html" %}. ### VASP runs faster on Milan nodes -[Milan compute nodes](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Milan_Compute_Nodes.md) +[Milan compute nodes](Milan_Compute_Nodes.md) are not only our most powerful compute nodes, but often have shorter queues! These nodes are still opt-in at the moment, meaning you need to specify `--partition=milan` in your Slurm script, which we strongly @@ -170,11 +170,9 @@ parallelisation the FFTs (controlled with optimise your VASP job parallelisation in these ways, see the following links: -[Basic -parallisation](https://www.vasp.at/wiki/index.php/Category:Parallelization) +[Basic parallisation](https://www.vasp.at/wiki/index.php/Category:Parallelization) -[Optimising the -parallelisation](https://www.vasp.at/wiki/index.php/Optimizing_the_parallelization#Optimizing_the_parallelization) +[Optimising the parallelisation](https://www.vasp.at/wiki/index.php/Optimizing_the_parallelization#Optimizing_the_parallelization) ### Our VASP5 modules do not support OpenMP @@ -241,15 +239,12 @@ None of them affect VASP unless specified in your `INCAR` file. 
#### VTST -The [VASP Transition State -Tools](http://theory.cm.utexas.edu/vtsttools/), a third-party package +The [VASP Transition State Tools](http://theory.cm.utexas.edu/vtsttools/), a third-party package for finding transition states and computing rate constants. #### BEEF -Our recent non-CUDA VASP executables all include BEEF ([Bayesian Error -Estimation -Functionals](http://suncat.stanford.edu/#/theory/facility/software/functional/)). +Our recent non-CUDA VASP executables all include BEEF ([Bayesian Error Estimation Functionals](http://suncat.stanford.edu/#/theory/facility/software/functional/)). #### VASP-Sol @@ -288,8 +283,7 @@ As per the VASP documentation, "LIBXC2can be used only if the functional specified with[LIBXC1](https://www.vasp.at/wiki/index.php/LIBXC1 "LIBXC1") corresponds to only exchange and not to exchange and correlation." For -more information on correct usage of LIBXC please see[VASP's -documentation](https://www.vasp.at/wiki/index.php/LIBXC1) on this. +more information on correct usage of LIBXC please see[VASP's documentation](https://www.vasp.at/wiki/index.php/LIBXC1) on this. ### Which VASP executable should I use? @@ -333,9 +327,9 @@ production you should take into account performance and compute unit cost. General information about using GPUs on NeSI can be found -[here](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/GPU_use_on_NeSI.md) +[here](GPU_use_on_NeSI.md) and details about the available GPUs on NeSI -[here](../../Scientific_Computing/The_NeSI_High_Performance_Computers/Available_GPUs_on_NeSI.md). +[here](Available_GPUs_on_NeSI.md). 
Here are some additional notes specific to running VASP on GPUs on NeSI: diff --git a/docs/Scientific_Computing/Supported_Applications/VTune.md b/docs/High_Performance_Computing/Software/Software_Catalouge/VTune.md similarity index 96% rename from docs/Scientific_Computing/Supported_Applications/VTune.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/VTune.md index e87ffe73b..b09fdb3f3 100644 --- a/docs/Scientific_Computing/Supported_Applications/VTune.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/VTune.md @@ -22,8 +22,7 @@ execution time of a program is spent. This is known as profiling. It is good practice to profile a code before attempting to modify the code to improve its performance. VTune collects key profiling data and presents them in an intuitive way.  Another tool that provides similar -information is [ARM -MAP](../../Scientific_Computing/Profiling_and_Debugging/Profiler-ARM_MAP.md). +information is [ARM MAP](Profiler-ARM_MAP.md). ## How to use VTune diff --git a/docs/Scientific_Computing/Supported_Applications/VirSorter.md b/docs/High_Performance_Computing/Software/Software_Catalouge/VirSorter.md similarity index 100% rename from docs/Scientific_Computing/Supported_Applications/VirSorter.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/VirSorter.md diff --git a/docs/Scientific_Computing/Supported_Applications/WRF.md b/docs/High_Performance_Computing/Software/Software_Catalouge/WRF.md similarity index 97% rename from docs/Scientific_Computing/Supported_Applications/WRF.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/WRF.md index bc08c9db5..bb17598f7 100644 --- a/docs/Scientific_Computing/Supported_Applications/WRF.md +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/WRF.md @@ -52,7 +52,7 @@ export J="-j 12" !!! Note Please select option 34 (dmpar gfortran/gccGNU) when asked `Please select from among the following Linux x86_64 options`. 
-It will take some time for WRF to compile (~30 minutes). You may wish to run this from a [tmux](https://docs.nesi.org.nz/Getting_Started/Cheat_Sheets/tmux-Reference_sheet/) session to minimise the risk of disconnecting. Check the `wrf_build.log` file for any error or warning messages when finished. +It will take some time for WRF to compile (~30 minutes). You may wish to run this from a [tmux](tmux-Reference_sheet.md) session to minimise the risk of disconnecting. Check the `wrf_build.log` file for any error or warning messages when finished. ### Running WRF on Mahuika diff --git a/docs/High_Performance_Computing/Software/Software_Catalouge/index.md b/docs/High_Performance_Computing/Software/Software_Catalouge/index.md new file mode 100644 index 000000000..9ca87cb7e --- /dev/null +++ b/docs/High_Performance_Computing/Software/Software_Catalouge/index.md @@ -0,0 +1,13 @@ +--- +title: Supported Applications +template: supported_apps.html +hide: + - toc +--- + + + +For more information on environment-modules see [Finding Software](Finding_Software.md). + +On **Mahuika**, software packages are provided using 'Lmod' an implementation of Environment Modules with [additional features](https://lmod.readthedocs.io/en/latest/010_user.html). +A list of available software can be obtained with the `module spider` command. 
diff --git a/docs/Scientific_Computing/Supported_Applications/ipyrad.md b/docs/High_Performance_Computing/Software/Software_Catalouge/ipyrad.md similarity index 100% rename from docs/Scientific_Computing/Supported_Applications/ipyrad.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/ipyrad.md diff --git a/docs/Scientific_Computing/Supported_Applications/ont-guppy-gpu.md b/docs/High_Performance_Computing/Software/Software_Catalouge/ont-guppy-gpu.md similarity index 100% rename from docs/Scientific_Computing/Supported_Applications/ont-guppy-gpu.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/ont-guppy-gpu.md diff --git a/docs/Scientific_Computing/Supported_Applications/snpEff.md b/docs/High_Performance_Computing/Software/Software_Catalouge/snpEff.md similarity index 100% rename from docs/Scientific_Computing/Supported_Applications/snpEff.md rename to docs/High_Performance_Computing/Software/Software_Catalouge/snpEff.md diff --git a/docs/High_Performance_Computing/Software/index.md b/docs/High_Performance_Computing/Software/index.md new file mode 100644 index 000000000..33682179e --- /dev/null +++ b/docs/High_Performance_Computing/Software/index.md @@ -0,0 +1,120 @@ +--- +created_at: '2019-08-16T01:22:03Z' +title: Software +tags: + - lmod + - modules + - containers + - environment +description: Details the different methods available to access software on the NeSI cluster. +--- + +{{description}} + +## Environment Modules + +Environment Modules are a convenient way to access already installed applications on the cluster, +a list of which can be found in the [Software Catalouge](Software_Catalouge/index.md). + +'loading' a module prepares the environment you need to run an application. + +### Module Command + +This is all done using the `module` command. 
+
+| Command | Description |
+|-------------------------------|---------------------------------------------------------------|
+| `module spider` | Lists all available modules. |
+| `module spider [module name]` | Searches available modules for [module name] |
+| `module load [module name]` | Loads [module name], and its dependencies |
+| `module unload [module name]` | Unloads [module name], and its dependencies |
+| `module purge` | Unloads all modules. |
+| `module list [module name]` | Lists currently loaded modules. |
+| `module show [module name]` | Shows information about [module name] |
+
+
+For a full list of module commands run `man module` or visit the
+[lmod documentation](https://lmod.readthedocs.io/en/latest/010_user.html).
+
+!!! tip
+    You can create your own modules. See
+    [Installing Third Party applications](Installing_Third_Party_applications.md).
+
+### Version Management
+
+Much of the software installed on the NeSI cluster has multiple
+versions available as shown in the
+[Software Catalogue](Software_Catalouge/index.md)
+or by using the `module spider` command.
+
+If only the application name is given a default version will be chosen,
+generally the most recent one. However, it is good practice to load
+modules using the specific version so you can ensure consistent
+execution of your job even after the default version has been changed.
+
+If you need a specific version of software, feel free to ask support and
+we may install it. For example:
+
+``` sh
+module load ANSYS
+```
+
+Will load the default version of ANSYS, in this case {{applications.ANSYS.default}}, however
+this may change.
+
+``` sh
+module load ANSYS/18.1
+```
+
+Will always load that version specifically.
+
+## Installing Your Own Software
+
+You are welcome to install software yourself, either in your `home` or `project` directories. 
+See [Building Software](Building_Software/index.md) for more info.
+
+If you require assistance, don't hesitate to {% include "partials/support_request.html" %}.
+
+## Containers
+
+You may run OCI containers using Apptainer; see [Apptainer](Singularity.md) for more details.
+
+## Installation Request
+
+To request that we install a scientific application (either a new
+application, or a new version of an already installed application),
+please {% include "partials/support_request.html" %}.
+
+??? info "Information to Include in Request"
+    - What is the name and version number of the software you would like
+      to be installed? If you wish to use a copy from a version control
+      repository, what tag or release do you need? Please be aware that we
+      usually require a stable release version of a piece of software
+      before we will install it for all users.
+    - Why would you like us to install this software package?
+    - What is the web site or home web page of the package? If you don't
+      know this information or the package doesn't have a web site, who is
+      the author or lead developer? In some cases, there exist two or more
+      packages with the same or very similar names. If we know the web
+      site we can be sure that we are installing the same package that you
+      are requesting.
+    - How is the package installed? For example, compiled from source,
+      precompiled binary, or installed as a Python, Perl, R, etc. library?
+    - What dependencies, if any, does the package require? Please be aware
+      that the exact dependency list may depend on the particular use
+      cases you have in mind (like the ability to read and write a
+      specific file format).
+    - Have you (or another member of your project team) tried to install
+      it yourself on a NeSI system? If so, were you successful?
+    - If you or your institution doesn't own the copyright in the
+      software, under what licence are you permitted to use it? Does that
+      licence allow you to install and run it on a NeSI system? 
(Hint: + Most free, open-source software licences will allow you to do this.) + - Who else do you know of who wants to use that software on a NeSI + system? Please provide their names, institutional affiliations, and + NeSI project codes (if you know them). + - What tests do you have that will allow us to verify that the + software is performing correctly and at an acceptable speed? + +Our team will review your request and will make a decision as to whether +we will install the application and make it generally available. diff --git a/docs/High_Performance_Computing/index.md b/docs/High_Performance_Computing/index.md new file mode 100644 index 000000000..31f232622 --- /dev/null +++ b/docs/High_Performance_Computing/index.md @@ -0,0 +1,72 @@ +--- +created_at: 2025-02-20 +title: High Performance Computing +hide: + - toc +--- + +Technical Documentation for NeSI's High Performance Computing Cluster, Mahuika. + +
+ +- ![](../assets/icons/material/account-details.svg) [__Mahuika Cluster__](Mahuika_Cluster/) + + --- + Learn about our High Performance Computer _Mahuika_, and how to access. + + - [How to Get Access](Mahuika_Cluster/index.md) + + +- ![](../assets/icons/material/compass.svg) [__Data Management__](Data_Management/) + + --- + + Learn about the [NeSI filesystem](Data_Management/File_Systems_and_Quotas/index.md), and how to [transfer files](Data_Management/Moving_files_to_and_from_the_cluster.md). + +- ![](../assets/icons/material/cog-transfer-outline.svg) [__Software__](Software/) + + --- + + How to [load software](Software/index.md) + + Browse [installed software](Software/Software_Catalouge/index.md) + + How to [build software on Mahuika](Software/Building_Software/index.md). + +- ![](../assets/icons/material/cog-transfer-outline.svg) [__Batch Computing__](Batch_Computing/) + + --- + + Learn about our scheduler [Slurm](), and how to make the most of your allocation. + +
+ +## Popular + +
+ +- [__NeSI Accounts__](../Access/Accounts-Projects_and_Allocations/Applying_to_join_an_existing_NeSI_project.md) + + some description + +- [__NeSI Accounts__](../Access/Accounts-Projects_and_Allocations/Applying_to_join_an_existing_NeSI_project.md) + + some description + + +- [__NeSI Accounts__](../Access/Accounts-Projects_and_Allocations/Applying_to_join_an_existing_NeSI_project.md) + + some description + +
+ +## Announcements + +{% for file in files %} +{% if file.is_documentation_page() and file.src_uri.split("/")[0] == "Announcements" %} + +[{{file.name}}]({{file.src_uri}}) + +{% endif %} + +{% endfor %} diff --git a/docs/NEWPAGE.md b/docs/NEWPAGE.md index bedff0a85..fbcc56b4c 100644 --- a/docs/NEWPAGE.md +++ b/docs/NEWPAGE.md @@ -179,17 +179,17 @@ The following sections detail the most usual entries. | - | - | - | - | | `description` | Used for internal and external search indexing. This will appear as the page preview when searching in Google. Try not to include words and information here that is not in the body of the article. | string | `description: A short summary.` | | `icon` | Page icon. | Path | | -| `status` | Will display a symbol on nav | `new` or `deprecated` | | -| `hide` | Used to turn off features (e.g. table of content) | [`tags` `toc` `nav`]| | -| `tags` | Used for internal and external search indexing | String[] | `tags: [ "slurm", "containers" ]` | -| `search: exclude` | Used to exclude page from internal search | Bool | `search: exclude: True`| -| `search: boost` | Used to increase or decrease weight in internal search | Float | `search: boost: 0.1` to lower weight, `search: boost: 10` to raise weight | +| `status` | Will display a symbol on nav | String `new`, `deprecated` | `status: deprecated` to add symbol | +| `hide` | Used to turn off features (e.g. table of content) | String [ ] `tags`, `toc`, `nav`|
hide:
- toc
Hide table of contents. | +| `tags` | Used for internal and external search indexing | String [ ] |
tags:
- slurm
- containers
| +|
search:
exclude
| Used to exclude page from internal search | Bool |
search:
exclude: True
| +|
search:
boost
| Used to increase or decrease weight in internal search | Float |
search:
boost: 0.1
to lower weight,
search:
boost: 10
to raise weight | ### Zendesk Imported The following fields were imported from Zendesk Page: -- `vote_count` +- `vote_count` - `vote_sum` - `zendesk_article_id` - `zendesk_section_id` diff --git a/docs/Getting_Started/Getting_Help/NeSI_wide_area_network_connectivity.md b/docs/NeSI_wide_area_network_connectivity.md similarity index 100% rename from docs/Getting_Started/Getting_Help/NeSI_wide_area_network_connectivity.md rename to docs/NeSI_wide_area_network_connectivity.md diff --git a/docs/Researcher_Developer_Cloud/.pages.yml b/docs/Researcher_Developer_Cloud/.pages.yml new file mode 100644 index 000000000..d0629897c --- /dev/null +++ b/docs/Researcher_Developer_Cloud/.pages.yml @@ -0,0 +1,6 @@ +nav: + - index.md + - user-guides + - security + - release-notes + diff --git a/docs/Researcher_Developer_Cloud/index.md b/docs/Researcher_Developer_Cloud/index.md new file mode 100644 index 000000000..f75841c8d --- /dev/null +++ b/docs/Researcher_Developer_Cloud/index.md @@ -0,0 +1,57 @@ +--- +hide: toc +--- + +# Research Developer Cloud + +
+ +- ![](iconmonstr-cloud-15.svg) __Cloud-native services__ + + --- + Use NeSI's cloud infrastructure and on-demand services to develop and manage custom, + interactive solutions. + +- ![](iconmonstr-git-5.svg) __Programmable infrastructure__ + + --- + Apply DevOps practices enabled by Infrastructure as Code (IaC) to automate, measure, + collaborate, and learn. + +- ![](iconmonstr-handshake-4.svg) __Partnership-led approaches__ + + --- + Partner with NeSI's DevOps specialists to build a platform or tools that can benefit your research community. + +
+ +NeSI is building a research cloud computing platform for easier and more adaptable collaboration around research data. + +Research teams can use our Research Developer Cloud to develop novel solutions that enable research. + +
+ +- [__Get started__](./index.md#get-started) + + --- + Scroll down to see how to apply for early access. + +- [__Talk to us__](mailto:support@cloud.nesi.org.nz?subject=NeSI's%20Research%20Developer%20Cloud) + + --- + If you want to chat with us about how our platform can support your research + +
+ +## Get started + +We invite you to apply for early access if you are: + +- Developing complex research software and data solutions that support custom workflows and a range of user experiences + +- Seeking a locally owned and sovereign cloud computing platform to build and develop tools that can support research data collaborations + +- Interested in partnering with NeSI to apply DevOps approaches and best practice to your research activities + +[Apply for early access](https://docs.google.com/forms/d/e/1FAIpQLScYsLxe1HswOW9DFUNuhyTcYhdWY7-SZqTF3RpeBpFcyNdhUA/viewform){ .md-button .md-button--primary } + diff --git a/docs/Researcher_Developer_Cloud/release-notes/20230918-essential-building-blocks-v0.9.md b/docs/Researcher_Developer_Cloud/release-notes/20230918-essential-building-blocks-v0.9.md new file mode 100644 index 000000000..5f4006304 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/release-notes/20230918-essential-building-blocks-v0.9.md @@ -0,0 +1,58 @@ +--- +hidden: false +label_names: +- release-note +position: 3 +title: Essential building blocks v0.9 - 20230918 NZRSE23 +--- + + +##Overview + +This is our first release of essential building blocks for cloud native services. Please see [The Research Developer Cloud page](https://www.nesi.org.nz/developercloud) for more information on how to gain access. 
+
+##Services and features
+
+####Compute
+
+On-demand instances available for computational needs
+
+* Launch and manage compute instances
+* Wide range of scale and three different flavours available for tailored needs
+
+####Volume storage
+
+Highly scalable persistent storage options
+
+* Mount volume storage to any compute instance
+* Option to encrypt the volume for added security
+
+####Network
+
+Network management for your solution, built on high speed REANNZ network
+
+* Set up and manage network configurations
+* Pool of public IP addresses available for use
+
+####Images
+
+Image repository for managing images that can be used with compute
+
+* Common operating systems are available and maintained by NeSI
+* Upload and manage custom images for your project or wider community
+
+####Identity
+
+Identity management service
+
+* Create and manage application credentials
+
+####Other
+
+* Programmable environment with API based access to all of the above features
+* Dashboard for user friendly GUI based interactions
+* Utilising OpenStack technology to provide interoperability
+
+##Known issues
+
+* Custom image upload feature is currently not working on the dashboard
diff --git a/docs/Researcher_Developer_Cloud/release-notes/20231205-research-developer-cloud-updates-and-fixes-v0.10.md b/docs/Researcher_Developer_Cloud/release-notes/20231205-research-developer-cloud-updates-and-fixes-v0.10.md
new file mode 100644
index 000000000..c71be9147
--- /dev/null
+++ b/docs/Researcher_Developer_Cloud/release-notes/20231205-research-developer-cloud-updates-and-fixes-v0.10.md
@@ -0,0 +1,32 @@
+---
+hidden: false
+label_names:
+- release-note
+position: 2
+title: Research Developer Cloud updates and fixes v0.10 - 20231205
+---
+
+##Cloud services
+
+####Images
+* UPDATE: Standard images provided are now regularly patched and updated via behind the scenes automated build process
+* FIX: Uploading custom images via the dashboard is now fixed
+
+####Block storage
+* FIX: 
Issues with detaching volumes has now been fixed + +##Infrastructure + +* [Yoga1](https://docs.openstack.org/yoga/index.html) containers have been updated to the latest patched versions within the release, which fixed bugs and patched security vulnerabilities +* Significant improvements have been made on our infrastructure testing mechanism to enable more automated processes of testing and improved resilience and visibility to incidents + +##Other updates +* Our security documentations have been updated. See here for more details +* Proof of concept usage of GPU accelerated compute flavors. We’ve worked with partners at AgResearch to test a Windows server instance supporting a GPU accelerated Proteomics workload, using the [flavor name], which includes passthrough of 2x NVIDIA A40 GPUs into the instance +* Prototyped a managed identity solution with KeyCloak + +We will continue to improve our services and we are currently testing object storage functionalities before releasing. The Research Developer Cloud has SLA of 9-5 weekdays, with best effort response time. Our team will be away during the Christmas and New Years holidays, so we may not respond to your requests on the last week of December and the first week of January. Have a wonderful holiday! + +

+ +1 Yoga is the version of OpenStack our services are on. OpenStack is an open source cloud computing infrastructure software project adopted by many different research institutions and public cloud providers. \ No newline at end of file diff --git a/docs/Researcher_Developer_Cloud/release-notes/20240205-research-developer-cloud-updates-v0.11.md b/docs/Researcher_Developer_Cloud/release-notes/20240205-research-developer-cloud-updates-v0.11.md new file mode 100644 index 000000000..750d1a37e --- /dev/null +++ b/docs/Researcher_Developer_Cloud/release-notes/20240205-research-developer-cloud-updates-v0.11.md @@ -0,0 +1,47 @@ +--- +hidden: false +label_names: +- release-note +position: 3 +title: Research Developer Cloud updates v0.11 - 20240205 +--- + +##Cloud services + +####Images +* UPDATE: [Rocky1](https://rockylinux.org/) images are now available for tenants to use on VMs + +####Object Storage +* UPDATE: Now operational and is in a user-testing phase. Please reach out if you would like some quota and help getting started! Early documentation can be found here: +[Create and Manage Object Storage - Research Developer Cloud](../user-guides/create-and-manage-object-storage/index.md) + +##Blueprints + +On top of ongoing development of our Cloud Services, we are now working towards building Blueprints for useful patterns that can support your research applications and pipelines. If you’ve a specific use case in mind, let us know. + +####Kubernetes + +We are starting out with K8s (Kubernetes, a container orchestration system). Deploying your applications on top of K8s can support gains in scalability, robustness, portability, and more. Starting with the basics, the following blueprint GitHub repositories support setting up a K8s management cluster and a workload cluster. 
+ +* Management Cluster: [GitHub - nesi/nesi.rdc.kind-bootstrap-capi](https://github.com/nesi/nesi.rdc.kind-bootstrap-capi) +* Workload Cluster: [GitHub - nesi/nesi.rdc.capi.workload](https://github.com/nesi/nesi.rdc.capi.workload) + +More guides around when and how to use K8s with your application are under development. Watch this space! + +##Infrastructure + +####Platform testing + +A full suite of CI/CD functional testing is now running hourly 24 x 7 against our core Research Developer Cloud infrastructure, supporting early identification of any emerging problems or incidents. + +####Infrastructure observability + +There is a common need to understand utilisation of resources for any cloud use case. We are in the process of creating a per tenant view of utilisation, which will be delivered via dashboards (using Grafana). We are prototyping this through our collaboration with AgResearch, to inform options towards more visibility for regular research developer cloud tenants in the future. Let us know of your needs for infrastructure observability. + +####Platform maintenance + +We are almost finished upgrading the operating systems of all hosts in our Ceph-based storage and OpenStack-based hosting platform (in both data centers) to Rocky Linux 9.2 from CentOS Stream 8. This upgrade improves maintainability, supportability, security, performance, and hardware compatibility. This is a significant upgrade and is in preparation for our next regular update to the newest versions of core OpenStack services, tentatively scheduled before mid-year. + +

+ +1 Rocky Linux is an open-source enterprise operating system designed to be 100% bug-for-bug compatible with Red Hat Enterprise Linux. diff --git a/docs/Researcher_Developer_Cloud/release-notes/pages.yml b/docs/Researcher_Developer_Cloud/release-notes/pages.yml new file mode 100644 index 000000000..c00b9a01d --- /dev/null +++ b/docs/Researcher_Developer_Cloud/release-notes/pages.yml @@ -0,0 +1 @@ +order: desc diff --git a/docs/Researcher_Developer_Cloud/security/index.md b/docs/Researcher_Developer_Cloud/security/index.md new file mode 100644 index 000000000..2a85e796e --- /dev/null +++ b/docs/Researcher_Developer_Cloud/security/index.md @@ -0,0 +1,17 @@ +--- +hidden: false +label_names: +- security +position: 1 +title: Protect and Manage Security +vote_count: 1 +vote_sum: 1 +--- + +NeSI is striving to foster security capability within the research community through increased visibility and collaboration. We will be sharing our efforts and collecting your feedback here. + +- [NeSI's Security Policy](security-policy.md) + +- [Security Practices at NeSI](security-practices/index.md) + +- [Shared Responsibility Model](shared-responsibility.md) diff --git a/docs/Researcher_Developer_Cloud/security/security-policy.md b/docs/Researcher_Developer_Cloud/security/security-policy.md new file mode 100644 index 000000000..2fed48f68 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/security/security-policy.md @@ -0,0 +1,15 @@ +--- +hidden: false +label_names: +- security +position: 1 +title: Security Policy +--- + +## NeSI Security Policy + +!!! 
note + NeSI has published a new security policy [NeSI Security Policy](https://www.nesi.org.nz/about-us/security-privacy/security-policy) + + +## \ No newline at end of file diff --git a/docs/Researcher_Developer_Cloud/security/security-practices/building-eff-rsp.md b/docs/Researcher_Developer_Cloud/security/security-practices/building-eff-rsp.md new file mode 100644 index 000000000..6a773a0da --- /dev/null +++ b/docs/Researcher_Developer_Cloud/security/security-practices/building-eff-rsp.md @@ -0,0 +1,57 @@ +--- +label_names: +- security +- practices +title: Building Effective and Efficient Research Security Programs +--- + +## Geting Started + +A broad approach to building effective and research security programs + +`5 Ws and an H` +Identifing the answers to 5 simple questions helps scope the engagement and resourcing + +When - Timing and length +Where - Where will you make impact (indivduals, teams, services) +Who - The audiance (researchers, staff, students) +What - What you intend to impact with the engagement (capability, simplification, culture, policy) +Why - Is there an external driver or internal improvement? +How - The plan: getting executive support, marketing, training, consulting, measurement, reporting, closure. + +
+ ![Alt text](5w-and-1h.png) +
+ +`Think Like a Researcher` +We must find the balance between compulsory and optional. Researchers don't like being told that they must do something, so we need to build the story of why they are being asked to do this and then make it as easy as possible for them to comply. + +Understand the challenges they face on a day to day basis. Take the questions they ask as they begin and execute a project then create the map to the answers. It is very likely they are spread across teams, services, platforms, or even specific individuals. That is a massive barrier to a someone solving their own problems, which researchers are very good at! They aren't good at intuiting the solution you think they should choose unless they know it's an option. + +
+ ![Alt text](topic-maze.png) +
+ +Align services as answers to questions in single platform, service, or space. Map activities to the research lifecycle and how each one can be accessed, leveraged, or simply requested to help them achieve and maintain compliance + +
+ ![Alt text](topic-maze-solved.png) +
+ +`Telling the Story` + +You need executive buy in and support prior to rolling these programs out to researchers. Researchers will ask their deans, department heads, and DVCRs directly `Why?`, and they need to be able to answer it concisely and completely. Provide metrics, statistics, and easy to share material to make those discussions easier. Here are some examples + +
+ ![Alt text](ee-supporting-evidence.png) +
+ +`Resources` + +[2018 IT Risk Report, Netwrix](https://www.netwrix.com/2018itrisksreport.html) +[Backblaze Hard Drive Stats Q3 2023](https://www.backblaze.com/blog/backblaze-drive-stats-for-q3-2022/) +[Gov UK Education Cybersecurity breaches](https://www.gov.uk/government/statistics/cyber-security-breaches-survey-2023/cyber-security-breaches-survey-2023-education-institutions-annex) +[Data Breach Numbers](https://www.govtech.com/blogs/lohrmann-on-cybersecurity/data-breach-numbers-costs-and-impacts-all-rise-in-2021) + + +## diff --git a/docs/Researcher_Developer_Cloud/security/security-practices/index.md b/docs/Researcher_Developer_Cloud/security/security-practices/index.md new file mode 100644 index 000000000..0fac38254 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/security/security-practices/index.md @@ -0,0 +1,14 @@ +--- +hidden: false +label_names: +- security +position: 1 +title: Security Practices +vote_count: 1 +vote_sum: 1 +--- + +We are sharing what NeSI and our community members have learned and want to make available. + +- [Building Effective and Efficient Research Security Programs](building-eff-rsp.md) + diff --git a/docs/Researcher_Developer_Cloud/security/shared-responsibility.md b/docs/Researcher_Developer_Cloud/security/shared-responsibility.md new file mode 100644 index 000000000..319a7f977 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/security/shared-responsibility.md @@ -0,0 +1,16 @@ +--- +hidden: false +label_names: +- security +title: Shared Responsibility Model +--- + +# Shared Responsibility - What is it? + +## Establishing a Shared Responsibility Model + +The way we work together on this platform is captured within a Shared Responsibility model. Shared Responsibility emphasises the kaitiakitanga, the collaboration, and the coordination essential to its maintenance and to protect the research communities it enables. + +This shared way of working is becoming more common, yet there is plenty to learn. 
NeSI is openly exploring how we can bring greater value to, or reduce the risk of your research efforts, and of how best to refine the attributes of this sovereign platform and how we carry our shared responsibilities. We’ll share our experiences and insights, and welcome you to share yours. + +## \ No newline at end of file diff --git a/docs/Researcher_Developer_Cloud/security/terms.md b/docs/Researcher_Developer_Cloud/security/terms.md new file mode 100644 index 000000000..6b81bcf2d --- /dev/null +++ b/docs/Researcher_Developer_Cloud/security/terms.md @@ -0,0 +1,12 @@ +--- +hidden: false +label_names: +- security +- terms +position: 1 +title: Terms +--- + +## Terms + +Adding Terms Soon! \ No newline at end of file diff --git a/docs/Researcher_Developer_Cloud/user-guides/.pages.yml b/docs/Researcher_Developer_Cloud/user-guides/.pages.yml new file mode 100644 index 000000000..7041d1ba8 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/.pages.yml @@ -0,0 +1,11 @@ +nav: + - index.md + - launch-and-manage-instances + - uploading-and-managing-Images + - create-and-manage-volumes + - create-and-manage-networks + - create-and-manage-identity + - create-and-manage-keypairs + - create-and-manage-object-storage + - setting-up-your-CLI-environment + - ... diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-identity/creating-and-managing-application-credentials-via-cli.md b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-identity/creating-and-managing-application-credentials-via-cli.md new file mode 100644 index 000000000..56e698026 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-identity/creating-and-managing-application-credentials-via-cli.md @@ -0,0 +1,133 @@ +--- +hidden: false +label_names: +- identity +- create +- manage +- cli +title: Creating and Managing Application Credentials via CLI +--- + +!!! note + The openstack CLI will need to be setup to interact with the FlexiHPC system. 
Please read [Setting up your CLI environment](../setting-up-your-CLI-environment/index.md) to interact with FlexiHPC to get started. + +## Create Application Credential + +Running the below command will generate a new `Application Credential` + +``` +openstack application credential create + [--secret ] + [--role ] + [--expiration ] + [--description ] + [--unrestricted] + [--restricted] + [--access-rules ] + +``` + +`--secret ` +: Secret to use for authentication. If not provided, one will be generated + +`--role ` +: Roles to authorize (name or ID) (repeat option to set multiple values), if not provided this will default to same roles as the user that creates it + +`--expiration ` +: Sets an expiration date for the application credential, format of YYYY-mm-ddTHH:MM:SS, if not provided, the application credential will not expire. + +`--description ` +: Application credential description + +`--unrestricted` +: Enable application credential to create and delete other application credentials and trusts + +!!! warning + This is potentially dangerous behavior and is disabled by default + +`--restricted` +: Prohibit application credential from creating and deleting other application credentials and trusts, this is enabled by default. 
+ +`--access-rules ` +: Either a string or file path containing a JSON-formatted list of access rules, each containing a request method, path, and service, for example ‘[{“method”: “GET”, “path”: “/v2.1/servers”, “service”: “compute”}]’ + +`name` +: Name of the application credential + +Command example below with only a name supplied + +``` +openstack application credential create wiki-test-app-creds +``` + +``` { .sh .no-copy } ++--------------+----------------------------------------------------------------------------------------+ +| Field | Value | ++--------------+----------------------------------------------------------------------------------------+ +| description | None | +| expires_at | None | +| id | 0f81c516aa6e443dba0aec93b0bbd87e | +| name | wiki-test-app-creds | +| project_id | 4f07cc254d6c4471805d49bae1f739b9 | +| roles | heat_stack_owner reader _member_ load-balancer_member member | +| secret | | +| system | None | +| unrestricted | False | +| user_id | fb9a3d02c89e4cfdbe64658ad43ece97 | ++--------------+----------------------------------------------------------------------------------------+ +``` + +!!! note + Once the `Application Credentails` are created the secret will be displayed. You need to take note of this now as there is no way to get that secret again and a new `Application Credential` will need to be created should you misplace it. 
+ +## List Application Credentials + +Running the below command will list all `Application Credentials` in your project + +``` +openstack application credential list +``` + +``` { .sh .no-copy } ++----------------------------------+---------------------+----------------------------------+-------------+------------+ +| ID | Name | Project ID | Description | Expires At | ++----------------------------------+---------------------+----------------------------------+-------------+------------+ +| 0f81c516aa6e443dba0aec93b0bbd87e | wiki-test-app-creds | 4f07cc254d6c4471805d49bae1f739b9 | None | None | ++----------------------------------+---------------------+----------------------------------+-------------+------------+ +``` + +## Show Application Credentials details + +Running the below command will present additional details about the Application Credentials + +``` +openstack application credential show APPLICATION_CRED_ID +``` + +``` { .sh .no-copy } ++--------------+--------------------------------------------------------------+ +| Field | Value | ++--------------+--------------------------------------------------------------+ +| description | None | +| expires_at | None | +| id | 0f81c516aa6e443dba0aec93b0bbd87e | +| name | wiki-test-app-creds | +| project_id | 4f07cc254d6c4471805d49bae1f739b9 | +| roles | reader load-balancer_member _member_ member heat_stack_owner | +| system | None | +| unrestricted | False | +| user_id | fb9a3d02c89e4cfdbe64658ad43ece97 | ++--------------+--------------------------------------------------------------+ +``` + +## Deleting Application Credentials + +Run the command `openstack application credential list` to get the `ID` of the `Application Credentials` you would like to delete + +Then with the `ID` run the below command to delete it + +``` +openstack application credential delete APPLICATION_CRED_ID +``` + +There will be no response, so you can run the list command again to confirm deletion \ No newline at end of file diff --git 
a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-identity/creating-and-managing-application-credentials-with-the-dashboard.md b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-identity/creating-and-managing-application-credentials-with-the-dashboard.md
new file mode 100644
index 000000000..cb65e672a
--- /dev/null
+++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-identity/creating-and-managing-application-credentials-with-the-dashboard.md
@@ -0,0 +1,101 @@
+---
+hidden: false
+label_names:
+- identity
+- create
+- manage
+- dashboard
+position: 1
+title: Create and manage application credentials via the dashboard
+---
+
+## Create Application Credentials
+
+Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/)
+
+Select the project you would like to create the Application Credentials in (use the project selector on the top left-hand side):
+
+ ![Alt text](project-selector.png) +
+ +Open the `Identity` tab and click `Application Credentials` category + +
+ ![Alt text](app-creds-overview.png) +
+ +Click `Create Application Credentials` + +Within the `Create Application Credentials` dialog we have the following options + +`Name` +: This is a required field. A name for your Application Credentials, an example could be terraform-deployment + +`Description` +: A friendly description for the Application Credentials + +`Secret` +: If this field is left blank then one will be automatically generated for you, however you have the ability to provide one yourself should you choose. + +`Expiration Date` +: The day that you wish for this Application Credentials to expire, example is you may only want these to last 1 month and as part of maintenance each month you roll new credentials. If left blank it will default to never expire + +`Expiration Time` +: The time you wish for this Application Credentials to expire, example could be 23:59 on the first Monday of each month so it gives you the Monday morning to roll new credentials. If left blank and `Expiration Date` is set it will default to 00:00:00 + +!!! note + Expiration Date and Time will be in UTC format. + +`Roles` +: You may select one or more roles for this application credential. If you do not select any, all of the roles you have assigned on the current project will be applied to the application credential. Example here is my user has `[admin, member, reader]` associated with it so if I don't pick any roles the `Application Credentials` will be given the same permissions. + +`Access Rules` +: If you want more fine-grained access control delegation, you can create one or more access rules for this application credential. 
The list of access rules must be a JSON- or YAML-formatted list of rules each containing a service type, an HTTP method, and a URL path, for example: + ``` json title="JSON example" + [ +   { + "service": "compute", +    "method": "POST", +    "path": "/v2.1/servers" + } + ] + ``` + + ``` yaml title="YAML example" + - service: compute + method: POST + path: /v2.1/servers + ``` + +`Unrestricted (Dangerous)` +: By default, for security reasons, application credentials are forbidden from being used for creating additional application credentials or keystone trusts. If your application credential needs to be able to perform these actions, check "unrestricted". + + !!! warning + This is potentially dangerous behavior and is disabled by default + +Once all the required fields are completed click on `Create Application Credential` + +Another dialog will appear that has your newly created `Application Credential` secret. The application credential secret will not be available after closing this page, so you must capture it now or download it. If you lose this secret, you must generate a new application credential. + +## Deleting Application Credentials + +Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/) + +Select the project you would like to deploy the new instance too (Use the project selector on the top left-hand side): + +
+ ![Alt text](project-selector.png) +
+ +Open the `Identity` tab and click `Application Credentials` category + +Using the check box select the `Application Credentials` you would like to delete and the `Delete Application Credentials` should become clickable in the top right + +
+ ![Alt text](app-cred-selected.png) +
+
+Click `Delete Application Credentials` and a confirmation dialog will appear
+
+Confirm the deletion by clicking `Delete Application Credentials`
diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-identity/index.md b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-identity/index.md
new file mode 100644
index 000000000..77610cb98
--- /dev/null
+++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-identity/index.md
@@ -0,0 +1,24 @@
+---
+hidden: false
+label_names:
+- identity
+- create
+- manage
+position: 1
+title: Create and Manage identity
+vote_count: 1
+vote_sum: 1
+---
+
+`Application Credentials` help you to avoid the practice of embedding user account credentials in configuration files. Instead, the user creates an Application Credential that receives delegated access to a single project and has its own distinct secret. The user can also limit the delegated privileges to a single role in that project. This allows you to adopt the principle of least privilege, where the authenticated service only gains access to the one project and role that it needs to function, rather than all of them.
+
+This approach allows you to consume an API without revealing your user credentials, and lets applications authenticate to Keystone without requiring embedded user credentials.
+
+Within FlexiHPC you are able to manage `Application Credentials` from the dashboard and/or the CLI.
+
+- [Creating and Managing Application Credentials from the dashboard](creating-and-managing-application-credentials-with-the-dashboard.md)
+
+- [Creating and Managing Application Credentials via CLI](creating-and-managing-application-credentials-via-cli.md)
+
+!!! note
+    The Application Credential is dependent on the user account that created it, so it will terminate if that account is ever deleted, or loses access to the relevant role.
\ No newline at end of file diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-keypairs/create-and-manage-keypairs-via-cli.md b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-keypairs/create-and-manage-keypairs-via-cli.md new file mode 100644 index 000000000..4a6326b8d --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-keypairs/create-and-manage-keypairs-via-cli.md @@ -0,0 +1,121 @@ +--- +hidden: false +label_names: +- keypairs +- create +- manage +- cli +position: 1 +title: Create and manage keypairs via CLI +--- + +!!! note + The openstack CLI will need to be setup to interact with the FlexiHPC system. Please read [Setting up your CLI environment](../setting-up-your-CLI-environment/index.md) to interact with FlexiHPC to get started. + +## Create a new Keypair + +Running the following command will generate a new SSH keypair for use on the RDC + +``` { .sh } +openstack keypair create KEY_PAIR_NAME +``` + +You will get a response from the server that contains your private key + + +``` { .sh .no-copy } +-----BEGIN OPENSSH PRIVATE KEY----- +A BIG STRING OF NUMBERS AND LETTERS +-----END OPENSSH PRIVATE KEY----- +``` + +You will need to take that output and save it to a file. An example below on how to do that: + +``` +nano ~/.ssh/id_rdc_key +``` + +That will open up and empty file in `nano` called `~/.ssh/id_rdc_key` + +You will then want to take the `private key` response and paste that into the file within your text editor and save it. 
+
+You should now have a file under `~/.ssh` called `id_rdc_key`
+
+We will need to change its permissions so that only you can read and write to the file, run the following command:
+
+```
+chmod 0600 ~/.ssh/id_rdc_key
+```
+
+## Import a Keypair
+
+To import a keypair that you have for use on the RDC the command is the same as the create except with a new parameter
+
+```
+openstack keypair create --public-key PUBLIC_KEY_FILE KEY_PAIR_NAME
+```
+
+You will need to replace `PUBLIC_KEY_FILE` with the Public Key file location on your machine, running the above command will give no response so you will need to list the key pairs to see if it's been successfully created
+
+## List your Keypairs
+
+Running the below command will list all your keypairs that are on the RDC
+
+```
+openstack keypair list
+```
+
+``` { .sh .no-copy }
+$ openstack keypair list
++------------+-------------------------------------------------+------+
+| Name       | Fingerprint                                     | Type |
++------------+-------------------------------------------------+------+
+| wiki-test  | d5:0a:41:68:e0:84:fc:08:b6:cc:34:23:d8:9a:b4:c3 | ssh  |
++------------+-------------------------------------------------+------+
+```
+
+## Details of a Keypair
+
+Running the below command will show details about the key pair
+
+```
+openstack keypair show KEY_PAIR_NAME
+```
+
+``` { .sh .no-copy }
+$ openstack keypair show wiki-test
++-------------+-------------------------------------------------+
+| Field       | Value                                           |
++-------------+-------------------------------------------------+
+| created_at  | 2023-11-02T20:28:15.000000                      |
+| fingerprint | d5:0a:41:68:e0:84:fc:08:b6:cc:34:23:d8:9a:b4:c3 |
+| id          | wiki-test                                       |
+| is_deleted  | False                                           |
+| name        | wiki-test                                       |
+| private_key | None                                            |
+| type        | ssh                                             |
+| user_id     | fb9a3d02c89e4cfdbe64658ad43ece97                |
++-------------+-------------------------------------------------+
+```
+
+Adding the parameter `--public-key` will output the public key for that key pair
+
+```
+openstack keypair show 
--public-key KEY_PAIR_NAME
+```
+
+``` { .sh .no-copy }
+$ openstack keypair show --public-key wiki-test
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILW2gFl/ax1FW1y5u2ihfJfPow7fFbX/aFsZ4Wv49yY4
+```
+
+## Delete a Keypair
+
+To delete a keypair from the RDC run the below command
+
+```
+openstack keypair delete KEY_PAIR_NAME
+```
+
+There will be no response from the server so running the list command will confirm that the keypair has been removed.
+ 
\ No newline at end of file
diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-keypairs/create-and-manage-keypairs-with-the-dashboard.md b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-keypairs/create-and-manage-keypairs-with-the-dashboard.md
new file mode 100644
index 000000000..a2c2884f9
--- /dev/null
+++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-keypairs/create-and-manage-keypairs-with-the-dashboard.md
@@ -0,0 +1,86 @@
+---
+hidden: false
+label_names:
+- keypairs
+- create
+- manage
+- dashboard
+position: 1
+title: Create and manage keypairs via the dashboard
+---
+
+## Create a Keypair
+
+Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/)
+
+Select the project you would like to deploy the new instance to (Use the project selector on the top left-hand side):
+
+ ![Alt text](project-selector.png) +
+ +Open the `Project` tab, open the `Compute` tab and select the `Key Pairs` category + +Click `Create Key Pair`. + +
+ ![Alt text](new-key-pair.png) +
+
+In the `Create Key Pair` dialog box, enter a name for your `key pair`, and select a `Key Type`
+
+`Key Type`
+: Select one of the following options
+
+    - `SSH Key`
+    : This will be the commonly picked `Key Type` as we will use this to SSH to most compute instances.
+
+    - `X509 Certificate`
+    : This will be used to generate a Certificate-based key.
+
+ ![Alt text](new-key-pair-filled.png) +
+ +Once all fields are supplied click `Create Key Pair` + +The private key will be downloaded automatically + +
+ ![Alt text](new-key-pair-download.png) +
+ +To change its permissions so that only you can read and write to the file, run the following command: + +``` +chmod 0600 yourPrivateKey.pem +``` + +!!! note + If you are using the Dashboard from a Windows computer, use PuTTYgen to load the `*.pem` file and convert and save it as `*.ppk`. For more information see the [WinSCP web page for PuTTYgen](https://winscp.net/eng/docs/ui_puttygen). + +To make the key pair known to SSH, run the ssh-add command. + +``` +ssh-add yourPrivateKey.pem +``` + +## Import a Key Pair + +Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/) + +Select the project you would like to deploy the new instance too (Use the project selector on the top left-hand side): + +
+ ![Alt text](project-selector.png) +
+ +Open the `Project` tab, open the `Compute` tab and select the `Key Pairs` category + +Click `Import Key Pair`. + +In the `Import Key Pair` dialog box, enter the name of your key pair, copy the public key into the `Public Key` box, and then click `Import Key Pair`. + +The Compute database registers the public key of the key pair. + +The Dashboard lists the key pair on the `Key Pairs` tab. diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-keypairs/index.md b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-keypairs/index.md new file mode 100644 index 000000000..552d74cb0 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-keypairs/index.md @@ -0,0 +1,22 @@ +--- +hidden: false +label_names: +- identity +- create +- manage +position: 1 +title: Create and Manage Keypairs +vote_count: 1 +vote_sum: 1 +--- + +Key pairs are SSH credentials that are injected into a FlexiHPC instance when it is launched. These are used to access and manage your instances. + +You are able to create a new SSH Key pair on the RDC or import one of your own. 
+ + +Key pairs can be managed a few ways + +- [Create and manage key pairs via the dashboard](create-and-manage-keypairs-with-the-dashboard.md) + +- [Create and manage key pairs via CLI](create-and-manage-keypairs-via-cli.md) \ No newline at end of file diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/index.md b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/index.md new file mode 100644 index 000000000..150639808 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/index.md @@ -0,0 +1,55 @@ +--- +hidden: false +label_names: +- networks +- create +- manage +position: 1 +title: Create and Manage networks +vote_count: 1 +vote_sum: 1 +--- + +Within FlexiHPC you are able to use the default network that comes with the FlexiHPC Project or you are able to create your own with a specific IP range. + +The networks within FlexiHPC are all `Software Defined Networks` so can overlap each other in different projects. + +Networks can be created and managed in 2 ways + +- [Create and Manage networks with the Dashboard](with_the_dashboard/create-and-manage-networks-with-the-dashboard.md) + +- [Create and Manage networks via CLI](with_the_CLI/create-and-manage-networks-via-cli.md) + +Within the network tab you also have the following that you are able to manage + +## Security Groups + +A security group acts as a virtual firewall for servers and other resources on a network. It is a container for security group rules which specify the network access rules. + +Security Groups can be created and managed within the FlexiHPC dashboard. However, advanced users can take advantage of the OpenStack CLI to manage Security Groups. 
+ +- [Create and Manage Security groups with the Dashboard](with_the_dashboard/manage-security-groups-with-the-dashboard.md) + +- [Manage Security groups via CLI](with_the_CLI/manage-security-groups-via-cli.md) + +## Floating IPs + +When an instance is created in FlexiHPC, it is automatically assigned a fixed IP address in the network to which the instance is assigned. This IP address is permanently associated with the instance until the instance is terminated. + +However, in addition to the fixed IP address, a floating IP address can also be attached to an instance. Unlike fixed IP addresses, floating IP addresses can have their associations modified at any time, regardless of the state of the instances involved. This procedure details the reservation of a floating IP address from an existing pool of addresses and the association of that address with a specific instance. + +If you wish to connect to an instance within the FlexiHPC platform from outside then these are required. + +- [Manage Floating IPs with the Dashboard](with_the_dashboard/manage-floating-ips-via-the-dashboard.md) + +- [Manage Floating IPs with the CLI](with_the_CLI/manage-floating-ips-via-cli.md) + +## Static IPs + +If you wanted to create an instance with a fixed static IP address this can be achieved by using network ports. A port is a connection point for attaching a single device, such as the NIC of a server, to an OpenStack network. A network port also describes the associated network configuration, such as the MAC and IP addresses to be used on that port. 
+ +These network ports can be managed 2 ways + +- [Create and Manage network ports with the Dashboard](with_the_dashboard/create-and-manage-network-ports-with-the-dashboard.md) + +- [Create and manage network ports via CLI](with_the_CLI/create-and-manage-network-ports-via-cli.md) diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_CLI/create-and-manage-network-ports-via-cli.md b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_CLI/create-and-manage-network-ports-via-cli.md new file mode 100644 index 000000000..4f9e16c99 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_CLI/create-and-manage-network-ports-via-cli.md @@ -0,0 +1,383 @@ +--- +hidden: false +label_names: +- instance +- launch +position: 2 +title: Create and manage network ports via CLI +vote_count: 1 +vote_sum: 1 +--- + +!!! note + The openstack CLI will need to be setup to interact with the FlexiHPC system. + Please read [Setting up your CLI environment](../../setting-up-your-CLI-environment/index.md) to interact with FlexiHPC to get started. 
+ +First we need to get a few details, `Network ID` we want to connect to the network port to, `the Subnet ID` we want to connect the IP to and the `IP Address` we want to assign to the network port unless you want it to be assigned an IP from the DHCP + +Run the following command to get the `Network ID` + +``` +openstack network list +``` + +``` { .sh .no-copy } ++--------------------------------------+--------------------------------+--------------------------------------+ +| ID | Name | Subnets | ++--------------------------------------+--------------------------------+--------------------------------------+ +| 33d0c11b-b659-4b77-9afc-5676fe965839 | external | 5c2644ad-7253-42f5-ad69-40970b84dea6 | +| 79029286-80ad-4923-a2e6-7d1216a9f2be | rally_verify_88403f86_qmojdKSJ | | +| bcfd4714-ef9c-4c0b-aa58-ad8bcc1a999e | rally_verify_51cf3f2d_mQ0taHVb | | +| d3a7ddb5-6582-42cf-978a-c99b4ed25ad4 | NeSI-Training-Test | f5715775-270c-4230-bfa7-fdbdf51352dc | +| d780f680-9640-430f-813f-dbf2128b445c | azimuth-demo | 70dc21e9-d8f4-4232-bda9-2f0a0d508105 | ++--------------------------------------+--------------------------------+--------------------------------------+ +``` + +Take note of the `Network ID` and the `Subnet ID` + +!!! 
note + For this example we will use the `azimuth-demo` network so the `Network ID: d780f680-9640-430f-813f-dbf2128b445c` and the `Subnet ID: 70dc21e9-d8f4-4232-bda9-2f0a0d508105` + +Should you not know the IP range of the subnet run the following command to get the IP address range from that chosen subnet + +``` +openstack subnet show SUBNET_ID +``` + +Example response below using the `Subnet ID: 70dc21e9-d8f4-4232-bda9-2f0a0d508105` + +``` +openstack subnet show 70dc21e9-d8f4-4232-bda9-2f0a0d508105 +``` + +``` { .sh .no-copy } ++----------------------+--------------------------------------+ +| Field | Value | ++----------------------+--------------------------------------+ +| allocation_pools | 192.168.100.2-192.168.100.254 | +| cidr | 192.168.100.0/24 | +| created_at | 2023-08-11T02:47:17Z | +| description | | +| dns_nameservers | | +| dns_publish_fixed_ip | False | +| enable_dhcp | True | +| gateway_ip | 192.168.100.1 | +| host_routes | | +| id | 70dc21e9-d8f4-4232-bda9-2f0a0d508105 | +| ip_version | 4 | +| ipv6_address_mode | None | +| ipv6_ra_mode | None | +| name | azimuth-demo | +| network_id | d780f680-9640-430f-813f-dbf2128b445c | +| project_id | 4f07cc254d6c4471805d49bae1f739b9 | +| revision_number | 0 | +| segment_id | None | +| service_types | | +| subnetpool_id | None | +| tags | | +| updated_at | 2023-08-11T02:47:17Z | ++----------------------+--------------------------------------+ +``` + +Take note of the `allocation_pools` as that will specify the pool of IP addresses + +## Create a network port + +Run the following command to create a network port + +``` +openstack port create --network NETWORK_ID --description PORT_DESCRIPTION --fixed-ip subnet=SUBNET_ID,ip-address=IP_ADDRESS --enable PORT_NAME +``` + +Using the example IDs we prepared earlier it will look like this + +``` +openstack port create --network d780f680-9640-430f-813f-dbf2128b445c --description wiki-network-port --fixed-ip 
subnet=70dc21e9-d8f4-4232-bda9-2f0a0d508105,ip-address=192.168.100.60 --enable Wiki-Network-Port-CLI +``` + +We have specified the `Network ID` as the `azimuth-demo` network, with the `Subnet ID` within that network and assigning that network port an IP address of `192.168.100.60`, we also gave it a description and a friendly name. + +The response from the command + +``` { .sh .no-copy } ++-------------------------+---------------------------------------------------------------------------------------------------------+ +| Field | Value | ++-------------------------+---------------------------------------------------------------------------------------------------------+ +| admin_state_up | UP | +| allowed_address_pairs | | +| binding_host_id | None | +| binding_profile | None | +| binding_vif_details | None | +| binding_vif_type | None | +| binding_vnic_type | normal | +| created_at | 2023-08-29T01:29:45Z | +| data_plane_status | None | +| description | wiki-network-port | +| device_id | | +| device_owner | | +| device_profile | None | +| dns_assignment | fqdn='host-192-168-100-60.openstacklocal.', hostname='host-192-168-100-60', ip_address='192.168.100.60' | +| dns_domain | | +| dns_name | | +| extra_dhcp_opts | | +| fixed_ips | ip_address='192.168.100.60', subnet_id='70dc21e9-d8f4-4232-bda9-2f0a0d508105' | +| id | 09e94e3f-ee9e-42f5-851e-a9b4d957b563 | +| ip_allocation | None | +| mac_address | fa:16:3e:05:c9:dd | +| name | Wiki-Network-Port-CLI | +| network_id | d780f680-9640-430f-813f-dbf2128b445c | +| numa_affinity_policy | None | +| port_security_enabled | True | +| project_id | 4f07cc254d6c4471805d49bae1f739b9 | +| propagate_uplink_status | None | +| qos_network_policy_id | None | +| qos_policy_id | None | +| resource_request | None | +| revision_number | 1 | +| security_group_ids | f2f15d6f-2a04-4196-8102-a058042694b3 | +| status | DOWN | +| tags | | +| trunk_details | None | +| updated_at | 2023-08-29T01:29:45Z | 
++-------------------------+---------------------------------------------------------------------------------------------------------+ +``` + +Should you not want to assign an IP yourself and allow the DHCP to assign it then run the command without the `ip-address `parameter like below + +``` +openstack port create --network NETWORK_ID --description PORT_DESCRIPTION --fixed-ip subnet=SUBNET_ID --enable PORT_NAME +``` + +Command with response + +``` +openstack port create --network d780f680-9640-430f-813f-dbf2128b445c --description wiki-network-port --fixed-ip subnet=70dc21e9-d8f4-4232-bda9-2f0a0d508105 --enable Wiki-Network-Port-CLI +``` + +``` { .sh .no-copy } ++-------------------------+------------------------------------------------------------------------------------------------------------+ +| Field | Value | ++-------------------------+------------------------------------------------------------------------------------------------------------+ +| admin_state_up | UP | +| allowed_address_pairs | | +| binding_host_id | None | +| binding_profile | None | +| binding_vif_details | None | +| binding_vif_type | None | +| binding_vnic_type | normal | +| created_at | 2023-08-29T01:38:28Z | +| data_plane_status | None | +| description | wiki-network-port | +| device_id | | +| device_owner | | +| device_profile | None | +| dns_assignment | fqdn='host-192-168-100-182.openstacklocal.', hostname='host-192-168-100-182', ip_address='192.168.100.182' | +| dns_domain | | +| dns_name | | +| extra_dhcp_opts | | +| fixed_ips | ip_address='192.168.100.182', subnet_id='70dc21e9-d8f4-4232-bda9-2f0a0d508105' | +| id | d91d923e-a91f-4e00-baa9-eda3ba842dd5 | +| ip_allocation | None | +| mac_address | fa:16:3e:35:5a:e1 | +| name | Wiki-Network-Port-CLI | +| network_id | d780f680-9640-430f-813f-dbf2128b445c | +| numa_affinity_policy | None | +| port_security_enabled | True | +| project_id | 4f07cc254d6c4471805d49bae1f739b9 | +| propagate_uplink_status | None | +| qos_network_policy_id | 
None | +| qos_policy_id | None | +| resource_request | None | +| revision_number | 1 | +| security_group_ids | f2f15d6f-2a04-4196-8102-a058042694b3 | +| status | DOWN | +| tags | | +| trunk_details | None | +| updated_at | 2023-08-29T01:38:28Z | ++-------------------------+------------------------------------------------------------------------------------------------------------+ +``` + +Running the below command will list the network ports within the project and we should be able to see our newly created one + +``` +openstack port list +``` + +``` { .sh .no-copy } ++--------------------------------------+-----------------------+-------------------+--------------------------------------------------------------------------------+--------+ +| ID | Name | MAC Address | Fixed IP Addresses | Status | ++--------------------------------------+-----------------------+-------------------+--------------------------------------------------------------------------------+--------+ +| 09e94e3f-ee9e-42f5-851e-a9b4d957b563 | Wiki-Network-Port-CLI | fa:16:3e:05:c9:dd | ip_address='192.168.100.60', subnet_id='70dc21e9-d8f4-4232-bda9-2f0a0d508105' | DOWN | +| 0e1dc631-2c63-43b4-9bd2-fcdfbedb854c | | fa:16:3e:77:0d:c0 | ip_address='10.1.0.5', subnet_id='f5715775-270c-4230-bfa7-fdbdf51352dc' | ACTIVE | +| 19737c3e-5717-4d19-8717-d362c53f552a | | fa:16:3e:21:99:fa | ip_address='10.1.0.2', subnet_id='f5715775-270c-4230-bfa7-fdbdf51352dc' | ACTIVE | ++--------------------------------------+-----------------------+-------------------+--------------------------------------------------------------------------------+--------+ +``` + +!!! 
note + The full openstack documentation is [here](https://docs.openstack.org/python-openstackclient/pike/cli/command-objects/port.html#port-create) should you need more advanced parameters + +## List network ports + +Run the below command to list all network ports within your project + +``` +openstack port list +``` + +``` { .sh .no-copy } ++--------------------------------------+-----------------------+-------------------+--------------------------------------------------------------------------------+--------+ +| ID | Name | MAC Address | Fixed IP Addresses | Status | ++--------------------------------------+-----------------------+-------------------+--------------------------------------------------------------------------------+--------+ +| 09e94e3f-ee9e-42f5-851e-a9b4d957b563 | Wiki-Network-Port-CLI | fa:16:3e:05:c9:dd | ip_address='192.168.100.60', subnet_id='70dc21e9-d8f4-4232-bda9-2f0a0d508105' | DOWN | +| 0e1dc631-2c63-43b4-9bd2-fcdfbedb854c | | fa:16:3e:77:0d:c0 | ip_address='10.1.0.5', subnet_id='f5715775-270c-4230-bfa7-fdbdf51352dc' | ACTIVE | +| 19737c3e-5717-4d19-8717-d362c53f552a | | fa:16:3e:21:99:fa | ip_address='10.1.0.2', subnet_id='f5715775-270c-4230-bfa7-fdbdf51352dc' | ACTIVE | ++--------------------------------------+-----------------------+-------------------+--------------------------------------------------------------------------------+--------+ +``` + +## Attach network port to an Instance + +If you also wish to attach the newly created `Network Port` to an instance then you will also need the `Instance ID` + +Running the below will return a list of all instances within your project + +``` +openstack server list +``` + +``` { .sh .no-copy } ++--------------------------------------+---------------------+--------+----------------------------------------------+------------------------------+--------------------+ +| ID | Name | Status | Networks | Image | Flavor | 
++--------------------------------------+---------------------+--------+----------------------------------------------+------------------------------+--------------------+ +| 610ee950-cdf8-425d-a3f3-52de500522ee | k8s-worker-node-1 | ACTIVE | NeSI-Training-Test=10.1.0.101, FLEXIHPC_IP | Ubuntu-Focal-20.04 | balanced1.2cpu4ram | +| 10389ba9-15a9-48b0-91f3-b7cbccdce72b | k8s-worker-node-0 | ACTIVE | NeSI-Training-Test=10.1.0.81, FLEXIHPC_IP | Ubuntu-Focal-20.04 | balanced1.2cpu4ram | +| af6fb776-b80e-49b9-a8d4-a1d88b272b63 | k8s-control-plane-0 | ACTIVE | NeSI-Training-Test=10.1.0.176, FLEXIHPC_IP | Ubuntu-Focal-20.04 | balanced1.2cpu4ram | +| 6d1d5418-a70e-4996-a0f5-4f4c03cfd138 | ood-cluster-admin | ACTIVE | NeSI-Training-Test=10.1.0.69, FLEXIHPC_IP | N/A (booted from volume) | devtest1.4cpu4ram | ++--------------------------------------+---------------------+--------+----------------------------------------------+------------------------------+--------------------+ +``` + +Take note of the `Instance ID` + +!!! 
note + For this example we will use `Instance ID: 6d1d5418-a70e-4996-a0f5-4f4c03cfd138` + +We then want to list our network ports + +``` +openstack port list +``` + +``` { .sh .no-copy } ++--------------------------------------+-----------------------+-------------------+--------------------------------------------------------------------------------+--------+ +| ID | Name | MAC Address | Fixed IP Addresses | Status | ++--------------------------------------+-----------------------+-------------------+--------------------------------------------------------------------------------+--------+ +| d91d923e-a91f-4e00-baa9-eda3ba842dd5 | Wiki-Network-Port-CLI | fa:16:3e:35:5a:e1 | ip_address='192.168.100.182', subnet_id='70dc21e9-d8f4-4232-bda9-2f0a0d508105' | DOWN | +| f1c54ee3-80c5-468d-a1cb-2828c1fee5cc | | fa:16:3e:ad:6b:06 | ip_address='10.1.0.1', subnet_id='f5715775-270c-4230-bfa7-fdbdf51352dc' | ACTIVE | ++--------------------------------------+-----------------------+-------------------+--------------------------------------------------------------------------------+--------+ +``` + +Take note of the `Network Port ID` + +!!! 
note + For this example we will use `d91d923e-a91f-4e00-baa9-eda3ba842dd5` + +We then run the following command + +``` +openstack server add port INSTANCE_ID NETWORK_PORT_ID +``` + +Command with our example ID’s + +``` +openstack server add port 6d1d5418-a70e-4996-a0f5-4f4c03cfd138 d91d923e-a91f-4e00-baa9-eda3ba842dd5 +``` + +There will be no response from the server so running the below command will display the new network port added to the instance + +``` +openstack server show 6d1d5418-a70e-4996-a0f5-4f4c03cfd138 +``` + +``` { .sh .no-copy } ++-------------------------------------+----------------------------------------------------------------------------+ +| Field | Value | ++-------------------------------------+----------------------------------------------------------------------------+ +| accessIPv4 | | +| accessIPv6 | | +| access_ipv4 | | +| access_ipv6 | | +| addresses | NeSI-Training-Test=10.1.0.69, 163.7.177.243; azimuth-demo=192.168.100.182 | +| adminPass | None | +| admin_password | None | +| availability_zone | nova | +| block_device_mapping | None | +| block_device_mapping_v2 | None | +| compute_host | None | +| config_drive | True | +| created | 2023-07-19T03:45:21Z | +| created_at | 2023-07-19T03:45:21Z | +| description | None | +| disk_config | AUTO | +| fault | None | +| flavor | devtest1.4cpu4ram (devtest1.4cpu4ram) | +| flavorRef | None | +| flavor_id | None | +| has_config_drive | True | +| hostId | f40676c3043f50b6efeeefb163a9d9f7a0994b288b09dfddcdccac9b | +| host_id | f40676c3043f50b6efeeefb163a9d9f7a0994b288b09dfddcdccac9b | +| host_status | None | +| hostname | ood-cluster-admin | + +| launched_at | 2023-07-19T03:45:28.000000 | ++-------------------------------------+----------------------------------------------------------------------------+ +``` + +You should see the additional network port under `addresses` for your instance + +## Delete a network port + +Run the command `openstack port list` and take note of the `Network Port ID` + 
+``` +openstack port list +``` + +``` { .sh .no-copy } ++--------------------------------------+-----------------------+-------------------+--------------------------------------------------------------------------------+--------+ +| ID | Name | MAC Address | Fixed IP Addresses | Status | ++--------------------------------------+-----------------------+-------------------+--------------------------------------------------------------------------------+--------+ +| d91d923e-a91f-4e00-baa9-eda3ba842dd5 | Wiki-Network-Port-CLI | fa:16:3e:35:5a:e1 | ip_address='192.168.100.182', subnet_id='70dc21e9-d8f4-4232-bda9-2f0a0d508105' | DOWN | +| f1c54ee3-80c5-468d-a1cb-2828c1fee5cc | | fa:16:3e:ad:6b:06 | ip_address='10.1.0.1', subnet_id='f5715775-270c-4230-bfa7-fdbdf51352dc' | ACTIVE | ++--------------------------------------+-----------------------+-------------------+--------------------------------------------------------------------------------+--------+ +``` + +Take note of the ID for that network port + +!!! 
note + For this example we will use `d91d923e-a91f-4e00-baa9-eda3ba842dd5` + +Then run the below command, supplying the `Network Port ID` you want to delete + +``` +openstack port delete NETWORK_PORT_ID +``` + +Command with our example ID + +``` +openstack port delete d91d923e-a91f-4e00-baa9-eda3ba842dd5 +``` + +There is no response from the server so run `openstack port list` to see the network port has been removed + +``` +openstack port list +``` + +``` { .sh .no-copy } ++--------------------------------------+-----------------------+-------------------+--------------------------------------------------------------------------------+--------+ +| ID | Name | MAC Address | Fixed IP Addresses | Status | ++--------------------------------------+-----------------------+-------------------+--------------------------------------------------------------------------------+--------+ +| f1c54ee3-80c5-468d-a1cb-2828c1fee5cc | | fa:16:3e:ad:6b:06 | ip_address='10.1.0.1', subnet_id='f5715775-270c-4230-bfa7-fdbdf51352dc' | ACTIVE | ++--------------------------------------+-----------------------+-------------------+--------------------------------------------------------------------------------+--------+ +``` diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_CLI/create-and-manage-networks-via-cli.md b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_CLI/create-and-manage-networks-via-cli.md new file mode 100644 index 000000000..598f739d5 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_CLI/create-and-manage-networks-via-cli.md @@ -0,0 +1,257 @@ +--- +hidden: false +label_names: +- instance +- launch +position: 2 +title: Create and manage network via CLI +vote_count: 1 +vote_sum: 1 +--- + +!!! note + The openstack CLI will need to be setup to interact with the FlexiHPC system. 
+ Please read [Setting up your CLI environment](../../setting-up-your-CLI-environment/index.md) to interact with FlexiHPC to get started. + +## Create a network + +Running the below command will generate a network without a subnet + +``` +openstack network create NETWORK_NAME +``` + +Our example command with response + +``` +openstack network create wiki-net +``` + +``` { .sh .no-copy } ++---------------------------+--------------------------------------+ +| Field | Value | ++---------------------------+--------------------------------------+ +| admin_state_up | UP | +| availability_zone_hints | | +| availability_zones | | +| created_at | 2023-09-10T21:10:02Z | +| description | | +| dns_domain | | +| id | 15274353-ceae-476c-a374-dc7142a676f4 | +| ipv4_address_scope | None | +| ipv6_address_scope | None | +| is_default | False | +| is_vlan_transparent | None | +| mtu | 8942 | +| name | wiki-net | +| port_security_enabled | True | +| project_id | 4f07cc254d6c4471805d49bae1f739b9 | +| provider:network_type | None | +| provider:physical_network | None | +| provider:segmentation_id | None | +| qos_policy_id | None | +| revision_number | 1 | +| router:external | Internal | +| segments | None | +| shared | False | +| status | ACTIVE | +| subnets | | +| tags | | +| tenant_id | 4f07cc254d6c4471805d49bae1f739b9 | +| updated_at | 2023-09-10T21:10:02Z | ++---------------------------+--------------------------------------+ +``` + +Take note of the `id` that is returned for the new network + +!!! 
note + Our `id` from above is `15274353-ceae-476c-a374-dc7142a676f4` + +## Create a network subnet + +Running the below command will generate a subnet for the network that you supply the id from + +``` +openstack subnet create SUBNET_NAME --network NETWORK_ID --subnet-range IP_RANGE_CIDR +``` + +Our example command using the id from above will look like the following + +``` +openstack subnet create wiki-subnet --network 15274353-ceae-476c-a374-dc7142a676f4 --subnet-range 192.0.2.0/24 +``` + +``` { .sh .no-copy } ++----------------------+--------------------------------------+ +| Field | Value | ++----------------------+--------------------------------------+ +| allocation_pools | 192.0.2.2-192.0.2.254 | +| cidr | 192.0.2.0/24 | +| created_at | 2023-09-10T21:11:13Z | +| description | | +| dns_nameservers | | +| dns_publish_fixed_ip | False | +| enable_dhcp | True | +| gateway_ip | 192.0.2.1 | +| host_routes | | +| id | ae9277e7-0a2c-4325-8eb1-33ad86eec974 | +| ip_version | 4 | +| ipv6_address_mode | None | +| ipv6_ra_mode | None | +| name | wiki-subnet | +| network_id | 15274353-ceae-476c-a374-dc7142a676f4 | +| project_id | 4f07cc254d6c4471805d49bae1f739b9 | +| revision_number | 0 | +| segment_id | None | +| service_types | | +| subnetpool_id | None | +| tags | | +| updated_at | 2023-09-10T21:11:13Z | ++----------------------+--------------------------------------+ +``` + +## List all networks and subnets + +Running the below command will list all networks within your project + +``` +openstack network list +``` + +``` { .sh .no-copy } ++--------------------------------------+--------------------------------+--------------------------------------+ +| ID | Name | Subnets | ++--------------------------------------+--------------------------------+--------------------------------------+ +| 15274353-ceae-476c-a374-dc7142a676f4 | wiki-net | ae9277e7-0a2c-4325-8eb1-33ad86eec974 | +| 33d0c11b-b659-4b77-9afc-5676fe965839 | external | 5c2644ad-7253-42f5-ad69-40970b84dea6 | +| 
79029286-80ad-4923-a2e6-7d1216a9f2be | rally_verify_88403f86_qmojdKSJ | | +| bcfd4714-ef9c-4c0b-aa58-ad8bcc1a999e | rally_verify_51cf3f2d_mQ0taHVb | | +| d3a7ddb5-6582-42cf-978a-c99b4ed25ad4 | NeSI-Training-Test | f5715775-270c-4230-bfa7-fdbdf51352dc | +| d780f680-9640-430f-813f-dbf2128b445c | azimuth-demo | 70dc21e9-d8f4-4232-bda9-2f0a0d508105 | ++--------------------------------------+--------------------------------+--------------------------------------+ +``` + +Running the below will list all subnets within your project + +``` +openstack subnet list +``` + +``` { .sh .no-copy } ++--------------------------------------+--------------------+--------------------------------------+------------------+ +| ID | Name | Network | Subnet | ++--------------------------------------+--------------------+--------------------------------------+------------------+ +| 5c2644ad-7253-42f5-ad69-40970b84dea6 | external | 33d0c11b-b659-4b77-9afc-5676fe965839 | 163.7.177.0/24 | +| 70dc21e9-d8f4-4232-bda9-2f0a0d508105 | azimuth-demo | d780f680-9640-430f-813f-dbf2128b445c | 192.168.100.0/24 | +| ae9277e7-0a2c-4325-8eb1-33ad86eec974 | wiki-subnet | 15274353-ceae-476c-a374-dc7142a676f4 | 192.0.2.0/24 | +| f5715775-270c-4230-bfa7-fdbdf51352dc | NeSI-Training-Test | d3a7ddb5-6582-42cf-978a-c99b4ed25ad4 | 10.1.0.0/24 | ++--------------------------------------+--------------------+--------------------------------------+------------------+ +``` + +## Delete a subnet + +Run the below command to list out all subnets + +``` +openstack subnet list +``` + +``` { .sh .no-copy } ++--------------------------------------+--------------------+--------------------------------------+------------------+ +| ID | Name | Network | Subnet | ++--------------------------------------+--------------------+--------------------------------------+------------------+ +| 5c2644ad-7253-42f5-ad69-40970b84dea6 | external | 33d0c11b-b659-4b77-9afc-5676fe965839 | 163.7.177.0/24 | +| 70dc21e9-d8f4-4232-bda9-2f0a0d508105 | 
azimuth-demo | d780f680-9640-430f-813f-dbf2128b445c | 192.168.100.0/24 | +| ae9277e7-0a2c-4325-8eb1-33ad86eec974 | wiki-subnet | 15274353-ceae-476c-a374-dc7142a676f4 | 192.0.2.0/24 | +| f5715775-270c-4230-bfa7-fdbdf51352dc | NeSI-Training-Test | d3a7ddb5-6582-42cf-978a-c99b4ed25ad4 | 10.1.0.0/24 | ++--------------------------------------+--------------------+--------------------------------------+------------------+ +``` + +Take note of the subnet id that you wish to delete + +!!! note + For our example we will use `ae9277e7-0a2c-4325-8eb1-33ad86eec974` + +We then run the following command while supplying it with the id we have chosen to delete + +``` +openstack subnet delete SUBNET_ID +``` + +The server will not give a response if successful so you will need to run `openstack subnet list` to confirm its removal + +``` +openstack subnet list +``` + +Using our example id our command and list looks like the following + +``` +openstack subnet delete ae9277e7-0a2c-4325-8eb1-33ad86eec974 +``` + +``` { .sh .no-copy } ++--------------------------------------+--------------------+--------------------------------------+------------------+ +| ID | Name | Network | Subnet | ++--------------------------------------+--------------------+--------------------------------------+------------------+ +| 5c2644ad-7253-42f5-ad69-40970b84dea6 | external | 33d0c11b-b659-4b77-9afc-5676fe965839 | 163.7.177.0/24 | +| 70dc21e9-d8f4-4232-bda9-2f0a0d508105 | azimuth-demo | d780f680-9640-430f-813f-dbf2128b445c | 192.168.100.0/24 | +| f5715775-270c-4230-bfa7-fdbdf51352dc | NeSI-Training-Test | d3a7ddb5-6582-42cf-978a-c99b4ed25ad4 | 10.1.0.0/24 | ++--------------------------------------+--------------------+--------------------------------------+------------------+ +``` + +## Delete a network + +Run the below command to list all networks + +``` +openstack network list +``` + +``` { .sh .no-copy } 
++--------------------------------------+--------------------------------+--------------------------------------+ +| ID | Name | Subnets | ++--------------------------------------+--------------------------------+--------------------------------------+ +| 15274353-ceae-476c-a374-dc7142a676f4 | wiki-net | ae9277e7-0a2c-4325-8eb1-33ad86eec974 | +| 33d0c11b-b659-4b77-9afc-5676fe965839 | external | 5c2644ad-7253-42f5-ad69-40970b84dea6 | +| 79029286-80ad-4923-a2e6-7d1216a9f2be | rally_verify_88403f86_qmojdKSJ | | +| bcfd4714-ef9c-4c0b-aa58-ad8bcc1a999e | rally_verify_51cf3f2d_mQ0taHVb | | +| d3a7ddb5-6582-42cf-978a-c99b4ed25ad4 | NeSI-Training-Test | f5715775-270c-4230-bfa7-fdbdf51352dc | +| d780f680-9640-430f-813f-dbf2128b445c | azimuth-demo | 70dc21e9-d8f4-4232-bda9-2f0a0d508105 | ++--------------------------------------+--------------------------------+--------------------------------------+ +``` + +Take note of the network id we want to remove + +!!! note + For our example we will use `15274353-ceae-476c-a374-dc7142a676f4` + +Then run the below command while supplying the id for the network you wish to remove + +``` +openstack network delete NETWORK_ID +``` + +There will be no response from the server when you send the command so will need to list the networks to confirm removal. 
+ +Example command and list response + +``` +openstack network delete 15274353-ceae-476c-a374-dc7142a676f4 +``` + +``` +openstack network list +``` + +``` { .sh .no-copy } ++--------------------------------------+--------------------------------+--------------------------------------+ +| ID | Name | Subnets | ++--------------------------------------+--------------------------------+--------------------------------------+ +| 33d0c11b-b659-4b77-9afc-5676fe965839 | external | 5c2644ad-7253-42f5-ad69-40970b84dea6 | +| 79029286-80ad-4923-a2e6-7d1216a9f2be | rally_verify_88403f86_qmojdKSJ | | +| bcfd4714-ef9c-4c0b-aa58-ad8bcc1a999e | rally_verify_51cf3f2d_mQ0taHVb | | +| d3a7ddb5-6582-42cf-978a-c99b4ed25ad4 | NeSI-Training-Test | f5715775-270c-4230-bfa7-fdbdf51352dc | +| d780f680-9640-430f-813f-dbf2128b445c | azimuth-demo | 70dc21e9-d8f4-4232-bda9-2f0a0d508105 | ++--------------------------------------+--------------------------------+--------------------------------------+ +``` diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_CLI/manage-floating-ips-via-cli.md b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_CLI/manage-floating-ips-via-cli.md new file mode 100644 index 000000000..8ca912b7a --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_CLI/manage-floating-ips-via-cli.md @@ -0,0 +1,143 @@ +--- +label_names: +- instance +- launch +title: Manage Floating IPs via CLI +vote_count: 1 +vote_sum: 1 +--- + +!!! note + The openstack CLI will need to be setup to interact with the FlexiHPC system. + Please read [Setting up your CLI environment](../../setting-up-your-CLI-environment/index.md) to interact with FlexiHPC to get started. + + +Use the `openstack` commands to manage floating IP addresses. 
+ +## List floating IP address information + +To list all floating IP addresses that are allocated to the current project, run: + +``` +openstack floating ip list +``` + +``` { .sh .no-copy } ++--------------------------------------+---------------------+------------------+--------------------------------------+--------------------------------------+----------------------------------+ +| ID | Floating IP Address | Fixed IP Address | Port | Floating Network | Project | ++--------------------------------------+---------------------+------------------+--------------------------------------+--------------------------------------+----------------------------------+ +| 1c59da88-9b5c-4214-930e-8447cebd3980 | | None | None | 33d0c11b-b659-4b77-9afc-5676fe965839 | 4f07cc254d6c4471805d49bae1f739b9 | ++--------------------------------------+---------------------+------------------+--------------------------------------+--------------------------------------+----------------------------------+ +``` + +## Associate floating IP addresses + +You can assign a floating IP address to a project and to an instance. + +Run the following command to allocate a floating IP address to the current project. By default, the floating IP address is allocated from the `external` pool. 
The command outputs the allocated IP address: + +``` +openstack floating ip create external +``` + +``` { .sh .no-copy } ++---------------------+--------------------------------------+ +| Field | Value | ++---------------------+--------------------------------------+ +| created_at | 2023-07-27T01:29:31Z | +| description | | +| dns_domain | | +| dns_name | | +| fixed_ip_address | None | +| floating_ip_address | | +| floating_network_id | 33d0c11b-b659-4b77-9afc-5676fe965839 | +| id | 5c8781cd-399b-4b37-8ced-41ca4a38c128 | +| name | | +| port_details | None | +| port_id | None | +| project_id | 4f07cc254d6c4471805d49bae1f739b9 | +| qos_policy_id | None | +| revision_number | 0 | +| router_id | None | +| status | DOWN | +| subnet_id | None | +| tags | [] | +| updated_at | 2023-07-27T01:29:31Z | ++---------------------+--------------------------------------+ +``` + +List all project instances with which a floating IP address could be associated. + +``` +openstack server list +``` + +``` { .sh .no-copy } ++--------------------------------------+---------------------------------------+--------+----------------------------------------------+--------------------------+--------------------+ +| ID | Name | Status | Networks | Image | Flavor | ++--------------------------------------+---------------------------------------+--------+----------------------------------------------+--------------------------+--------------------+ +| 8b08a4fb-7372-4269-a583-9dbc91779ffe | test-instance-wiki | ACTIVE | NeSI-Training-Test=10.1.0.134 | Ubuntu-Jammy-22.04 | devtest1.2cpu2ram | ++--------------------------------------+---------------------------------------+--------+----------------------------------------------+--------------------------+--------------------+ +``` + +Note the server ID to use. + +List ports associated with the selected server. 
+
+```
+openstack port list --device-id SERVER_ID
+```
+
+``` { .sh .no-copy }
++--------------------------------------+------+-------------------+---------------------------------------------------------------------------+--------+
+| ID                                   | Name | MAC Address       | Fixed IP Addresses                                                        | Status |
++--------------------------------------+------+-------------------+---------------------------------------------------------------------------+--------+
+| 09c1ebd1-0fa0-40ec-98ef-bae2417f33ef |      | fa:16:3e:14:0c:32 | ip_address='10.1.0.134', subnet_id='f5715775-270c-4230-bfa7-fdbdf51352dc' | ACTIVE |
++--------------------------------------+------+-------------------+---------------------------------------------------------------------------+--------+
+```
+
+Note the port ID to use.
+
+Associate an IP address with an instance in the project, as follows:
+
+```
+openstack floating ip set --port PORT_ID FLOATING_IP_ADDRESS
+```
+
+For example, using the floating IP address allocated above:
+
+```
+openstack floating ip set --port 09c1ebd1-0fa0-40ec-98ef-bae2417f33ef 163.7.177.243
+```
+
+The instance is now associated with two IP addresses:
+
+```
+openstack server list
+```
+
+``` { .sh .no-copy }
++--------------------------------------+---------------------------------------+--------+----------------------------------------------+--------------------------+--------------------+
+| ID                                   | Name                                  | Status | Networks                                     | Image                    | Flavor             |
++--------------------------------------+---------------------------------------+--------+----------------------------------------------+--------------------------+--------------------+
+| 8b08a4fb-7372-4269-a583-9dbc91779ffe | test-instance-wiki                    | ACTIVE | NeSI-Training-Test=10.1.0.134, 163.7.177.243 | Ubuntu-Jammy-22.04       | devtest1.2cpu2ram  |
++--------------------------------------+---------------------------------------+--------+----------------------------------------------+--------------------------+--------------------+
+```
+
+After you associate the IP address and configure security group rules for the instance, the 
instance is publicly available at the floating IP address.
+
+## Disassociate floating IP addresses
+
+To disassociate a floating IP address from an instance (the `--port` flag takes no value; the floating IP address is the only argument):
+
+```
+openstack floating ip unset --port FLOATING_IP_ADDRESS
+```
+
+To remove the floating IP address from a project:
+
+```
+openstack floating ip delete FLOATING_IP_ADDRESS
+```
+
+The IP address is returned to the pool of IP addresses that is available for all projects. If the IP address is still associated with a running instance, it is automatically disassociated from that instance.
diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_CLI/manage-security-groups-via-cli.md b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_CLI/manage-security-groups-via-cli.md
new file mode 100644
index 000000000..9a1bfd63b
--- /dev/null
+++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_CLI/manage-security-groups-via-cli.md
@@ -0,0 +1,183 @@
+---
+label_names:
+- security-groups
+- launch
+- cli
+title: Manage Security groups via CLI
+vote_count: 1
+vote_sum: 1
+---
+
+!!! note
+    The openstack CLI will need to be setup to interact with the FlexiHPC system.
+    Please read [Setting up your CLI environment](../../setting-up-your-CLI-environment/index.md) to interact with FlexiHPC to get started.
+
+## Creating a Security Group
+
+Run the following command to create a Security Group with a specified name and description. 
+ +``` +openstack security group create --description FRIENDLY_DESCRIPTION NAME_FOR_GROUP +``` + +An example command to create a security group called `My_Wiki_SG` + +``` +openstack security group create --description "A testing group for wiki" My_Wiki_SG +``` + +We can check the security group is created by running + +``` +openstack security group list +``` + +``` { .sh .no-copy } ++--------------------------------------+--------------------------+---------------------------------------------------------+----------------------------------+------+ +| ID | Name | Description | Project | Tags | ++--------------------------------------+--------------------------+---------------------------------------------------------+----------------------------------+------+ +| 339bd140-e6a0-4afd-9b24-029c3243e779 | My_Wiki_SG | A testing group for wiki | 4f07cc254d6c4471805d49bae1f739b9 | [] | +| 7200b28f-9089-4797-a094-39f1995e6f0c | SSH Allow All | This is an open SSH that allows anyone to connect to 22 | 4f07cc254d6c4471805d49bae1f739b9 | [] | +| b5d30ed4-13b3-4f7a-bc5a-c48175566ea3 | My-Security-Group | This is my security group | 4f07cc254d6c4471805d49bae1f739b9 | [] | +| f2f15d6f-2a04-4196-8102-a058042694b3 | default | Default security group | 4f07cc254d6c4471805d49bae1f739b9 | [] | ++--------------------------------------+--------------------------+---------------------------------------------------------+----------------------------------+------+ +``` + +## Create and manage Security Group rules + +You can modify Security Group rules with the `openstack security group rule` command. 
+ +### Create new rules for a group + +Allow access from all IP addresses (specified as IP subnet `0.0.0.0/0` in CIDR notation) for the port `8080` + +``` +openstack security group rule create --proto tcp --dst-port 8080 SECURITY_GROUP_ID +``` + +The command and response looks like the following + +``` +openstack security group rule create --proto tcp --dst-port 8080 339bd140-e6a0-4afd-9b24-029c3243e779 +``` + +``` { .sh .no-copy } ++-------------------------+--------------------------------------+ +| Field | Value | ++-------------------------+--------------------------------------+ +| created_at | 2023-08-10T00:59:36Z | +| description | | +| direction | ingress | +| ether_type | IPv4 | +| id | f0bce470-8d94-453f-9dfa-3e3e34b0c80e | +| name | None | +| normalized_cidr | 0.0.0.0/0 | +| port_range_max | 8080 | +| port_range_min | 8080 | +| project_id | 4f07cc254d6c4471805d49bae1f739b9 | +| protocol | tcp | +| remote_address_group_id | None | +| remote_group_id | None | +| remote_ip_prefix | 0.0.0.0/0 | +| revision_number | 0 | +| security_group_id | 339bd140-e6a0-4afd-9b24-029c3243e779 | +| tags | [] | +| updated_at | 2023-08-10T00:59:36Z | ++-------------------------+--------------------------------------+ +``` + +If you check the rules again, you'll see the new one has been added + +``` +openstack security group rule list 339bd140-e6a0-4afd-9b24-029c3243e779 +``` + +``` { .sh .no-copy } ++--------------------------------------+-------------+-----------+-----------+------------+-----------+-----------------------+----------------------+ +| ID | IP Protocol | Ethertype | IP Range | Port Range | Direction | Remote Security Group | Remote Address Group | ++--------------------------------------+-------------+-----------+-----------+------------+-----------+-----------------------+----------------------+ +| b0f0edd2-7a55-44b4-84a8-9650de36a7ec | None | IPv6 | ::/0 | | egress | None | None | +| f0bce470-8d94-453f-9dfa-3e3e34b0c80e | tcp | IPv4 | 0.0.0.0/0 | 8080:8080 | 
ingress | None | None | +| f3925a01-5d47-4c55-ac73-1647cca5b739 | None | IPv4 | 0.0.0.0/0 | | egress | None | None | ++--------------------------------------+-------------+-----------+-----------+------------+-----------+-----------------------+----------------------+ +``` + +### Delete a Security Group rule + +First, run the following command to view all Security Groups. + +``` +openstack security group list +``` + +``` { .sh .no-copy } ++--------------------------------------+--------------------------+---------------------------------------------------------+----------------------------------+------+ +| ID | Name | Description | Project | Tags | ++--------------------------------------+--------------------------+---------------------------------------------------------+----------------------------------+------+ +| 339bd140-e6a0-4afd-9b24-029c3243e779 | My_Wiki_SG | A testing group for wiki | 4f07cc254d6c4471805d49bae1f739b9 | [] | +| 5150840c-9c27-45a9-91a1-61c5978de8ff | https | | 4f07cc254d6c4471805d49bae1f739b9 | [] | +| 7200b28f-9089-4797-a094-39f1995e6f0c | SSH Allow All | This is an open SSH that allows anyone to connect to 22 | 4f07cc254d6c4471805d49bae1f739b9 | [] | +| 8873336a-02e6-4f84-8fd8-5aa3b929f955 | hpc-toolset-docker-ports | Docker Ports used for the HPC Toolset | 4f07cc254d6c4471805d49bae1f739b9 | [] | +| b24e8bef-969a-4938-8b18-0a33769b181d | kubeapi_whitelist | | 4f07cc254d6c4471805d49bae1f739b9 | [] | +| b5d30ed4-13b3-4f7a-bc5a-c48175566ea3 | My-Security-Group | This is my security group | 4f07cc254d6c4471805d49bae1f739b9 | [] | +| f2f15d6f-2a04-4196-8102-a058042694b3 | default | Default security group | 4f07cc254d6c4471805d49bae1f739b9 | [] | ++--------------------------------------+--------------------------+---------------------------------------------------------+----------------------------------+------+ +``` + +Locate the Security Group that you wish to remove a rule from and take note of its ID + +!!! 
note + For this example we are using `339bd140-e6a0-4afd-9b24-029c3243e779` + +Running the following command will return all rules associated with that security group. + +``` +openstack security group rule list 339bd140-e6a0-4afd-9b24-029c3243e779 +``` + +``` { .sh .no-copy } ++--------------------------------------+-------------+-----------+-----------+------------+-----------+-----------------------+----------------------+ +| ID | IP Protocol | Ethertype | IP Range | Port Range | Direction | Remote Security Group | Remote Address Group | ++--------------------------------------+-------------+-----------+-----------+------------+-----------+-----------------------+----------------------+ +| b0f0edd2-7a55-44b4-84a8-9650de36a7ec | None | IPv6 | ::/0 | | egress | None | None | +| f0bce470-8d94-453f-9dfa-3e3e34b0c80e | tcp | IPv4 | 0.0.0.0/0 | 8080:8080 | ingress | None | None | +| f3925a01-5d47-4c55-ac73-1647cca5b739 | None | IPv4 | 0.0.0.0/0 | | egress | None | None | ++--------------------------------------+-------------+-----------+-----------+------------+-----------+-----------------------+----------------------+ +``` + +Take note of the Security Group Rule ID + +!!! note + For this example we will use f0bce470-8d94-453f-9dfa-3e3e34b0c80e + +To delete a rule, run the following command with the correct rule ID. 
+ +``` +openstack security group rule delete f0bce470-8d94-453f-9dfa-3e3e34b0c80e +``` + +Re-run the list command to confirm the rule has been deleted + +``` +openstack security group rule list 339bd140-e6a0-4afd-9b24-029c3243e779 +``` + +``` { .sh .no-copy } ++--------------------------------------+-------------+-----------+-----------+------------+-----------+-----------------------+----------------------+ +| ID | IP Protocol | Ethertype | IP Range | Port Range | Direction | Remote Security Group | Remote Address Group | ++--------------------------------------+-------------+-----------+-----------+------------+-----------+-----------------------+----------------------+ +| b0f0edd2-7a55-44b4-84a8-9650de36a7ec | None | IPv6 | ::/0 | | egress | None | None | +| f3925a01-5d47-4c55-ac73-1647cca5b739 | None | IPv4 | 0.0.0.0/0 | | egress | None | None | ++--------------------------------------+-------------+-----------+-----------+------------+-----------+-----------------------+----------------------+ +``` + +## Deleting a Security Group + +Run the following to delete a Security Group + +``` +openstack security group delete SECURITY_GROUP_ID +``` + +!!! warning + You cannot delete the `default` Security Group from your project. It's also not possible to delete a Security Group that is assigned to an instance. diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_dashboard/.pages.yml b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_dashboard/.pages.yml new file mode 100644 index 000000000..73ed4a085 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_dashboard/.pages.yml @@ -0,0 +1,6 @@ +nav: + - create-and-manage-networks-with-the-dashboard.md + - manage-security-groups-with-the-dashboard.md + - create-and-manage-network-ports-with-the-dashboard.md + - manage-floating-ips-via-the-dashboard.md + - ... 
diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_dashboard/create-and-manage-network-ports-with-the-dashboard.md b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_dashboard/create-and-manage-network-ports-with-the-dashboard.md new file mode 100644 index 000000000..4e8c68b6d --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_dashboard/create-and-manage-network-ports-with-the-dashboard.md @@ -0,0 +1,197 @@ +--- +hidden: false +label_names: +- networks +- create +- manage +- dashboard +position: 1 +title: Create and manage network ports with the dashboard +--- + +## Create a Network port + +Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/) + +Select the project you would like to deploy the new instance too (Use the project selector on the top left-hand side): + +
+ ![Alt text](project-selector.png) +
+ +Open the `Project` tab, open the `Network` tab, select the `Networks` category and then select the network you want to assign the network port to. + +Click on the `Ports` tab + +
+ ![Alt text](network-ports-overview.png) +
+ +Then click `Create Port` + +Within the `Create Port` dialog fill in the options + +`Name` +: A friendly name for your network port + +`Device ID` +: The ID of the instance you want to attach this to, this can be done later should you not know the ID of the instance. + +`Device Owner` +: The owner of the device, this can be added later if you don't know the device owner. + +`Specify IP address or subnet` +: You have 3 options to choose from + + `Unspecified` + : This option will assign a DHCP IP to the network port + + `Subnet` + : This option will allow you to pick the subnet within the network to attach the port to and assign a DHCP IP to it. + + `Fixed IP Address` + : This option will allow you to specify the IP address that will be assigned to the network port + +Depending on the option chosen above there will be additional settings to choose below + +`Unspecified` +: `Mac Address` + : The MAC address of the NIC you want to assign, you can leave this blank should you not know the MAC address and it will be assigned later + +`Subnet` +: `Subnet` + : This is a required field, pick the subnet that you wish the network port to be associated with + + `Mac Address` + : The MAC address of the NIC you want to assign, you can leave this blank should you not know the MAC address and it will be assigned later + +`Fixed IP Address` +: `Fixed IP Address` + : This is a required field, specify the network port IP that you want to use + + `Mac Address` + : The MAC address of the NIC you want to assign, you can leave this blank should you not know the MAC address and it will be assigned later + +After the above we have the last 2 options within the `Create Port` dialog + +`Port Security` +: This is checked by default, it enables anti-spoofing rules for the network port + +`VNIC Type` +: This is the type of Virtual Network Interface Card that is bound to the port. The default of Normal should be enough for most cases.
+ +There is also the option to assign security groups to this network port as well. The user guide [Manage Security Groups](manage-security-groups-with-the-dashboard.md) will provide more information about them. + +We then click on `Create` + +
+ ![Alt text](network-ports-create-dialog.png) +
+ +This will then create the network port so that we can assign it to an instance later on + +<figure markdown>
+ ![Alt text](network-ports-added.png) +
+ +!!! note + If you provided a Device ID during the creation of the network port then it will be assigned to the instance you provided + +## Assigning a network port to an instance + +Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/) + +Select the project you would like to deploy the new instance too (Use the project selector on the top left-hand side): + +
+ ![Alt text](project-selector.png) +
+ +Open the `Project` tab, open the `Compute` tab and select `Instances` + +
+ ![Alt text](compute-overview.png) +
+ +Under the `Actions` dropdown select `Attach Interface` + +
+ ![Alt text](instance-action-menu.png) +
+ +Within the `Attach Interface` dialog we have a few options to attach the network port. We can create a new one here or attach a previously created one. + +The first option is to create a new one from within the dialog, so we pick the option `by Network (and IP Address)` within `The way to specify an Interface` + +`Network` +: This is a required field, pick the network you wish for this port to be assigned to + +`IP Address` +: Leaving this blank will assign an IP address from the DHCP, you can also specify the IP Address you wish for this network port to use + +The second option is to assign a previously created network port, so we pick the option `by Port` within `The way to specify an Interface` + +`Port` +: This is a required field, pick the previously created network port from the options provided. + +Click `Attach Interface` + +The instance should now have the new network port attached + +<figure markdown>
+ ![Alt text](compute-network-port-attached.png) +
+ +## Detach a network port from an instance + +Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/) + +Select the project you would like to deploy the new instance too (Use the project selector on the top left-hand side): + +
+ ![Alt text](project-selector.png) +
+ +Open the `Project` tab, open the `Compute` tab and select `Instances` + +
+ ![Alt text](compute-overview.png) +
+ +Under the `Actions` dropdown select `Detach Interface` + +
+ ![Alt text](instance-action-menu.png) +
+ +Within the `Detach Interface` dialog select the IP address for the Network port you wish to detach + +Click `Detach Interface` + +
+ ![Alt text](compute-network-port-removed.png) +
+ +## Delete a network port + +Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/) + +Select the project you would like to deploy the new instance too (Use the project selector on the top left-hand side): + +
+ ![Alt text](project-selector.png) +
+ +Open the `Project` tab, open the `Network` tab, select the `Networks` category and then select the network that contains the network port you want to delete. + +Click on the `Ports` tab + +<figure markdown>
+ ![Alt text](network-ports-overview-selected.png) +
+ +Select the port you wish to delete with the check box and the `Delete Port` option should become solid. + +Within the `Delete Port` dialog ensure you are deleting the correct one + +Click `Delete Ports` diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_dashboard/create-and-manage-networks-with-the-dashboard.md b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_dashboard/create-and-manage-networks-with-the-dashboard.md new file mode 100644 index 000000000..91b9cfc47 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_dashboard/create-and-manage-networks-with-the-dashboard.md @@ -0,0 +1,114 @@ +--- +hidden: false +label_names: +- networks +- create +- manage +- dashboard +position: 1 +title: Create and manage networks via the dashboard +--- + +## Create a Network + +Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/) + +Select the project you would like to deploy the new instance to (Use the project selector on the top left-hand side): + +<figure markdown>
+ ![Alt text](project-selector.png) +
+ +Open the `Project` tab, open the `Network` tab and select the `Networks` category + +
+ ![Alt text](networks-overview.png) +
+ +Click `Create Network` on the right hand side + +Within the `Create Network > Network` dialog you have the following options. + +`Network Name` +: A friendly name for your network + +`Enable Admin State` +: If checked this will enable the network, by default leave this set to true + +`Create Subnet` +: If checked this will enable the creation of a subnet, by default leave this set to true + +`Availability Zone Hints` +: Availability zones where the DHCP agents can be scheduled. Leaving this unset is the same as selecting all Availability zones. As FlexiHPC only has 1 zone nova this value can be left unset + +`MTU` +: Maximum Transmission Unit. An MTU is the largest size frame or packet -- in bytes or octets (eight-bit bytes) -- that can be transmitted across a data link. Leaving this unset is the default unless you know what you are doing here. + +Click `Next` + +Within the `Create Network > Subnet dialog` you have the following options + +`Subnet Name` +: A friendly name for your subnet + +`Network Address` +: The network address in CIDR format e.g. 192.168.0.0/24 if its IPv4 + +`IP Version` +: If your Network Address is an IPv4 then pick that otherwise pick IPv6. IPv4 is generally the default + +`Gateway IP` +: IP address of the gateway is generally the first IP of the network address, from our Network Address example above it would be 192.168.0.1. To use the default leave this blank. + +`Disable Gateway` +: Should you not want a gateway then check this. By default you will want your network to have a gateway. + +Click `Next` + +Within the `Create Network > Subnet Details` dialog you have the following options + +`Enable DHCP` +: This allows the network to auto assign an IP when a compute instance is attached to it + +`Allocation Pools` +: The IP’s you would like to allocate to compute instances when they are attached. The format should be start_ip_address,end_ip_address using our Network Address example it would be 192.168.0.20,192.168.0.50. 
Leave blank if you want to use any IP addresses from the specified Network Address + +`DNS Name Servers` +: Should you wish to use different name servers than the ones FlexiHPC uses please enter them here, one per line. + +`Host Routes` +: Additional Routes announced to the hosts. The format should be destination_cidr,nexthop. This is used should you wish to have separate networks that want to communicate with each other. + +Click `Create` and it should start to create your new network + +<figure markdown>
+ ![Alt text](specific-network-view.png) +
+ +## Delete a network + +Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/) + +Select the project you would like to deploy the new instance too (Use the project selector on the top left-hand side): + +
+ ![Alt text](project-selector.png) +
+ +Open the `Project` tab, open the `Network` tab and select the `Networks` category + +Select the network you wish to delete with the check box + +
+ ![Alt text](networks-overview-selected.png) +
+ +The `Delete Network` button should become a solid red + +Click `Delete Network` + +Within the `Confirm Delete Network` dialog ensure you have selected the correct network that is displayed. + +Click `Delete Networks` + +The network should now be deleted diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_dashboard/manage-floating-ips-via-the-dashboard.md b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_dashboard/manage-floating-ips-via-the-dashboard.md new file mode 100644 index 000000000..22bb6c0dd --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_dashboard/manage-floating-ips-via-the-dashboard.md @@ -0,0 +1,71 @@ +--- +hidden: false +label_names: +- instance +- launch +position: 2 +title: Manage Floating IPs via the Dashboard +vote_count: 1 +vote_sum: 1 +--- + +## Assign Floating IP address + +Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/) + +Select the project you would like to deploy the new instance too (Use the project selector on the top left-hand side): + +
+ ![Alt text](project-selector.png) +
+ +Open the `Project` tab, open the `Network` tab and select `Floating IPs` + +Click `Allocate IP to Project` + +Within the `Allocate Floating IP` dialog you have the following options + +`Pool` +: The pool that the floating ip should be allocated from. There is only external currently so is set as the default. + +`Description` +: A friendly description for what this IP is used for + +`DNS Domain` +: TODO: Confirm with Sean what this means + +`DNS Name` +: TODO: Confirm with Sean what this means + +!!! note + The default settings are fine should you not wish to configure anything further. + +Click `Allocate IP` + +
+ ![Alt text](floating-ips.png) +
+ +Under `Actions` click `Associate` + +Within the `Managing Floating IP Associations` dialog you want to ensure the `IP Address` is the one you wish to assign, and under the `Ports to be associated` select the compute instance you wish to assign the IP to. + +Click `Associate` + +## Un-assign Floating IP address + +Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/) + +Select the project you would like to deploy the new instance to (Use the project selector on the top left-hand side): + +<figure markdown>
+ ![Alt text](project-selector.png) +
+ +Open the `Project` tab, open the `Network` tab and select `Floating IPs` + +Under `Actions` click `Disassociate` + +Within the `Confirm Disassociate` dialog confirm the IP you are disassociating + +Click `Disassociate` diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_dashboard/manage-security-groups-with-the-dashboard.md b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_dashboard/manage-security-groups-with-the-dashboard.md new file mode 100644 index 000000000..29ecdf1b4 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-networks/with_the_dashboard/manage-security-groups-with-the-dashboard.md @@ -0,0 +1,144 @@ +--- +hidden: false +label_names: +- security-groups +- create +- manage +- dashboard +position: 1 +title: Create and manage Security groups with the dashboard +--- + +## The Default security group + +Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/) + +Select the project you would like to deploy the new instance to (Use the project selector on the top left-hand side): + +<figure markdown>
+ ![Alt text](project-selector.png) +
+ +Open the `Project` tab, open the `Network` tab and select the `Security Groups` category + +!!! note + We recommend that no new rules are added to the `default` security group to keep things clean and tidy. We also recommend that no rules be removed from this group. + +<figure markdown>
+ ![Alt text](default-security-group-rules.png) +
+ +There are 2 Egress and 2 Ingress rules, each one is associated with either IPv4 or IPv6. + +The 2 Egress rules allow the resources within the FlexiHPC project to communicate with the outside world. + +The 2 Ingress rules allow any other resource within the FlexiHPC project that has this security group associated with it to communicate with each other. + +## Creating a new Security Group + +First start by clicking on `Create Security Group` + +<figure markdown>
+ ![Alt text](security-groups-overview.png) +
+ +Within the `Create Security Group` dialog you have the following options. + +`Name` +: A friendly name for your security group, this field is required. + +`Description` +: A friendly description to identify what this is used for + +Once those have been filled out hit the `Create Security Group` button and that will take you to the newly created security group + +<figure markdown>
+ ![Alt text](new-security-group-rules.png) +
+ +## Updating a Security Groups Rules + +We have a few ways of adding new rules to the security groups, either with predefined rules or customs rules. + +### Using Predefined Rules + +Find the security group that you would like to update the rules for and click `Manage Rules` under the `Action` column + +
+ ![Alt text](security-group-manage-rules.png) +
+ +Once in the security group we want to click on `Add Rule` on the top right + +
+ ![Alt text](security-group-add-rule.png) +
+ +We are presented with the following dialog that allows us to add new rules based on the direction(Ingress/Egress) that we want to apply the rule. Think of these as Firewall Rules + +
+ ![Alt text](security-group-add-rule-dialog.png) +
+ +There are already a few predefined rules that are ready to use if you so choose + +<figure markdown>
+ ![Alt text](security-group-predefined-rules.png) +
+ +For this example we will create an `SSH` rule + +
+ ![Alt text](security-group-predefined-rule-example.png) +
+ +We have specified the `SSH` Rule, given it a description so it explains what it is for other users, we have chosen `CIDR` as the remote and left the `CIDR` as `Allow All`. + +!!! info + If we changed that CIDR to 192.168.0.0/16 then only machines that reside within that IP Range will be able to pass through this Rule. + + For the above example we want to be able to SSH to a Compute Service within the FlexiHPC space however the IP that we would be connecting from would be our Public IP, therefore using the above CIDR would actually block our attempts to SSH to the compute service that has this rule applied. + + If you wish to restrict the Rule to only your Public IP then search "what's my IP" and use the IP that is provided prefixed with /32 + +Clicking `Add` will update the Security group with the newly created rule + +<figure markdown>
+ ![Alt text](security-group-add-rule-new.png) +
+ +### Using Custom Rules + +Find the security group that you would like to update the rules for and click `Manage Rules` under the `Action` column + +
+ ![Alt text](security-group-manage-rules.png) +
+ +Once in the security group we want to click on `Add Rule` on the top right + +
+ ![Alt text](security-group-add-rule.png) +
+ +We are presented with the following dialog that allows us to add new rules based on the direction(Ingress/Egress) that we want to apply the rule. Think of these as Firewall Rules + +
+ ![Alt text](security-group-add-rule-dialog.png) +
+ +For this Example rule we will allow port `6443`. + +So the things we need to ensure are Rule has been set to `Custom TCP Rule`, The Direction is `Ingress` as we are allowing the port into the FlexiHPC space and the Port we would like to allow, in this case `6443`. + +We will also leave Remote as `CIDR` and the `CIDR` as allow all, as denoted by `0.0.0.0/0` + +
+ ![Alt text](security-group-add-rule-custom-example.png) +
+ +We click `Add` and our rule is now added to the security group + +
+ ![Alt text](security-group-add-custom-rule-new.png) +
diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-object-storage/.pages.yml b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-object-storage/.pages.yml new file mode 100644 index 000000000..3b614aa9b --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-object-storage/.pages.yml @@ -0,0 +1,7 @@ +nav: + - with_the_dashboard + - with_the_CLI + - creating-and-managing-ec2-credentials-via-cli.md + - using-boto3-to-interact-with-object-storage.md + - using-cyberduck-to-interact-with-object-storage.md + - ... diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-object-storage/creating-and-managing-ec2-credentials-via-cli.md b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-object-storage/creating-and-managing-ec2-credentials-via-cli.md new file mode 100644 index 000000000..c3128af78 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-object-storage/creating-and-managing-ec2-credentials-via-cli.md @@ -0,0 +1,63 @@ +--- +hidden: false +label_names: +- identity +- create +- manage +- cli +title: Creating and Managing EC2 Credentials via CLI +--- + +## Overview + +For using the OpenStack S3 API:s you need to generate an additional set of credentials. These can then be used to store data in the Swift Object store for applications that don’t have native Swift support but do support the S3 interfaces. + +!!! note + The openstack CLI will need to be setup to interact with the FlexiHPC system. Please read [Setting up your CLI environment](../setting-up-your-CLI-environment/index.md) to interact with FlexiHPC to get started. 
+ +## Create and fetch credentials using the CLI + +Running the below command will generate EC2 credentials + +``` { .sh } +openstack ec2 credentials create +``` + +With the following ouput + +``` { .sh .no-copy } ++------------+----------------------------------------------------------------------------------------------------------------+ +| Field | Value | ++------------+----------------------------------------------------------------------------------------------------------------+ +| access | | +| links | {'self': 'https://keystone.akl-1.cloud.nesi.org.nz/v3/users//credentials/OS-EC2/'} | +| project_id | | +| secret | | +| trust_id | None | +| user_id | | ++------------+----------------------------------------------------------------------------------------------------------------+ +``` + +Note the `access` field and the `secret` field. These are the 2 fields required to interact with the s3 protocol. + +The below command will fetch all EC2 credentails associated with the user + +``` { .sh } +openstack ec2 credentials list +``` + +``` { .sh .no-copy } ++---------------------+----------------------+--------------+-----------+ +| Access | Secret | Project ID | User ID | ++---------------------+----------------------+--------------+-----------+ +| | | | | ++---------------------+----------------------+--------------+-----------+ +``` + +## Delete credentials using the CLI + +Use the access key to refer to the credentials you wish to delete: + +``` { .sh } +openstack ec2 credentials delete USER_ACCESS_TOKEN +``` diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-object-storage/index.md b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-object-storage/index.md new file mode 100644 index 000000000..e5b53aa92 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-object-storage/index.md @@ -0,0 +1,35 @@ +--- +hidden: false +label_names: +- Object Storage +- create +- manage +position: 1 +title: Create 
and Manage Object Storage +vote_count: 1 +vote_sum: 1 +--- + +Object Storage within FlexiHPC is a file system storage that is primarily used for storing static data. Unlike other storage systems, object storage is independent of virtual machines and operating system type. + +You are able to upload and download files from anywhere with network access using a few methods. + +The general use case for object storage is storing data that you upload once and read or download many times. It's not the best for files that are being modified consistently. + +There are a few ways to create and manage object storage within FlexiHPC + +- [Creating and Managing object storage via the Dashboard](with_the_dashboard/create-and-manage-object-storage-with-the-dashboard.md) + +- [Creating and Managing object storage via CLI](with_the_CLI/create-and-manage-object-storage-via-cli.md) + +If you are looking to interact with the S3 protocol then you need to also generate yourself some EC2 credentials + +- [Creating and Managing EC2 credentials](creating-and-managing-ec2-credentials-via-cli.md) + +Interacting with the FlexiHPC Object storage can be done a few ways outside of the dashboard.
+ +- [Interacting with the S3 protocol with Boto3](using-boto3-to-interact-with-object-storage.md) + +- [Using FlexiHPC object storage for Terraform state file](../launch-and-manage-instances/other_tools/deployment-of-an-instance-with-terraform.md#using-flexihpc-object-storage-to-store-the-terraform-state-file) + +- [Accessing object storage with Cyberduck](using-cyberduck-to-interact-with-object-storage.md) diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-object-storage/using-boto3-to-interact-with-object-storage.md b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-object-storage/using-boto3-to-interact-with-object-storage.md new file mode 100644 index 000000000..d19dac8b0 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-object-storage/using-boto3-to-interact-with-object-storage.md @@ -0,0 +1,111 @@ +## Interacting with the S3 protocol with Boto3 + +!!! note + Prior to starting this you will need to have read [Setting up your CLI environment](../setting-up-your-CLI-environment/index.md) and ran the commands to generate [EC2 Credentials](creating-and-managing-ec2-credentials-via-cli.md) + + Boto3 documentation can be found [here](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html) + +Since FlexiHPC object storage has the S3 protocol built on top of it you are able to use the python boto3 client to interact with it. + +When doing python development it is recommend that you do so within a python venv. 
As this article wont be covering what a python venv is please have a read of the the following [documentation](https://docs.python.org/3/library/venv.html#) on the python website + +Ensure your in a clean folder, for this example we will be in a new folder called `FLEXIHPC.Boto3.Example` + +Once inside that folder we will make a python venv by ruining the below command + +``` { .sh } +python3 -m venv boto3-dev +``` + +Once that has completed setting up the venv we want to activate that + +``` { .sh } +. boto3-dev/bin/activate +``` + +We then need to bring in the boto3 module for python + +``` { .sh } +pip3 install boto3 +``` + +This will also bring in any other required modules. + +Create a file called `main.py` and add the following to that file + +``` { .py } +import boto3 +import botocore + +#boto3.set_stream_logger(name='botocore') # this enables debug tracing +session = boto3.session.Session() +s3_client = session.client( + service_name='s3', + aws_access_key_id='EC2_ACCESS_TOKEN', + aws_secret_access_key='EC2_SECRET_TOKEN', + endpoint_url='https://object.akl-1.cloud.nesi.org.nz/', + config=botocore.client.Config(signature_version='s3v4') +) + +#List all buckets in the project +bucketsResponse = s3_client.list_buckets() + +print('Buckets:') +for bucket in bucketsResponse['Buckets']: + print(f' {bucket["Name"]}') +``` + +You will need to change `EC2_ACCESS_TOKEN` and `EC2_SECRET_TOKEN` to the ones that were generated when you ran the commands to generate [EC2 Credentials](../create-and-manage-identity/index.md) + +Save the file and call it using the python command + +``` { .sh } +python3 main.py +``` + +The output should look similar to the below + +``` { .sh .no-copy} +Buckets: + boto3-test + terraform-state +``` + +## Uploading a file to object storage + +You will need to know the name of the container you wish to upload the file too. 
You can either get this from the [dashboard](with_the_dashboard/create-and-manage-object-storage-with-the-dashboard.md) or [CLI](with_the_CLI/create-and-manage-object-storage-via-cli.md) + +We then use the below code to upload a file to the container we specify + +``` +s3_client.upload_file(, , ) +``` + +`` +: The local location of the file you wish to upload to FlexiHPC object storage + +`` +: The container name within FlexiHPC, example would be boto3-test + +`` +: The name of the file as it would be presented on the FlexiHPC object storage + +## Downloading a file from object storage + +You will need to know the name of the container you wish to download the file from. You can either get this from the [dashboard](with_the_dashboard/create-and-manage-object-storage-with-the-dashboard.md) or [CLI](with_the_CLI/create-and-manage-object-storage-via-cli.md) + +We use the below code to download a file from the container we specify + +``` +with open('', 'wb') as data: + s3_client.download_fileobj('', '', data) +``` + +`` +: The name of the file as it would be presented on the FlexiHPC object storage + +`` +: The container name within FlexiHPC, example would be boto3-test + +`` +: This is generally the `` however if its inside a folder then it might have that appended to the file name diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-object-storage/using-cyberduck-to-interact-with-object-storage.md b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-object-storage/using-cyberduck-to-interact-with-object-storage.md new file mode 100644 index 000000000..8bd460c61 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-object-storage/using-cyberduck-to-interact-with-object-storage.md @@ -0,0 +1,45 @@ +## Accessing object storage with Cyberduck + +!!! 
note + You will need [EC2 credentials](creating-and-managing-ec2-credentials-via-cli.md) to use Cyberduck + +Cyberduck is a libre server and cloud storage browser for Mac and Windows with support for FTP, SFTP, WebDAV, Amazon S3, OpenStack Swift, Backblaze B2, Microsoft Azure & OneDrive, Google Drive and Dropbox. + +## Installation + +Cyberduck can be downloaded and installed from the [Cyberduck website](https://cyberduck.io/). You can also get it from the Windows Store or the Apple Mac App Store. Instructions for installing can be found at the respective locations. + +## Connecting using Cyberduck + +Once Cyberduck is installed you will want to start it and click on `Open Connection` + +
+ ![Alt text](cyberduck-overview.png) +
+ +Within the `Open Connection` dialog you will want to ensure that the dropdown has selected `Amazon S3` + +
+ ![Alt text](cyberduck-connection-dialog.png) +
+ +We then need to update the details to interact and auth with FlexiHPC + +`Server` +: Server should be updated to `object.akl-1.cloud.nesi.org.nz` and the port should be 443 + +`Access Key ID` +: This should be the EC2 Credentials Access key token + +`Secret Access Key` +: This should be the EC2 Credentials Secret key token + +
+ ![Alt text](cyberduck-connection-dialog-rdc.png) +
+ +Click on `Connect` to open the connection + +
+ ![Alt text](cyberduck-container-view.png) +
diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-object-storage/with_the_CLI/create-and-manage-object-storage-via-cli.md b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-object-storage/with_the_CLI/create-and-manage-object-storage-via-cli.md new file mode 100644 index 000000000..fc2235b25 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-object-storage/with_the_CLI/create-and-manage-object-storage-via-cli.md @@ -0,0 +1,134 @@ +--- +hidden: false +label_names: +- object storage +- create +- manage +- cli +position: 1 +title: Create and manage object storage via CLI +--- + +!!! note + The openstack CLI will need to be setup to interact with the FlexiHPC system. Please read [Setting up your CLI environment](../../setting-up-your-CLI-environment/index.md) to interact with FlexiHPC to get started. + +## Create new container + +Running the below command will generate a new container within the projects object storage + +``` { .sh } +openstack container create +``` + +You will get a response from the server that looks like the following + + +``` { .sh .no-copy } ++-------------+------------------+-----------------------------+ +| account | container | x-trans-id | ++-------------+------------------+-----------------------------+ +| AUTH_ | | tx00000-akl-1 | ++-------------+------------------+-----------------------------+ +``` + +## List containers + +Running the below command will list all containers within the projects object storage + +``` { .sh } +openstack container list +``` +``` { .sh .no-copy } ++------------------+ +| Name | ++------------------+ +| a-test-container | +| boto3-test | +| cli-container | +| terraform-state | ++------------------+ +``` + +The command `openstack container list` also has some additional parameters + +`--prefix ` +: Filter the list using a prefix, example if we use the prefix `a` then the only container returned would the `a-test-container` + +`--marker ` +: Start 
anchor for paging is used when you wish to return a specified list of containers should you have a lot of them + +`--end-marker ` +: End anchor for paging + +`--limit ` +: Limit the number of containers returned + +`--long` +: List additional fields in output that contain the amount of space used and number of files inside the container + +`--all` +: List all containers (default is 10000) + +## Display container details + +Running the below command will display additional details about the container specified + +``` { .sh } +openstack container show CONTAINER_NAME +``` +``` { .sh .no-copy } ++----------------+---------------------------------------+ +| Field | Value | ++----------------+---------------------------------------+ +| account | AUTH_ | +| bytes_used | 0 | +| container | | +| object_count | 0 | +| storage_policy | default-placement | ++----------------+---------------------------------------+ +``` + +## Save container contents local + +Running the below command will save all the container contents to your local directory where you run the command + +``` { .sh } +openstack container save CONTAINER_NAME +``` + +## Delete container + +Run the `openstack container list` command first to get the name of the container you wish to delete + +``` { .sh } +openstack container list +``` +``` { .sh .no-copy } ++------------------+ +| Name | ++------------------+ +| a-test-container | +| boto3-test | +| cli-container | +| terraform-state | ++------------------+ +``` + +Then run the below command to delete the container you wish to remove + +``` { .sh } +openstack container delete CONTAINER_NAME +``` + +Your container should then be removed, however should you container contain any files you will get the following error + +``` { .sh .no-copy} +openstack container delete a-test-container +Conflict (HTTP 409) (Request-ID: tx00000a9dff65235cbe523-0064dadec9-a09387f-akl-1) +``` + +Supplying the `--recursive, -r` parameter will delete all files within that container before 
deleting it + +``` { .sh } +openstack container delete --recursive +``` diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-object-storage/with_the_dashboard/create-and-manage-object-storage-with-the-dashboard.md b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-object-storage/with_the_dashboard/create-and-manage-object-storage-with-the-dashboard.md new file mode 100644 index 000000000..54d7739e5 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-object-storage/with_the_dashboard/create-and-manage-object-storage-with-the-dashboard.md @@ -0,0 +1,129 @@ +--- +hidden: false +label_names: +- object storage +- create +- manage +- dashboard +position: 1 +title: Create and manage object storage via the dashboard +--- + +## Creating an object storage container + +Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/) + +Select the project you would like to deploy the new instance too (Use the project selector on the top left-hand side): + +
+ ![Alt text](project-selector.png) +
+ +Open the `Project` tab, open the `Object Storage` tab and select the `Containers` category + +Click `+ Container`. + +
+ ![Alt text](object-storage-overview.png) +
+ +Within the `Create Container` dialog you have a few options + +`Container Name` +: A friendly name for your container. It must not contain “/” in its name. + +`Storage Policy` +: This defaults to default-placement and is the only option available at this time. + +`Container Access` +: You have a choice between `public` or `not-public` + + A `Public` container will allow anyone with the public URL to gain access to your objects in the container + +Once you have given the container a name and decided if it's public or private, click `Submit` to create the container. + +## Deleting an object storage container + +!!! note + You are not able to delete a container if there are items present within it. Please delete all items before attempting to delete the container. + +Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/) + +Select the project you would like to deploy the new instance to (Use the project selector on the top left-hand side): + +
+ ![Alt text](project-selector.png) +
+ +Open the `Project` tab, open the `Object Storage` tab and select the `Containers` category + +Select the container you would like to delete and it should highlight with blue + +
+ ![Alt text](object-storage-container-overview.png) +
+ +Hovering over the `Trashcan Icon` should show a tooltip that says *Delete Container* and clicking it should present a `Confirm Delete` dialog. + +If you are certain that you wish to delete the container click `Delete` and the container should be removed + +## Upload a file to a storage object container + +!!! note + Files uploaded into a FlexiHPC storage via the dashboard are saved as a binary/octet-stream which means they are only downloaded when they are requested via the URL. + + There is also an issue where the public URL provided via the dashboard doesn't auth correctly so you are unable to view/download files. + + The URL that currently works should you wish to view/download a file from object storage looks like the following https://object.akl-1.cloud.nesi.org.nz/*CONTAINER_NAME*/*FILE_NAME* + +Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/) + +Select the project you would like to deploy the new instance too (Use the project selector on the top left-hand side): + +
+ ![Alt text](project-selector.png) +
+ +Open the `Project` tab, open the `Object Storage` tab and select the `Containers` category + +Select the container you would like to upload a file to and it should highlight with blue + +
+ ![Alt text](object-storage-container-overview.png) +
+ +On the far right there should be 3 icons, `Upload File` `+ Folder` and `Delete Item` + +Click the `Upload File` icon and within the `Upload File` dialog clicking `Choose File` should allow you to browse to the file on your local machine. You are also able to give it a different name should you so choose. + +
+ ![Alt text](object-storage-upload-dialog.png) +
+ +Clicking `Upload File` will now begin to upload that file to the object storage container. The time it takes to complete the upload will depend on the file size and the network upload speed. + +## Create a folder like structure in an object storage container + +We say folder-like structure as that is what it looks like from the dashboard's perspective; however, the underlying structure is flat. + +Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/) + +Select the project you would like to deploy the new instance to (Use the project selector on the top left-hand side): + +
+ ![Alt text](project-selector.png) +
+ +Open the `Project` tab, open the `Object Storage` tab and select the `Containers` category + +Select the container you would like to create the folder in and it should highlight with blue + +
+ ![Alt text](object-storage-container-overview.png) +
+ +On the far right there should be 3 icons, `Upload File` `+ Folder` and `Delete Item` + +Clicking `+ Folder` will present you with the `Create Folder` dialog. + +Fill in the *Folder Name* and click `Create Folder` diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-volumes/.pages.yml b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-volumes/.pages.yml new file mode 100644 index 000000000..ac16daa3b --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-volumes/.pages.yml @@ -0,0 +1,5 @@ +nav: + - with_the_dashboard + - with_the_cli + - format-and-mount-volumes.md + - ... diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-volumes/format-and-mount-volumes.md b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-volumes/format-and-mount-volumes.md new file mode 100644 index 000000000..f312f4223 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-volumes/format-and-mount-volumes.md @@ -0,0 +1,158 @@ +--- +hidden: false +label_names: +- volumes +- create +- manage +- dashboard +position: 1 +title: Format and Mount volumes +--- + +# Format and Mount a Volume + +!!! warning + Formatting a device/volume erases all existing data on a device, if a file system already exists on the target device/volume. If you need to retain the data on your volume, you should skip to the mount section below. + +!!! note + This guide is based on a linux and is using Ubuntu as the Linux distro, however these commands are generic for most linux distros + +You can verify that the volume is attached to your instance via the NeSI RDC dashboard under `Project` > `Volumes` > `Volumes` + +
+ ![Alt text](volume-attached.png) +
+ +Connect to the instance with the attached Volume and using the command `lsblk` it should have an output similar to the following + +``` { .sh .no-copy } +$ lsblk +NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS +loop0 7:0 0 40.8M 1 loop /snap/snapd/20092 +loop1 7:1 0 111.9M 1 loop /snap/lxd/24322 +loop2 7:2 0 63.5M 1 loop /snap/core20/2015 +loop3 7:3 0 0B 0 loop +loop4 7:4 0 0B 0 loop +loop5 7:5 0 0B 0 loop +loop6 7:6 0 0B 0 loop +loop7 7:7 0 0B 0 loop +sr0 11:0 1 482K 0 rom +vda 252:0 0 80G 0 disk +└─vda1 252:1 0 80G 0 part / +vdb 252:16 0 10G 0 disk +``` + +If we reference that output with the dashboard we can see the following drives: + +``` { .sh .no-copy } +vda = my_ubuntu_volume +vdb = test-volume +``` + +## Format a volume + +Depending on how you created your volume, it may not have a file system and you need to create one before mounting, i.e. format the device. The exact format command syntax is dependent on the virtual machine’s operating system and the type of file system you need. The example below formats the volume attached as `/dev/vdb` in the Ubuntu-based instance using the `ext4` filesystem. + +``` +sudo mkfs -t ext4 /dev/vdb +``` + +!!! warning + Ensure that your drive path is the drive that you wish to format, picking the wrong one my result in data loss + +To make your volume/device available to the operating system you need to mount it on a directory called a mount point. You can mount your device using an in-memory-only mount, but the mount will be lost upon rebooting your instance. We recommend you configure the mounting of your device/volume filesystem persistently using the configuration file `/etc/fstab`. In both examples we will create a mount point called `/mnt-vol`. + +## In memory only mount + +You can use below commands to create a mount point called `/mnt-vol` and to mount the device `/dev/vdb` at that mount point. + +``` +sudo mkdir /mnt-vol +sudo mount /dev/vdb /mnt-vol -t auto +``` + +!!! 
note + On reboot this mount will be lost and the `mount` command will need to be run again + +## Using /etc/fstab + +To ensure that your Volume is remounted after a reboot of your instance, you should configure it in the file `/etc/fstab`. + +First create the mount point `/mnt-vol` using: + +``` { .sh } +sudo mkdir /mnt-vol +``` + +When we mount drives in the `fstab` file it's highly recommended that we do so using the drive's UUID. We can find the UUID of the drive a few ways. We will use the command `blkid` + +``` { .sh } +sudo blkid /dev/vdb +``` + +Sample output: + +``` { .sh .no-copy } +$ sudo blkid /dev/vdb +/dev/vdb: UUID="238c1032-4fcb-4dd7-86a2-957d3fc10201" BLOCK_SIZE="4096" TYPE="ext4" +``` + +Then use a text editor to open the `/etc/fstab` file. You can do this with the command below. We are using the nano text editor in this example but you can use whichever text editor you prefer, just replace nano with the name of the text editor (Vim etc). + +``` { .sh } +sudo nano /etc/fstab +``` + +You can then add the following line to `/etc/fstab`. The `/dev/vdb` is the device you’re mounting and `/mnt-vol` is its target mount point. + +The syntax that we want to use looks like the following + +``` +UUID={YOUR-UUID} {/path/to/mount/point} {file-system-type} defaults,nofail 0 2 +``` + +Using our example UUID from the output of the command `blkid` it should look like this + +``` +UUID=238c1032-4fcb-4dd7-86a2-957d3fc10201 /mnt-vol ext4 defaults,nofail 0 2 +``` + +This is appended to the `/etc/fstab` file. + +The example `fstab` file looks like this + +``` { .sh .no-copy } +LABEL=cloudimg-rootfs / ext4 defaults 0 1 +UUID=238c1032-4fcb-4dd7-86a2-957d3fc10201 /mnt-vol ext4 defaults,nofail 0 2 +``` + +Save that file and after adjusting the `/etc/fstab` file you need to initiate any changes. 
+ +Use the mount all command: + +``` { .sh } +sudo mount --all +``` + +We can verify the mounted volume by running the command `lsblk` again + +``` { .sh .no-copy } +$ lsblk +NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS +loop0 7:0 0 40.8M 1 loop /snap/snapd/20092 +loop1 7:1 0 111.9M 1 loop /snap/lxd/24322 +loop2 7:2 0 63.5M 1 loop /snap/core20/2015 +sr0 11:0 1 482K 0 rom +vda 252:0 0 80G 0 disk +└─vda1 252:1 0 80G 0 part / +vdb 252:16 0 10G 0 disk /mnt-vol +``` + +We can see that the volume `vdb` is mounted at the path `/mnt-vol` + +You may have to change ownership or write privileges to enable writing to the ephemeral storage, using chown, chgrp or chmod, e.g. + +``` +sudo chown ubuntu:ubuntu /mnt-vol +``` + diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-volumes/index.md b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-volumes/index.md new file mode 100644 index 000000000..611b2a3e2 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-volumes/index.md @@ -0,0 +1,19 @@ +--- +hidden: false +label_names: +- volumes +- create +- manage +position: 1 +title: Create and manage volumes +vote_count: 1 +vote_sum: 1 +--- + +Volumes are block storage devices that you attach to instances to enable persistent storage. You can attach a volume to a running instance or detach a volume and attach it to another instance at any time. You can also create a snapshot from or delete a volume. 
+ +Volumes can be created a few ways + +- Create and manage volumes via the dashboard + +- Create and manage volumes via CLI \ No newline at end of file diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-volumes/with_the_cli/create-and-manage-volumes-via-cli.md b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-volumes/with_the_cli/create-and-manage-volumes-via-cli.md new file mode 100644 index 000000000..d0d2e04ab --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-volumes/with_the_cli/create-and-manage-volumes-via-cli.md @@ -0,0 +1,218 @@ +--- +hidden: false +label_names: +- volumes +- create +- manage +- cli +position: 1 +title: Create and manage volumes via CLI +--- + +!!! note + The openstack CLI will need to be setup to interact with the FlexiHPC system. Please read [Setting up your CLI environment](../../setting-up-your-CLI-environment/index.md) to interact with FlexiHPC to get started. + +## Create an empty volume + +Running the following command will create a volume with 8 gibibytes (GiB) of space, and specify the availability zone. 
This is creates an empty volume that does not contain a file system or a partition table: + +``` +openstack volume create --size 8 --availability-zone nova my-new-volume +``` + +``` { .sh .no-copy } ++---------------------+--------------------------------------+ +| Field | Value | ++---------------------+--------------------------------------+ +| attachments | [] | +| availability_zone | nova | +| bootable | false | +| consistencygroup_id | None | +| created_at | 2023-08-04T03:40:29.634209 | +| description | None | +| encrypted | False | +| id | f297c807-1eb3-4b48-8438-04d995ca498a | +| multiattach | False | +| name | my-new-volume | +| properties | | +| replication_status | None | +| size | 8 | +| snapshot_id | None | +| source_volid | None | +| status | creating | +| type | ceph-ssd | +| updated_at | None | +| user_id | fb9a3d02c89e4cfdbe64658ad43ece97 | ++---------------------+--------------------------------------+ +``` + +To verify that your volume was created successfully, list the available volumes: + +``` +openstack volume list +``` + +``` { .sh .no-copy } ++--------------------------------------+---------------+-----------+------+-------------+ +| ID | Name | Status | Size | Attached to | ++--------------------------------------+---------------+-----------+------+-------------+ +| f297c807-1eb3-4b48-8438-04d995ca498a | my-new-volume | available | 8 | | ++--------------------------------------+---------------+-----------+------+-------------+ +``` + +If your volume was created successfully, its status is `available`. If its status is `error`, you might have exceeded your quota. 
+ +## Attach a volume to an instance + +Attach your volume to a server, specifying the server ID and the volume ID: + +``` +openstack server add volume --device /dev/vdb +``` + +Show information for your volume: + +``` +openstack volume show f297c807-1eb3-4b48-8438-04d995ca498a +``` + +The output shows that the volume is attached to the server with ID `84c6e57d-a6b1-44b6-81eb-fcb36afd31b5`, is in the nova availability zone, and is bootable. + +``` { .sh .no-copy } ++------------------------------+--------------------------------------+ +| Field | Value | ++------------------------------+--------------------------------------+ +| attachments | [{u'device': u'/dev/vdb', | +| | u'server_id': u'84c6e57d-a | +| | u'id': u'573e024d-... | +| | u'volume_id': u'573e024d... | +| availability_zone | nova | +| bootable | false | +| consistencygroup_id | None | +| created_at | 2023-08-04T03:40:29.000000 | +| description | None | +| encrypted | False | +| id | f297c807-1eb3-4b48-8438-04d995ca498a | +| multiattach | False | +| name | my-new-volume | +| os-vol-tenant-attr:tenant_id | 4f07cc254d6c4471805d49bae1f739b9 | +| properties | | +| replication_status | None | +| size | 8 | +| snapshot_id | None | +| source_volid | None | +| status | available | +| type | ceph-ssd | +| updated_at | 2023-08-04T03:40:29.000000 | +| user_id | fb9a3d02c89e4cfdbe64658ad43ece97 | ++------------------------------+--------------------------------------+ +``` + +## Detach a volume from an instance + +Detach your volume from a server, specifying the server ID and the volume ID: + +``` +openstack server remove volume +``` + +Show information for your volume: + +``` +openstack volume show f297c807-1eb3-4b48-8438-04d995ca498a +``` + +The output shows that the volume is no longer attached to the server: + +``` { .sh .no-copy } ++------------------------------+--------------------------------------+ +| Field | Value | ++------------------------------+--------------------------------------+ +| attachments | 
[] | +| availability_zone | nova | +| bootable | false | +| consistencygroup_id | None | +| created_at | 2023-08-04T03:40:29.000000 | +| description | None | +| encrypted | False | +| id | f297c807-1eb3-4b48-8438-04d995ca498a | +| multiattach | False | +| name | my-new-volume | +| os-vol-tenant-attr:tenant_id | 4f07cc254d6c4471805d49bae1f739b9 | +| properties | | +| replication_status | None | +| size | 8 | +| snapshot_id | None | +| source_volid | None | +| status | available | +| type | ceph-ssd | +| updated_at | 2023-08-04T03:40:29.000000 | +| user_id | fb9a3d02c89e4cfdbe64658ad43ece97 | ++------------------------------+--------------------------------------+ +``` + +## Resize a volume + +To resize your volume, you must first detach it from the server if it is current. To detach the volume from your server, pass the server ID and volume ID to the following command: + +``` +openstack server remove volume +``` + +This command does not provide any output. + +``` +openstack volume list +``` + +``` { .sh .no-copy } ++--------------------------------------+---------------+-----------+------+-------------+ +| ID | Name | Status | Size | Attached to | ++--------------------------------------+---------------+-----------+------+-------------+ +| f297c807-1eb3-4b48-8438-04d995ca498a | my-new-volume | available | 8 | | ++--------------------------------------+---------------+-----------+------+-------------+ +``` + +Note that the volume is now available. + +Resize the volume by passing the volume ID and the new size (a value greater than the old one) as parameters: + +``` +openstack volume set f297c807-1eb3-4b48-8438-04d995ca498a --size 10 +``` + +This command does not provide any output. + +## Delete a volume + +To delete your volume, you must first detach it from the server. Delete the volume the volume ID: + +``` +openstack volume delete f297c807-1eb3-4b48-8438-04d995ca498a +``` + +This command does not provide any output. 
+ +List the volumes again, and note that the status of your volume is `deleting`: + +``` +openstack volume list +``` + +``` { .sh .no-copy } ++----------------+-----------------+-----------+------+-------------+ +| ID | Display Name | Status | Size | Attached to | ++----------------+-----------------+-----------+------+-------------+ +| f297c807-1e... | my-new-volume | deleting | 8 | | ++----------------+-----------------+-----------+------+-------------+ +``` + +When the volume is fully deleted, it disappears from the list of volumes: + +``` { .sh .no-copy } ++--------------------------------------+------+-----------+------+-------------+ +| ID | Name | Status | Size | Attached to | ++--------------------------------------+------+-----------+------+-------------+ +| d0d686e9-bcfe-499b-850d-50f4a998ad81 | | available | 30 | | ++--------------------------------------+------+-----------+------+-------------+ +``` diff --git a/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-volumes/with_the_dashboard/create-and-manage-volumes-with-the-dashboard.md b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-volumes/with_the_dashboard/create-and-manage-volumes-with-the-dashboard.md new file mode 100644 index 000000000..ba5a877a7 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/create-and-manage-volumes/with_the_dashboard/create-and-manage-volumes-with-the-dashboard.md @@ -0,0 +1,175 @@ +--- +hidden: false +label_names: +- volumes +- create +- manage +- dashboard +position: 1 +title: Create and manage volumes via the dashboard +--- + +## Create a volume + +Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/) + +Select the project you would like to deploy the new instance too (Use the project selector on the top left-hand side): + +
+ ![Alt text](project-selector.png) +
+ +Open the `Project` tab, open the `Volume` tab and select the `Volumes` category + +Click `Create Volume`. + +In the dialog box that opens, enter or select the following values. + +`Volume Name` +: Specify a name for the volume. + +`Description` +: Optionally, provide a brief description for the volume. + +`Volume Source` +: Select one of the following options + + - `No source, empty volume` + : Creates an empty volume. An empty volume does not contain a file system or a partition table. + + - `Image` + : If you choose this option, a new field for Use image as a source displays. You can select the image from the list. + + - `Volume` + : If you choose this option, a new field for Use volume as a source displays. You can select the volume from the list. Options to use a snapshot or a volume as the source for a volume are displayed only if there are existing snapshots or volumes. + +`Type` +: Select one of the following options: + + - `ceph-ssd` + : Flash based storage, this has the fastest read and write speeds however is generally priced higher + + - `ceph-hdd` + : Spinning disk based storage, this has the slow read and write speeds compared to flash based however it is generally priced lower then flash based storage + + - `ceph-ssd-encrypted` + : Flash based storage that is further encrypted + + - `ceph-hdd-encrypted` + : Spinning disk based storage that is further encrypted + +!!! note + All storage types are encrypted on the FlexiHPC based systems, the encrypted drives are an option to further encrypt the volume + +`Size (GB)` +: The size of the volume in gibibytes (GiB). + +`Availability Zone` +: By default this will be nova. This specifies which zone it will be deployed too if there were multiple zones. The current default and only zone is nova at this point in time. + +Click `Create Volume` + +The dashboard shows the volume on the `Volumes` tab. + +## Attach a volume to an instance + +After you create one or more volumes, you can attach them to instances. 
You can attach a volume to one instance at a time. + +Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/) + +Select the project you would like to deploy the new instance to (Use the project selector on the top left-hand side): + +
+ ![Alt text](project-selector.png) +
+ +Open the `Project` tab, open the `Volume` tab and select the `Volumes` category + +Select the volume to add to an instance, expand the `Actions` column and click `Manage Attachments`. + +In the `Manage Volume Attachments` dialog box, select an instance. + +Click `Attach Volume` + +The dashboard shows the instance to which the volume is now attached and the device name. + +You can view the status of a volume in the Volumes tab of the dashboard. The volume is either Available or In-Use. + +Now you can log in to the instance and mount, format, and use the disk. + +## Detach a volume from an instance + +Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/) + +Select the project you would like to deploy the new instance too (Use the project selector on the top left-hand side): + +
+ ![Alt text](project-selector.png) +
+ +Open the `Project` tab, open the `Volume` tab and select the `Volumes` category + +Select the volume to detach from the instance, expand the `Actions` column and click `Manage Attachments`. + +Click `Detach Volume` and confirm your changes. + +A message indicates whether the action was successful. + +## Create a snapshot from a volume + +Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/) + +Select the project you would like to deploy the new instance to (Use the project selector on the top left-hand side): + +
+ ![Alt text](project-selector.png) +
+ +Open the `Project` tab, open the `Volume` tab and select the `Volumes` category + +Select the volume, expand the `Actions` column and click `Create Snapshot`. + +In the dialog box that opens, enter a snapshot name and a brief description. + +Confirm your changes. + +The dashboard shows the new volume snapshot in Volume Snapshots tab. + +## Edit a volume + +Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/) + +Select the project you would like to deploy the new instance too (Use the project selector on the top left-hand side): + +
+ ![Alt text](project-selector.png) +
+ +Open the `Project` tab, open the `Volume` tab and select the `Volumes` category + +Select the volume, expand the `Actions` column and click `Edit Volume`. + +In the `Edit Volume` dialog box, update the name and description of the volume. + +Click `Edit Volume`. + +!!! note + You can extend a volume by using the Extend Volume option available in the Actions dropdown list and entering the new value for volume size. + +## Delete a volume + +Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/) + +Select the project you would like to deploy the new instance too (Use the project selector on the top left-hand side): + +
+ ![Alt text](project-selector.png) +
+ +Open the `Project` tab, open the `Volume` tab and select the `Volumes` category + +Select the check boxes for the volumes that you want to delete. + +Click `Delete Volumes` and confirm your choice. + +A message indicates whether the action was successful. diff --git a/docs/Researcher_Developer_Cloud/user-guides/index.md b/docs/Researcher_Developer_Cloud/user-guides/index.md new file mode 100644 index 000000000..197a361e8 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/index.md @@ -0,0 +1,40 @@ +# Features + +We want to partner with early access users to explore and build scalable approaches for harnessing cloud technologies and DevOps practices in computational research. + +Our platform's building blocks include: + +`Compute` +: Virtual machines optimised for high-performance computational needs. Multiple flavours of CPU and GPU resources to support a range of compute and memory requirements. + + → Get started with our [Compute User Guides](launch-and-manage-instances/index.md) + +`Images` +: Tailored operating systems to meet your research computing and data needs. Ready-to-use options available, as well as capability to create custom images and contribute to a pool of community-developed images. + + → Get started with our [Images User Guides](uploading-and-managing-Images/index.md) + +`Storage` +: Scalable storage space that can be dynamically mounted to your Compute instances. Options to encrypt storage volumes for added security. + + → Get started with our [Storage User Guides](create-and-manage-volumes/index.md) + +`Networks` +: Fast, reliable, and secure connectivity built on the REANNZ national network. Options for network customisation and security groups. + + → Get started with our [Networks User Guides](create-and-manage-networks/index.md) + +`Identity` +: Identity management services to create application credentials and control access to projects. 
+ + → Get started with our [Identity User Guides](create-and-manage-identity/index.md) + +`Object Storage` +: Openstack S3 compatiable object storage. + + → Get started with our [Object Storage User Guides](create-and-manage-object-storage/index.md) + +`Application Programming Interface (API)` +: All services are programmable via a public API to enable repeatable definition of infrastructure through software code. + + diff --git a/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/.pages.yml b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/.pages.yml new file mode 100644 index 000000000..e577b6b5b --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/.pages.yml @@ -0,0 +1,7 @@ +nav: + - with_the_dashboard + - with_the_CLI + - other_tools + - connect-to-instance-ssh.md + - default-user-nesi-images.md + - ... diff --git a/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/connect-to-instance-ssh.md b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/connect-to-instance-ssh.md new file mode 100644 index 000000000..a3d50176f --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/connect-to-instance-ssh.md @@ -0,0 +1,24 @@ +# Connect to your instance by using SSH + +To use SSH to connect to your instance, use the downloaded keypair file. You will also need to ensure you have created and assigned a `Security group` to your instance that allows connections on port 22. + +Read Create and Manage Security Groups via [Dashboard](../create-and-manage-networks/with_the_dashboard/manage-security-groups-with-the-dashboard.md) or [CLI](../create-and-manage-networks/with_the_CLI/manage-security-groups-via-cli.md) to create one that allows port 22 if you have not done so already. 
You will then need to add that to the Instance if you haven't already via the [Dashboard](with_the_dashboard/manage-security-groups-of-an-Instance-via-the-dashboard.md#attach-a-security-group) or [CLI](with_the_CLI/manage-security-groups-of-an-Instance-via-the-cli.md#adding-a-security-group-to-an-instance) + +!!! note + The user name is `ubuntu` for the Ubuntu cloud images on FlexiHPC. We have a list of default users for the most common cloud images in [Default user for images](default-user-nesi-images.md) + +Ensure your instance has a `floating ip` associated with it. If you need to assign one then check the following Assign Floating IP to an Instance via the Dashboard + +Copy the `floating ip` address for your instance. + +Use the **ssh** command to make a secure connection to the instance. For example: + +``` +ssh -i MyKey.pem ubuntu@10.0.0.2 +``` + +!!! note + A `MyKey.pem` private key is a key kept secret by the SSH user on their client machine. The user must never reveal the private key to anyone, including the server (server administrator), to ensure their identity is never compromised. + Please look at [Create and Manage Keypairs](../create-and-manage-keypairs/index.md) to create or import a keypair for use on the RDC + +At the prompt, type `yes`. 
diff --git a/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/default-user-nesi-images.md b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/default-user-nesi-images.md new file mode 100644 index 000000000..3388d7aff --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/default-user-nesi-images.md @@ -0,0 +1,27 @@ +--- +hidden: false +label_names: +- instance +- launch +position: 1 +title: Launch and mange instances +vote_count: 1 +vote_sum: 1 +--- + +The following images provided to the Research Developer Cloud by NeSI all have the following default username when using ssh to connect + +`Ubuntu` +: The default user is `ubuntu` + +`CentOS` +: The default user is `centos` + +`FedoraCoreOS` +: The default user is `core` + +`Rocky` +: The default user is `cloud-user` + +`Rocky-Cloud` +: The default user is `rocky` \ No newline at end of file diff --git a/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/index.md b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/index.md new file mode 100644 index 000000000..f5cdbec0d --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/index.md @@ -0,0 +1,34 @@ +--- +hidden: false +label_names: +- instance +- launch +position: 1 +title: Launch and mange instances +vote_count: 1 +vote_sum: 1 +--- + +Instances are virtual machines that run inside of the FlexiHPC cloud space. + +FlexiHPC offers 2 ways to Launch and instance on the platform: + +- [Launch an Instance via Dashboard](with_the_dashboard/launch-an-instance-via-dashboard.md) + +This is the most user friendly way to get up and running as its all done via the FlexiHPC dashboard. + +- [Launch an Instance via CLI](with_the_CLI/launch-an-instance-via-cli.md) + +This is a more advance way of interacting with the FlexiHPC platform. 
It requires some environment setup however it gives you the most control over your project within the platform. + +After reading one of the above you should be able to connect to the instance using ssh by following [Connecting to your instance with SSH](connect-to-instance-ssh.md) + +## Resizing an Instance + +Resizing an instance allows you to scale the instance either up, if your workload requires a bit more grunt, or down, should you no longer need to consume a large amount of resources. + +FlexiHPC allows you to resize an instance in 2 ways + +- [Resizing an instance via the dashboard](with_the_dashboard/resizing-an-Instance-via-the-dashboard.md) + +- [Resizing an instance via CLI](with_the_CLI/resizing-an-Instance-via-cli.md) diff --git a/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/other_tools/deployment-of-an-instance-with-terraform.md b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/other_tools/deployment-of-an-instance-with-terraform.md new file mode 100644 index 000000000..4b47d6144 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/other_tools/deployment-of-an-instance-with-terraform.md @@ -0,0 +1,200 @@ +--- +hidden: false +label_names: +- instance +- resize +title: Deployment of an instance with Terraform +position: 10 +--- + +!!! note + You will need to have Terraform installed on the machine that will be executing the commands. Follow the [Install Terraform](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli) link from the official documentation.
We also highly recommend that you use [Application Credentials](../../create-and-manage-identity/creating-and-managing-application-credentials-with-the-dashboard.md) to do any automation + +Please make sure you have downloaded the `clouds.yaml` file for your application credentials and it is sitting in the directory `~/.config/openstack/` + +Generating Application Credentials is covered [here](../../create-and-manage-identity/creating-and-managing-application-credentials-with-the-dashboard.md) + +Normally creating a folder space for Terraform projects can be a good thing as this ensures the Terraform state file doesn't clash with another. + +Once you are in an empty folder, example `terraform-example-flexihpc`, we will need to create a file called `main.tf` + +Inside this file we will need to define the provider + +``` hcl +provider "openstack" { + cloud = "NAME_IN_CLOUDS_YAML" +} +``` + +Replace the placeholder `NAME_IN_CLOUDS_YAML` with the name of your openstack section in the clouds.yaml file.
An example `clouds.yaml` with multiple entries looks like the following: + +``` { .yaml .no-copy } +clouds: + openstack-entry-1: + auth: + auth_url: https://keystone.akl-1.cloud.nesi.org.nz + application_credential_id: "APP_CREDS_ID" + application_credential_secret: "APP_CREDS_SECRET" + interface: "public" + identity_api_version: 3 + auth_type: "v3applicationcredential" + verify: false + openstack-entry-2: + auth: + auth_url: https://keystone.akl-1.cloud.nesi.org.nz + application_credential_id: "APP_CREDS_ID" + application_credential_secret: "APP_CREDS_SECRET" + region_name: "akl-1" + interface: "public" + identity_api_version: 3 + auth_type: "v3applicationcredential" + verify: false +``` +You will want to be using the name `openstack-entry-X` as the value of `NAME_IN_CLOUDS_YAML` + +Then within the same file we want to define the compute instance + +``` hcl +resource "openstack_compute_instance_v2" "compute_instance" { + name = "compute-instance-0" + flavor_id = "FLEXIHPC_FLAVOR_ID" + image_id = "FLEXIHPC_IMAGE_ID" + key_pair = "FLEXIHPC_KEY_PAIR_NAME" + security_groups = ["FLEXIHPC_SECURITY_GROUP_NAME"] + + network { + name = "FLEXIHPC_NETWORK_NAME" + } +} +``` + +Replace the placeholders `FLEXIHPC_FLAVOR_ID`, `FLEXIHPC_IMAGE_ID`, `FLEXIHPC_KEY_PAIR_NAME`, `FLEXIHPC_SECURITY_GROUP_NAME`, and `FLEXIHPC_NETWORK_NAME` with appropriate values from your OpenStack environment. + +The network name is normally the same as your FlexiHPC project name. + +Then we want to apply a floating IP to the instance so we can connect from outside the FlexiHPC platform + +``` hcl +resource "openstack_networking_floatingip_v2" "floating_ip" { + pool = "external" +} + +resource "openstack_compute_floatingip_associate_v2" "floating_ip_association" { + floating_ip = openstack_networking_floatingip_v2.floating_ip.address + instance_id = openstack_compute_instance_v2.compute_instance.id +} +``` + +The floating IP pool is `external` within the FlexiHPC platform.
+ +Once all the above is filled in then you only need to run the standard terraform commands + +``` +terraform init +``` + +This will initialize the terraform directory with all the required modules + +Then we run the command to create our resources + +``` +terraform apply +``` + +Terraform will prompt you to confirm the changes. Type "yes" to proceed with the creation of the compute instance and the floating IP association. + +Terraform will then provision the compute instance and associate the floating IP to it. + +Remember that this is a basic example, and you might need to adapt it to your specific FlexiHPC environment and configurations. + +The full `main.tf` file for completeness + +``` hcl title="main.tf" +terraform { +required_version = ">= 0.14.0" + required_providers { + openstack = { + source = "terraform-provider-openstack/openstack" + version = "~> 1.51.1" + } + } +} + +provider "openstack" { + cloud = "NAME_IN_CLOUDS_YAML" +} + +resource "openstack_compute_instance_v2" "compute_instance" { + name = "compute-instance-0" + flavor_id = "FLEXIHPC_FLAVOR_ID" + image_id = "FLEXIHPC_IMAGE_ID" + key_pair = "FLEXIHPC_KEY_PAIR_NAME" + security_groups = ["FLEXIHPC_SECURITY_GROUP_NAME"] + + network { + name = "FLEXIHPC_NETWORK_NAME" + } +} + +resource "openstack_networking_floatingip_v2" "floating_ip" { + pool = "external" +} + +resource "openstack_compute_floatingip_associate_v2" "floating_ip_association" { + floating_ip = openstack_networking_floatingip_v2.floating_ip.address + instance_id = openstack_compute_instance_v2.compute_instance.id +} +``` + +## Using FlexiHPC object storage to store the Terraform state file + +Should you wish to not include the terraform state file within the git repo then you will want to update the above with the backend in which you wish to store that file + +Within the first chunk of the file you want to add the following so it looks like this + +``` hcl +terraform { +required_version = ">= 0.14.0" + required_providers { + openstack
 = { + source = "terraform-provider-openstack/openstack" + version = "~> 1.51.1" + } + } + + backend "s3" { + bucket = "" + key = "state/terraform.tfstate" + endpoint = "https://object.akl-1.cloud.nesi.org.nz/" + sts_endpoint = "https://object.akl-1.cloud.nesi.org.nz/" + access_key = "" + secret_key = "" + #region = "us-east-1" + force_path_style = "true" + skip_credentials_validation = "true" + } +} +``` + +We have added the `backend "s3"` chunk to the `terraform` block + +`bucket` +: The container name within FlexiHPC object storage. You can create this either via the [dashboard](../../create-and-manage-object-storage/with_the_dashboard/create-and-manage-object-storage-with-the-dashboard.md) or [CLI](../../create-and-manage-object-storage/with_the_CLI/create-and-manage-object-storage-via-cli.md) + +You will need to update the following after generating [EC2 Credentials](../../create-and-manage-identity/index.md) + +`access_key` +: The EC2 Credentials Access Token + +`secret_key` +: The EC2 Credentials User Secret + +Save that file and run + +``` { .sh } +terraform init -reconfigure +``` + +This will reconfigure the backend to store the state file on FlexiHPC, you can also pass `-migrate-state` instead of `-reconfigure` should you have a state file that you want to move there from a previous run.
+ +Your terraform state file should now be configured and stored on FlexiHPC object storage diff --git a/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_CLI/.pages.yml b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_CLI/.pages.yml new file mode 100644 index 000000000..953956c42 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_CLI/.pages.yml @@ -0,0 +1,6 @@ +nav: + - launch-an-instance-via-cli.md + - launch-an-instance-from-an-image.md + - launch-an-instance-from-a-volume.md + - resizing-an-Instance-via-cli.md + - manage-security-groups-of-an-Instance-via-the-cli.md diff --git a/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_CLI/launch-an-instance-from-a-volume.md b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_CLI/launch-an-instance-from-a-volume.md new file mode 100644 index 000000000..52c271e6a --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_CLI/launch-an-instance-from-a-volume.md @@ -0,0 +1,273 @@ +--- +hidden: false +label_names: +- instance +- launch +- cli +- volume +position: 2 +title: Launch an Instance from a volume +--- + +!!! note + The openstack CLI will need to be setup to interact with the FlexiHPC system. Please read [Setting up your CLI environment](../../setting-up-your-CLI-environment/index.md) to interact with FlexiHPC to get started. + +You can create a volume from an existing image, volume, or snapshot. This procedure shows you how to create a volume from an image, and use the volume to boot an instance. + +List the available images. 
+ +``` +openstack image list +``` + +``` { .sh .no-copy } ++--------------------------------------+----------------------------------------------------------+--------+ +| ID | Name | Status | ++--------------------------------------+----------------------------------------------------------+--------+ +| 701e29ac-8963-4cf9-b90a-e1b14095866d | CentOS-Stream-8 | active | +| 42ebfb1b-f136-4947-ae1a-025ad57b369c | CentOS-stream8-baremetal | active | +| 386c661a-4c30-4f09-b536-511a862102b4 | FedoraCoreOS35 | active | +| fe8c98d3-7a87-4b5b-9f9e-0f967c53f9bd | FedoraCoreOS37 | active | +| 622c4f3f-8e62-4c81-8809-69b0a34a28e0 | PostgreSQL-Ubuntu-20.04.4 | active | +| 3936d736-e5bb-4024-a361-512fd40413bc | RockyLinux-8.5 | active | +| eb40dbb5-7da6-4784-b47c-f417c9d3b126 | RockyLinux-8.6 | active | +| 2b00f364-1bd0-432c-91f9-8d1adda6fc9f | RockyLinux-8.6-OFED-5.7 | active | +| f366dd3a-5353-47dd-9e92-662055125174 | RockyLinux-8.7 | active | +| b443a9a2-32d1-48c1-8d84-bcc10adbb0c3 | RockyLinux-8.7-OFED-5.8 | active | +| 1276db65-e5de-4721-b2db-666a73929b3e | Ubuntu-22.10-Wiki-Test | active | +| ab67f1b1-44f9-4465-9a68-82cc35ed69c0 | Ubuntu-Bionic-18.04 | active | +| d73ef17b-6b0a-4148-b3b2-f4edcf2e480e | Ubuntu-Focal-20.04 | active | +| ce869c9d-35bb-46be-9b6d-d74d4035e2f8 | Ubuntu-Focal-20.04-baremetal | active | +| 885d01da-777b-4e67-a1ec-e114e4c2786e | Ubuntu-Focal-20.04-mark-testing-dib-2022-06-30T12:47:00Z | active | +| a5c9b7b2-e77b-4094-99ac-db0cf5181da5 | Ubuntu-Jammy-22.04 | active | +| 05f13645-2ac7-44ce-aa1c-64f31ca42761 | Ubuntu-Jammy-22.04-DEMOCREDS | active | +| c7e208cb-6521-422b-8d00-1b8f003c4646 | Ubuntu20.04 | active | +| 728719c2-0a75-4411-a8fa-3230fa5d50e5 | Ubuntu22.04 | active | +| a13f3659-eed9-411c-9a33-f1584fd00328 | Windows-Server-2012-R2-Eval | active | +| 8814f28f-1dbd-4253-84e8-8e45032855c6 | Windows-Server-2019 | active | +| 15f3eebe-4c15-4565-a4f8-7369f072b50d | cirros-0.4 | active | +| 534b8b78-f455-4f85-bd21-13c2b1b14e3e | cirros-0.5 | active 
| +| 64dead14-9c5a-41c3-b4d6-a122a2ca8f28 | linux-test-snapshot | active | +| d479470d-ab6d-40d6-afc9-04f5f253404d | linux-test-to-boot-2 | active | +| 40ed5c78-c970-4182-a9c8-27e18a6a4251 | linux-test-to-boot-cli | active | +| 5a8e5595-d893-4d1c-8600-d7982f3915bb | ood-keycloak-1 | active | +| 04e1a31a-adee-4af2-935e-0e6e7c4b3193 | test-break | active | +| 1a0480d1-55c8-4fd7-8c7a-8c26e52d8cbd | ubuntu-jammy-server-cloudimg | active | ++--------------------------------------+----------------------------------------------------------+--------+ +``` + +Note the ID of the image that you want to use to create a volume. + +!!! note + We will use the Ubuntu-Jammy-22.04 image with the ID `a5c9b7b2-e77b-4094-99ac-db0cf5181da5` + +We also need to list the available flavors. + +``` +openstack flavor list +``` + +``` { .sh .no-copy } ++--------------------------------------+-------------------------+--------+------+-----------+-------+-----------+ +| ID | Name | RAM | Disk | Ephemeral | VCPUs | Is Public | ++--------------------------------------+-------------------------+--------+------+-----------+-------+-----------+ +| 0f71f1e2-d327-41f9-87e3-0f6c29f51af1 | gb.bm.gpu | 524288 | 240 | 0 | 48 | True | +| 1281555c-6bcb-42e4-a48e-98352dcd0fd0 | compute1.2cpu4ram | 4096 | 30 | 0 | 2 | True | +| 14505c86-765f-4971-a36f-1e867216dccf | memory1.4cpu16ram | 16384 | 30 | 0 | 4 | True | +| 1dbac08-d9a9-4c27-8534-57293785433e | balanced1.32cpu64ram | 65536 | 30 | 0 | 32 | True | +| 2d02e6a4-3937-4ed3-951a-8e27867ff53e | balanced1.8cpu16ram | 16384 | 30 | 0 | 8 | True | +| 2e7b7cc7-9e29-4ff2-98dd-03dbb99dbb5c | compute1.16cpu32ram | 32768 | 30 | 0 | 16 | True | +| 3276cd5f-c96a-4e05-960f-f4f197142c98 | memory1.1cpu4ram | 4096 | 30 | 0 | 1 | True | +| 3b5a6e01-d3ad-49e3-a4f8-183c04444330 | balanced1.1cpu2ram | 2048 | 30 | 0 | 1 | True | +| 4a0425c8-7494-473e-a5bb-acc91c378615 | c1.cpu128.ram448.disk30 | 458752 | 30 | 0 | 128 | True | +| 4e8af724-f66d-4072-a692-114126de25a0 | 
compute1.1cpu2ram | 2048 | 30 | 0 | 1 | True | +| 4ec785be-a422-4207-9daa-cbb71c61f9ed | devtest1.4cpu4ram | 4096 | 30 | 0 | 4 | True | +| 674fa81a-69c7-4bf7-b3a9-59989fb63618 | balanced1.16cpu32ram | 32768 | 30 | 0 | 16 | True | +| 6b2e76a8-cce0-4175-8160-76e2525d3d3d | balanced1.2cpu4ram | 4096 | 30 | 0 | 2 | True | +| 7af5c672-43e7-4296-9608-5974394851b8 | memory1.2cpu8ram | 8192 | 30 | 0 | 2 | True | +| 7ffa092c-e75a-4cb5-be9f-db8c749e8801 | compute1.4cpu8ram | 8192 | 30 | 0 | 4 | True | +| 8aef7f54-1ed6-4275-a38c-3f1e61afabd9 | memory1.16cpu64ram | 65536 | 30 | 0 | 16 | True | +| 94ba9177-cb98-4b04-870c-9a696e1c5327 | memory1.32cpu128ram | 131072 | 30 | 0 | 32 | True | +| 9d536959-dd7a-4532-b0b7-db8bb8a72ddb | compute1.8cpu16ram | 16384 | 30 | 0 | 8 | True | +| b46e184c-0dcb-44b2-a53f-c2b8eff676c9 | compute1.32cpu64ram | 65536 | 30 | 0 | 32 | True | +| d6c2e93a-d430-44ca-822b-79a4b882c0c3 | piotr-gpu | 131072 | 100 | 0 | 8 | True | +| d6e3a25a-4f9e-4c87-9342-77f807ead537 | memory1.8cpu32ram | 32768 | 30 | 0 | 8 | True | +| e07cfee1-43af-4bf6-baac-3bdf7c1b88f8 | balanced1.4cpu8ram | 8192 | 30 | 0 | 4 | True | +| e3a1ec6d-9513-4b9f-9580-671c4eee1c21 | devtest1.2cpu2ram | 2048 | 30 | 0 | 2 | True | +| ee55c523-9803-4296-91be-1c34e986baaa | devtest1.1cpu1ram | 1024 | 30 | 0 | 1 | True | ++--------------------------------------+-------------------------+--------+------+-----------+-------+-----------+ +``` + +Note the ID of the flavor that you want to use + +!!! 
note + We will use the `balanced1.1cpu2ram` flavor with an ID `3b5a6e01-d3ad-49e3-a4f8-183c04444330` + +Get a list of networks + +``` +openstack network list +``` + +``` { .sh .no-copy } ++--------------------------------------+--------------------------------+--------------------------------------+ +| ID | Name | Subnets | ++--------------------------------------+--------------------------------+--------------------------------------+ +| 33d0c11b-b659-4b77-9afc-5676fe965839 | external | 5c2644ad-7253-42f5-ad69-40970b84dea6 | +| d3a7ddb5-6582-42cf-978a-c99b4ed25ad4 | NeSI-Training-Test | f5715775-270c-4230-bfa7-fdbdf51352dc | ++--------------------------------------+--------------------------------+--------------------------------------+ +``` + +Note the ID for the network that is not the external one and is named the same as your project space. + +!!! note + For this example we will use NeSI-Training-Test with an ID `d3a7ddb5-6582-42cf-978a-c99b4ed25ad4` + +With the above values we then have 2 choices for booting an instance from a volume + +- Create and boot from volume in a single step +> This option doesn't give us control over the volume creation process and only allows us to specify a size for our new volume + +- Creating the volume first and boot from it +> This allows us to specify more than the size on creation, example is we might want to encrypt it + +## Create and boot from volume in a single step + +We will then create an instance from that image using the `--boot-from-volume` parameter + +!!!
note + We highly recommend that you supply the parameter `--key-name` to allow connections with your SSH key + +The command will look like the following + +``` +openstack server create --flavor FLAVOR_ID --network NETWORK_ID --image IMAGE_ID --boot-from-volume VOLUME_SIZE_IN_GB --key-name KEY_NAME --wait INSTANCE_NAME +``` + +Using our example values the command looks like the following + +``` +openstack server create --flavor 3b5a6e01-d3ad-49e3-a4f8-183c04444330 --network d3a7ddb5-6582-42cf-978a-c99b4ed25ad4 --image a5c9b7b2-e77b-4094-99ac-db0cf5181da5 --boot-from-volume 80 --key-name kahus-key --wait Ubuntu_boot_volume +``` + +``` { .sh .no-copy } ++-----------------------------+----------------------------------------------------------+ +| Field | Value | ++-----------------------------+----------------------------------------------------------+ +| accessIPv4 | | +| accessIPv6 | | +| addresses | NeSI-Training-Test=10.1.0.57 | +| adminPass | rCs2E9BP2RZu | +| config_drive | True | +| created | 2023-09-03T22:06:56Z | +| flavor | memory1.4cpu16ram (14505c86-765f-4971-a36f-1e867216dccf) | +| hostId | c79c5b9fc6c64341b07c0408e401a28ad0b20aa123a250f77fd8c249 | +| id | ddc5d676-db4e-4fd3-b0c9-14b91a1f16d5 | +| image | N/A (booted from volume) | +| key_name | kahus-key | +| name | Ubuntu_boot_volume | +| progress | 0 | +| project_id | 4f07cc254d6c4471805d49bae1f739b9 | +| properties | | +| security_groups | name='default' | +| status | ACTIVE | +| updated | 2023-09-03T22:07:04Z | +| user_id | fb9a3d02c89e4cfdbe64658ad43ece97 | +| volumes_attached | id='0aa677c7-072b-4241-a70a-05a4de020596' | ++-----------------------------+----------------------------------------------------------+ +``` + +!!! warning + Should you not provide a key file to the deployment you will need to remake the instance as by default the Flexi environment doesn't set passwords. + + Ubuntu and CentOS cloud images also don't allow password SSH by default.
+ +## Creating the volume first and boot from it + +Should you wish to have more control over the volume creation process we will first create the volume then boot an instance from that. + +Cinder makes a volume bootable when `--image` parameter is passed. + +``` +openstack volume create --image IMAGE_ID --size SIZE_IN_GB bootable_volume +``` + +We will use the `Ubuntu-Jammy-22.04` image with the ID `a5c9b7b2-e77b-4094-99ac-db0cf5181da5` the command will look like + +``` +openstack volume create --image a5c9b7b2-e77b-4094-99ac-db0cf5181da5 --size 80 my_ubuntu_volume +``` + +``` { .sh .no-copy } ++---------------------+--------------------------------------+ +| Field | Value | ++---------------------+--------------------------------------+ +| attachments | [] | +| availability_zone | nova | +| bootable | false | +| consistencygroup_id | None | +| created_at | 2023-08-09T21:57:52.675096 | +| description | None | +| encrypted | False | +| id | 3dd489d8-7c44-4c59-b4af-0c804ddf4729 | +| multiattach | False | +| name | my_ubuntu_volume | +| properties | | +| replication_status | None | +| size | 30 | +| snapshot_id | None | +| source_volid | None | +| status | creating | +| type | ceph-ssd | +| updated_at | None | +| user_id | fb9a3d02c89e4cfdbe64658ad43ece97 | ++---------------------+--------------------------------------+ +``` + +Take note of the volume ID + +The following command is used to boot an instance from a volume + +``` +openstack server create --flavor FLAVOR_ID --volume VOLUME_ID --network NETWORK_ID --key-name KEY_NAME INSTANCE_NAME +``` + +We will supply the `balanced1.1cpu2ram` ID for flavor and our volume ID of `3dd489d8-7c44-4c59-b4af-0c804ddf4729` from the volume we created before + +``` +openstack server create --flavor 3b5a6e01-d3ad-49e3-a4f8-183c04444330 --volume 3dd489d8-7c44-4c59-b4af-0c804ddf4729 --network d3a7ddb5-6582-42cf-978a-c99b4ed25ad4 --key-name kahus-key Ubuntu_From_Volume +``` + +``` { .sh .no-copy } 
++-----------------------------+-----------------------------------------------------------+ +| Field | Value | ++-----------------------------+-----------------------------------------------------------+ +| accessIPv4 | | +| accessIPv6 | | +| addresses | | +| adminPass | MPu74uppSp4r | +| config_drive | | +| created | 2023-08-09T22:05:30Z | +| flavor | balanced1.1cpu2ram (3b5a6e01-d3ad-49e3-a4f8-183c04444330) | +| hostId | | +| id | 2d4db443-eb48-4d64-8770-5624568f29ad | +| image | N/A (booted from volume) | +| key_name | kahus-key | +| name | Ubuntu_From_Volume | +| progress | 0 | +| project_id | 4f07cc254d6c4471805d49bae1f739b9 | +| properties | | +| security_groups | name='default' | +| status | BUILD | +| updated | 2023-08-09T22:05:30Z | +| user_id | fb9a3d02c89e4cfdbe64658ad43ece97 | +| volumes_attached | | ++-----------------------------+-----------------------------------------------------------+ +``` + +!!! warning + Should you not provide a key file to the deployment you will need to remake the instance as by default the Flexi environment doesn't set passwords. + + Ubuntu and CentOS cloud images also don't allow password SSH by default. + +To allow external access a floating IP will need to be provided to the newly created instance, following [Manage Floating IPs via CLI](../../create-and-manage-networks/with_the_CLI/manage-floating-ips-via-cli.md) will complete this for you, You should then be able to connect to your instance using ssh which is explained more in [Connect to your instance by using SSH](../connect-to-instance-ssh.md). 
diff --git a/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_CLI/launch-an-instance-from-an-image.md b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_CLI/launch-an-instance-from-an-image.md new file mode 100644 index 000000000..1c52e5f53 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_CLI/launch-an-instance-from-an-image.md @@ -0,0 +1,90 @@ +--- +hidden: false +label_names: +- instance +- launch +- cli +- image +position: 2 +title: Launch an Instance from an Image +vote_count: 1 +vote_sum: 1 +--- + +!!! note + The openstack CLI will need to be setup to interact with the FlexiHPC system. Please read + [Setting up your CLI environment](../../setting-up-your-CLI-environment/index.md) to interact with FlexiHPC to get started. + + We highly recommend that you supply the parameter `--key-name` to allow connections with your SSH key + +After you gather required parameters, run the following command to launch an instance. Specify the server `name`, `flavor ID`, and `image ID`. 
+ +``` +openstack server create --flavor FLAVOR_ID --image IMAGE_ID --key-name KEY_NAME --security-group SEC_GROUP_ID --network NETWORK_ID INSTANCE_NAME +``` + +Example with the values supplied + +``` +openstack server create --flavor e3a1ec6d-9513-4b9f-9580-671c4eee1c21 --image a5c9b7b2-e77b-4094-99ac-db0cf5181da5 --key-name test-key-pair --security-group 7200b28f-9089-4797-a094-39f1995e6f0c --network d3a7ddb5-6582-42cf-978a-c99b4ed25ad4 test-instance-wiki +``` + +``` { .sh .no-copy } ++-----------------------------+-----------------------------------------------------------+ +| Field | Value | ++-----------------------------+-----------------------------------------------------------+ +| OS-DCF:diskConfig | MANUAL | +| OS-EXT-AZ:availability_zone | | +| OS-EXT-STS:power_state | NOSTATE | +| OS-EXT-STS:task_state | scheduling | +| OS-EXT-STS:vm_state | building | +| OS-SRV-USG:launched_at | None | +| OS-SRV-USG:terminated_at | None | +| accessIPv4 | | +| accessIPv6 | | +| addresses | | +| adminPass | | +| config_drive | | +| created | 2023-07-26T22:51:53Z | +| flavor | devtest1.2cpu2ram (e3a1ec6d-9513-4b9f-9580-671c4eee1c21) | +| hostId | | +| id | 8b08a4fb-7372-4269-a583-9dbc91779ffe | +| image | Ubuntu-Jammy-22.04 (a5c9b7b2-e77b-4094-99ac-db0cf5181da5) | +| key_name | test-key-pair | +| name | test-instance-wiki | +| progress | 0 | +| project_id | 4f07cc254d6c4471805d49bae1f739b9 | +| properties | | +| security_groups | name='7200b28f-9089-4797-a094-39f1995e6f0c' | +| status | BUILD | +| updated | 2023-07-26T22:51:53Z | +| user_id | fb9a3d02c89e4cfdbe64658ad43ece97 | +| volumes_attached | | ++-----------------------------+-----------------------------------------------------------+ +``` + +A status of `BUILD` indicates that the instance has started, but is not yet online. + +A status of `ACTIVE` indicates that the instance is active. + +Copy the server ID value from the `id` field in the output. Use the ID to get server details or to delete your server. 
+ +Check if the instance is online + +``` +openstack server list +``` + +The list shows the ID, name, status, and private (and if assigned, public) IP addresses for all instances in the project to which you belong: + +``` { .sh .no-copy } ++--------------------------------------+---------------------------------------+--------+----------------------------------------------+--------------------------+--------------------+ +| ID | Name | Status | Networks | Image | Flavor | ++--------------------------------------+---------------------------------------+--------+----------------------------------------------+--------------------------+--------------------+ +| 8b08a4fb-7372-4269-a583-9dbc91779ffe | test-instance-wiki | ACTIVE | NeSI-Training-Test=10.1.0.134 | Ubuntu-Jammy-22.04 | devtest1.2cpu2ram | ++--------------------------------------+---------------------------------------+--------+----------------------------------------------+--------------------------+--------------------+ +``` + +If the status for the instance is `ACTIVE`, the instance is online. + +If you would like to access your Compute instance outside the FlexiHPC platform you will need to associate a `floating ip` to your instance. Reading [Manage Floating IPs via CLI](../../create-and-manage-networks/with_the_CLI/manage-floating-ips-via-cli.md), You should then be able to connect to your instance using ssh which is explained more in [Connect to your instance by using SSH](../connect-to-instance-ssh.md). 
diff --git a/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_CLI/launch-an-instance-via-cli.md b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_CLI/launch-an-instance-via-cli.md new file mode 100644 index 000000000..01c613fb8 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_CLI/launch-an-instance-via-cli.md @@ -0,0 +1,181 @@ +--- +hidden: false +label_names: +- instance +- launch +- cli +position: 2 +title: Launch an Instance via CLI +vote_count: 1 +vote_sum: 1 +--- + +!!! note + The openstack CLI will need to be setup to interact with the FlexiHPC system. Please read [Setting up your CLI environment](../../setting-up-your-CLI-environment/index.md) to interact with FlexiHPC to get started. + +Before you can launch an instance via the CLI, gather the following parameters: + +- The `instance source` can be an image, snapshot, or block storage volume that contains an image or snapshot. + +- A `name` for your instance. + +- The `flavor` for your instance, which defines the compute, memory, and storage capacity of nova computing instances. A flavor is an available hardware configuration for a server. It defines the size of a virtual server that can be launched. + +- Access and security credentials, which include one or both of the following credentials: + + - A `key pair` for your instance, which are SSH credentials that are injected into images when they are launched. Create at least one key pair for each project. If you already have generated a key pair with an external tool, you can import it into OpenStack. You can use the key pair for multiple instances that belong to that project. Read [Create and Manage Key Pairs](../../create-and-manage-keypairs/index.md) if you would like to know more. + + - A `security group` that defines which incoming network traffic is forwarded to instances. 
Security groups hold a set of firewall policies, known as security group rules. + +- The `network` to attach the instance too. + +- If needed, you can assign a **floating (public) IP address** to a running instance to make it accessible from outside the cloud. + +- You can also attach a block storage device, or **volume**, for persistent storage. + +!!! note + Instances that use the default security group cannot, by default, be accessed from any IP address outside of the cloud. If you want those IP addresses to access the instances, you must modify the rules for the security group. Read How to add/update and remove security groups for more information. + +After you gather the parameters that you need to launch an instance, you can launch it from an image or a volume. + +## Gather parameters to launch an instance + +List the available flavors. + +``` { .sh .copy } +openstack flavor list +``` + +Note the ID of the flavor that you want to use for your instance: + +``` { .sh .no-copy } ++--------------------------------------+-------------------------+--------+------+-----------+-------+-----------+ +| ID | Name | RAM | Disk | Ephemeral | VCPUs | Is Public | ++--------------------------------------+-------------------------+--------+------+-----------+-------+-----------+ +| 0f71f1e2-d327-41f9-87e3-0f6c29f51af1 | gb.bm.gpu | 524288 | 240 | 0 | 48 | True | +| 1281555c-6bcb-42e4-a48e-98352dcd0fd0 | compute1.2cpu4ram | 4096 | 30 | 0 | 2 | True | +| 14505c86-765f-4971-a36f-1e867216dccf | memory1.4cpu16ram | 16384 | 30 | 0 | 4 | True | +| 1dbac08-d9a9-4c27-8534-57293785433e | balanced1.32cpu64ram | 65536 | 30 | 0 | 32 | True | +| 2d02e6a4-3937-4ed3-951a-8e27867ff53e | balanced1.8cpu16ram | 16384 | 30 | 0 | 8 | True | +| 2e7b7cc7-9e29-4ff2-98dd-03dbb99dbb5c | compute1.16cpu32ram | 32768 | 30 | 0 | 16 | True | +| 3276cd5f-c96a-4e05-960f-f4f197142c98 | memory1.1cpu4ram | 4096 | 30 | 0 | 1 | True | +| 3b5a6e01-d3ad-49e3-a4f8-183c04444330 | balanced1.1cpu2ram | 2048 | 30 | 0 | 1 
| True | +| 4a0425c8-7494-473e-a5bb-acc91c378615 | c1.cpu128.ram448.disk30 | 458752 | 30 | 0 | 128 | True | +| 4e8af724-f66d-4072-a692-114126de25a0 | compute1.1cpu2ram | 2048 | 30 | 0 | 1 | True | +| 4ec785be-a422-4207-9daa-cbb71c61f9ed | devtest1.4cpu4ram | 4096 | 30 | 0 | 4 | True | +| 674fa81a-69c7-4bf7-b3a9-59989fb63618 | balanced1.16cpu32ram | 32768 | 30 | 0 | 16 | True | +| 6b2e76a8-cce0-4175-8160-76e2525d3d3d | balanced1.2cpu4ram | 4096 | 30 | 0 | 2 | True | +| 7af5c672-43e7-4296-9608-5974394851b8 | memory1.2cpu8ram | 8192 | 30 | 0 | 2 | True | +| 7ffa092c-e75a-4cb5-be9f-db8c749e8801 | compute1.4cpu8ram | 8192 | 30 | 0 | 4 | True | +| 8aef7f54-1ed6-4275-a38c-3f1e61afabd9 | memory1.16cpu64ram | 65536 | 30 | 0 | 16 | True | +| 94ba9177-cb98-4b04-870c-9a696e1c5327 | memory1.32cpu128ram | 131072 | 30 | 0 | 32 | True | +| 9d536959-dd7a-4532-b0b7-db8bb8a72ddb | compute1.8cpu16ram | 16384 | 30 | 0 | 8 | True | +| b46e184c-0dcb-44b2-a53f-c2b8eff676c9 | compute1.32cpu64ram | 65536 | 30 | 0 | 32 | True | +| d6c2e93a-d430-44ca-822b-79a4b882c0c3 | piotr-gpu | 131072 | 100 | 0 | 8 | True | +| d6e3a25a-4f9e-4c87-9342-77f807ead537 | memory1.8cpu32ram | 32768 | 30 | 0 | 8 | True | +| e07cfee1-43af-4bf6-baac-3bdf7c1b88f8 | balanced1.4cpu8ram | 8192 | 30 | 0 | 4 | True | +| e3a1ec6d-9513-4b9f-9580-671c4eee1c21 | devtest1.2cpu2ram | 2048 | 30 | 0 | 2 | True | +| ee55c523-9803-4296-91be-1c34e986baaa | devtest1.1cpu1ram | 1024 | 30 | 0 | 1 | True | ++--------------------------------------+-------------------------+--------+------+-----------+-------+-----------+ +``` + +List the available images. 
+ +``` +openstack image list +``` + +Note the ID of the image from which you want to boot your instance: + +``` { .sh .no-copy } ++--------------------------------------+----------------------------------------------------------+--------+ +| ID | Name | Status | ++--------------------------------------+----------------------------------------------------------+--------+ +| 701e29ac-8963-4cf9-b90a-e1b14095866d | CentOS-Stream-8 | active | +| 42ebfb1b-f136-4947-ae1a-025ad57b369c | CentOS-stream8-baremetal | active | +| f94a4d02-661f-4df7-bb50-ad08ad89f816 | Centos-8.4-AGR | active | +| 386c661a-4c30-4f09-b536-511a862102b4 | FedoraCoreOS35 | active | +| fe8c98d3-7a87-4b5b-9f9e-0f967c53f9bd | FedoraCoreOS37 | active | +| 622c4f3f-8e62-4c81-8809-69b0a34a28e0 | PostgreSQL-Ubuntu-20.04.4 | active | +| 3936d736-e5bb-4024-a361-512fd40413bc | RockyLinux-8.5 | active | +| eb40dbb5-7da6-4784-b47c-f417c9d3b126 | RockyLinux-8.6 | active | +| 2b00f364-1bd0-432c-91f9-8d1adda6fc9f | RockyLinux-8.6-OFED-5.7 | active | +| f366dd3a-5353-47dd-9e92-662055125174 | RockyLinux-8.7 | active | +| b443a9a2-32d1-48c1-8d84-bcc10adbb0c3 | RockyLinux-8.7-OFED-5.8 | active | +| 9933eb25-b0c1-4ef2-b199-25e916c79906 | Ubuntu-20-AGR | active | +| ab67f1b1-44f9-4465-9a68-82cc35ed69c0 | Ubuntu-Bionic-18.04 | active | +| d73ef17b-6b0a-4148-b3b2-f4edcf2e480e | Ubuntu-Focal-20.04 | active | +| ce869c9d-35bb-46be-9b6d-d74d4035e2f8 | Ubuntu-Focal-20.04-baremetal | active | +| 885d01da-777b-4e67-a1ec-e114e4c2786e | Ubuntu-Focal-20.04-mark-testing-dib-2022-06-30T12:47:00Z | active | +| a5c9b7b2-e77b-4094-99ac-db0cf5181da5 | Ubuntu-Jammy-22.04 | active | +| 05f13645-2ac7-44ce-aa1c-64f31ca42761 | Ubuntu-Jammy-22.04-DEMOCREDS | active | +| c7e208cb-6521-422b-8d00-1b8f003c4646 | Ubuntu20.04 | active | +| 728719c2-0a75-4411-a8fa-3230fa5d50e5 | Ubuntu22.04 | active | +| a13f3659-eed9-411c-9a33-f1584fd00328 | Windows-Server-2012-R2-Eval | active | +| 8814f28f-1dbd-4253-84e8-8e45032855c6 | Windows-Server-2019 | 
active | +| 15f3eebe-4c15-4565-a4f8-7369f072b50d | cirros-0.4 | active | +| 534b8b78-f455-4f85-bd21-13c2b1b14e3e | cirros-0.5 | active | +| 04e1a31a-adee-4af2-935e-0e6e7c4b3193 | test-break | active | ++--------------------------------------+----------------------------------------------------------+--------+ +``` + +List the available security groups. + +``` +openstack security group list +``` + +Note the ID of the security group you want to attach to the instance: + +``` { .sh .no-copy } ++--------------------------------------+----------------------------------------------------------------+---------------------------------------------------------+----------------------------------+------+ +| ID | Name | Description | Project | Tags | ++--------------------------------------+----------------------------------------------------------------+---------------------------------------------------------+----------------------------------+------+ +| 7200b28f-9089-4797-a094-39f1995e6f0c | SSH Allow All | This is an open SSH that allows anyone to connect to 22 | 4f07cc254d6c4471805d49bae1f739b9 | [] | +| b24e8bef-969a-4938-8b18-0a33769b181d | kubeapi_whitelist | | 4f07cc254d6c4471805d49bae1f739b9 | [] | +| b5d30ed4-13b3-4f7a-bc5a-c48175566ea3 | My-Security-Group | This is my security group | 4f07cc254d6c4471805d49bae1f739b9 | [] | +| f2f15d6f-2a04-4196-8102-a058042694b3 | default | Default security group | 4f07cc254d6c4471805d49bae1f739b9 | [] | ++--------------------------------------+----------------------------------------------------------------+---------------------------------------------------------+----------------------------------+------+ +``` + +If you have not created any security groups, you can read How to add/update and remove security groups + +You can view rules for a specified security group: + +``` +openstack security group rule list b5d30ed4-13b3-4f7a-bc5a-c48175566ea3 +``` + +List the available key pairs, and note the key pair name that you use for SSH 
access. + +``` +openstack keypair list +``` + +List the available networks. + +``` +openstack network list +``` + +Note the ID for the network + +``` { .sh .no-copy } ++--------------------------------------+--------------------------------+--------------------------------------+ +| ID | Name | Subnets | ++--------------------------------------+--------------------------------+--------------------------------------+ +| 33d0c11b-b659-4b77-9afc-5676fe965839 | external | 5c2644ad-7253-42f5-ad69-40970b84dea6 | +| d3a7ddb5-6582-42cf-978a-c99b4ed25ad4 | NeSI-Training-Test | f5715775-270c-4230-bfa7-fdbdf51352dc | ++--------------------------------------+--------------------------------+--------------------------------------+ +``` + +!!! note + The recommend Network ID to take note of is the network that has the same name as your project. If external access is required then after creating the compute instance a floating ip is the recommend way to gain this external access. + +## Launch an instance + +You can launch an instance from various sources. + +- [Launch an instance from an image](launch-an-instance-from-an-image.md) + +- [Launch an instance from a volume](launch-an-instance-from-a-volume.md) diff --git a/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_CLI/manage-security-groups-of-an-Instance-via-the-cli.md b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_CLI/manage-security-groups-of-an-Instance-via-the-cli.md new file mode 100644 index 000000000..3e3453dd6 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_CLI/manage-security-groups-of-an-Instance-via-the-cli.md @@ -0,0 +1,116 @@ +--- +hidden: false +label_names: +- instance +- resize +position: 2 +title: Manage Security Groups of an Instance via CLI +vote_count: 1 +vote_sum: 1 +--- + +!!! note + The openstack CLI will need to be setup to interact with the FlexiHPC system. 
Please read [Setting up your CLI environment](../../setting-up-your-CLI-environment/index.md) to interact with FlexiHPC to get started. + +When adding or removing `Security Groups` from an instance via the CLI there are few details you will need, the `Instance ID` for the compute instance you want to adjust and the `Security Group ID` you want to add or remove from the instance. + +Run the following command to return a list of servers within your project space + +``` +openstack server list +``` + +``` { .sh .no-copy } ++--------------------------------------+-----------+--------+-------------------------------+-------------------------------+--------------------+ +| ID | Name | Status | Networks | Image | Flavor | ++--------------------------------------+-----------+--------+-------------------------------+-------------------------------+--------------------+ +| 4f69a45d-78ad-48e7-b427-5694c8b09e45 | kahu-test | ACTIVE | NeSI-Training-Prod=10.1.0.250 | NeSI-FlexiHPC-Rocky-9.3_cloud | balanced1.2cpu4ram | ++--------------------------------------+-----------+--------+-------------------------------+-------------------------------+--------------------+ + +``` + +Taking note of the `Instance ID` + +!!! 
note + For this example we will use `4f69a45d-78ad-48e7-b427-5694c8b09e45` + +Then run the following command to return a list of Security Groups + +``` +openstack security group list +``` + +``` { .sh .no-copy } ++--------------------------------------+----------------+------------------------+----------------------------------+------+ +| ID | Name | Description | Project | Tags | ++--------------------------------------+----------------+------------------------+----------------------------------+------+ +| 050e0ec4-1416-46f2-98a0-b492f2c8d81b | ssh-allow-all | | 1b899a2883da444fa6b31172dcebbc56 | [] | +| 08749b3c-f8aa-443e-a881-80f6009fff59 | http | | 1b899a2883da444fa6b31172dcebbc56 | [] | +| 0ed77965-05bf-438e-b4e4-89060f814c4c | SSH Allow All | | 1b899a2883da444fa6b31172dcebbc56 | [] | +| b713d80c-1b7d-4991-b387-514261e59b94 | 6443_Allow_ALL | | 1b899a2883da444fa6b31172dcebbc56 | [] | +| cdad3d6b-a726-4020-a6a3-7c20b1afc79f | https | | 1b899a2883da444fa6b31172dcebbc56 | [] | +| e73a47e9-cc3a-4986-95f9-c3d101c3d448 | default | Default security group | 1b899a2883da444fa6b31172dcebbc56 | [] | ++--------------------------------------+----------------+------------------------+----------------------------------+------+ +``` + +Take note of the `ID` + +!!! 
note + For this example we will use the id `050e0ec4-1416-46f2-98a0-b492f2c8d81b` to add `ssh-allow-all` to our instance + +## Adding a Security Group to an Instance + +Taking the Instance ID and Security Group ID we will add those to the following command + +``` +openstack server add security group +``` + +Using our example values the command will look like this + +``` +openstack server add security group 4f69a45d-78ad-48e7-b427-5694c8b09e45 050e0ec4-1416-46f2-98a0-b492f2c8d81b +``` + +You will not get a response from the endpoint on success + +So you will want to run the following command to see if the Security Group was added + +``` +openstack server show +``` + +This will return the server details and there will be the security_groups field with the newly added group + +``` { .sh .no-copy } +| security_groups | name='ssh-allow-all' | +| | name='default' | +``` + +## Removing a Security Group from an Instance + +Taking the Instance ID and Security Group ID we will add those to the following command + +``` +openstack server remove security group +``` + +Using our example values the command will look like this + +``` +openstack server remove security group 4f69a45d-78ad-48e7-b427-5694c8b09e45 050e0ec4-1416-46f2-98a0-b492f2c8d81b +``` + +You will not get a response from the endpoint on success + +So you will want to run the following command to see if the Security Group was removed + +``` +openstack server show +``` + +This will return the server details and there will be the security_groups field with the removed group not present + +``` { .sh .no-copy } +| security_groups | name='ssh-allow-all' | +``` diff --git a/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_CLI/resizing-an-Instance-via-cli.md b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_CLI/resizing-an-Instance-via-cli.md new file mode 100644 index 000000000..0909c5ad0 --- /dev/null +++ 
b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_CLI/resizing-an-Instance-via-cli.md @@ -0,0 +1,129 @@ +--- +hidden: false +label_names: +- instance +- resize +position: 2 +title: Resizing an Instance via CLI +vote_count: 1 +vote_sum: 1 +--- + +!!! note + The openstack CLI will need to be setup to interact with the FlexiHPC system. Please read [Setting up your CLI environment](../../setting-up-your-CLI-environment/index.md) to interact with FlexiHPC to get started. + +When resizing an instance via the CLI there are few details you will need, the `Instance ID` for the compute instance you want to resize and the `Flavor ID` you want to resize the instance to. + +Run the following command to return a list of servers within your project space + +``` +openstack server list +``` + +``` { .sh .no-copy } ++--------------------------------------+--------------------------+-----------+---------------------------------------------------------------------------+--------------------------+-----------------------+ +| ID | Name | Status | Networks | Image | Flavor | ++--------------------------------------+--------------------------+-----------+---------------------------------------------------------------------------+--------------------------+-----------------------+ +| 6a91e5a1-cf20-4fc5-9b7c-edc2bf1b8de4 | kahu-disk-test | ACTIVE | NeSI-Internal-Sandbox=10.1.0.147, 163.7.177.51 | N/A (booted from volume) | balanced1.4cpu8ram | ++--------------------------------------+--------------------------+-----------+---------------------------------------------------------------------------+--------------------------+-----------------------+ +``` + +Taking note of the `Instance ID` + +!!! 
note + For this example we will use `6a91e5a1-cf20-4fc5-9b7c-edc2bf1b8de4` + +Then run the following command to return a list of flavors + +``` +openstack flavor list +``` + +``` { .sh .no-copy } ++--------------------------------------+-------------------------+--------+------+-----------+-------+-----------+ +| ID | Name | RAM | Disk | Ephemeral | VCPUs | Is Public | ++--------------------------------------+-------------------------+--------+------+-----------+-------+-----------+ +| 1281555c-6bcb-42e4-a48e-98352dcd0fd0 | compute1.2cpu4ram | 4096 | 30 | 0 | 2 | True | +| 14505c86-765f-4971-a36f-1e867216dccf | memory1.4cpu16ram | 16384 | 30 | 0 | 4 | True | +| 1dbac08-d9a9-4c27-8534-57293785433e | balanced1.32cpu64ram | 65536 | 30 | 0 | 32 | True | +| 2d02e6a4-3937-4ed3-951a-8e27867ff53e | balanced1.8cpu16ram | 16384 | 30 | 0 | 8 | True | +| 2e7b7cc7-9e29-4ff2-98dd-03dbb99dbb5c | compute1.16cpu32ram | 32768 | 30 | 0 | 16 | True | +| 3276cd5f-c96a-4e05-960f-f4f197142c98 | memory1.1cpu4ram | 4096 | 30 | 0 | 1 | True | +| 3b5a6e01-d3ad-49e3-a4f8-183c04444330 | balanced1.1cpu2ram | 2048 | 30 | 0 | 1 | True | +| 4e8af724-f66d-4072-a692-114126de25a0 | compute1.1cpu2ram | 2048 | 30 | 0 | 1 | True | +| 4ec785be-a422-4207-9daa-cbb71c61f9ed | devtest1.4cpu4ram | 4096 | 30 | 0 | 4 | True | +| 674fa81a-69c7-4bf7-b3a9-59989fb63618 | balanced1.16cpu32ram | 32768 | 30 | 0 | 16 | True | +| 6b2e76a8-cce0-4175-8160-76e2525d3d3d | balanced1.2cpu4ram | 4096 | 30 | 0 | 2 | True | +| 7af5c672-43e7-4296-9608-5974394851b8 | memory1.2cpu8ram | 8192 | 30 | 0 | 2 | True | +| 7ffa092c-e75a-4cb5-be9f-db8c749e8801 | compute1.4cpu8ram | 8192 | 30 | 0 | 4 | True | +| 8aef7f54-1ed6-4275-a38c-3f1e61afabd9 | memory1.16cpu64ram | 65536 | 30 | 0 | 16 | True | +| 94ba9177-cb98-4b04-870c-9a696e1c5327 | memory1.32cpu128ram | 131072 | 30 | 0 | 32 | True | +| 9d536959-dd7a-4532-b0b7-db8bb8a72ddb | compute1.8cpu16ram | 16384 | 30 | 0 | 8 | True | +| b46e184c-0dcb-44b2-a53f-c2b8eff676c9 | 
compute1.32cpu64ram | 65536 | 30 | 0 | 32 | True | +| d6e3a25a-4f9e-4c87-9342-77f807ead537 | memory1.8cpu32ram | 32768 | 30 | 0 | 8 | True | +| e07cfee1-43af-4bf6-baac-3bdf7c1b88f8 | balanced1.4cpu8ram | 8192 | 30 | 0 | 4 | True | +| e3a1ec6d-9513-4b9f-9580-671c4eee1c21 | devtest1.2cpu2ram | 2048 | 30 | 0 | 2 | True | +| ee55c523-9803-4296-91be-1c34e986baaa | devtest1.1cpu1ram | 1024 | 30 | 0 | 1 | True | ++--------------------------------------+-------------------------+--------+------+-----------+-------+-----------+ +``` + +Take note of the `Flavor ID` + +!!! note + For this example we will resize to balanced1.2cpu4ram with an id of `6b2e76a8-cce0-4175-8160-76e2525d3d3d` + +Taking the Instance ID and Flavor ID we will add those to the following command + +``` +openstack server resize --flavor +``` + +Using our example values the command will look like this + +``` +openstack server resize --flavor 6b2e76a8-cce0-4175-8160-76e2525d3d3d 6a91e5a1-cf20-4fc5-9b7c-edc2bf1b8de4 +``` + +You will not get a response from the endpoint on success + +So you will want to run the following command to see when its in the state of `verify_resize` + +``` +openstack server show +``` + +This will return the server details and there will be the status + +``` { .sh .no-copy } +| status | VERIFY_RESIZE +``` + +You will then need to run the command below to verify the resize + +``` +openstack server resize confirm +``` + +Using the example values the command will be the following + +``` +openstack server resize confirm 6a91e5a1-cf20-4fc5-9b7c-edc2bf1b8de4 +``` + +Again there wont be a response from the endpoint so we will call the command `openstack server show` again to confirm the status of the instance + +``` +openstack server show +``` + +The status of the instance should now be `ACTIVE` + +``` { .sh .no-copy } +| status | ACTIVE +``` + +You should also see the new `flavor` that the instance has been resized too + +``` { .sh .no-copy } +| flavor | balanced1.2cpu4ram 
(balanced1.2cpu4ram) +``` diff --git a/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_dashboard/.pages.yml b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_dashboard/.pages.yml new file mode 100644 index 000000000..d5f148238 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_dashboard/.pages.yml @@ -0,0 +1,4 @@ +nav: + - launch-an-instance-via-dashboard.md + - resizing-an-Instance-via-the-dashboard.md + - manage-security-groups-of-an-Instance-via-the-dashboard.md diff --git a/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_dashboard/launch-an-instance-via-dashboard.md b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_dashboard/launch-an-instance-via-dashboard.md new file mode 100644 index 000000000..1d0171562 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_dashboard/launch-an-instance-via-dashboard.md @@ -0,0 +1,151 @@ +--- +hidden: false +label_names: +- instance +- launch +position: 2 +title: Launch an Instance via Dashboard +vote_count: 1 +vote_sum: 1 +--- + +Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/) + +Select the project you would like to deploy the new instance to (Use the project selector on the top left-hand side): + +
+ ![Alt text](project-selector.png) +
+ +Open the `Project` tab, open the `Compute` tab and select `Instances` + +Click `Launch Instance` + +
+ ![Alt text](instance-overview.png) +
+ +Within the `Launch Instance` dialog box there are the following values: + +**Details** + +`Project Name` +: The name of the project that this instance will be deployed under. + +`Instance Name` +: The name of the VM/Instance. The name you assign here becomes the initial host name of the server. + +!!! note + After the server is built, if you change the server name in the API or change the host name directly, the names are not updated in the dashboard. + + Server names are not guaranteed to be unique when created so you could have two instances with the same host name. + +`Description` +: A brief description of the VM/Instance. + +`Availability Zone` +: By default this will be “nova“. This specifies which zone it will be deployed too if there were multiple zones. The current default and only zone is “nova“ at this point in time. + +`Count` +: how many of the VMs/Instances you would like to deploy. + +`Source` +: Select Boot Source + + `Boot from image` + : Selecting this option allows you to choose from a list of images to boot from, a list of images will be presented to choose from. + + `Boot from instance snapshot` + : Selecting this option allows you to choose an instance snapshot to boot from, a list of instance snapshots will be presented to choose from. + + `Boot from volume` + : Selecting this option allows you to choose a volume to boot from, a list of volumes will be presented to choose from. + + `Boot from volume snapshot` + : Selecting this option allows you to choose a volume snapshot to boot from, a list of volume snapshots will be presented to choose from. + +`Delete Volume on Instance Delete` +: Selecting YES will enable the volume to be deleted when the instance is deleted. + +`Volume Size (GB)` +: This will be present should the user choose to boot from image or instance snapshot. This setting determines the size of the boot volume. This will be overridden when selecting a flavor if its less then the flavor's size. 
+ +**Flavor** + +`Flavor` +: Specify the size of the instance to launch, click the `^` to select the instance size you are after. + +!!! note + Instance sizes have a brief explanation in the name of what they are best used for. + + Example `compute1.1cpu2ram` is best for Compute heavy workloads with 1 vCPU and 2 GB of RAM or `memory1.1cpu4ram` is best for Memory heavy workloads with 1 vCPU and 4 GB of RAM + +**Networks** + +`Selected Networks` +: To add a network to the instance, click the `^` in the Available field. + +!!! note + If you haven't configured a network prior to the creation of a compute instance then the recommend network should be the one that has the same name as the project you are in. Example, I am in the `NeSI-SandBox-Environment` so I will pick the `NeSI-SandBox-Environment` network. + + You shouldn't need to add the instance to an external network as a floating IP will be added later on should you need to access the instance from outside the FlexiHPC network. + +**Network Ports** + +This for now can be ignored as we will use Security Groups to assign port connections for the instance. + +**Security Groups** + +Activate the security groups that you want to assign to the instance. + +Security groups are a kind of cloud firewall that define which incoming network traffic is forwarded to instances. + +If you have not created any security groups, you can assign only the default security group to the instance. + +!!! note + By default the `default` security group is added to all instances. This is required should the instance want to communicate to the outside world or to any other instance within the FlexiHPC project. + + How to create `Security Groups` are explained here How to add/update and remove security groups + +**Key Pair** + +Specify a key pair that is associated with your FlexiHPC user login. 
If there is none available then you can create one by clicking `Create key Pair` + +If the image uses a static root password or a static key set (neither is recommended), you do not need to provide a key pair to launch the instance. + +!!! info + The following Creating and Importing Key Pairs will also explain on how to import and create key pairs + +**Configuration** + +`Customization Script Source` +: Specify a customization script that runs after your instance launches. This can either be applied by uploading a file from your local machine or entered manually within the textbox provided. + +**Server Groups** + +Server groups determine the Policy that FlexiHPC should apply during the deployment of the instance in regards to other servers within the same group. + +!!! info + By default an instance doesn't need to be in a Server Group, however should you want to apply policies to your servers then please read Server Groups within FlexiHPC to create a Server Group and also understand each policy. + + Server Group should be created prior to the creation of the instance if so required. + +**Scheduler Hints** + +These are tags or labels that tell the underlying FlexiHPC on where to deploy an instance. An example would be, If your workload or instance requires an intel based processor then you would apply the corresponding Scheduler Hint to the instance so it is only deployed to intel based processors. + +**Metadata** + +`Available Metadata` +: Add Metadata items to your instance. + +Click `Launch Instance`. + +The instance starts on a compute node in the cloud. + +!!! info + If you require an external connection to this instance then a floating IP is required. The article + + [Manage Floating IPs via the Dashboard](../../create-and-manage-networks/with_the_dashboard/manage-floating-ips-via-the-dashboard.md) will detail the requirements to complete this. 
+ +You should now be able to connect to your instance using SSH, which is explained in more detail in [Connect to your instance by using SSH](../connect-to-instance-ssh.md). diff --git a/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_dashboard/manage-security-groups-of-an-Instance-via-the-dashboard.md b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_dashboard/manage-security-groups-of-an-Instance-via-the-dashboard.md new file mode 100644 index 000000000..660e6ddcc --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_dashboard/manage-security-groups-of-an-Instance-via-the-dashboard.md @@ -0,0 +1,77 @@ +--- +hidden: false +label_names: +- instance +- resize +position: 2 +title: Manage Security Groups of an Instance via the Dashboard +vote_count: 1 +vote_sum: 1 +--- + +Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/) + +Select the project you would like to deploy the new instance to (Use the project selector on the top left-hand side): + +
+ ![Alt text](project-selector.png) +
+ +Open the `Project` tab, open the `Compute` tab and select `Instances` then select the compute instance you want to manage. + +## Attach a Security Group + +Under the `Actions` menu on the far right, select `Edit Security Groups` + +
+ ![Alt text](instance-action-menu.png) +
+ +Within the `Edit Instance` dialog you will have 2 columns + +`All Security Groups` +: These are all security groups created in your project + +`Instance Security Groups` +: These are the security groups attached to your instance + +
+ ![Alt text](manage-security-groups-dialog.png) +
+ +Clicking the `+` icon from the `All Security Groups` column will add them to the `Instance Security Groups` column + +
+ ![Alt text](manage-security-groups-add-dialog.png) +
+ +Once the desired `Security Groups` have been added, click `save` + + +## Remove a Security Group + +Under the `Actions` menu on the far right, select `Edit Security Groups` + +
+ ![Alt text](instance-action-menu.png) +
+ +Within the `Edit Instance` dialog you will have 2 columns + +`All Security Groups` +: These are all security groups created in your project + +`Instance Security Groups` +: These are the security groups attached to your instance + +
+ ![Alt text](manage-security-groups-add-dialog.png) +
+ +Clicking the `-` icon from the `Instance Security Groups` column will remove it and add it to the `All Security Groups` column + +
+ ![Alt text](manage-security-groups-dialog.png) +
+ +Once the desired `Security Groups` have been removed, click `save` diff --git a/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_dashboard/resizing-an-Instance-via-the-dashboard.md b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_dashboard/resizing-an-Instance-via-the-dashboard.md new file mode 100644 index 000000000..dbb11ad9c --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/launch-and-manage-instances/with_the_dashboard/resizing-an-Instance-via-the-dashboard.md @@ -0,0 +1,61 @@ +--- +hidden: false +label_names: +- instance +- resize +position: 2 +title: Resizing an Instance via the Dashboard +vote_count: 1 +vote_sum: 1 +--- + +Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/) + +Select the project you would like to deploy the new instance to (Use the project selector on the top left-hand side): + +
+ ![Alt text](project-selector.png) +
+ +Open the `Project` tab, open the `Compute` tab and select `Instances` then select the compute instance you want to resize. + +Under the `Actions` menu on the far right, select `Resize Instance` + +
+ ![Alt text](instance-action-menu.png) +
+ +Within the `Resize Instance` dialog, under the `Flavor Choice` tab you have the following options + +`Old Flavor` +: This is the flavor your compute instance is currently + +`New Flavor` +: This is the flavor you would like to resize your compute instance too. + +
+ ![Alt text](resize-instance-dialog.png) +
+ +Under the Advanced Options tab + +`Disk Partition` +: The default is Automatic. If you don't want the disk to resize upon resizing then select Manual. + +!!! note + The disk will resize if you booted from image and the image is bigger or smaller than the current one you are running. By default flavors have a disk size of 30 GB and that will generally stay at the same size + +Click `Resize` + +After a few moments you will be asked to confirm the Resize + +
+ ![Alt text](confirm-resize.png) +
+ +Under the `Actions` menu click `Confirm Resize/Migrate` to confirm and complete the resize. + +!!! note + A resize will result in the compute instance being shutdown and rebooted in the new flavor + + \ No newline at end of file diff --git a/docs/Researcher_Developer_Cloud/user-guides/setting-up-your-CLI-environment/index.md b/docs/Researcher_Developer_Cloud/user-guides/setting-up-your-CLI-environment/index.md new file mode 100644 index 000000000..e64441279 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/setting-up-your-CLI-environment/index.md @@ -0,0 +1,170 @@ +--- +label_names: +- instance +- launch +position: 1 +title: Set up your CLI environment +vote_count: 1 +vote_sum: 1 +--- + +## Linux or Mac OS CLI + +!!! note + Python 3+ is recommended for all commands in this article + +### Installation on Linux and Mac + +When installing the CLI using pip, it is recommended to use a python virtual environment to contain the required dependencies. The examples below all make reference to the use of a virtual environment. + +Operating system specific steps +Here is an outline for installing the Openstack command line tools on the common Linux/Unix based systems. This includes MacOS as it runs a BSD based system. 
+ + +=== "Ubuntu 20.04" + # Make sure the package cache is up to date and ensure you have + # Python3 installed + sudo apt update + sudo apt install -y python3-venv python3-dev + + # create a virtual environment using the Python3 virtual environment module + python3 -m venv venv + + # activate the virtual environment + source venv/bin/activate + + # install the Openstack commandline tools into the virtual environment + pip install -U pip \ + wheel \ + python-openstackclient + +=== "MacOS" + # from a terminal session install virtualenv + sudo pip install virtualenv + + # if the above fails and your on python 2.x then run + sudo easy_install pip + sudo pip install virtualenv + + # Create a new virtual environment and activate it + virtualenv venv + source venv/bin/activate + + # Install the Python openstack client libraries into your virtual environment + pip install -U pip \ + python-openstackclient + +=== "Centos 8" + # Make sure the package cache is up to date and ensure you have + # Python3 installed + sudo yum update -y + sudo yum install -y python3 + + # create a virtual environment using the Python3 virtual environment module + python3 -m venv venv + + # activate the virtual environment + source venv/bin/activate + + # install the Openstack commandline tools into the virtual environment + pip install -U pip \ + python-openstackclient + +=== "MacOS - python3" + # Create a new virtual environment and activate it + python -m venv venv-openstackcli + source venv-openstackcli/bin/activate + + # Install the Python openstack client libraries into your virtual environment + pip install -U python-openstackclient + +### Upgrading the CLI + +To keep the command line tools up to date, you will need to perform upgrades on them after changes have come out. 
The following code snippet will upgrade all of the tools listed above; make sure that you have activated your virtual environment before running the command below: + +``` +pip install --upgrade pip \ +python-openstackclient +``` + +## Additional CLI tools + +Should you be moving into more advanced projects that require the reader to interact with other parts of FlexiHPC then the following tools will need to be installed + +### Kubernetes CLI Tools + +!!! note + If installing the Networking or HOT templating CLI tools then you won't need to install either of the following tools python-heatclient or python-neutronclient + +``` +pip install -U pip \ +python-heatclient \ +python-neutronclient \ +python-octaviaclient \ +python-magnumclient +``` + +### Networking CLI Tools + +!!! note + If installing the Kubernetes CLI tools this is included + +``` +pip install -U pip \ +python-neutronclient +``` + +### HOT Templating CLI Tools + +!!! note + If installing the Kubernetes CLI tools this is included + +``` +pip install -U pip \ +python-heatclient +``` + +### Object Storage CLI Tools + +``` +pip install -U pip \ +python-swiftclient +``` + +## Configuration on Linux and Mac + +### Source an openstack RC file + +When no configuration arguments are passed, the OpenStack client tools will try to obtain their configuration from environment variables. To help you define these variables, the cloud dashboard allows you to download an OpenStack RC file from which you can easily source the required configuration. + +To download an OpenStack RC file from the dashboard: + +- Log in to the FlexiHPC dashboard and select your project. +- From the left hand menu select “API Access” and click on “Download OpenStack RC File”. Save the “OpenStack RC File” file on to the host where the client tools are going to be used from. 
+- Source the configuration from the OpenStack RC file: +``` +source projectname-openrc.sh +``` +- When prompted for a password, enter the password of the user who downloaded the file. Note that your password is not displayed on the screen as you type it in. + +!!! warning + You should never type in your password on the command line (or pass it as an argument to the client tools), because the password will be stored in plain text in the shell history file. This is unsafe and could allow a potential attacker to compromise your credentials. + +- You can confirm the configuration works by running a simple command, such as `openstack network list` and ensuring it returns no errors. + +!!! note + You are also able to download the OpenStack RC file from the top-right corner where your login details are displayed, as shown below: +
+ ![Alt text](user-menu.png) +
+ + +## Using the CLI on Linux and Mac + +This page assumes that you have installed the python virtual environment and other dependencies from earlier in this section of the documentation. If you have, then the following should make sense. + +1. Activate your virtual environment. +1. Source an openstack RC file +1. Invoke the CLI with the openstack command + +For a reference of all commands supported by the CLI, refer to the [OpenStack Client documentation](https://docs.openstack.org/python-openstackclient/latest/). diff --git a/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/.pages.yml b/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/.pages.yml new file mode 100644 index 000000000..682ec2b26 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/.pages.yml @@ -0,0 +1,7 @@ +nav: +- "Images: Upload and Manage": index.md +- with-the-dashboard +- with-the-cli +- converting_image-types.md +- image-filter.md +- ... diff --git a/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/converting_image-types.md b/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/converting_image-types.md new file mode 100644 index 000000000..fc9c77cec --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/converting_image-types.md @@ -0,0 +1,59 @@ +--- +hidden: false +label_names: +- images +- convert +position: 1 +title: Converting Image types +vote_count: 1 +vote_sum: 1 +--- + +!!! note + The following commands are generally run from the CLI, you don't need the OpenStack CLI to complete these tasks; just ensure you have the `qemu-img` package installed + +Converting images from one format to another is generally straightforward and can be done from a single simple command + +The `qemu-img convert` command can do conversion between multiple formats, including qcow2, qed, raw, vdi, vhd, and vmdk.
+ +qemu-img format strings: + +| Image Format | Argument to qemu-img | +|:-:|:-:| +| QCOW2 (KVM, Xen) | `qcow2` | +| QED (KVM) | `qed` | +| raw | `raw` | +| VDI (VirtualBox) | `vdi` | +| VHD (Hyper-V) | `vpc` | +| VMDK (VMware) | `vmdk` | + +!!! note + The main formats used in the RDC are either `RAW` or `QCOW2` + + +The following command example will convert a raw image file named `image.img` to a `qcow2` image file, the `-f` specifies the first image format and the `-O` specifies the output format + +``` { .sh } +qemu-img convert -f raw -O qcow2 image.img image.qcow2 +``` + +The following command example will convert a vmdk image file to a raw image file + +``` { .sh } +qemu-img convert -f vmdk -O raw image.vmdk image.img +``` + +The following command example will convert a vmdk image file to a qcow2 image file + +``` { .sh } +qemu-img convert -f vmdk -O qcow2 image.vmdk image.qcow2 +``` + +!!! note + The `-f format` flag is optional. If omitted, qemu-img will try to infer the image format. + + When converting an image file with Windows, ensure the virtio driver is installed. Otherwise, you will get a blue screen when launching the image due to lack of the virtio driver. Another option is to set the image properties as below when you update the image in the Image service to avoid this issue, but it will reduce virtual machine performance significantly. 
+ + ``` { .sh } + openstack image set --property hw_disk_bus='ide' image_name_or_id + ``` \ No newline at end of file diff --git a/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/image-filter.md b/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/image-filter.md new file mode 100644 index 000000000..6a6c13b82 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/image-filter.md @@ -0,0 +1,27 @@ +--- +hidden: false +label_names: +- images +- visibility +- types +position: 1 +title: Image Visibility Types +vote_count: 1 +vote_sum: 1 +--- + +The NeSI Research Developer Cloud has different image tags that are broken down in the following way + +`Public` +: These images are built and maintained by NeSI, the naming format is generally `NeSI-FlexiHPC-*` which is then appended with the OS flavor. + + For example `NeSI-FlexiHPC-Ubuntu-Jammy_22.04` + +`Private` +: These are images built and uploaded by the project you are in. Think of these as your images that you have created and uploaded to the Research Developer Cloud + +`Shared with Project` +: These are images that are shared from another project to your project. + +`Community` +: These are images uploaded by the community/other cloud users that are looking to share their images with the wider Research Cloud users \ No newline at end of file diff --git a/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/index.md b/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/index.md new file mode 100644 index 000000000..0fce53efc --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/index.md @@ -0,0 +1,25 @@ +--- +hidden: false +label_names: +- images +- upload +- manage +position: 1 +title: Uploading and Managing Images +vote_count: 1 +vote_sum: 1 +--- + +FlexiHPC supplies many OS images within the Platform.
However should you require a specific version for an OS there is the ability to upload that image to the FlexiHPC platform. + +The two ways to add an image are as follows: + +- [Upload an Image via the Dashboard](with-the-dashboard/upload-an-image-via-the-dashboard.md) + +- [Upload an Image via CLI](with-the-cli/upload-an-image-via-cli.md) + +When managing images within the FlexiHPC platform we have the same options either via the dashboard or the CLI + +- [Managing Images via the Dashboard](with-the-dashboard/managing-images-via-the-dashboard.md) + +- [Managing Images via CLI](with-the-cli/managing-images-via-cli.md) diff --git a/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/with-the-cli/.pages.yml b/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/with-the-cli/.pages.yml new file mode 100644 index 000000000..0e6bf0dbc --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/with-the-cli/.pages.yml @@ -0,0 +1,4 @@ +nav: + - "Upload Image": upload-an-image-via-cli + - "Managing Images": managing-images-via-cli + - ... diff --git a/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/with-the-cli/managing-images-via-cli.md b/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/with-the-cli/managing-images-via-cli.md new file mode 100644 index 000000000..80087d7ee --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/with-the-cli/managing-images-via-cli.md @@ -0,0 +1,80 @@ +--- +hidden: false +label_names: +- images +- upload +- manage +- CLI +position: 1 +title: Managing Images via CLI +vote_count: 1 +vote_sum: 1 +--- + +!!! note + The openstack CLI will need to be set up to interact with the FlexiHPC system. Please read [Setting up your CLI environment](../../setting-up-your-CLI-environment/index.md) to interact with FlexiHPC to get started.
+ +## List images + +You are able to list the images that you have uploaded to the FlexiHPC platform using the command `openstack image list --` + +``` +openstack image list --private +``` + +``` { .sh .no-copy } ++--------------------------------------+------------------------+--------+ +| ID | Name | Status | ++--------------------------------------+------------------------+--------+ +| 64dead14-9c5a-41c3-b4d6-a122a2ca8f28 | linux-test-snapshot | active | +| d479470d-ab6d-40d6-afc9-04f5f253404d | linux-test-to-boot-2 | active | +| 40ed5c78-c970-4182-a9c8-27e18a6a4251 | linux-test-to-boot-cli | active | +| 5a8e5595-d893-4d1c-8600-d7982f3915bb | ood-keycloak-1 | active | +| 04e1a31a-adee-4af2-935e-0e6e7c4b3193 | test-break | active | ++--------------------------------------+------------------------+--------+ +``` + +``` +openstack image list --shared +``` + +``` { .sh .no-copy } ++--------------------------------------+------------------------------+--------+ +| ID | Name | Status | ++--------------------------------------+------------------------------+--------+ +| 2282be79-1b79-434b-9974-162b533dab00 | Ubuntu-22.10-Wiki-Test | active | +| 1a0480d1-55c8-4fd7-8c7a-8c26e52d8cbd | ubuntu-jammy-server-cloudimg | active | ++--------------------------------------+------------------------------+--------+ +``` + +Adding `--long` to either command will present more information regarding the image + +``` +openstack image list --shared --long +``` + +``` { .sh .no-copy } ++--------------------------------------+------------------------------+-------------+------------------+-----------+----------------------------------+--------+------------+-----------+----------------------------------+------+ +| ID | Name | Disk Format | Container Format | Size | Checksum | Status | Visibility | Protected | Project | Tags | 
++--------------------------------------+------------------------------+-------------+------------------+-----------+----------------------------------+--------+------------+-----------+----------------------------------+------+ +| 2282be79-1b79-434b-9974-162b533dab00 | Ubuntu-22.10-Wiki-Test | raw | bare | 740491264 | 91c3094a3ff142ce651034d41aa860c3 | active | shared | False | 4f07cc254d6c4471805d49bae1f739b9 | | +| 1a0480d1-55c8-4fd7-8c7a-8c26e52d8cbd | ubuntu-jammy-server-cloudimg | qcow2 | bare | 688193536 | e05c516fa30cf6c0fd47930449b85ac7 | active | shared | False | 4f07cc254d6c4471805d49bae1f739b9 | | ++--------------------------------------+------------------------------+-------------+------------------+-----------+----------------------------------+--------+------------+-----------+----------------------------------+------+ +``` + +## Updating an image + +If you need to modify the image details or metadata, the general practice is to create a new image with the desired changes rather than directly editing the existing one. After creating the new image, you can delete the old image if it's no longer needed. + +If you need to update an Image this can be done via the Dashboard + +## Deleting an image + +Using the same command `openstack image list --` get the `image id` as you will need to supply that to the following command + +``` +openstack image delete image_id +``` + +!!! 
info + Deleting an image that has been used to create a compute instance will fail until that instance has been deleted diff --git a/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/with-the-cli/upload-an-image-via-cli.md b/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/with-the-cli/upload-an-image-via-cli.md new file mode 100644 index 000000000..e41d77002 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/with-the-cli/upload-an-image-via-cli.md @@ -0,0 +1,85 @@ +--- +hidden: false +label_names: +- images +- upload +- manage +- CLI +position: 1 +title: Upload an Image via CLI +vote_count: 1 +vote_sum: 1 +--- + +!!! note + The openstack CLI will need to be set up to interact with the FlexiHPC system. Please read + [Setting up your CLI environment](../../setting-up-your-CLI-environment/index.md) to interact with FlexiHPC to get started. + +To upload an image to FlexiHPC we will be using the command `openstack image create` to upload the image. + +Using Ubuntu as an example, we will upload a new Ubuntu 22.10 (kinetic) image. Heading to the Ubuntu cloud images [link](https://cloud-images.ubuntu.com/kinetic/current/) for Kinetic images we want to select the image that has the description `QCow2 UEFI/GPT Bootable disk image with linux-kvm KVM optimised kernel` + +With that downloaded and accessible from your CLI we can upload it to FlexiHPC + +``` +openstack image create --file /path/to/your/image-file --disk-format disk_format --container-format container_format image_name +``` + +Replace the placeholders with the appropriate values depending on your environment: + +`/path/to/your/image-file` +: The local path to the image file you want to upload. + +`disk_format` +: The format of the image on disk, e.g., qcow2, raw, vhd, vmdk, etc. If you're unsure then set the format to qcow2 + +!!!
note + When it comes to the different image formats we generally follow the very basic guideline for them. + + Images in the `QCOW2` format are for running a single compute instance. + + Images in the `RAW` format are for running multiple compute instances. + + If you wish to convert to another format please read Converting Images between formats + +`container_format` +: The format of the container for the image, e.g., bare, ovf, aki (kernel image), etc. If you are unsure here then set the format to bare + +`image_name` +: The name you want to give to the uploaded image. + +If we run the command with the supplied settings + +``` +openstack image create --file ~/openstackcli/iso/kinetic-server-cloudimg-amd64-disk-kvm.img --disk-format qcow2 --container-format bare Ubuntu-22.10-Wiki-Test +``` + +The command window will process silently as it uploads it to the FlexiHPC platform. We should then get a response from the FlexiHPC Platform + +``` { .sh .no-copy } ++------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| Field | Value | ++------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| container_format | bare | +| created_at | 2023-08-01T02:55:50Z | +| disk_format | qcow2 | +| file | /v2/images/1276db65-e5de-4721-b2db-666a73929b3e/file | +| id | 1276db65-e5de-4721-b2db-666a73929b3e | +| min_disk | 0 | +| min_ram | 0 | +| name | Ubuntu-22.10-Wiki-Test | +| owner | 4f07cc254d6c4471805d49bae1f739b9 | +| properties | locations='[]', os_hidden='False', owner_specified.openstack.md5='', owner_specified.openstack.object='images/Ubuntu-22.10-Wiki-Test', owner_specified.openstack.sha256='' | +| protected | False | +| schema | /v2/schemas/image | +| status | queued | +| tags | | +| updated_at | 
2023-08-01T02:55:50Z | +| visibility | shared | ++------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +``` + +!!! note + Adding `--wait` to the end of the command will give you a progress bar rather then returning with the status `queued` + +We should then be able to use the `id` returned to launch an instance from either the [Dashboard](../../launch-and-manage-instances/with_the_dashboard/launch-an-instance-via-dashboard.md) or the [CLI](../../launch-and-manage-instances/with_the_CLI/launch-an-instance-via-cli.md) diff --git a/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/with-the-dashboard/.pages.yml b/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/with-the-dashboard/.pages.yml new file mode 100644 index 000000000..92e278f2e --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/with-the-dashboard/.pages.yml @@ -0,0 +1,4 @@ +nav: + - "Upload Image": upload-an-image-via-the-dashboard + - "Managing Images": managing-images-via-the-dashboard + - ... 
diff --git a/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/with-the-dashboard/managing-images-via-the-dashboard.md b/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/with-the-dashboard/managing-images-via-the-dashboard.md new file mode 100644 index 000000000..02bfd2960 --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/with-the-dashboard/managing-images-via-the-dashboard.md @@ -0,0 +1,71 @@ +--- +hidden: false +label_names: +- images +- upload +- manage +position: 1 +title: Managing Images via the Dashboard +vote_count: 1 +vote_sum: 1 +--- + +Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/) + +Select the project you would like to deploy the new instance to (Use the project selector on the top left-hand side): + +
+ ![Alt text](project-selector.png) +
+ +Open the `Project` tab, open the `Compute` tab and select `Images` + +You should see a list of all images available for your project. You can use the filter to see images that are `public`,`private`,`shared` or `community` + +Images that are `public` are uploaded and configured by the FlexiHPC team. + +Images that are specific to your project are either `private` or `shared` + +## Updating an Image + +!!! note + If you need to modify the image details or metadata, the general practice is to create a new image with the desired changes rather than directly editing the existing one. After creating the new image, you can delete the old image if it's no longer needed. + +Find the image you would like to update and then click the down arrow at the end of the image row and select `Edit Image` + +Within the `Edit Image` dialog you are able to change the following items + +**Image Details** + +`Image Name` +: !!!note + When changing this property be very careful as someone's code or CLI-based deployment could be targeting this property. We normally say use Image IDs when using any CLI or code-based deployments to avoid this issue as Image IDs are GUIDs + +`Image Description` + +**Image Requirements** + +Nothing in here should be changed or updated. + +**Image Sharing** + +`Visibility` +: This can be changed at any time + +`Protected` +: This can be changed at any time + +Click on `Update Image` to apply any changes made to the image. + +## Deleting an Image + +Find the image you would like to delete and then click the down arrow at the end of the image row and select `Delete Image` + +A `Confirm Delete Image` dialog will appear explaining what image you have selected to delete. + +!!! warning + Deleting images is a non-recoverable exercise; proceed with caution + +If you are happy to proceed then click `Delete Image` + +The image will then be deleted from the project and underlying storage.
diff --git a/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/with-the-dashboard/upload-an-image-via-the-dashboard.md b/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/with-the-dashboard/upload-an-image-via-the-dashboard.md new file mode 100644 index 000000000..e45a5ea1e --- /dev/null +++ b/docs/Researcher_Developer_Cloud/user-guides/uploading-and-managing-Images/with-the-dashboard/upload-an-image-via-the-dashboard.md @@ -0,0 +1,75 @@ +--- +hidden: false +label_names: +- images +- upload +- manage +position: 1 +title: Upload an Image via the Dashboard +vote_count: 1 +vote_sum: 1 +--- + +Log into the [NeSI FlexiHPC Dashboard](https://dashboard.cloud.nesi.org.nz/) + +Select the project you would like to deploy the new instance to (Use the project selector on the top left-hand side): + +
+ ![Alt text](project-selector.png) +
+ +Open the `Project` tab, open the `Compute` tab and select `Images` + +Click `Create Image` + +Within the `Create Image` dialog box there are the following values: + +**Image Details** + +`Image Name` +: A friendly name for your image + +`Image Description` +: A description for your image + +**Image Source** + +`File` +: The location on your machine where the image is located + +`Format` +: - ISO - Optical Disk Image + - PLOOP - Virtuozzo/Parallels Loopback Disk + - QCOW2 - QEMU Emulator + - RAW + - VDI - Virtual Disk Image + - VHD - Virtual Hard Disk + - VMDK - Virtual Machine Disk + - AKI - Amazon Kernel Image + - AMI - Amazon Machine Image + - ARI - Amazon Ramdisk Image + + Generally the image you downloaded will have its Format in the name/file extension, An example would be kinetic-server-cloudimg-amd64.vmdk which has the vmdk extension. If it doesn't specify the format in the name assume qcow2 + +!!! note + When it comes to the different image formats we generally follow the very basic guideline for them. + + Images in the `QCOW2` format are for running a single compute instance. + + Images in the `RAW` format are for running multiple compute instances. + + If you wish to convert to another format please read Converting Images between formats + +**Image Requirements** + +The following settings can remain blank as they are not needed to upload an image to the FlexiHPC platform. + +**Image Sharing** + +`Visibility` +: We have the options of Private,Shared or Community. If you don't want anyone else outside your project to use/see the new image then select private, however if you want this to be shared with other projects we would select either Shared or Community + +`Protected` +: This setting allows the image to be protected from deletion. + +Once we have filled out the required fields we can click `Create Image`, the FlexiHPC platform will then process and save the image to the underlying CEPHS file system. 
diff --git a/docs/Scientific_Computing/.pages.yml b/docs/Scientific_Computing/.pages.yml deleted file mode 100644 index d19d283a6..000000000 --- a/docs/Scientific_Computing/.pages.yml +++ /dev/null @@ -1,11 +0,0 @@ -nav: - - Supported_Applications - - Training - - Interactive_computing_using_Jupyter - - Interactive_computing_with_NeSI_OnDemand - - The_NeSI_High_Performance_Computers - - Running Jobs on Māui and Mahuika: Running_Jobs_on_Maui_and_Mahuika - - Profiling_and_Debugging - - HPC_Software_Environment - - Terminal_Setup - - ... diff --git a/docs/Scientific_Computing/HPC_Software_Environment/.pages.yml b/docs/Scientific_Computing/HPC_Software_Environment/.pages.yml deleted file mode 100644 index 6d51da845..000000000 --- a/docs/Scientific_Computing/HPC_Software_Environment/.pages.yml +++ /dev/null @@ -1,15 +0,0 @@ -nav: - - Build_an_Apptainer_container_on_a_Milan_compute_node.md - - OpenMP_settings.md - - Offloading_to_GPU_with_OpenMP.md - - Offloading_to_GPU_with_OpenACC.md - - NVIDIA_GPU_Containers.md - - Configuring_Dask_MPI_jobs.md - - Software_Version_Management.md - - Thread_Placement_and_Thread_Affinity.md - - Finding_Software.md - - Installing_Third_Party_applications.md - - Compiling_software_on_Mahuika.md - - Compiling_software_on_Maui.md - - Programming_environment_differences_between_Maui_and_Mahuika.md - - ... diff --git a/docs/Scientific_Computing/HPC_Software_Environment/Compiling_software_on_Maui.md b/docs/Scientific_Computing/HPC_Software_Environment/Compiling_software_on_Maui.md deleted file mode 100644 index 355fd8f26..000000000 --- a/docs/Scientific_Computing/HPC_Software_Environment/Compiling_software_on_Maui.md +++ /dev/null @@ -1,290 +0,0 @@ ---- -created_at: '2018-07-16T23:39:19Z' -tags: [] -title: "Compiling software: Māui" -vote_count: 1 -vote_sum: 1 -zendesk_article_id: 360000336076 -zendesk_section_id: 360000040056 ---- - -Building Fortran, C, or C++ code on the XC50 platform requires using the Cray programming environment. 
From a user perspective, the programming environment consists of a set of environment modules that select a compiler, essential libraries such as the MPI library, a CPU target, and more. The build process on the XC50 thus differs slightly from a standard Linux system. Non-compiled code, such as Python or R programs, do not use the programming environment. Note, however, that loading a module provided by NeSI/NIWA to get access to, e.g., the “RegCM” code, may change the Cray programming environment in that Cray environment modules may be swapped. - -!!! warning - - It is essential to use a Programming Environment (PrgEnv-cray, PrgEnv-intel or PrgEnv-gnu) when you build code, otherwise it is likely that problems at build time or run time appear - - **Never** use `module purge` on the XC50 platform, this will render the programming environment unusable, and you will have to log out and log back in. - - Code that was built on the XC50 platform is unlikely to run on Māui’s CS500 platform or on Mahuika’s CS400 platform; - please rebuild your code when you change platform. - -## The build node - -Māui has a dedicated build node, `login.maui.nesi.org.nz`, which should be used for building code. Please do not build code on the compute nodes by submitting a build job through SLURM: - -- The compute nodes only run a thin operating system with few command line utilities, it is thus likely that your build will fail. -- The file system on XC50 compute nodes is optimised for handling large block IO, small block IO that is typical for a build job is inefficient. -- Submitting a job will allocate entire nodes. This is a waste of compute resources, especially if only one core or a few cores are used. - -Please keep in mind that the build node is a shared resource. Instead of using as many parallel build processes as possible with `make -j `, please limit the amount of processes (`make -j 5` for example). 
- -## Choosing a programming environment - -The following Programming Environments are provided on Māui, named after the underlying compiler suite: - -- `PrgEnv-cray` -- `PrgEnv-intel` -- `PrgEnv-gnu` - -The `PrgEnv-cray` environment is the default. If you want to change programming environment to use the Intel or GNU compilers, run - -```sh -module swap PrgEnv-cray PrgEnv-intel -``` - -or - -```sh -module swap PrgEnv-cray PrgEnv-gnu -``` - -Note that several compiler versions are currently installed, in case of GNU for example: - -```sh -module avail gcc -``` - -```out ----------- /opt/modulefiles ----------- -gcc/10.3.0 gcc/11.2.0 gcc/12.1.0(default) -``` - -To change GCC version, run for example - -```sh -module swap gcc gcc/11.2.0 -``` - -GCC v6.1.0 or later is required to build code that can make use of the Intel Skylake microarchitecture and its advanced capabilities, such as AVX-512, on the XC50 platform. -Note: There is not **the** best compiler. Depending on your application/algorithms, different compilers can optimise the code better. Keep in mind trying different compilers. - -## Targeting a CPU - -Compiling a program translates source code into machine instructions. It is important to let the compiler know for which CPU (“target”) the executable shall be build, to make best use of that CPU’s capabilities. Māui uses Intel Skylake microprocessors on all XC50 build and compute nodes, which come with AVX-512 vector instructions, enabling better performance for some codes. -CPU targets can be set by loading a module. By default, module `craype-x86-skylake` is loaded. In the rare case that you encounter problems with the Skylake target at build time or run time, try target for “Broadwell” processors instead: - -```sh -module swap craype-x86-skylake craype-broadwell -``` - -Choosing the “Broadwell” target is also necessary if you want to build code using the older GCC compilers prior to GCC 6.1.0, which were released before Skylake became available. 
If you see the error message - -```err -craype-x86-skylake requires cce/8.6 or later, intel/15.1 or later, or gcc/6.1 or later\ -``` - -when trying to swap to the `PrgEnv-gnu`` environment, or an error message of the kind - -```err -f951: error: bad value (skylake-avx512) for -march= switch -``` - -when you compile a program with a GNU compiler, run - -```sh -module swap craype-x86-skylake craype-broadwell -``` - -and try again. -Make sure that a target is always set. If you do not set a target, the compilers will produce generic code that runs on many processors of the “x86-64” family, and the program will thus not be able to benefit from capabilities such as AVX-512. You will see the following warning message when you run a compiler: - -```err -No supported cpu target is set, CRAY_CPU_TARGET=x86-64 will be used. -Load a valid targeting module or set CRAY_CPU_TARGET -``` - -## Using the compiler drivers - -The programming environment provides compiler drivers for compiling Fortran, C, and C++ code. This means that you will need to use the following commands instead of the actual compilers: - -```sh -ftn -o simpleMpi simpleMpi.f90 # compile Fortran code -cc -o simpleMpi simpleMpi.c # compile C code -CC -o simpleMpi simpleMpi.cxx # compile C++ code -``` - -The drivers will ensure correct linking of your code with compiler runtime libraries, and with Cray-supported libraries (such as Cray’s “libsci” scientific library, or Cray’s version of netCDF). It is therefore not recommended to use the compilers directly, there is a good chance that the executable will fail to build or run correctly. -The compiler drivers automatically add necessary compile and link flags to the compile/link line for the selected hardware and Cray-supported libraries. 
If you are interested in seeing what the compiler driver does, add the `-craype-verbose` flag: - -```sh -ftn -craype-verbose -o simpleMpi simpleMpi.f90 -``` - -## Compiling and Running MPI code - -The compiler drivers will also automatically build MPI codes correctly, there is no need to use special compilers or add additional compiler or linker flags. -Note that running an MPI code on the build node (`login.maui.nesi.org.nz`) using - -```sh -./simpleMPI -``` - -will fail with an error message, as there is no MPI runtime environment: - -```err -[Wed Oct 18 02:00:14 2017] [c0-0c0s3n1] Fatal error in MPI_Init: Other MPI error, error stack: - -MPIR_Init_thread(537): -MPID_Init(247).......: channel initialization failed -MPID_Init(636).......: PMI2 init failed: 1 -``` - -If you want to run a short test of your build, use SLURM’s srun command that submits your program to a compute node on the fly, e.g., - -```sl -SLURM_PARTITION=nesi_research srun -n 6 simpleMPI -``` - -## Common compiler options - -Although the compiler drivers `ftn`, `cc` and `CC` have a few options of their own, such as the `-craype-verbose` flag, they will pass through any additional compiler options to the underlying compiler. This means that you will still need to choose compiler flags that are specific to the Cray, Intel, or GNU compilers, and you will need to change them if you decide to switch compilers. 
- -For example, if you wanted to use the GFortran compiler, activate compiler warnings (`-Wall`), and require aggressive compiler optimisation (`-O3`), you would use the following commands: - -```sh -module swap PrgEnv-cray PrgEnv-gnu -ftn -Wall -O3 -o simpleMpi simpleMpi.f90 -``` - -The following table provides a list of commonly used compiler options: - -| Group | Cray | Intel | GNU | Notes | -|----------------------------------|--------------------------|----------------------------|-------------------------------------------|--------------------------------------------------------------------------------------------| -| Debugging | `-g` or `-G{0,1,2,fast}` | `-g` or `-debug [keyword]` | `-g` or `-g{0,1,2,3}` | Set level of debugging information, some levels may disable certain compiler optimisations | -| Light compiler optimisation | `-O2` | `-O2` | `-O2` | | -| Aggressive compiler optimisation | `-O3 -hfp3 ` | `-O3 -ipo` | `-O3 -ffast-math -funroll-loops` | This may affect numerical accuracy. | -| Vectorisation reports | `-hlist=m` | `-qopt-report` | `-fopt-info-vec` or `-fopt-info-missed` | | -| OpenMP | `-homp` (default) | `-openmp` | `-fopenmp` | | - -Additional compiler options are documented on the compiler man pages, which are accessible _after_ loading the corresponding programming environment: - -| language | cray | intel | gnu | -|----------|---------------|-------------|----------------| -| Fortran | `man crayftn` | `man ifort` | `man gfortran` | -| C | `man craycc` | `man icc` | `man gcc` | -| C++ | `man crayCC` | `man icpc` | `man g++` | - -## Building Code that Depends on External Libraries - -While linking external libraries, one need to pay attention to the correct compiler and linker setup. This, depends on the correct library version (working properly with the compiler and the link type) and the used link options. These depend on whether the libraries have been provided by Cray, by NeSI/NIWA, or if you built them yourself. 
- -Many libraries are provided in modules. You can search them using - -```sh -module avail -``` - -and look in the module description using: - -```sh -module help -``` - -Sometimes modules provide multiple libraries, e.g. `cray-libsci` - -## Using libraries provided by Cray - -If a library has been provided by Cray, the compiler drivers will automatically take care of adding search paths for include files and libraries, and they will add the library names to the linker line. For example, to build a program that uses the netCDF library provided by the `cray-netcdf` module, run the commands - -```sh -module load cray-netcdf -ftn -o simple_xy_wr simple_xy_wr.f90 -``` - -Keep in mind that such automatic treatment of dependencies will **only** work if the libraries have been provided by Cray - you can recognise those by their module name, which always starts with `cray-`, e.g., `cray-netcdf`, or `cray-libsci`. -Note also that correct versions of the libraries (Cray CCE, Intel, or GNU builds) will automatically be used after swapping programming environment. This is particularly important for libraries that provide Fortran 90 modules, due to their compiler-specific format. - -## Using libraries provided by NeSI/NIWA - -The situation is different when you use a library that is provided by NeSI/NIWA. They can be recognised by the `CrayCCE`, `CrayIntel`, or `CrayGNU` suffix attached to their version number. In this case, you will have to provide search paths using the `-I` flag for include files, and `-L` for library files, and the library names have to be explicitly added to the linker line. Libraries are not always provided for all compiler suites and versions. -Note that library names are specified in a specifically formatted form, `-l<library name>`. The linker then expects to find a library file named `lib<library name>.a` (for a static library) or `lib<library name>.so` (for a shared library), e.g., `libnetcdf.a`. 
Note that you may need to list several libraries to link successfully, e.g., `-lA -lB` for linking against libraries “A” and “B”. The order in which you list libraries matters, as the linker will go through the list in order of appearance. If library “A” depends on library “B”, specifying `-lA -lB` will work. If library “B” depends on “A”, use `-lB -lA`. If they depend on each other, use `-lA -lB -lA` (although such cases are quite rare). -Consider the following example where the `grib_api` library is used: - -```sh -module load grib_api/1.23.1-CrayGNU-18.08 -cc -I$EBROOTGRIB_API/include -o mygribprogram mygribprogram.c -L$EBROOTGRIB_API/lib -lgrib_api -``` - -The EasyBuild software management system that NeSI/NIWA use to provide modules automatically defines environment variables `$EBROOT<library name in upper case>` when a module is loaded, which help point the compiler and linker to include files and libraries as in the example above. If you are unsure which `$EBROOT<...>` variables are available, use - -```sh -module show grib_api/1.23.1-CrayGNU-18.08 -``` - -to find out. -Note that specifying search paths with `-I` and `-L` is not strictly necessary in case of the GNU and Intel compilers, which will use the contents of `CPATH`, `LIBRARY_PATH`, and `LD_LIBRARY_PATH` provided by the NeSI/NIWA module. This will not work with the Cray compiler. -**Important note:** Make sure that you load the correct variant of a library, depending on your choice of compiler. Switching compiler environment will not switch NeSI/NIWA modules automatically. Furthermore, loading a NeSI/NIWA module may switch programming environment if it was built with a different compiler. -As mentioned earlier, EasyBuild uses the following module naming conventions (“toolchain names”) to identify the programming environment that was used to build the software: - -- `CrayCCE` for libraries and tools built with the Cray compilers (`PrgEnv-cray`). 
-- `CrayIntel` for libraries and tools built with the Intel compilers (`PrgEnv-intel`). -- `CrayGNU` for libraries and tools built with the GNU compilers (`PrgEnv-gnu`). - -## Using your own libraries - -Linking against libraries that you built yourself is the same as linking against libraries provided by NeSI/NIWA - you will just need to point the compiler to the location where the include and library files are using the `-I` and `-L` flags. - -## Static and dynamic linking - -The XC50 compiler drivers default to static linking where possible for maximum efficiency, avoiding the need to load shared libraries for hundreds or thousands of MPI ranks at runtime. If all dependencies are available as static libraries, the resulting executables will be completely self-contained (although they may still need the Cray MPI environment at runtime). -Here is an example that shows how to find out how your code was linked: - -```sh -module load grib_api/1.23.1-CrayGNU-18.08 -cc -I$EBROOTGRIB_API/include -o mygribprogram mygribprogram.c -L$EBROOTGRIB_API/lib -lgrib_api -ldd mygribprogram -``` - -If you see the message `not a dynamic executable`, your program was statically linked. Otherwise you will see a list of shared library dependencies that are needed at runtime. -If you have to link your code dynamically, either set - -```sh -export CRAYPE_LINK_TYPE=dynamic -``` - -in your build environment (useful when using complex build systems), or add the `-dynamic` flag to the compiler driver commands, e.g., - -```sh -cc -I$EBROOTGRIB_API/include -o mygribprogram mygribprogram.c -L$EBROOTGRIB_API/lib -lgrib_api -dynamic -``` - -Using the `ldd` tool, you should now see a number of libraries that are dynamically linked. 
-You may occasionally see a warning message of the kind: - -```err -/opt/cray/pe/hdf5/1.10.1.1/INTEL/16.0/lib/libhdf5.a(H5PL.o): In function `H5PL_load': -H5PL.c:(.text+0x612): warning: Using 'dlopen' in statically linked applications requires at runtime the shared libraries from the glibc version used for linking -``` - -This simply means that the library must be accessible at runtime despite fully static linking and the program is thus not entirely self-contained, which is usually not an issue. - -## Common linker problems - -Linking can easily go wrong. Most often, you will see linker errors about “missing symbols” when the linker could not find a function used in your program or in one of the libraries that you linked against. To resolve this problem, have a closer look at the function names that the linker reported: - -- Are you missing some object code files (these are compiled source files and have suffix `.o`) that should appear on the linker line? This can happen if the build system was not configured correctly or has a bug. Try running the linking step manually with all source files and debug the build system (which can be a lengthy and cumbersome process, unfortunately). -- Do the missing functions have names that contain “mp” or “omp”? This could mean that some of your source files or external libraries were built with OpenMP support, which requires you to set an OpenMP flag (`-fopenmp` for GNU compilers, `-qopenmp` for Intel) in your linker command. For the Cray compilers, OpenMP is enabled by default and can be controlled using `-h[no]omp`. -- Do you see a very long list of complex-looking function names, and does your source code or external library dependency include C++ code? You may need to explicitly link against the C++ standard library (`-lstdc++` for GNU and Cray compilers, `-cxxlib` for Intel compilers); this is a particularly common problem for statically linked code. -- Do the function names end with an underscore (“_”)? 
You might be missing some Fortran code, either from your own sources or from a library that was written in Fortran, or parts of your Fortran code were built with flags such as `-assume nounderscore` (Intel) or `-fno-underscoring` (GNU), while others were using different flags (note that the Cray compiler always uses underscores). -- Do the function names end with double underscores (“__”)? Fortran compilers offer an option to add double underscores to Fortran subroutine names for compatibility reasons (`-h [no]second_underscore`, `-assume [no]2underscores`, `-f[no-]second-underscore`) which you may have to add or remove. - -Note that the linker requires that function names match exactly, so any variation in function name in your code will lead to a “missing symbols” error (with the exception of character case in Fortran source code). - -## Building code on the CS500 platform - -Building code on the CS500 platform is different from the XC50 platform: - -- The CS500 platform does not currently use compiler drivers (these will be made available by Cray in the near future). -- The CS500 module environment can be reset using `module purge` without problems - you will need to run `module load NeSI` afterwards to make the NeSI software stack available again. - -Building code on the CS500 platform follows the same process as building code on Mahuika. The only difference is that CS500 nodes use Intel Skylake CPUs, while Mahuika’s CS400 nodes use the older Intel Broadwell CPUs. This means that programs that were compiled on the CS500 platform may fail to run on Mahuika, producing either an error message (if built with the Intel compiler), or an “illegal instruction” error (if built with the Cray or GNU compilers). -Please refer to section [Compiling Software: Mahuika](Compiling_software_on_Mahuika.md) for further instructions. 
- \ No newline at end of file diff --git a/docs/Scientific_Computing/HPC_Software_Environment/Finding_Software.md b/docs/Scientific_Computing/HPC_Software_Environment/Finding_Software.md deleted file mode 100644 index 04605ce4e..000000000 --- a/docs/Scientific_Computing/HPC_Software_Environment/Finding_Software.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -created_at: '2018-07-31T10:13:22Z' -tags: [] -title: Finding Software -vote_count: 1 -vote_sum: 1 -zendesk_article_id: 360000360576 -zendesk_section_id: 360000040056 ---- - -## Environment Modules - -NeSI uses environment modules to manage [installed software](../Supported_Applications/index.md). - -Using the `module` command you can: - -- View loaded modules: -- List all available modules -- Load a module: -- Switch out a loaded module for a different version: - -## Lmod on Mahuika - -As on Pan, Mahuika uses an enhanced version of modules called -[Lmod](https://lmod.readthedocs.io/en/latest/010_user.html) . - -Lmod extends the basic environment modules by adding simple shortcuts -and a more powerful search capability. The `ml` shortcut can be used in -place of `module`. With Lmod you can: - -- View loaded modules: -- List all available modules: -- Use “spider” to search for modules, e.g. “Python” modules: -- Load a module: -- Prefix a module with “-“ to unload it, e.g. switch from Python 2 to - Python 3: -- To get a fresh environment, we recommend that you log out and log in - again. By logging out and logging in again you will revert to not - only the default set of modules, but also the default set of - environment variables. - -Further information can be found in the online [User Guide for -Lmod](https://lmod.readthedocs.io/en/latest/010_user.html). - -## Modules on Māui - -On Māui and Māui\_Ancil we use top level modules to provide the -different software stacks. Per default the "NeSI" module is loaded, -which provides access to the different NeSI software stacks. 
- -On Māui XC nodes an improved version of the modules framework is -provided. Therewith you can also search for modules using a sub-string -using the "-S" option, e.g. - -as a result you will also find modules having the substring "netcdf" in -name, e.g. cray-netcdf. - -NOTE: The substring search will be soon implemented by default, then you -do not need to specify the -S anymore. Furthermore, this improvement -should be also ported to the Māui\_Ancil part. - -!!! tip - You can create your own modules. This is described - [here](../../Scientific_Computing/HPC_Software_Environment/Installing_Third_Party_applications.md). diff --git a/docs/Scientific_Computing/HPC_Software_Environment/Installing_Third_Party_applications.md b/docs/Scientific_Computing/HPC_Software_Environment/Installing_Third_Party_applications.md deleted file mode 100644 index 6a35efcf2..000000000 --- a/docs/Scientific_Computing/HPC_Software_Environment/Installing_Third_Party_applications.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -created_at: '2018-09-24T01:51:32Z' -tags: [] -title: Installing (Third Party) applications -vote_count: 3 -vote_sum: 3 -zendesk_article_id: 360000474535 -zendesk_section_id: 360000040056 ---- - -NeSI provides a long list of various applications on its systems. -Nevertheless, if you need additional applications or libraries (below -called package), we distinguish: - -- you need a **newer version** of an already installed package: {% include "partials/support_request.html" %} for - an update -- you need an **older version** of an installed package: please use - the Easybuild installation procedure (below) to install it into your - working space -- you want to test a **new (not installed)** package: below we - collected some hints, how you can install it in your user space. - -In any case, if you have issues, do not hesitate to {% include "partials/support_request.html" %}. - -## Additional Packages for Python, R, etc. 
- -See [Python](../../Scientific_Computing/Supported_Applications/Python.md) or [R](../../Scientific_Computing/Supported_Applications/R.md), -or for other languages check if we have additional documentation for it -in our [application documentation](../Supported_Applications/index.md). - -## Third party applications - -Installation instructions vary from application to application. In any -case we suggest reading the provided installation instructions. -Nevertheless, the following should give you an impression which steps -you usually need to consider: - -- Change into a desired source code directory. We suggest using - `/nesi/nobackup/` or `/nesi/project/` -- download the source code. This could be done via a repository - checkout (`git clone `) or - via downloading a tarball (`wget `). Unpack the - tarball using `tar xf `. Change into source - directory. -- load compiler module and modules for additional libraries - (`module load gimkl FFTW`) -- run the configure with appropriate options - `./configure --prefix= --use-fftw=$EBROOTFFTW  `(options - can be listed using `./configure --help`) -- In other applications you need to adjust the provided `Makefile` to - reflect compiler, and library options (see below) -- compile code (`make`) -- install the binaries and libraries into the specified directory - (`make install`) - -## Create your own modules (Optional) - -You can create personalised module environments, which can load modules -and set up environment variables. For example, you could define a -module in a project directory -`/nesi/project//modulefiles/ProdXY` as the following: - -In the first lines, we can set conflicts with other modules (here named -ProdABC). Then we load some dependency modules and provide some -description. The additional lines depend on your requirements for the -module. With *set* you can define internal variables (within this module -file). The command *setenv* defines an environment variable. 
And -*prepend-path* and *append-path* extend an environment variable at the -front or end. - -There are common environment variables like: - -- *PATH* for providing executables, -- *LD\_LIBRARY\_PATH* for self created libraries, -- *PYTHONPATH* for providing Python modules, -- *CONDA\_ENVS\_PATH* for providing Conda environments, -- etc. - -And others which are very application specific. - -To use the module (or all in that directory and sub-directories) we need -to register that directory to the module environment. This can be done -by setting the following environment variable: - -by adding that line to your `$HOME/.bashrc` you will have the modules -always available. - -The module then can be loaded by: - -These modules can easily be shared with collaborators. They just need to -specify the last two steps. diff --git a/docs/Scientific_Computing/HPC_Software_Environment/Programming_environment_differences_between_Maui_and_Mahuika.md b/docs/Scientific_Computing/HPC_Software_Environment/Programming_environment_differences_between_Maui_and_Mahuika.md deleted file mode 100644 index dd21a2544..000000000 --- a/docs/Scientific_Computing/HPC_Software_Environment/Programming_environment_differences_between_Maui_and_Mahuika.md +++ /dev/null @@ -1,178 +0,0 @@ ---- -created_at: '2018-04-23T00:52:59Z' -tags: -- info -- software -- application -- cs400 -- XC50 -title: Programming environment differences between Maui and Mahuika -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 360000164576 -zendesk_section_id: 360000040056 ---- - -The compile environment of Māui and Mahuika has various similarities, -but also significant differences. Both systems are configured with the -Cray Programming Environment (CPE), but these vary in detail. In general -we distinguish between the XC50 part of Māui and the CS (Mahuika, -Mahuika Ancillary Nodes, and Māui Ancillary nodes) systems. - -Table 1: The Cray Programming Environment on Māui and Mahuika. 
Black -text indicates components common to both systems, green to components -only available on Mahuika, and blue to components only available on Māui -XC part. - - -------- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Programming Languages

Programming Models

Compilers

Tools

Optimised Scientific Libraries

I/O Libraries

Fortran

-

C

-

C++

-

Chapel

Distributed Memory:

-

MPI Support:

-

· Intel MPI1

-

· MVAPICH21

-

· OpenMPI1

-

· MPICH1

-

· Cray-MVAPICH21 

-

Cray MPT2:

-

· MPI

· Cray Compiling Environment (CCE)

-

· GNU

-

· Intel

Performance Analysis:

-

· CrayPat & Cray Apprentice2

-

· Allinea MAP

-

· Intel Vtune Amplifier XE, Advisor, Trace Analyser & -Collector

Dense:

-

· BLAS

-

· LAPACK

-

· ScaLAPACK

-

· Iterative Refinement Tool

NetCDF2

-

HDF2

Shared Memory

-

· OpenMP 4.0

-

· OpenACC 2.0

Environment Setup

-

· Modules

-

· Lmod1

-

 

Porting Tools:

-

· Reveal

-

· CCDB

FFT:

-

· FFTW

PGAS

-

· UPC

-

· CAF

-

· CoArray C++

 

Debuggers:

-

· lgdb

-

· Allinea DDT

-

· ATP2

-

· STAT2

Sparse:

-

· Cray PETSc (with CASK)2

-

· Cray Trilinos (with CASK)2

 

 

Data Analytics

-

· Urika XC Data Analytics2

-

· Cray Graph Engine2

 

- -**Notes:** - -1. 1Only available on Mahuika HPC Cluster, Mahuika Ancillary - Nodes and Māui Ancillary nodes -2. 2Only available on Māui Supercomputer. -3. On Māui (XC50) the Modules framework is used to simplify access to - the various compiler suites and libraries. To access a particular - compiler suite, you simply load (or switch to) the appropriate - programming environment module using the command PrgEnv-X (where X - is one of gnu, intel, or cray). This facility is not available on - the Mahuika HPC Cluster, Mahuika Ancillary Nodes and Māui Ancillary - nodes. -4. [Intel Parallel Studio XE Cluster - Edition](https://software.intel.com/en-us/node/685016) for Linux - will be installed on the Mahuika HPC Cluster, Mahuika Ancillary - Nodes and Māui Ancillary nodes. -5. Intel Parallel Studio XE Professional Edition for CLE will be - installed installed on Māui. - -## Key Similarities  between CPE on XC50 and CS400/500s - -As shown in the table above, Cray provides a list of tools, libraries, -and compilers for both platforms. The Cray compiler environment comes -with the compiler, basic numeric libraries, automatically including -compile and link flags for system architecture and libraries -(enabled/disabled by loading modules), and the Cray performance analysis -tools (CrayPAT) - -## Key Differences between CPE on XC50 and CS400/500s - -There are many similarities between the XC and CS programming -environments (compilers and many tools and libraries are the same or at -least similar), but also some important differences that affect how a -user interacts with the system when building an application code: - -- The XC platform uses compiler drivers (“ftn”, “cc”, “CC”), users - should not use compilers directly. The CS platforms have compiler - drivers only for Cray compiler. For GNU and Intel compilers, users - run “gfortran”, “ifort”, “gcc”, “icc” etc.; -- On the XC platform, a compiler is chosen by switching to its - corresponding “PrgEnv-xxx” module. 
This will also switch - automatically the version of the loaded Cray provided libraries, - e.g., the cray-netcdf and cray-fftw library modules – no equivalent - is available on the CS platforms; On the CS platforms the main - software stack is based on Easybuild toolchains. The default one is - “gimkl”, including GCC, Intel MPI, and Intel MKL. -- The XC platform requires everyone to use Cray-MPI, but on the CS - platform, users can choose to use various MPI libraries; -- Getting rid of all modules via “module purge” renders an XC session - unusable (a list of ~20 modules are necessary to guarantee - operation). On CS there are only few modules necessary, the main one - is called “NeSI”, providing the NeSI software stack and slurm - module; -- The XC platform defaults to static linking, the CS platform to - dynamic linking; - -In summary, compilers, as well as various tools and libraries are common -across both platforms. However, there are important differences in how -the programming environment is used, requiring users to familiarise -themselves with each platform. For more information see the specific -user guides for [Mahuika (and Ancillary nodes)](https://nesi.github.io/hpc_training/lessons/maui-and-mahuika/building-code-mahuika) -and [Māui XC50](https://nesi.github.io/hpc_training/lessons/maui-and-mahuika/building-code-maui). 
diff --git a/docs/Scientific_Computing/HPC_Software_Environment/Software_Installation_Request.md b/docs/Scientific_Computing/HPC_Software_Environment/Software_Installation_Request.md deleted file mode 100644 index 0827bae22..000000000 --- a/docs/Scientific_Computing/HPC_Software_Environment/Software_Installation_Request.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -created_at: '2018-07-31T10:59:09Z' -tags: [] -title: Software Installation Request -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 360000361675 -zendesk_section_id: 360000040076 ---- - -To request that we install a scientific application (either a new -application, or a new version of an already installed application), -please {% include "partials/support_request.html" %}. In -your message, please provide the following information: - -- What is the name and version number of the software you would like - to be installed? If you wish to use a copy from a version control - repository, what tag or release do you need? Please be aware that we - usually require a stable release version of a piece of software - before we will install it for all users. -- Do you have a preference about which platform (Mahuika or Māui) we - install it on? -- Why would you like us to install this software package? -- What is the web site or home web page of the package? If you don't - know this information or the package doesn't have a web site, who is - the author or lead developer? In some cases, there exist two or more - packages with the same or very similar names. If we know the web - site we can be sure that we are installing the same package that you - are requesting. -- How is the package installed? For example, compiled from source, - precompiled binary, or installed as a Python, Perl, R, etc. library? -- What dependencies, if any, does the package require? Please be aware - that the exact dependency list may depend on the particular use - cases you have in mind (like the ability to read and write a - specific file format). 
-- Have you (or another member of your project team) tried to install - it yourself on a NeSI system? If so, were you successful? -- If you or your institution doesn't own the copyright in the - software, under what licence are you permitted to use it? Does that - licence allow you to install and run it on a NeSI system? (Hint: - Most free, open-source software licences will allow you to do this.) -- Who else do you know of who wants to use that software on a NeSI - system? Please provide their names, institutional affiliations, and - NeSI project codes (if you know them). -- What tests do you have that will allow us to verify that the - software is performing correctly and at an acceptable speed? - -Our team will review your request and will make a decision as to whether -we will install the application and make it generally available. diff --git a/docs/Scientific_Computing/HPC_Software_Environment/Software_Version_Management.md b/docs/Scientific_Computing/HPC_Software_Environment/Software_Version_Management.md deleted file mode 100644 index 8e59049d3..000000000 --- a/docs/Scientific_Computing/HPC_Software_Environment/Software_Version_Management.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -created_at: '2019-07-04T20:48:57Z' -tags: -- software -- versions -title: Software Version Management -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 360001045096 -zendesk_section_id: 360000040056 ---- - -Much of the software installed on the NeSI cluster have multiple -versions available as shown on the -[supported applications page](../Supported_Applications/index.md) -or by using the `module avail` or `module spider` commands. - -If only the application name is given a default version will be chosen, -generally the most recent one. However it is good practice to load -modules using the specific version so you can ensure consistent -execution of your job even after the default version has been changed. 
- -If you need a specific version of software, feel free to ask support and -we may install it. - -## Example - -``` sh -module load ANSYS -``` - -Will load the default version of ANSYS, in this case {{applications.ANSYS.default}}, however -this may change. - -``` sh -module load ANSYS/18.1 -``` - -Will always load that version specifically. diff --git a/docs/Scientific_Computing/HPC_Software_Environment/Temporary_directories.md b/docs/Scientific_Computing/HPC_Software_Environment/Temporary_directories.md deleted file mode 100644 index bec0bef5a..000000000 --- a/docs/Scientific_Computing/HPC_Software_Environment/Temporary_directories.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -created_at: '2023-07-21T04:10:04Z' -hidden: false -position: 0 -tags: [] -title: Temporary directories -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 7463891150863 -zendesk_section_id: 360000040056 ---- - -Most programs which create temporary files will put those files in the -directory specified by the environment variable `TMPDIR` if that is set, -or `/tmp` otherwise. - -## Māui - -Since Māui nodes host only one job at a time, there is no problem with -using `/tmp`, which gets emptied after every job.   - -## Mahuika - -On Mahuika it is best to avoid the `/tmp` directory since that is shared -with other jobs and not automatically cleared. When a Mahuika Slurm job -starts, `TMPDIR` is set to a directory created just for that job, which -gets automatically deleted when the job finishes.  - -By default, this job-specific temporary directory is placed in -`/dev/shm`, which is a “tmpfs” filesystem and so actually sits in -ordinary RAM.  As a consequence, your job’s memory request should -include enough to cover the size of any temporary files. - -On the `milan` and `hgx` partitions you have the option of specifying -`#SBATCH --gres=ssd` in your job script which will place `TMPDIR` on a -1.5 TB NVMe SSD attached to the node rather than in RAM. 
When -`--gres=ssd` is set your job’s memory request *does not* need to include -enough to cover the size of any temporary files (as this is a separate -resource). These SSDs give the job a slower but very much larger -temporary directory. They are allocated exclusively to jobs, so there -can only be one such job per node at a time. This gives the job all the -available bandwidth of the SSD device but does limit the number of such -jobs. - -Alternatively you can ignore the provided directory and set `TMPDIR` -yourself, typically to a location in `/nesi/nobackup`.  This will be the -slowest option with the largest capacity. Also if set to `nobackup` the -files will remain after the job finishes, so be wary of how much space -your job’s temporary files use. An example of how `TMPDIR` may be set -yourself is shown below, - -`export TMPDIR=/nesi/nobackup/$SLURM_ACCOUNT/tmp/$SLURM_JOB_ID` - -### Example of copying data into $TMPDIR for use mid-job - -The per job temporary directory can also be used to store data that -needs to be accessed as the job runs. For example you may wish to read -the standard database of Kraken2 (located in -`/opt/nesi/db/Kraken2/standard-2018-09`) from the `milan` SSDs instead -of `/opt`. To do this, request the NVMe SSD on `milan` as described -above. Then, after loading the Kraken2 module in your Slurm script, copy -the database onto the SSD, - -``` sl -cp -r /opt/nesi/db/Kraken2/standard-2018-09/* $TMPDIR -``` - -To get Kraken2 to read the DB from the SSDs (and not from `/opt`), -change the `KRAKEN2_DEFAULT_DB` variable, - -``` bash -export KRAKEN2_DEFAULT_DB=$TMPDIR -``` - -The variable `KRAKEN2_DEFAULT_DB` simply points to the database and is -found by `module show Kraken2`. 
diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/.pages.yml b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/.pages.yml deleted file mode 100644 index 73c1c9d78..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/.pages.yml +++ /dev/null @@ -1,6 +0,0 @@ -nav: - - Jupyter_on_NeSI.md - - Jupyter_kernels_Tool_assisted_management.md - - Jupyter_kernels_Manual_management.md - - ... - - Release Notes jupyter.nesi.org.nz: Release_Notes_jupyter-nesi-org-nz \ No newline at end of file diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Jupyter_kernels_Manual_management.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Jupyter_kernels_Manual_management.md deleted file mode 100644 index a86d8bc29..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Jupyter_kernels_Manual_management.md +++ /dev/null @@ -1,299 +0,0 @@ ---- -created_at: '2022-01-31T20:45:43Z' -tags: - - JupyterHub -title: Jupyter kernels - Manual management ---- - -## Introduction - -Jupyter kernels execute the code that you write. The following Jupyter -kernels are installed by default and can be selected from the Launcher: - -- Python 3.8.2 -- Python 3.8.1 -- Python 3.7.3 -- Anaconda3 -- R 4.0.1 -- R 3.6.1 - -Many packages are preinstalled in our default Python and R environments -and these can be extended further as described on the -[Python](../../Scientific_Computing/Supported_Applications/Python.md) and -[R](../../Scientific_Computing/Supported_Applications/R.md) support -pages. - -## Adding a custom Python kernel - -!!! note "see also" - See the - [Jupyter kernels - Tool-assisted management](../../Scientific_Computing/Interactive_computing_using_Jupyter/Jupyter_kernels_Tool_assisted_management.md) - page for the **preferred** way to register kernels, which uses the - `nesi-add-kernel` command line tool to automate most of these manual - steps. 
- -You can configure custom Python kernels for running your Jupyter -notebooks. This could be necessary and/or recommended in some -situations, including: - -- if you wish to load a different combination of environment modules - than those we load in our default kernels -- if you would like to activate a virtual environment or conda - environment before launching the kernel - -The following example will create a custom kernel based on the -Miniconda3 environment module (but applies to other environment modules -too). - -In a terminal run the following commands to load a Miniconda environment -module: - -``` sh -module purge -module load Miniconda3/4.8.2 -``` - -Now create a conda environment named "my-conda-env" using Python 3.6. -The *ipykernel* Python package is required but you can change the names -of the environment, version of Python and install other Python packages -as required. - -``` sh -conda create --name my-conda-env python=3.6 -source $(conda info --base)/etc/profile.d/conda.sh -conda activate my-conda-env -conda install ipykernel -# you can pip/conda install other packages here too -``` - -Now create a Jupyter kernel based on your new conda environment: - -``` sh -python -m ipykernel install --user --name my-conda-env --display-name="My Conda Env" -``` - -We must now edit the kernel to load the required NeSI environment -modules before the kernel is launched. 
Change to the directory the -kernelspec was installed to -`~/.local/share/jupyter/kernels/my-conda-env`, (assuming you kept -`--name my-conda-env` in the above command): - -``` sh -cd ~/.local/share/jupyter/kernels/my-conda-env -``` - -Now create a wrapper script, called `wrapper.sh`, with the following -contents: - -``` sh -#!/usr/bin/env bash - -# load required modules here -module purge -module load Miniconda3/4.8.2 - -# activate conda environment -source $(conda info --base)/etc/profile.d/conda.sh -conda deactivate # workaround for https://github.com/conda/conda/issues/9392 -conda activate my-conda-env - -# run the kernel -exec python $@ -``` - -Make the wrapper script executable: - -``` sh -chmod +x wrapper.sh -``` - -Next edit the *kernel.json* to change the first element of the argv list -to point to the wrapper script we just created. The file should look -like this (change <username> to your NeSI username): - -```json -{ - "argv": [ - "/home//.local/share/jupyter/kernels/my-conda-env/wrapper.sh", - "-m", - "ipykernel_launcher", - "-f", - "{connection_file}" - ], - "display_name": "My Conda Env", - "language": "python" -} -``` - -After refreshing JupyterLab your new kernel should show up in the -Launcher as "My Conda Env". - -## Sharing a Python kernel with your project team members - -You can also configure a shared Python kernel that others with access to -the same NeSI project will be able to load. If this kernel is based on a -Python virtual environment, Conda environment or similar, you must make -sure it also exists in a shared location (other users cannot see your -home directory). - -The example below shows creating a shared Python kernel based on the -`Python/3.8.2-gimkl-2020a` module and also loads the -`ETE/3.1.1-gimkl-2020a-Python-3.8.2` module. 
- -In a terminal run the following commands to load the Python and ETE -environment modules: - -``` sh -module purge -module load Python/3.8.2-gimkl-2020a -module load ETE/3.1.1-gimkl-2020a-Python-3.8.2 -``` - -Now create a Jupyter kernel within your project directory, based on your -new virtual environment: - -``` sh -python -m ipykernel install --prefix=/nesi/project//.jupyter --name shared-ete-env --display-name="Shared ETE Env" -``` - -Next change to the kernel directory, which for the above command would -be: - -``` sh -cd /nesi/project//.jupyter/share/jupyter/kernels/shared-ete-env -``` - -Create a wrapper script, *wrapper.sh*, with the following contents: - -``` sh -#!/usr/bin/env bash - -# load necessary modules here -module purge -module load Python/3.8.2-gimkl-2020a -module load ETE/3.1.1-gimkl-2020a-Python-3.8.2 - -# run the kernel -exec python $@ -``` - -Note we also load the ETE module so that we can use that from our -kernel. - -Make the wrapper script executable: - -``` sh -chmod +x wrapper.sh -``` - -Next, edit the *kernel.json* to change the first element of the argv -list to point to the wrapper script we just created. The file should -look like this (change <project\_code> to your NeSI project code): - -```json -{ - "argv": [ - "/nesi/project//.jupyter/share/jupyter/kernels/shared-ete-env/wrapper.sh", - "-m", - "ipykernel_launcher", - "-f", - "{connection_file}" - ], - "display_name": "Shared Conda Env", - "language": "python" -} -``` - -After refreshing JupyterLab your new kernel should show up in the -Launcher as "Shared Virtual Env". - -## Custom kernel in a Singularity container - -An example showing setting up a custom kernel running in a Singularity -container can be found on our [Lambda Stack](../../Scientific_Computing/Supported_Applications/Lambda_Stack.md#lambda-stack-via-jupyter) -support page. - -## Adding a custom R kernel - -You can configure custom R kernels for running your Jupyter notebooks. 
-The following example will create a custom kernel based on the -R/3.6.2-gimkl-2020a environment module and will additionally load an -MPFR environment module (e.g. if you wanted to load the Rmpfr package). - -In a terminal run the following commands to load the required -environment modules: - -``` sh -module purge -module load IRkernel/1.1.1-gimkl-2020a-R-3.6.2 -module load Python/3.8.2-gimkl-2020a -``` - -The IRkernel module loads the R module as a dependency and provides the -R kernel for Jupyter. Python is required to install the kernel (since -Jupyter is written in Python). - -Now create an R Jupyter kernel based on your new conda environment: - -``` sh -R -e "IRkernel::installspec(name='myrwithmpfr', displayname = 'R with MPFR', user = TRUE)" -``` - -We must now to edit the kernel to load the required NeSI environment -modules when the kernel is launched. Change to the directory the -kernelspec was installed to -(~/.local/share/jupyter/kernels/myrwithmpfr,* *assuming you kept *--name -myrwithmpfr* in the above command): - -``` sh -cd ~/.local/share/jupyter/kernels/myrwithmpfr -``` - -Now create a wrapper script in that directory, called *wrapper.sh*, with -the following contents: - -``` sh -#!/usr/bin/env bash - -# load required modules here -module purge -module load MPFR/4.0.2-GCCcore-9.2.0 -module load IRkernel/1.1.1-gimkl-2020a-R-3.6.2 - -# run the kernel -exec R $@ -``` - -Make the wrapper script executable: - -``` sh -chmod +x wrapper.sh -``` - -Next edit the *kernel.json* to change the first element of the argv list -to point to the wrapper script we just created. 
The file should look -something like this (change <username> to your NeSI username): - -```json -{ - "argv": [ - "/home//.local/share/jupyter/kernels/myrwithmpfr/wrapper.sh", - "--slave", - "-e", - "IRkernel::main()", - "--args", - "{connection_file}" - ], - "display_name": "R with MPFR", - "language": "R" -} -``` - -After refreshing JupyterLab your new R kernel should show up in the -Launcher as "R with MPFR". - -## Spark - -At the time of writing, the latest stable version of Spark does not -support Python 3.8. If you wish to use Spark (e.g. PySpark) make sure -you select one of our Python 3.7.3 or Anaconda3 kernels. diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Jupyter_kernels_Tool_assisted_management.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Jupyter_kernels_Tool_assisted_management.md deleted file mode 100644 index 23806aba2..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Jupyter_kernels_Tool_assisted_management.md +++ /dev/null @@ -1,154 +0,0 @@ ---- -created_at: '2022-01-31T21:28:03Z' -tags: [] -title: Jupyter kernels - Tool-assisted management -vote_count: 1 -vote_sum: 1 -zendesk_article_id: 4414958674831 -zendesk_section_id: 360001189255 ---- - -## Introduction - -Jupyter can execute code in different computing environments using -*kernels*. Some kernels are provided by default (Python, R, etc.) but -you may want to register your computing environment to use it in -notebooks. For example, you may want to load a specific environment -module in your kernel or use a Conda environment. - -To register a Jupyter kernel, you can follow the steps highlighted in -the [Jupyter kernels - Manual management](../../Scientific_Computing/Interactive_computing_using_Jupyter/Jupyter_kernels_Manual_management.md) -or use the `nesi-add-kernel` tool provided within the [Jupyter on NeSI service](https://jupyter.nesi.org.nz). -This page details the latter option, which we recommend. 
- -## Getting started - -First you need to open a terminal. It can be from a session on Jupyter -on NeSI or from a regular ssh connection on Mahuika login node. If you -use the ssh option, make sure to load the JupyterLab module to have -access to the `nesi-add-kernel` tool: - -``` sh -module purge # remove all previously loaded modules -module load JupyterLab -``` - -Then, to list all available options, use the `-h` or `--help` options as -follows: - -``` sh -nesi-add-kernel --help -``` - -Here is an example to add a TensorFlow kernel, using NeSI’s module: - -``` sh -nesi-add-kernel tf_kernel TensorFlow/2.8.2-gimkl-2022a-Python-3.10.5 -``` - -and to share the kernel with other members of your NeSI project: - -``` sh -nesi-add-kernel --shared tf_kernel_shared TensorFlow/2.8.2-gimkl-2022a-Python-3.10.5 -``` - -To list all the installed kernels, use the following command: - -``` sh -jupyter-kernelspec list -``` - -and to delete a specific kernel: - -``` sh -jupyter-kernelspec remove -``` - -where `` stands for the name of the kernel to delete. - -## Conda environment - -First, make sure the `JupyterLab` module is loaded: - -``` sh -module purge -module load JupyterLab -``` - -To add a Conda environment created using -`conda create -p `, use: - -``` sh -nesi-add-kernel my_conda_env -p -``` - -otherwise if created using `conda create -n `, use: - -``` sh -nesi-add-kernel my_conda_env -n -``` - -## Virtual environment - -If you want to use a Python virtual environment, don’t forget to specify -which Python module you used to create it. 
- -For example, if we create a virtual environment named `my_test_venv` -using Python 3.10.5: - -``` sh -module purge -module load Python/3.10.5-gimkl-2022a -python -m venv my_test_venv -``` - -to create the corresponding `my_test_kernel` kernel, we need to use the -command: - -``` sh -module purge -module load JupyterLab -nesi-add-kernel my_test_kernel Python/3.10.5-gimkl-2022a --venv my_test_venv -``` - -## Singularity container - -To use a Singularity container, use the `-c` or `--container` options as -follows: - -``` sh -module purge -module load JupyterLab -nesi-add-kernel my_test_kernel -c -``` - -where `` is a path to your container image. - -Note that your container **must** have the `ipykernel` Python package -installed in it to be able to work as a Jupyter kernel. - -Additionally, you can use the `--container-args` option to pass more -arguments to the `singularity exec` command used to instantiate the -kernel. - -Here is an example instantiating a NVIDIA NGC container as a kernel. -First, we need to pull the container: - -``` sh -module purge -module load Singularity/3.11.3 -singularity pull nvidia_tf.sif docker://nvcr.io/nvidia/tensorflow:21.07-tf2-py3 -``` - -then we can instantiate the kernel, using the `--nv` singularity flag to -ensure that the GPU will be found at runtime (assuming our Jupyter -session has access to a GPU): - -``` sh -module purge -module load JupyterLab -nesi-add-kernel nvidia_tf -c nvidia_tf.sif --container-args "'--nv'" -``` - -Note that the double-quoting of `--nv` is needed to properly pass the -options to `singularity exec`. 
diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Jupyter_on_NeSI.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Jupyter_on_NeSI.md deleted file mode 100644 index 436e92abd..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Jupyter_on_NeSI.md +++ /dev/null @@ -1,226 +0,0 @@ ---- -created_at: '2020-06-08T04:21:37Z' -tags: -- jupyter -- hub -- home -- lab -- notebook -title: Jupyter on NeSI -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 360001555615 -zendesk_section_id: 360001189255 ---- - -!!! Note - This service is available for users with a current allocation on - Mahuika only. - Please {% include "partials/support_request.html" %} to request a suitable - allocation. - -## Introduction - -NeSI supports the use of [Jupyter](https://jupyter.org/) for -[interactive computing](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Slurm_Interactive_Sessions.md). -Jupyter allows you to create notebooks that contain live code, -equations, visualisations and explanatory text. There are many uses for -Jupyter, including data cleaning, analytics and visualisation, machine -learning, numerical simulation, managing [Slurm job -submissions](../../Getting_Started/Next_Steps/Submitting_your_first_job.md) -and workflows and much more. - -!!! prerequisite "See also" - - See the [RStudio via Jupyter on NeSI](../../Scientific_Computing/Interactive_computing_using_Jupyter/RStudio_via_Jupyter_on_NeSI.md) - page for launching an RStudio instance. - - See the [MATLAB via Jupyter on NeSI](../../Scientific_Computing/Interactive_computing_using_Jupyter/MATLAB_via_Jupyter_on_NeSI.md) - page for launching MATLAB via Jupyter - - See the [Virtual Desktop via Jupyter on NeSI](../../Scientific_Computing/Interactive_computing_using_Jupyter/Virtual_Desktop_via_Jupyter_on_NeSI.md) - page for launching a virtual desktop via Jupyter. 
- - See the [Jupyter kernels - Tool-assisted management](../../Scientific_Computing/Interactive_computing_using_Jupyter/Jupyter_kernels_Tool_assisted_management.md) - (recommended) and [Jupyter kernels - Manual management](../../Scientific_Computing/Interactive_computing_using_Jupyter/Jupyter_kernels_Manual_management.md) - pages for adding kernels. - -## Accessing Jupyter on NeSI - -Jupyter at NeSI is powered by [JupyterHub](https://jupyter.org/hub), a -multi-user hub that spawns, manages and proxies multiple instances of -the single-user Jupyter server. - -### Access NeSI's JupyterHub here - -[https://jupyter.nesi.org.nz](https://jupyter.nesi.org.nz) - -When you log in with your [NeSI credentials](../../Getting_Started/Accessing_the_HPCs/Setting_Up_and_Resetting_Your_Password.md) -you will be taken to the "Server Options" page, where typical job -configuration options can be selected to allocate the resources that -will be used to run Jupyter. Typical jobs, not requesting a GPU, should -be up and running within one to two minutes. Requesting a GPU can -increase this time significantly as there are only a small number of -GPUs available at NeSI. - -!!! tip - If your server appears to not have started within 3 minutes please - reload the browser window and check again, otherwise contact - {% include "partials/support_request.html" %}. - -## Known issues - -- When using *srun* in a Jupyter terminal you may see messages like - those shown below. The "error" messages are actually just warnings - and can be ignored; the *srun* command should still work. - Alternatively, you could run *unset TMPDIR* in the terminal before - running *srun* to avoid these warnings. 
- - ``` sh - srun --pty bash - ``` - - ```out - srun: job 28560743 queued and waiting for resources - srun: job 28560743 has been allocated resources - slurmstepd: error: Unable to create TMPDIR [/dev/shm/jobs/28560712]: Permission denied - slurmstepd: error: Setting TMPDIR to /tmp - ``` - -## Jupyter user interface - -### JupyterLab - -Once your server has started you will be redirected to -[JupyterLab](https://jupyterlab.readthedocs.io/en/stable/). JupyterLab -is the next generation of the Jupyter user interface and provides a way -to use notebooks, text editor, terminals and custom components together. -If you would prefer to use the classic Notebook interface, then select -"Launch Classic Notebook" from the JupyterLab Help menu, or change the -URL from */lab* to */tree* once the server is running. - -### File systems - -Your Jupyter server will start in a new directory created within your -home directory for that specific Jupyter job. Within that directory, you -will find symbolic links to your home directory and to the project and -nobackup directories of your active projects. We do not recommend that -you store files in this initial directory because next time you launch -Jupyter you will be starting in a different directory, instead switch to -one of your home, project or nobackup directories first. - -### Jupyter terminal - -JupyterLab provides a terminal that can be an alternative means of -gaining command line access to NeSI systems instead of using an SSH -client. Some things to note are: - -- when you launch the terminal application some environment modules - are already loaded, so you may want to run `module purge` -- processes launched directly in the JupyterLab terminal will probably - be killed when you Jupyter session times out - -## Ending your interactive session and logging out - -To end a JupyterLab session, please select "Hub Control Panel" under the -File menu then "Stop My Server". Finally, click on "Log Out". 
- -![control panel](../../assets/images/Jupyter_on_NeSI.png) -![stop server](../../assets/images/Jupyter_on_NeSI_0.png) - -If you click "Log Out" without stopping your server, the server will -continue to run until the Slurm job reaches its maximum wall time. - -This means that if you wish to have a session lasting, say, 4 hours -(which is not offered in the "Select walltime" drop-down) then you can -start a 8 hour session and end the job as described above when you are -finished. Alternatively, you can cancel your Jupyter job by running -`scancel 'job_id'` from within the Jupyter terminal when you are done. -Note this will make the page unresponsive as it now has no compute -powering it. - -## Installing JupyterLab extensions - -JupyterLab supports many extensions that enhance its functionality. At -NeSI we package some extensions into the default JupyterLab environment. -Keep reading if you need to install extensions yourself. - -Note, there were some changes related to extensions in JupyterLab 3.0 -and there are now multiple methods to install extensions. More details -about JupyterLab extensions can be found -[here](https://jupyterlab.readthedocs.io/en/stable/user/extensions.html). -Check the extension's documentation to find out the supported -installation method for that particular extension. - -### Installing prebuilt extensions  - -If the extension is packaged as a prebuilt extension (e.g. as a pip -package), then you can install it from the JupyterLab terminal by -running: - -``` sh -pip install --user -``` - -For example, the [Dask -extension](https://github.com/dask/dask-labextension#jupyterlab-30-or-greater) -can be installed with the following: - -``` sh -pip install --user dask-labextension -``` - -### Installing source extensions - -Installing source extensions requires a rebuild of the JupyterLab web -application. 
Since this requires write permissions, you will need to set -the JupyterLab [application directory](https://jupyterlab.readthedocs.io/en/stable/user/extensions.html#advanced-usage) -to a location that you can write to. To do this you need to create a -file named *~/.jupyterlab3\_dir* in your home directory with the full -path to your desired JupyterLab application directory and then run some -commands to initialise the JupyterLab application directory. - -Running the following commands will create the JupyterLab application -directory in your home directory: - -``` sh -module load JupyterLab -echo $HOME/.local/share/jupyter/lab > ~/.jupyterlab3_dir -export JUPYTERLAB_DIR=$HOME/.local/share/jupyter/lab -jupyter lab build -``` - -These changes will only take effect after relaunching your Jupyter -server and then you should be able to install JupyterLab extensions as -you please. - -!!!note - The above commands will put the JupyterLab application directory in - your home directory. The application directory often requires at least - 1-2GB of disk space and 30,000 inodes (file count), so make sure you - have space available in your home directory first (see - [NeSI File Systems and Quotas](../../Storage/File_Systems_and_Quotas/NeSI_File_Systems_and_Quotas.md)) - or request a larger quota. - -You could change the path to point to a location in your project -directory, especially if multiple people on your project will share the -same JupyterLab application directory, e.g.: - -``` sh -module load JupyterLab -echo /nesi/project//$USER/jupyter/lab > ~/.jupyterlab_dir -export JUPYTERLAB_DIR=/nesi/project//$USER/jupyter/lab -jupyter lab build -``` - -## Log files - -The log file of a Jupyter server session is saved either in the project -directory of the project you selected on the "Server Options" JupyterHub -page, or in your home directory, and is named -`.jupyterhub__.log` (note the leading `.` which means -the log file is hidden). 
If you encounter problems with your Jupyter -session, the contents of this file can be a good first clue to debug the -issue. - -## External documentation - -- [Jupyter](https://jupyter.readthedocs.io/en/latest/) -- [JupyterHub](https://jupyterhub.readthedocs.io/en/stable/) -- [JupyterLab](https://jupyterlab.readthedocs.io/en/stable/) diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/MATLAB_via_Jupyter_on_NeSI.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/MATLAB_via_Jupyter_on_NeSI.md deleted file mode 100644 index 8cfdeb5c8..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/MATLAB_via_Jupyter_on_NeSI.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -created_at: '2022-04-04T03:32:24Z' -tags: [] -title: MATLAB via Jupyter on NeSI -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 4614893064591 -zendesk_section_id: 360001189255 ---- - -!!! warning - This functionality is experimental and developing, which may introduce - breaking changes in the future. - If you would like to report a bug or propose a change see the GitHub - repo - [https://github.com/nesi/jupyter-matlab-proxy](https://github.com/nesi/jupyter-matlab-proxy?organization=nesi&organization=nesi) - or {% include "partials/support_request.html" %}. - -## Getting started - -MATLAB can be accessed as a web application via [Jupyter on -NeSI](../../Scientific_Computing/Interactive_computing_using_Jupyter/Jupyter_on_NeSI.md). - -In the JupyterLab interface, MATLAB can be started using the -corresponding entry in the launcher. - -## ![matlab\_proxy\_icon.png](../../assets/images/MATLAB_via_Jupyter_on_NeSI.png) - -Clicking on this entry will open a separate tab in your web browser, -where you will see the following status information page. - -## ![image\_\_1\_.png](../../assets/images/MATLAB_via_Jupyter_on_NeSI_0.png) - -MATLAB may take a few minutes to load, once it does you will be put -straight into the MATLAB environment.  
- -You can open the status page at any time by clicking the -![../../assets/images/MATLAB_via_Jupyter_on_NeSI_1.png)](https://github.com/mathworks/jupyter-matlab-proxy/raw/main/img/tools_icon.png) -button. - -!!! warning - Your license must be valid for MATLAB 2021b or newer. - -## Licensing - -If you are a member of an institution that has access to MATLAB, the -corresponding network license will be selected. You can confirm this in -the info panel. - -If you do not wish to use a network license you can click the 'Unset -License Server Address' button. - -## ![image\_\_3\_.png](../../assets/images/MATLAB_via_Jupyter_on_NeSI_2.png) - -If you have no licence address set you can instead authenticate using a -MathWorks email address, provided you have a valid license associated to -your account. - -## ![image\_\_4\_.png](../../assets/images/MATLAB_via_Jupyter_on_NeSI_3.png) - -## Troubleshooting - -As MATLAB via Jupyter on NeSI uses MATLAB 2021a, you will see a glibc -warning whenever you run a system command, and some system commands will -not work as intended. - -For more details see -[MATLAB#known\_bugs](../../Scientific_Computing/Supported_Applications/MATLAB.md#known-bugs). diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/RStudio_via_Jupyter_on_NeSI.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/RStudio_via_Jupyter_on_NeSI.md deleted file mode 100644 index 886359e25..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/RStudio_via_Jupyter_on_NeSI.md +++ /dev/null @@ -1,155 +0,0 @@ ---- -created_at: '2021-05-13T01:00:08Z' -tags: [] -vote_count: 7 -vote_sum: 3 -zendesk_article_id: 360004337836 -zendesk_section_id: 360001189255 ---- - - -!!! note - This functionality is experimental and may introduce breaking changes - in the future. 
These notes should be read in conjunction with NeSI's - main [R support page](../../Scientific_Computing/Supported_Applications/R.md) - Your feedback is welcome, please don't hesitate {% include "partials/support_request.html" %} to make suggestions. - -## Getting started - -RStudio can be accessed as a web application via [Jupyter on -NeSI](../../Scientific_Computing/Interactive_computing_using_Jupyter/Jupyter_on_NeSI.md). - -In the JupyterLab interface, RStudio can be started using the -corresponding entry in the launcher. - -![rstudio\_launcher.png](../../assets/images/RStudio_via_Jupyter_on_NeSI.png) - -Clicking on this entry will open a separate tab in your web browser, -where RStudio will be accessible. - -Once RStudio is launched, you should briefly see a login screen. It will -be auto-filled using a pre-generated password, unless you disabled -javascript in your web browser. - -## Changing R version - -You can configure a set of [environment modules](../../Getting_Started/Next_Steps/The_HPC_environment.md) -to preload before starting RStudio. This can be useful if you want to -change the version of the R interpreter or use NeSI's *R-Geo* or -*R-bundle-Bioconductor* modules. - -The module needs to be entered in the configuration file -`~/.config/rstudio_on_nesi/prelude.bash`. - -In the following example, we use the module that is built for R/4.2.1 - -``` sh -echo "module load R/4.2.1-gimkl-2022a" > ~/.config/rstudio_on_nesi/prelude.bash -``` - -Once your configuration file is ready, make sure to restart your Jupyter -session and re-launch RStudio for these changes to be taken into -account. Check that the correct version of R has loaded and that the -correct Library Paths are available. 
For R/4.2.1 the command -`.libPaths()` will return the following: - -```r -.libPaths() -``` - -```out -[1] "/home/YOUR_USER_NAME/R/gimkl-2022a/4.2" -[2] "/opt/nesi/CS400_centos7_bdw/R/4.2.1-gimkl-2022a/lib64/R/library" -``` - -## Package Installation - -To avoid read/write issues with a small temorary directory filling up, -in a terminal run the following two lines of code. These will setup a -larger directory that will allow for packages to be installed to your -personal library. NOTE: this is not creating a library. - -```sh -mkdir -p /nesi/nobackup//rstudio_tmp -echo "TMP=/nesi/nobackup//rstudio_tmp" > .Renviron -``` - -Within RStudio run the command \`tempdir()\` which should return the -following (below), where \`Rtmpjp2rm8\` is a randomly generated folder -name, and is emptied with each new session. So will not fill up your -home directory. - -```r -tempdir() -``` - -```out -[1] "/nesi/nobackup//rstudio_tmp/Rtmpjp2rm8" -``` - -The alternative is to install packages in a terminal session - -## Advanced usage - -RStudio runs in a [Singularity -container](../../Scientific_Computing/Supported_Applications/Singularity.md) -prepared by the NeSI team to run on -[jupyter.nesi.org.nz](https://jupyter.nesi.org.nz). The related code is -hosted on GitHub, in the -[rstudio\_on\_nesi](https://github.com/nesi/rstudio_on_nesi) repository. - -To modify the content of the container, you need to adapt the -[Singularity definition file](https://github.com/nesi/rstudio_on_nesi/blob/main/conf/rstudio_server_on_centos7.def), -found in the `conf` folder of the repository, and then rebuild the -container. 
- -Once your container is ready, upload it on NeSI and use the -configuration file `~/.config/rstudio_on_nesi/singularity_image_path` to -indicate the path of your container to the RStudio-on-NeSI plugin: - -```sh -echo PATH_TO_CONTAINER > ~/.config/rstudio_on_nesi/singularity_image_path -``` - -Then restart your Jupyter session and launch a new RStudio session to -make use of your container. - -If your RStudio session does not start, try to reload the page, in case -the reported failure is just due to the container taking too much time -to start. - -If this does not work, you will need to investigate the errors. A good -place to start is looking at the log file from jupyter, for the current -session: - -```sh -cat ~/.jupyter/.jupyterhub_${USER}_${SLURM_JOB_ID}.log -``` - -## Troubleshooting - -### Error 500 - -If you get an error 500 after clicking on the launcher icon, this could -be due to RStudio taking too much time to start, which is interpreted as -a failure by JupyterLab. Please try to start RStudio again from the -launcher. If the problem persists, {% include "partials/support_request.html" %}. - -![error\_500.PNG](../../assets/images/RStudio_via_Jupyter_on_NeSI_0.png) - -If you have disabled javascript in your web browser, you will need -to enter your password manually in the RStudio login screen. To -retrieve the password, open a terminal in JupyterLab and enter the -following to print the password: - -```sh -cat ~/.config/rstudio_on_nesi/server_password -``` - -### Error 599 - -RStudio fails to load, times out, fails to initialze -If your RStudio session won't load, a possible solution is to delete the contents of the two hidden directories in your home directory. 
- -- `.local/share/rstudio` -- `.local/share/rstudio_on_nesi` diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/.pages.yml b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/.pages.yml deleted file mode 100644 index 930475cc2..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/.pages.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -nav: -- jupyter-nesi-org-nz_release_notes_19-12-2024.md -- jupyter-nesi-org-nz_release_notes_26-11-2024.md -- jupyter-nesi-org-nz_release_notes_08-10-2024.md -- jupyter-nesi-org-nz_release_notes_27-08-2024.md -- jupyter-nesi-org-nz_release_notes_30-07-2024.md -- jupyter-nesi-org-nz_release_notes_26-03-2024.md -- jupyter-nesi-org-nz_release_notes_22-11-2023.md -- jupyter-nesi-org-nz_release_notes_14-11-2023.md -- jupyter-nesi-org-nz_release_notes_15-06-2023.md -- jupyter-nesi-org-nz_release_notes_19-05-2023.md -- jupyter-nesi-org-nz_release_notes_02-02-2023.md -- jupyter-nesi-org-nz_release_notes_25-08-2022.md -- jupyter-nesi-org-nz_release_notes_12-07-2022.md -- jupyter-nesi-org-nz_release_notes_31-03-2022.md -- jupyter-nesi-org-nz_release_notes_28-06-2022.md -- jupyter-nesi-org-nz_release_notes_02-06-2022.md -- jupyter-nesi-org-nz_release_notes_02-11-2021.md -- jupyter-nesi-org-nz_release_notes_14-10-2021.md -- jupyter-nesi-org-nz_release_notes_24-09-2021.md -- jupyter-nesi-org-nz_release_notes_16-09-2021.md -- jupyter-nesi-org-nz_release_notes_12-05-2021.md diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_02-02-2023.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_02-02-2023.md deleted file mode 100644 index f0b45b3d3..000000000 --- 
a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_02-02-2023.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -created_at: '2023-02-02T03:42:47Z' -tags: -- releasenote -title: jupyter.nesi.org.nz release notes 02/02/2023 -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 6325030048655 -zendesk_section_id: 360001150156 -search: - boost: 0.1 ---- - -## New and Improved - -- Updated JupyterHub to v2.3.1 -- Updated JupyterLab to v3.5.3 -- Switched to Python 3.10 for running JupyterLab (kernels are - unaffected) - - Note: if you have previously installed Python packages in your - home directory using Python 3.10, we recommend cleaning out your - *~/.local/Python-3.10-gimkl-2022a* directory, as it could - conflict with our JupyterLab installation, and consider - [Installing packages in a Python virtual environment](../../../Scientific_Computing/Supported_Applications/Python.md#installing-packages-in-a-python-virtual-environment) - instead diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_02-06-2022.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_02-06-2022.md deleted file mode 100644 index b2d9bcda3..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_02-06-2022.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -created_at: '2022-06-02T05:35:53Z' -tags: -- releasenote -title: jupyter.nesi.org.nz release notes 02/06/2022 -vote_count: 1 -vote_sum: 1 -zendesk_article_id: 4905985717135 -zendesk_section_id: 360001150156 -search: - boost: 0.1 ---- - -## Release Update - 2. 
June 2022 - -## New and Improved - -- Updated JupyterLab version to v3.4.2 -- Updated [RStudio-on-NeSI](../../../Scientific_Computing/Interactive_computing_using_Jupyter/RStudio_via_Jupyter_on_NeSI.md) (v0.22.5): fix library path when using NeSI R package in RStudio (e.g. R-bundle-Bioconductor) -- Plotly extension re-added (missing in the previous release) -- Added [papermill](https://pypi.org/project/papermill/) extension -- Updated [NeSI Virtual Desktop](../../../Scientific_Computing/Interactive_computing_using_Jupyter/Virtual_Desktop_via_Jupyter_on_NeSI.md) to v2.4.1 - - Image changes - - Update default Firefox version. - - Update to use singularity 3.8.5. - - Switched to rocky8 image. - - Added chrome, strace, sview and xfce-terminal to image. - - Added some libraries need for ANSYS - - Added missing GLX libraries. - - Bug fixes - - Fixed faulty startup messages - - Fixed entrypoint duplication issue. - - unset `SLURM_EXPORT_ENV` before starting desktop. - - Refactoring - - Removed dependency on system vdt repo. - - Removed faulty & unneeded bind paths. - - Removed debug by default and hardcoded verbose. - - replaced `VDT_HOME` with XDG equiv diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_02-11-2021.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_02-11-2021.md deleted file mode 100644 index 2cb8f5a09..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_02-11-2021.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -created_at: '2021-11-02T04:15:21Z' -tags: -- releasenote -title: jupyter.nesi.org.nz release notes 02/11/2021 -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 4408743148431 -zendesk_section_id: 360001150156 -search: - boost: 0.1 ---- - - -## Release Update - 02. 
November 2021 - -## New and Improved - -- Enabled jupyter server proxy to forward requests to a different host - (compute node). - -  diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_08-10-2024.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_08-10-2024.md deleted file mode 100644 index eabc19fba..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_08-10-2024.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -created_at: '2024-10-04T03:31:07Z' -tags: -- releasenote -title: jupyter.nesi.org.nz release notes 04/10/2024 -vote_count: 0 -vote_sum: 0 -search: - boost: 0.1 ---- - -## Fixed - -- The links to the support documentation have been updated for more accurate information. - -If you have any questions about the fix, -please [contact NeSI -Support](mailto:support@nesi.org.nz "mailto:support@nesi.org.nz"). diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_12-05-2021.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_12-05-2021.md deleted file mode 100644 index 5b3dbd6fb..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_12-05-2021.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -created_at: '2021-05-12T01:15:39Z' -tags: -- releasenote -title: jupyter.nesi.org.nz release notes 12/05/2021 -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 360004424575 -zendesk_section_id: 360001150156 -search: - boost: 0.1 ---- - -## Release Update - 12. May 2021 - -## New and Improved - -- JupyterLab upgrade to v3.0.15. 
- Read more on [user-facing - changes](https://jupyterlab.readthedocs.io/en/stable/getting_started/changelog.html#user-facing-changes) - and the installation of extensions here:  - [https://jupyterlab.readthedocs.io/en/stable/user/extensions.html](https://jupyterlab.readthedocs.io/en/stable/user/extensions.html#finding-extensions) diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_12-07-2022.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_12-07-2022.md deleted file mode 100644 index fab3c7b69..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_12-07-2022.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -created_at: '2022-07-12T04:58:15Z' -tags: -- releasenote -title: jupyter.nesi.org.nz release notes 12/07/2022 -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 5129022601487 -zendesk_section_id: 360001150156 -search: - boost: 0.1 ---- - -## New and Improved - -- Added the `pyviz_comms` package to allow fully interactive usage of - [HoloViz](https://holoviz.org/index.html) tools within notebooks (in - particular Panel and HoloViews). 
- -  diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_14-10-2021.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_14-10-2021.md deleted file mode 100644 index 2e2e43e3d..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_14-10-2021.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -created_at: '2021-10-14T18:20:24Z' -tags: -- releasenote -title: jupyter.nesi.org.nz release notes 14/10/2021 -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 4407886998671 -zendesk_section_id: 360001150156 -search: - boost: 0.1 ---- - -## Release Update - 14. October 2021 - -## New and Improved - -- Changed hub session timeout to 16 hours. Users will be prompted to - login again after 16 hrs. aligned with max. wall time for JupyterLab - instances.  -- JupyterHub fixed: improvements to avoid 403 errors diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_14-11-2023.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_14-11-2023.md deleted file mode 100644 index 3ea27f01c..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_14-11-2023.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -created_at: '2023-11-14T03:13:56Z' -tags: [] -title: jupyter.nesi.org.nz release notes 14/11/2023 -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 8358981393551 -zendesk_section_id: 360001150156 -search: - boost: 0.1 ---- - -## New and Improved - -- Adding extra logging when the Jupyter Health Check fails - -## Fixed - -- We are now closing user session when the corresponding Jupyter - server is 
stopped, to avoid idle sessions to linger on the host - -  - -If you have any questions about any of the improvements or fixes, please - {% include "partials/support_request.html" %}. diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_15-06-2023.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_15-06-2023.md deleted file mode 100644 index ff5308b84..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_15-06-2023.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -created_at: '2023-04-19T05:31:05Z' -tags: -- releasenote -title: jupyter.nesi.org.nz release notes 15/06/2023 -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 6805305911311 -zendesk_section_id: 360001150156 -search: - boost: 0.1 ---- - -## New and Improved - -- If [jupyter.nesi.org.nz](http://my.nesi.org.nz/) portal cannot - connect to the NeSI server, a descriptive error message will be - displayed instead of internal error 500 - -## Fixed - -- Update to ignore the low level variable SRUN\_CPUS\_PER\_TASK diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_16-09-2021.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_16-09-2021.md deleted file mode 100644 index 9f8342119..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_16-09-2021.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -created_at: '2021-09-16T04:48:10Z' -tags: -- releasenote -title: jupyter.nesi.org.nz release notes 16/09/2021 -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 4406546017551 -zendesk_section_id: 360001150156 -search: - 
boost: 0.1 ---- -## Release Update - 16. September 2021 - -## New and Improved - -- JupyterLab upgrade to v3.1.9 (Python updated from v3.8 to v3.9) - Read more on [changes and bug - fixes](https://jupyterlab.readthedocs.io/en/stable/getting_started/changelog.html#id12) -- Updated to JupyterHub 1.4.2 -- Rendering time remaining, CPU and Memory usage in the top menu bar - ![mceclip0.png](../../../assets/images/jupyter-nesi-org-nz_release_notes_16-09-2021.png) -- Confirmed JupyterLab extension for version control using Git - working - See diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_19-05-2023.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_19-05-2023.md deleted file mode 100644 index 54122a393..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_19-05-2023.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -created_at: '2023-05-18T20:48:30Z' -tags: -- releasenote -title: jupyter.nesi.org.nz release notes 19/05/2023 -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 7010547494287 -zendesk_section_id: 360001150156 -search: - boost: 0.1 ---- - - -## Fixed - -- Updated some Python packages in the Python 3.10 kernel to fix an - issue with ipywidgets not working properly in notebooks diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_19-12-2024.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_19-12-2024.md deleted file mode 100644 index f6372f8d0..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_19-12-2024.md +++ /dev/null @@ -1,18 +0,0 @@ 
---- -created_at: '2024-12-18T03:31:07Z' -tags: -- releasenote -title: jupyter.nesi.org.nz release notes 19/12/2024 -vote_count: 0 -vote_sum: 0 -search: - boost: 0.1 ---- - -## Fixed - -- A message inviting you to use OnDemand rather than Jupyter on NeSI is now visible only to users who have been invited as Ondemand early access users. -Note: users need to refresh their browser's cache. - -If you have any questions about any of the improvements or fixes, -please [contact NeSI Support](mailto:support@nesi.org.nz "mailto:support@nesi.org.nz"). diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_22-11-2023.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_22-11-2023.md deleted file mode 100644 index e6808d5ce..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_22-11-2023.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -created_at: '2023-11-22T03:10:45Z' -tags: -- releasenote -title: jupyter.nesi.org.nz release notes 22/11/2023 -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 8422683604367 -zendesk_section_id: 360001150156 -search: - boost: 0.1 ---- - -## Fixed - -- We are now closing user sessions when the corresponding Jupyter - server is stopped, to avoid idle sessions to linger on the host. We - missed one case during the last release. - -If you have any questions about any of the improvements or fixes, -please {% include "partials/support_request.html" %}. 
diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_24-09-2021.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_24-09-2021.md deleted file mode 100644 index 45b698596..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_24-09-2021.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -created_at: '2021-09-24T02:53:44Z' -tags: -- releasenote -title: jupyter.nesi.org.nz release notes 24/09/2021 -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 4406923475471 -zendesk_section_id: 360001150156 -search: - boost: 0.1 ---- - - -## Release Update - 24. September 2021 - -## New and Improved - -- Fixed Singularity version for RStudio and VirtualDesktop kernels -- Fixed pywidgets installation -- JupyterHub fixed: in case a job takes more than 300 seconds, don't - start the job to avoid 'ghost' instances of JupyterLab diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_25-08-2022.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_25-08-2022.md deleted file mode 100644 index ffdc58e96..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_25-08-2022.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -created_at: '2022-08-23T02:25:55Z' -tags: -- releasenote -title: jupyter.nesi.org.nz release notes 25/08/2022 -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 5362357660431 -zendesk_section_id: 360001150156 -search: - boost: 0.1 ---- - -## New and Improved - -- Updated [RStudio-on-NeSI](../../../Scientific_Computing/Interactive_computing_using_Jupyter/RStudio_via_Jupyter_on_NeSI.md) 
- to v0.24.0 - - RStudio server v2022.07.1 - - Allow usage of NeSI environment modules in RStudio terminal (beta) - - Allow usage of Slurm commands in RStudio terminal (beta) -- Updated [NeSI Virtual - Desktop](../../../Scientific_Computing/Interactive_computing_using_Jupyter/Virtual_Desktop_via_Jupyter_on_NeSI.md) - to v2.4.3 - - Utilising latest version of - [Singularity](../../../Scientific_Computing/Supported_Applications/Singularity.md) - -## Fixed - -- RStudio - - Addressed issue preventing user installation of rmarkdown when using R/4.1.0-gimkl-2020a - - Addressed knitr PDF compilation when using R/4.2.1-gimkl-2022a -- NeSI Virtual Desktop - - Added dependencies to fix OpenGL related issues - - Internal refactoring for maintenance purpose of the permission - with skeleton files in container build diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_26-03-2024.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_26-03-2024.md deleted file mode 100644 index 2cfff391f..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_26-03-2024.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -created_at: '2024-03-26T03:31:07Z' -tags: -- releasenote -title: jupyter.nesi.org.nz release notes 26/03/2024 -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 9376633910159 -zendesk_section_id: 360001150156 -search: - boost: 0.1 ---- - -## New and Improved - -- Updated JupyterLab to v3.6.3 -- Switched to Python 3.11 for running JupyterLab (NeSI-provided - kernels are unaffected) - - Please let us know if you experience any issues with custom - kernels - -## Fixed - -- Improvement on the error handling of connections - -If you have any questions about any of the improvements or fixes, -please [contact NeSI 
-Support](mailto:support@nesi.org.nz "mailto:support@nesi.org.nz"). diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_26-11-2024.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_26-11-2024.md deleted file mode 100644 index a67b96552..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_26-11-2024.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -created_at: '2024-11-11T03:31:07Z' -tags: -- releasenote -title: jupyter.nesi.org.nz release notes 26/11/2024 -vote_count: 0 -vote_sum: 0 -search: - boost: 0.1 ---- - -## New and Improved - -- Researchers with NeSI OnDemand won't be able to access JupyterHub as part of NeSI migration to newer platform. - -## Fixed - -N/A - -If you have any questions about any of the improvements or fixes, -please [contact NeSI Support](mailto:support@nesi.org.nz "mailto:support@nesi.org.nz"). 
diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_27-08-2024.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_27-08-2024.md deleted file mode 100644 index 84059312e..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_27-08-2024.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -tags: -- releasenote -title: jupyter.nesi.org.nz release notes 27/08/2024 -search: - boost: 0.1 ---- - -## Changes - -We have removed some of the older NeSI-provided Jupyter kernels: - -- Anaconda3 2019.03 -- Python 3.7.3 -- Python 3.8.1 -- R 3.6.1 -- R 4.0.1 - -A new kernel has been added: - -- Python 3.11.3 - -If you were using one of the removed kernels and can't switch to another one, -please [contact us](mailto:support@nesi.org.nz "mailto:support@nesi.org.nz") -and we'll be happy to assist you with setting up a -[custom kernel](https://docs.nesi.org.nz/Scientific_Computing/Interactive_computing_using_Jupyter/Jupyter_kernels_Tool_assisted_management/). 
diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_28-06-2022.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_28-06-2022.md deleted file mode 100644 index 3615ef21b..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_28-06-2022.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -created_at: '2022-06-27T00:01:38Z' -tags: -- releasenote -title: jupyter.nesi.org.nz release notes 28/06/2022 -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 5042124170127 -zendesk_section_id: 360001150156 -search: - boost: 0.1 ---- - -## Release Update - 28. June 2022 - -## New and Improved - -- Updated JupyterLab version to v3.4.3 - -## Fixed - -- Addressed issue handling the "slurm job id" with some Python modules that depend on MPI diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_30-07-2024.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_30-07-2024.md deleted file mode 100644 index afe9aeb7d..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_30-07-2024.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -created_at: '2024-07-30T03:31:07Z' -tags: -- releasenote -title: jupyter.nesi.org.nz release notes 30/07/2024 -vote_count: 0 -vote_sum: 0 -search: - boost: 0.1 ---- - -## New and Improved - -- Better support for password managers filling all login fields automatically. -- Disabling the notifications linked to new JupyterLab updates. - -## Fixed - -- Deployment updates for making testing easier on feature branches. 
- -If you have any questions about any of the improvements or fixes, -please [contact NeSI Support](mailto:support@nesi.org.nz "mailto:support@nesi.org.nz"). diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_31-03-2022.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_31-03-2022.md deleted file mode 100644 index c926db7a1..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Release_Notes_jupyter-nesi-org-nz/jupyter-nesi-org-nz_release_notes_31-03-2022.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -created_at: '2022-03-30T01:04:11Z' -tags: -- releasenote -title: jupyter.nesi.org.nz release notes 31/03/2022 -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 4589733874191 -zendesk_section_id: 360001150156 -search: - boost: 0.1 ---- - -## Release Update - 31. March 2022 - -## New and Improved - -- Updated JupyterLab version - to `JupyterLab/.2022.2.0-gimkl-2020a-3.2.8` -- Added user guidance on options (when launching a server instance) -- Updated available GPU options -- Added links to NeSI documentation diff --git a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Virtual_Desktop_via_Jupyter_on_NeSI.md b/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Virtual_Desktop_via_Jupyter_on_NeSI.md deleted file mode 100644 index 2cb7f12af..000000000 --- a/docs/Scientific_Computing/Interactive_computing_using_Jupyter/Virtual_Desktop_via_Jupyter_on_NeSI.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -created_at: '2020-07-08T01:45:40Z' -tags: [] -title: Virtual Desktop via Jupyter on NeSI -vote_count: 2 -vote_sum: 2 -status: deprecated -zendesk_article_id: 360001600235 -zendesk_section_id: 360001189255 ---- - -A virtual desktop provides a graphical interface to using the cluster. 
-Desktops are hosted within Singularity containers, so not all of the -NeSI software stack is supported. If you would like to build your own -desktop containers with the code -[here](https://github.com/nesi/nesi-singularity-recipes). - -Rendering is done cluster-side, and compressed before being sent to your -local machine. This means any rendering should be significantly more -responsive than when using X11 on its own (approximately 40 times -faster). - -The quickest and easiest way to get started with a desktop is through -Jupyter on NeSI, [connect here](https://jupyter.nesi.org.nz/). - -## Connecting - -Click the icon labelled 'VirtualDesktop', The desktop instance will last -as long as your Jupyter session. - -## Customisation - -Most of the customisation of the desktop can be done from within, -panels, desktop, software preferences. - -### `pre.bash` - -Enviroment set in `singularity_wrapper.bash` can be changed by creating -a file `$XDG_CONFIG_HOME/vdt/pre.bash` Anything you want to run -*before* launching the container put in here. - -``` sl -export VDT_BASE_IMAGE="~/my_custom_container.sif" # Use a different image file. -export VDT_RUNSCRIPT="~/my_custom_runscript" # Use a different runscript. - -export OVERLAY="TRUE" -export BROWSER="chrome" # Desktop session will inherit this - -module load ANSYS/2021R2 # Any modules you want to be loaded in main instance go here. -``` - -### `post.bash` - -Environment set in `runscript_wrapper.bash` can be changed by creating a -file `$XDG_CONFIG_HOME/vdt/post.bash` - -Things you may wish to set here are: -`VDT_WEBSOCKOPTS`, `VDT_VNCOPTS`, any changes to the wm environment, any -changes to path, this include module files. - -``` sl -export VDT_VNCOPTS="-depth 16" # This will start a 16bit desktop -export BROWSER="chrome" # Desktop session will inherit this. - -module load ANSYS/2021R2 # Any modules you want to be loaded in main instance go here. 
-``` - -## Custom container - -You can build your own container bootstrapping off -`vdt_base.sif`/`rocky8vis.sif` and then overwrite the default by setting -`VDT_BASE_IMAGE` in `pre.bash`. - -*You can help contribute to this project [here](https://github.com/nesi/nesi-virtual-desktops/projects/1).* diff --git a/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/.pages.yml b/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/.pages.yml deleted file mode 100644 index 95e206f2c..000000000 --- a/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/.pages.yml +++ /dev/null @@ -1,14 +0,0 @@ -nav: - - Checking_your_projects_usage_using_nn_corehour_usage.md - - GPU_use_on_NeSI.md - - Job_Checkpointing.md - - Slurm_Interactive_Sessions.md - - Fair_Share.md - - Job_prioritisation.md - - Mahuika_Slurm_Partitions.md - - Milan_Compute_Nodes.md - - Maui_Slurm_Partitions.md - - SLURM-Best_Practice.md - - NetCDF-HDF5_file_locking.md - - Checksums.md - - ... diff --git a/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Maui_Slurm_Partitions.md b/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Maui_Slurm_Partitions.md deleted file mode 100644 index 3a1c391b8..000000000 --- a/docs/Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Maui_Slurm_Partitions.md +++ /dev/null @@ -1,216 +0,0 @@ ---- -created_at: '2018-05-21T03:32:04Z' -tags: -- maui -- slurm -title: "M\u0101ui Slurm Partitions" -vote_count: 1 -vote_sum: 1 -zendesk_article_id: 360000204116 -zendesk_section_id: 360000030876 ---- - -!!! tip - Partitions on these systems that may be used for NeSI workloads carry - the prefix `nesi_`. - -## Māui (XC50) Slurm Partitions - -Nodes are not shared between jobs on Māui, so the minimum charging unit -is node-hours, where 1 node-hour is 40 core-hours, or 80 Slurm -CPU-hours. - -There is only one partition available to NeSI jobs: - - -------- - - - - - - - - - - - - - - - - - - -
 Name NodesMax -WalltimeAvail / -NodeMax / -AccountDescription
nesi_research31624 -hours80 -CPUs -90 or 180 GB RAM240 -nodes -1200 node-hours runningStandard -partition for all NeSI jobs.
-
-
- -### Limits - -As a consequence of the above limit on the node-hours reserved by your -running jobs (*GrpTRESRunMins* in Slurm documentation, shown in `squeue` -output when you hit it as the reason "*AssocGrpCPURunMinutes"* ) you can -occupy more nodes simultaneously if your jobs request a shorter time -limit: - -| nodes | hours | node-hours | limits reached | -| ----- | ----- | ---------- | -------------------------- | -| 1 | 24 | 24 | 24 hours | -| 50 | 24 | 1200 | 1200 node-hours, 24 hours | -| 100 | 12 | 1200 | 1200 node-hours | -| 240 | 5 | 1200 | 1200 node-hours, 240 nodes | -| 240 | 1 | 240 | 240 nodes | - -Most of the time [job -priority](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Job_prioritisation.md) will -be the most important influence on how long your jobs have to wait - the -above limits are just backstops to ensure that Māui's resources are not -all committed too far into the future, so that debug and other -higher-priority jobs can start reasonably quickly. - -### Debug QoS - -Each job has a "QoS", with the default QoS for a job being determined by -the [allocation class](../../General/NeSI_Policies/Allocation_classes.md) -of its project. Specifying `--qos=debug` will override that and give the -job high priority, but is subject to strict limits: 15 minutes per -job, and only 1 job at a time per user. Debug jobs are limited to 2 -nodes. - -## Māui\_Ancil (CS500) Slurm Partitions - - --------- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
NameNodesMax -WalltimeAvail / -NodeMax / -JobMax / -UserDescription
nesi_prepost424 -hours80 -CPUs -720 GB RAM20 -CPUs -700 GB RAM80 -CPUs -700 GB RAMPre and -post processing tasks.
nesi_gpu4 to -572 -hours4 -CPUs -12 GB RAM -1 P100 GPU*4 -CPUs -12 GB RAM -1 P100 GPU4 -CPUs -12 GB RAM -1 P100 GPUGPU -jobs and visualisation. 
nesi_igpu0 to -12 -hours4 -CPUs -12 GB RAM -1 P100 GPU*4 -CPUs -12 GB RAM -1 P100 GPU4 -CPUs -12 GB RAM -1 P100 GPUInteractive -GPU access 7am - 8pm.
- -\* NVIDIA Tesla P100 PCIe 12GB card - -### Requesting GPUs - -Nodes in the `nesi_gpu` partition have 1 P100 GPU card each. You can -request it using: - -``` sl -#SBATCH --partition=nesi_gpu -#SBATCH --gpus-per-node=1 -``` - -Note that you need to specify the name of the partition.  You also need -to specify a number of CPUs and amount of memory small enough to fit on -these nodes. - -See [GPU use on NeSI](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/GPU_use_on_NeSI.md) -for more details about Slurm and CUDA settings. diff --git a/docs/Scientific_Computing/Supported_Applications/JupyterLab.md b/docs/Scientific_Computing/Supported_Applications/JupyterLab.md deleted file mode 100644 index c5e0b520c..000000000 --- a/docs/Scientific_Computing/Supported_Applications/JupyterLab.md +++ /dev/null @@ -1,254 +0,0 @@ ---- -created_at: '2019-08-09T00:46:44Z' -status: deprecated -tags: [] -title: JupyterLab ---- - - -[//]: <> (APPS PAGE BOILERPLATE START) -{% set app_name = page.title | trim %} -{% set app = applications[app_name] %} -{% include "partials/app_header.html" %} -[//]: <> (APPS PAGE BOILERPLATE END) - -!!! warning - This documentation contains our legacy instructions for running - JupyterLab by tunnelling through the lander node. - If you are a Mahuika cluster user, we recommend using Jupyter via [jupyter.nesi.org.nz](https://jupyter.nesi.org.nz). - Follow this link for [more - information](../../Scientific_Computing/Interactive_computing_using_Jupyter/Jupyter_on_NeSI.md) - -NeSI provides a service for working on Jupyter Notebooks. As a first -step JupyterLab can be used on Mahuika nodes. JupyterLab is a -single-user web-based Notebook server, running in the user space. -JupyterLab servers should be started preferably on a compute node, -especially for compute intensive or memory intensive workloads. For less -demanding work the JupyterLab server can be started on a login or -virtual lab node. 
After starting the server your local browser can be -connected. Therefore port forwarding needs to be enabled properly. The -procedure will be simplified in future, but now require the following -steps, which are then described in more details: - - -## Launch JupyterLab - -Since JupyterLab is a web based application, and at NeSI launched behind -the firewall, a **port** needs to be forwarded to your local machine, -where your browser should connected. This ports are numbers between 2000 -and 65000, which needs to be unique on the present machine. The default -port for JupyterLab is 8888, but only one user can use this at a time. - -To avoid the need for modifying the following procedure again and again, -we suggest to (once) select a unique number (between 2000 and 65000). -This number needs to be used while establishing the port forwarding and -while launching JupyterLab. In the following we use the port number -15051 (**please select another number**). - -### Setup SSH port forwarding - -!!! prerequisite - - In the following we assume you already configured - your`.ssh/config` to use two hop method as described in the - [Standard Terminal Setup](../../Scientific_Computing/Terminal_Setup/Standard_Terminal_Setup.md). - -First, the port forwarding needs to be enabled between your local -machine and the NeSI system. Therewith a local port will be connected to -the remote port on the NeSI system. For simplicity, we kept both numbers -the same (here 15051). This can be specified on the command line in the -terminal or using the -[MobaXterm GUI](#mobaxterm-gui). - -#### SSH Command Line - -The ssh command need to be called with following arguments, e.g. for -Mahuika: - -``` sh -ssh -N -L 15051:localhost:15051 mahuika -``` - -Here -N means "Do not execute a remote command" and -L means "Forward -Local Port". - -!!! tip - - For Maui\_Ancil, e.g. w-mauivlab01 you may want to add the - following to your `.ssh/config` to avoid establishing the - additional hop manually. 
- ``` ssh - Host maui_vlab - User - Hostname w-mauivlab01.maui.niwa.co.nz - ProxyCommand ssh -W %h:%p maui - ForwardX11 yes - ForwardX11Trusted yes - ServerAliveInterval 300 - ServerAliveCountMax 2 - ``` - <username> needs to be changed. Hostnames can be adapted for - other nodes, e.g. `w-clim01` - -#### MobaXterm GUI - -!!! tips - - MobaXterm has an internal terminal which acts like a linux - terminal and can be configured as described in the [Standard - Terminal - Setup](../../Scientific_Computing/Terminal_Setup/Standard_Terminal_Setup.md). - Therewith the [SSH command line](#h_892370eb-662a-4480-9ae4-b56fd64eb7d0) approach above can - be used. - - -MobaXterm has a GUI to setup and launch sessions with port forwarding, -click 'Tools > MobaSSH Thunnel (port forwarding)': - -- specify the lander.nesi.org.nz as SSH server address (right, lower - box, first line) -- specify your user name (right, lower box, second line) -- specify the remote server address, e.g. login.mahuika.nesi.org.nz  - (right, upper box first line) -- specify the JupyterLab port number on the local side (left) and at - the remote server (right upper box, second line) -- Save - -![sshTunnel.PNG](../../assets/images/JupyterLab.PNG) - -### Launch the JupyterLab server  - -After successfully establishing the port forwarding, we need open -another terminal and login to the NeSI system in the usual way, e.g. -opening a new terminal and start another ssh session: - -``` sl -ssh mahuika -``` - -On the Mahuika login node, load the environment module which provides -JupyterLab: - -``` sl -module load JupyterLab -``` - -Or alternatively, and particularly if you are using a Māui ancillary -node instead of Mahuika, you can use the Anaconda version of JupyterLab -instead: - -``` sl -module load Anaconda3 -module load IRkernel # optional -``` - -The JupyterLab server then can be started on the present node (login or -virtual lab) or offloaded to a compute node. 
Please launch compute or -memory intensive tasks on a compute -node - -#### On login nodes / virtual labs - -For very small (computational cheap and small memory) the JupyterLab can -be started on the login or virtual lab using: - -``` sl -jupyter lab --port 15051 --no-browser -``` - -Where, `--port 15051` specifies the above selected port number and -`--no-browser` option prevents JupyterLab from trying to open a browser -on the compute/login node side. Jupyter will present output as described -in the next section including -the URL and a unique key, which needs to be copied in your local -browser. - -#### On compute node - -Especially notebooks with computational and memory intensive tasks -should run on compute nodes. Therefore, a script is provided, taking -care of port forwarding to the compute node and launching JupyterLab. A -session with 60 min on 1 core can be launched using: - -``` sl -srun --ntasks 1 -t 60  jupyter-compute 15051  # please change port number -``` - -After general output, JupyterLab prints a URL with a unique key and the -network port number where the web-server is listening, this should look -similar to: - -``` sl -... -[C 14:03:19.911 LabApp] - To access the notebook, open this file in a browser: - file:///scale_wlg_persistent/filesets/project/nesi99996/.local/share/jupyter/runtime/nbserver-503-open.html - Or copy and paste one of these URLs: - http://localhost:15051/?token=d122855ebf4d029f2bfabb0da03ae01263972d7d830d79c4 -``` - -The last line will be needed in the browser later. - -Therewith the Notebook and its containing tasks are performed on a -compute node. You can double check e.g. using - -``` sl -import os -os.open('hostname').read() -``` - -More resources can be requested, e.g. by using: - -``` sl -srun --ntasks 1 -t 60 --cpus-per-task 5 --mem 512MB jupyter-compute 15051 -``` - -Where 5 cores are requested for threading and a total memory of 3GB. 
-Please do not use `multiprocessing.cpu_count()` since this is returning -the total amount of cores on the node. Furthermore, if you use -libraries, which implement threading align the numbers of threads (often -called jobs) to the selected number of cores (otherwise the performance -will be affected). - -### JupyterLab in your local browser - - -Finally, you need to open your local web browser and copy and paste the -URL specified by the JupyterLab server into the address bar. After -initializing Jupyter Lab you should see a page similar to: - -![Jupyter.PNG](../../assets/images/JupyterLab_0.PNG) - -## Kernels - -The following JupyterLab kernel are installed: - -- Python3 -- R  -- Spark - -### R - -verify that the module IRkernel is loaded - -``` sl -module load IRkernel -``` - -## Spark - -pySpark and SparkR is supported in NeSI Jupyter notebooks. Therefore, -the module Spark needs to be loaded before starting Jupyter. Please run -Spark workflows on compute nodes. - -``` sl -module load Spark -``` - -## Packages - -There are a long list of default packages provided by the JupyterLab -environment module (list all using `!pip list`) and R (list using -`installed.packages(.Library)`, note the list is shortened).  - -Furthermore, you can install additional packages as described on the -[Python](../../Scientific_Computing/Supported_Applications/Python.md) -and [R](../../Scientific_Computing/Supported_Applications/R.md) support -page. diff --git a/docs/Scientific_Computing/Supported_Applications/index.md b/docs/Scientific_Computing/Supported_Applications/index.md deleted file mode 100644 index 254d53ea9..000000000 --- a/docs/Scientific_Computing/Supported_Applications/index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Supported Applications -template: supported_apps.html -hide: - - toc ---- - - - - - -For more information on environment-modules see [Finding Software](../HPC_Software_Environment/Finding_Software.md). 
- -On **Mahuika**, and **Ancillary** nodes (both Mahuika and Māui) software packages are provided using 'Lmod' an implementation of Environment Modules with [additional features](https://lmod.readthedocs.io/en/latest/010_user.html). -A list of available software can be obtained with the `module spider` command. - -On **Māui** (XC50), software packages are provided using traditional Environment Modules. No modules are loaded by default. -A list of available software can be obtained with the `module avail` command. diff --git a/docs/Scientific_Computing/Terminal_Setup/.pages.yml b/docs/Scientific_Computing/Terminal_Setup/.pages.yml deleted file mode 100644 index 652facfdc..000000000 --- a/docs/Scientific_Computing/Terminal_Setup/.pages.yml +++ /dev/null @@ -1,4 +0,0 @@ -nav: - - Standard_Terminal_Setup.md - - ... - - X11_on_NeSI.md \ No newline at end of file diff --git a/docs/Scientific_Computing/The_NeSI_High_Performance_Computers/.pages.yml b/docs/Scientific_Computing/The_NeSI_High_Performance_Computers/.pages.yml deleted file mode 100644 index 985ddcc00..000000000 --- a/docs/Scientific_Computing/The_NeSI_High_Performance_Computers/.pages.yml +++ /dev/null @@ -1,5 +0,0 @@ -nav: - - Mahuika.md - - Maui.md - - Maui_Ancillary.md - - ... diff --git a/docs/Scientific_Computing/The_NeSI_High_Performance_Computers/Available_GPUs_on_NeSI.md b/docs/Scientific_Computing/The_NeSI_High_Performance_Computers/Available_GPUs_on_NeSI.md deleted file mode 100644 index 5880c3e72..000000000 --- a/docs/Scientific_Computing/The_NeSI_High_Performance_Computers/Available_GPUs_on_NeSI.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -created_at: '2022-06-13T04:54:38Z' -description: This page below outlines the different types of GPUs available on NeSI -tags: - - gpu ---- - - -NeSI has a range of Graphical Processing Units (GPUs) to accelerate compute-intensive research and support more analysis at scale. 
-Depending on the type of GPU, you can access them in different ways, such as via batch scheduler (Slurm), interactively (using [Jupyter on NeSI](../Interactive_computing_using_Jupyter/Jupyter_on_NeSI.md)), -or Virtual Machines (VMs). - -The table below outlines the different types of GPUs, -who can access them and how, and whether they are currently available or on the future roadmap. - -If you have any questions about GPUs on NeSI or the status of anything listed in the table, -{% include "partials/support_request.html" %}. - -| GPGPU | Purpose | Location | Access mode | Who can access | Status | -|-------|---------|----------|-------------|----------------|--------| -| 9 NVIDIA Tesla P100 PCIe 12GB cards (1 node with 1 GPU, 4 nodes with 2 GPUs) | | [Mahuika](../The_NeSI_High_Performance_Computers/Mahuika.md) | Slurm and [Jupyter](../Interactive_computing_using_Jupyter/Jupyter_on_NeSI.md) | NeSI users | Currently available | -| 7 NVIDIA A100 PCIe 40GB cards (4 nodes with 1 GPU, 2 nodes with 2 GPUs) | Machine Learning (ML) applications | [Mahuika](../The_NeSI_High_Performance_Computers/Mahuika.md) | Slurm | NeSI users | Currently available | -| 7 A100-1g.5gb instances (1 NVIDIA A100 PCIe 40GB card divided into [7 MIG GPU slices](https://www.nvidia.com/en-us/technologies/multi-instance-gpu/) with 5GB memory each) | Development and debugging | [Mahuika](Mahuika.md) | Slurm and [Jupyter](../Interactive_computing_using_Jupyter/Jupyter_on_NeSI.md) | NeSI users | Currently available | -| 5 NVIDIA Tesla P100 PCIe 12GB (5 nodes with 1 GPU) | Post-processing | [Māui Ancil](Maui_Ancillary.md) | Slurm | NeSI users | Currently available | -| 4 NVIDIA HGX A100 (4 GPUs per board with 80GB memory each, 16 A100 GPUs in total) | Large-scale Machine Learning (ML) applications | [Mahuika](Mahuika.md) | Slurm | NeSI users | Available as part of the [Milan Compute Nodes](../Running_Jobs_on_Maui_and_Mahuika/Milan_Compute_Nodes.md) | -| 4 NVIDIA A40 with 48GB memory each (2 nodes with 2 
GPUs, but capacity for 6 additional GPUs already in place) | Teaching / training | Flexible HPC | [Jupyter](../Interactive_computing_using_Jupyter/Jupyter_on_NeSI.md), VM, or bare metal tenancy possible (flexible) | Open to conversations with groups who could benefit from these | In development. | diff --git a/docs/Scientific_Computing/The_NeSI_High_Performance_Computers/Mahuika.md b/docs/Scientific_Computing/The_NeSI_High_Performance_Computers/Mahuika.md deleted file mode 100644 index d2481da59..000000000 --- a/docs/Scientific_Computing/The_NeSI_High_Performance_Computers/Mahuika.md +++ /dev/null @@ -1,150 +0,0 @@ ---- -created_at: '2018-04-22T21:09:28Z' -tags: -- hpc -- mahuika -- cs400 -title: Mahuika -vote_count: 7 -vote_sum: 3 -zendesk_article_id: 360000163575 -zendesk_section_id: 360000034335 ---- - -Mahuika is a Cray CS400 cluster featuring Intel Xeon Broadwell nodes, -FDR InfiniBand interconnect, and NVIDIA GPGPUs. - -Mahuika is designed to provide a capacity, or high throughput, HPC -resource that allows researchers to run many small (from one to a few -hundred CPU cores) compute jobs simultaneously, and to conduct -interactive data analysis. To support jobs that require large (up to -500GB) or huge (up to 4 TB) memory, or GPGPUs, and to provide virtual -lab services, Mahuika has additional nodes optimised for this purpose. - -The Mahuika login (or build) nodes, mahuika01 and mahuika02, provide -access to GNU, Intel and Cray programming environments, including -editors, compilers, linkers, and debugging tools. Typically, users will -ssh to these nodes after logging onto the NeSI lander node. - -## Notes - -1. The Cray Programming Environment on Mahuika, differs from that on - Māui. -2. The `/home, /nesi/project`, and `/nesi/nobackup` - [filesystems](../../Storage/File_Systems_and_Quotas/NeSI_File_Systems_and_Quotas.md) - are mounted on Mahuika. -3. 
Read about how to compile and link code on Mahuika in section - entitled: [Compiling software on - Mahuika.](../../Scientific_Computing/HPC_Software_Environment/Compiling_software_on_Mahuika.md) -4. An extension to Mahuika with additional, upgraded resources is also - available. see [Milan Compute - Nodes](../../Scientific_Computing/Running_Jobs_on_Maui_and_Mahuika/Milan_Compute_Nodes.md) - for details on access - -## Mahuika HPC Cluster (Cray CS400) - - ---- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Login -nodes

72 cores in 2× -Broadwell (E5-2695v4, 2.1 GHz, dual socket 18 cores per socket) -nodes

Compute -nodes

8,136 cores in 226 × -Broadwell (E5-2695v4, 2.1 GHz, dual socket 18 cores per socket) -nodes;
-7,552 cores in 64 HPE Apollo 2000 XL225n nodes (AMD EPYC Milan -7713) the Milan partition

Compute -nodes (reserved for NeSI Cloud)
-

288 cores in 8 × -Broadwell (E5-2695v4, 2.1 GHz, dual socket 18 cores per socket) -nodes

GPUs
-

9 NVIDIA Tesla P100 -PCIe 12GB cards (1 node with 1 GPU, 4 nodes with 2 GPUs)

-

8 NVIDIA A100 PCIe 40GB cards (4 nodes with 1 GPU, 2 nodes with -2 GPUs)
-

-

16 NVIDIA A100 HGX 80GB cards (4 nodes with 4 GPU -each)

Hyperthreading

Enabled -(accordingly, SLURM will see ~31,500 cores)

Theoretical -Peak Performance

308.6 -TFLOPs

Memory -capacity per compute node

128 -GB

Memory -capacity per login (build) node

512 -GB

Total System -memory

84.0 -TB

Interconnect

FDR (54.5Gb/s) -InfiniBand to EDR (100Gb/s) Core fabric. 3.97:1 Fat-tree -topology

Workload -Manager

Slurm -(Multi-Cluster)

Operating -System

CentOS 7.4 & -Rocky 8.5 on Milan

- -##  Storage (IBM ESS) - -| | | -|------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| **Scratch storage** | 4,412 TB (IBM Spectrum Scale, version 5.0). Total I/O bandwidth to disks is ~130 GB/s | -| **Persistent storage** | 1,765 TB (IBM Spectrum Scale, version 5.0). Shared between Mahuika and Māui Total I/O bandwidth to disks is ~65 GB/s (i.e. the /home and /nesi/project filesystems) | -| **Offline storage** | Of the order of 100 PB (compressed) | - -Scratch and persistent storage are accessible from Mahuika, as well as -from Māui and the ancillary nodes. Offline storage will in due course be -accessible indirectly, via a dedicated service. diff --git a/docs/Scientific_Computing/The_NeSI_High_Performance_Computers/Maui.md b/docs/Scientific_Computing/The_NeSI_High_Performance_Computers/Maui.md deleted file mode 100644 index b39813c12..000000000 --- a/docs/Scientific_Computing/The_NeSI_High_Performance_Computers/Maui.md +++ /dev/null @@ -1,132 +0,0 @@ ---- -created_at: '2018-04-22T23:01:48Z' -status: deprecated -tags: -- hpc -- info -- maui -- XC50 -- cs500 -title: "M\u0101ui" -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 360000163695 -zendesk_section_id: 360000034335 ---- - - -Māui is a Cray XC50 supercomputer featuring Skylake Xeon nodes, Aries -interconnect and IBM ESS Spectrum Scale Storage. NeSI has access to 316 -compute nodes on Māui. - -Māui is designed as a capability high-performance computing resource for -simulations and calculations that require large numbers of CPUs working -in a tightly-coupled parallel fashion, as well as interactive data -analysis. To support workflows that are primarily single core jobs, for -example pre- and post-processing work, and to provide virtual lab -services, we offer a small number [Māui ancillary nodes](../The_NeSI_High_Performance_Computers/Maui_Ancillary.md). - -!!! 
tips - The computing capacity of the Māui ancillary nodes is limited. If you - think you will need large amounts of computing power for small jobs in - addition to large jobs that can run on Māui, please {% include "partials/support_request.html" %} about getting an - allocation on - [Mahuika](Mahuika.md), - our high-throughput computing cluster. - -The login or build nodes maui01 and maui02 provide access to the full -Cray Programming Environment (e.g. editors, compilers, linkers, debug -tools). Typically, users will access these nodes via SSH from the NeSI -lander node. Jobs can be submitted to the HPC from these nodes. - -## Important Notes - -1. The Cray Programming Environment on the XC50 (supercomputer) differs - from that on Mahuika and the Māui Ancillary nodes. -2. The `/home, /nesi/project`, and `/nesi/nobackup` [file systems](../../Storage/File_Systems_and_Quotas/NeSI_File_Systems_and_Quotas.md) are - mounted on Māui. -3. The I/O subsystem on the XC50 can provide high bandwidth to disk - (large amounts of data), but not many separate reading or writing - operations. If your code performs a lot of disk read or write - operations, it should be run on either the [Māui ancillary - nodes](../../Scientific_Computing/The_NeSI_High_Performance_Computers/Maui_Ancillary.md) or [Mahuika](../../Scientific_Computing/The_NeSI_High_Performance_Computers/Mahuika.md). - -All Māui resources are indicated below, and the the Māui Ancillary Node -resources -[here](../../Scientific_Computing/The_NeSI_High_Performance_Computers/Maui_Ancillary.md). - -## Māui Supercomputer (Cray XC50) - - ---- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Login nodes (also known as -eLogin nodes)

80 cores in 2 × Skylake (Gold 6148, 2.4 GHz, -dual socket 20 cores per socket) nodes

Compute nodes

18,560 cores in 464 × Skylake (Gold 6148, 2.4 -GHz, dual socket 20 cores per socket) nodes;

Hyperthreading

Enabled (accordingly, SLURM will see 37,120 -cores)

Theoretical Peak -Performance

1.425 PFLOPS

Memory capacity per compute -node

232 nodes have 96 GB, the remaining 232 have -192 GB each

Memory capacity per login (build) -node

768 GB

Total System -memory

66.8 TB

Interconnect

Cray Aries, Dragonfly topology

Workload -Manager

Slurm (Multi-Cluster)

Operating -System

Cray Linux Environment CLE7.0UP04
-SUSE Linux Enterprise Server 15 SP3
-

- -## Storage (IBM ESS) - -| | | -|----------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------| -| **Scratch Capacity** (accessible from all Māui, Mahuika, and Ancillary nodes). | 4,412 TB (IBM Spectrum Scale, version 5.0). Total I/O bandwidth to disks is 130 GB/s | -| **Persistent storage** (accessible from all Māui, Mahuika, and Ancillary nodes). | 1,765 TB (IBM Spectrum Scale, version 5.0) Shared Storage. Total I/O bandwidth to disks is 65 GB/s (i.e. the /home and /nesi/project filesystems) | -| **Offline storage** (accessible from all Māui, Mahuika, and Ancillary nodes). | Of the order of 100 PB (compressed) | - -  - -  diff --git a/docs/Scientific_Computing/The_NeSI_High_Performance_Computers/Maui_Ancillary.md b/docs/Scientific_Computing/The_NeSI_High_Performance_Computers/Maui_Ancillary.md deleted file mode 100644 index c9471cbdd..000000000 --- a/docs/Scientific_Computing/The_NeSI_High_Performance_Computers/Maui_Ancillary.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -created_at: '2018-05-21T01:43:06Z' -tags: -- maui -- XC50 -- cs500 -title: "M\u0101ui Ancillary" -vote_count: 0 -vote_sum: 0 -zendesk_article_id: 360000203776 -zendesk_section_id: 360000034335 ---- - - -The Māui Ancillary Nodes provide access to a Virtualised environment -that supports: - -1. Pre- and post-processing of data for jobs running on the - [Māui](Maui.md) - Supercomputer or - [Mahuika](Mahuika.md) HPC - Cluster. Typically, as serial processes on a Slurm partition running - on a set of Ancillary node VMs or baremetal servers. -2. Virtual laboratories that provide interactive access to data stored - on the Māui (and Mahuika) storage together with domain analysis - toolsets (e.g. Seismic, Genomics, Climate, etc.). 
To access the - Virtual Laboratory nodes, users will first logon to the NeSI Lander - node, then ssh to the relevant Virtual Laboratory. Users may submit - jobs to Slurm partitions from Virtual Laboratory nodes. -3. Remote visualisation of data resident on the filesystems. -4. GPGPU computing. - -Scientific Workflows may access resources across the Māui Supercomputer -and any (multi-cluster) Slurm partitions on the Māui or Mahuika systems. - -## Notes - -1. The `/home, /nesi/project`, and `/nesi/nobackup` - [filesystems](../../Storage/File_Systems_and_Quotas/NeSI_File_Systems_and_Quotas.md) - are mounted on the Māui Ancillary Nodes. -2. The Māui Ancillary nodes have Skylake processors, while the Mahuika - nodes use Broadwell processors. - -## Ancillary Node Specifications - -| | | -|--------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------| -| **Multi-Purpose nodes** | 1,120 cores in 28 × Skylake (Gold 6148, 2.4 GHz, dual socket 20 cores per socket) nodes, which will appear as 2,240 logical cores. | -| **Hyperthreading** | Enabled | -| **Local Disk** | 1.2TB SSD | -| **Operating System** | CentOS 7.4 | -| **GPGPUs** | 5 NVIDIA Tesla P100 PCIe 12GB (5 nodes with 1 GPU) | -| **Remote Visualisation** | [NICE DCV](https://www.nice-software.com/products/dcv) | -| **Memory capacity per Multi-Purpose node** | 768 GB | -| **Interconnect** | EDR (100 Gb/s) InfiniBand | -| **Workload Manager** | Slurm (Multi-Cluster) | -| **OpenStack** | The Cray CS500 Ancillary nodes will normally be presented to users as Virtual Machines, provisioned from the physical hardware as required. | - -The Māui_Ancil nodes have different working environment than the Māui -(login) nodes. Therefore a CS500 login node is provided, to create and -submit your jobs on this architecture. 
To use you need to login from -Māui login nodes to: - -``` sh -w-mauivlab01.maui.nesi.org.nz -``` - -If you are looking for accessing this node from your local machine you -could add the following section to `~/.ssh/config` (extending the -[recommended terminal setup](../../Scientific_Computing/Terminal_Setup/Standard_Terminal_Setup.md) - -``` sh -Host w-mauivlab01 - User - Hostname w-mauivlab01.maui.nesi.org.nz - ProxyCommand ssh -W %h:%p maui - ForwardX11 yes - ForwardX11Trusted yes - ServerAliveInterval 300 - ServerAliveCountMax 2 -``` diff --git a/docs/Scientific_Computing/The_NeSI_High_Performance_Computers/index.md b/docs/Scientific_Computing/The_NeSI_High_Performance_Computers/index.md deleted file mode 100644 index 7aad8673b..000000000 --- a/docs/Scientific_Computing/The_NeSI_High_Performance_Computers/index.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -created_at: '2018-05-01T23:29:39Z' -tags: -- hpc -- info -title: The NeSI High Performace Computers -vote_count: 2 -vote_sum: 0 -zendesk_article_id: 360000175735 -zendesk_section_id: 360000034335 ---- - -The NeSI High Performance Computers -[Māui](Maui.md) and -[Mahuika](Mahuika.md) provide -the New Zealand research community with access to a national -data-centric and data intensive research computing environment built on -leading edge high performance computing (HPC) systems. - -- Māui, which in Maori mythology is credited with catching a giant - fish using a fishhook taken from his grandmother's jaw-bone; the - giant fish would become the North Island of New Zealand, provides a - Capability (i.e. Supercomputer) HPC resource on which researchers - can run simulations and calculations that require large numbers - (e.g. thousands) of processing cores working in a tightly-coupled, - parallel fashion. -- Mahuika, which in Maori mythology, is a fire deity, from whom Māui - obtained the secret of making fire, provides a Capacity (i.e. - Cluster) HPC resource to allow researchers to run many small (e.g. 
- from 1 core to a few hundred cores) compute jobs simultaneously - (aka  High Throughput Computing). - -Māui and Mahuika share the same high performance filesystems, -accordingly, data created on either system are visible on the other -(i.e. without the need to copy data between systems). However, they have -different processors (Skylake on Māui, and Broadwell on Mahuika), and -different flavours of Linux (SLES on Māui and CentOS on Mahuika), so -shared applications should be explicitly compiled and linked for each -architecture. These systems and Ancillary Nodes on Mahuika and -on [Māui](Maui.md)  provide -the research community with: - -- Leading edge HPCs (both Capacity and Capability) via a single point - of access; -- New user facing services that can act on the data held within the NeSI HPC infrastructure, including: - - Pre- and post-processing systems to support workflows; - - Virtual Laboratories that provide interactive access to science domain specific tools \[Coming soon\]; - - Remote visualisation services \[Coming soon\]; - - Advanced data analytics tools, and - - The ability to seamlessly move data between high performance disk storage and offline tape. -- Offsite replication of critical data (both online and offline). - -These systems are -[accessed](../../Getting_Started/Accessing_the_HPCs/Choosing_and_Configuring_Software_for_Connecting_to_the_Clusters.md) -via a “lander” node using [two-factor authentication](../../Getting_Started/Accessing_the_HPCs/Setting_Up_Two_Factor_Authentication.md). - -NeSI researchers have access to all compute nodes on Mahuika, and 316 -compute nodes on Māui. diff --git a/docs/Scientific_Computing/Training/.pages.yml b/docs/Scientific_Computing/Training/.pages.yml deleted file mode 100644 index 678e656a3..000000000 --- a/docs/Scientific_Computing/Training/.pages.yml +++ /dev/null @@ -1,2 +0,0 @@ -nav: - - ... 
\ No newline at end of file diff --git a/docs/Storage/File_Systems_and_Quotas/.pages.yml b/docs/Storage/File_Systems_and_Quotas/.pages.yml deleted file mode 100644 index 0056976b5..000000000 --- a/docs/Storage/File_Systems_and_Quotas/.pages.yml +++ /dev/null @@ -1,3 +0,0 @@ -nav: - - NeSI_File_Systems_and_Quotas.md - - ... \ No newline at end of file diff --git a/docs/Getting_Started/Getting_Help/System_status.md b/docs/System_status.md similarity index 77% rename from docs/Getting_Started/Getting_Help/System_status.md rename to docs/System_status.md index 78f44dec9..365d5fc58 100644 --- a/docs/Getting_Started/Getting_Help/System_status.md +++ b/docs/System_status.md @@ -9,7 +9,7 @@ zendesk_section_id: 360000164635 --- !!! note "See also" - [NeSI wide area network connectivity](../../Getting_Started/Getting_Help/NeSI_wide_area_network_connectivity.md) + [NeSI wide area network connectivity](NeSI_wide_area_network_connectivity.md) ## NeSI system status related notifications @@ -21,13 +21,13 @@ The [support.nesi.org.nz](https://support.nesi.org.nz) homepage shows current in In order to manage your subscription to notifications, either log into [my.nesi](https://my.nesi.org.nz/account/preference) or use the link included at the bottom of the notification email message "Manage your subscription" or "Unsubscribe" to manage your preferences. -See also our support article [Managing NeSI notification preferences](../../Getting_Started/my-nesi-org-nz/Managing_notification_preferences.md) +See also our support article [Managing NeSI notification preferences](Managing_notification_preferences.md) -![mceclip0.png](../../assets/images/System_status.png){ width="80%" } +![mceclip0.png](System_status.png){ width="80%" } ## status.nesi.org.nz NeSI does publish service incidents and scheduled maintenance via [status.nesi.org.nz](https://status.nesi.org.nz). Interested parties are invited to subscribe to updates (via SMS or email). 
-![system status](../../assets/images/System_status_0.png){ width="80%" } +![system status](System_status_0.png){ width="80%" } diff --git a/docs/assets/glossary/dictionary.txt b/docs/assets/glossary/dictionary.txt index 62e4fe470..36eee7b32 100644 --- a/docs/assets/glossary/dictionary.txt +++ b/docs/assets/glossary/dictionary.txt @@ -23,8 +23,6 @@ ANNOVAR's ANNOVAR ANSYS's ANSYS -ANTLR's -ANTLR ANTs's ANTs AOCC's @@ -87,14 +85,10 @@ AutoDock-GPU's AutoDock-GPU AutoDock_Vina's AutoDock_Vina -Autoconf-archive's -Autoconf-archive BBMap's BBMap BCFtools's BCFtools -BCL-Convert's -BCL-Convert BEAST's BEAST BEDOPS's @@ -134,8 +128,6 @@ BayeScan's BayeScan BayesAss's BayesAss -Bazel's -Bazel Beagle's Beagle BiG-SCAPE's @@ -153,8 +145,6 @@ Bismark's Bismark Bison's Bison -BlenderPy's -BlenderPy Boost's Boost Bourne @@ -162,17 +152,11 @@ Bowtie's Bowtie Bowtie2's Bowtie2 -Bpipe's -Bpipe Bracken's Bracken -BreakSeq2's -BreakSeq2 Broadwell Bruijn CAE -CCL's -CCL CD-HIT's CD-HIT CDO's @@ -210,19 +194,13 @@ CRAMINO's CRAMINO CRI CRLF -CTPL's -CTPL CUDA's CUDA -CUnit's -CUnit CWL Canu's Canu CapnProto's CapnProto -Catch2's -Catch2 CellRanger's CellRanger CentOS @@ -250,8 +228,6 @@ Clustal-Omega ClustalW2's ClustalW2 Conda -Corset's -Corset CoverM's CoverM CppUnit's @@ -278,8 +254,6 @@ DFT-D4's DFT-D4 DIAMOND's DIAMOND -DISCOVARdenovo's -DISCOVARdenovo DOI DRAM's DRAM @@ -288,8 +262,6 @@ DTNs DTN's DTN DVS -DaliLite's -DaliLite Dask DeconSeq's DeconSeq @@ -319,8 +291,6 @@ EMAN2's EMAN2 EMBOSS's EMBOSS -ENMTML's -ENMTML EOL ESMF's ESMF @@ -343,10 +313,6 @@ ExaML's ExaML ExpansionHunter's ExpansionHunter -Extrae's -Extrae -FALCON's -FALCON FASTA FASTQ FASTX-Toolkit's @@ -368,8 +334,6 @@ FIGARO FLTK's FLTK FTE -FTGL's -FTGL FastANI's FastANI FastME's @@ -423,10 +387,6 @@ GEMMA GEOS's GEOS GFortran -GLM's -GLM -GLPK's -GLPK GLib's GLib GMAP-GSNAP's @@ -436,8 +396,6 @@ GMP GNU GOLD's GOLD -GObject-Introspection's -GObject-Introspection GPAW's GPAW GPFS's @@ -510,8 +468,6 @@ HMMER's 
HMMER HMMER2's HMMER2 -HOPS's -HOPS HPCs HPC HTSeq's @@ -536,13 +492,9 @@ IDBA-UD's IDBA-UD IGV's IGV -IMPUTE's -IMPUTE IOBUF IQ-TREE's IQ-TREE -IQmol's -IQmol IRkernel's IRkernel ISA-L's @@ -554,15 +506,11 @@ ImageMagick Infernal's Infernal InfiniBand -Inspector's -Inspector InterProScan's InterProScan JAGS's JAGS JSON -JUnit's -JUnit JasPer's JasPer Java's @@ -601,8 +549,6 @@ Kraken2's Kraken2 KronaTools's KronaTools -KyotoCabinet's -KyotoCabinet LAME's LAME LAMMPS's @@ -650,12 +596,8 @@ M4's M4 MAFFT's MAFFT -MAGMA's -MAGMA MAKER's MAKER -MATIO's -MATIO MATLAB's MATLAB MBIE's @@ -674,34 +616,24 @@ METIS MKL MMseqs2's MMseqs2 -MODFLOW's -MODFLOW MPFR's MPFR MPI -MSMC's -MSMC MUMPS's MUMPS MUMmer's MUMmer MUSCLE's MUSCLE -MUST's -MUST MWLR MaSuRCA's MaSuRCA MacOS -Magma's -Magma Mahuika's Mahuika Mamba's Mamba Manaaki -MarkerMiner's -MarkerMiner Marsden Mash's Mash @@ -731,8 +663,6 @@ MetaPhlAn2's MetaPhlAn2 MetaSV's MetaSV -Metashape's -Metashape Metaxa2's Metaxa2 MiMiC's @@ -758,8 +688,6 @@ ModDotPlot's ModDotPlot ModelTest-NG's ModelTest-NG -Molcas's -Molcas Molpro's Molpro Mono's @@ -824,8 +752,6 @@ NewHybrids's NewHybrids Newton-X's Newton-X -NextGenMap's -NextGenMap NextPolish2's NextPolish2 Nextflow's @@ -842,8 +768,6 @@ Nsight-Systems Nvidia OBITools's OBITools -OMA's -OMA OMP OOM OPARI2's @@ -868,8 +792,6 @@ OpenBabel's OpenBabel OpenCV's OpenCV -OpenFAST's -OpenFAST OpenFOAM's OpenFOAM OpenGL @@ -887,14 +809,8 @@ OpenSeesPy's OpenSeesPy OpenSlide's OpenSlide -OrfM's -OrfM -OrthoFiller's -OrthoFiller OrthoFinder's OrthoFinder -OrthoMCL's -OrthoMCL Otago PALEOMIX's PALEOMIX @@ -907,17 +823,11 @@ PCRE's PCRE PCRE2's PCRE2 -PDT's -PDT PEAR's PEAR -PEST++'s -PEST++ PETSc's PETSc PFR -PHASIUS's -PHASIUS PLINK's PLINK PLUMED's @@ -942,8 +852,6 @@ Parallel's Parallel ParallelIO's ParallelIO -Peregrine's -Peregrine Perl's Perl PhyML's @@ -969,8 +877,6 @@ ProtHint Proteinortho's Proteinortho PuTTY -PyOpenGL's -PyOpenGL PyQt's PyQt PyTorch's @@ -1023,8 +929,6 @@ 
RMBlast's RMBlast RNAmmer's RNAmmer -ROCm's -ROCm ROOT's ROOT RSEM's @@ -1097,12 +1001,8 @@ SMRT-Link's SMRT-Link SNVoter-NanoMethPhase's SNVoter-NanoMethPhase -SOCI's -SOCI SPAdes's SPAdes -SPIDER's -SPIDER SQLite's SQLite SSAHA2's @@ -1112,8 +1012,6 @@ SSHFS SSIF STAR's STAR -STAR-Fusion's -STAR-Fusion SUNDIALS's SUNDIALS SURVIVOR's @@ -1203,8 +1101,6 @@ TensorFlow's TensorFlow TensorRT's TensorRT -Theano's -Theano Tk's Tk TransDecoder's @@ -1264,8 +1160,6 @@ VPN VSCode VSEARCH's VSEARCH -VTK's -VTK VTune's VTune Valgrind's @@ -1284,8 +1178,6 @@ VirHostMatcher's VirHostMatcher VirSorter's VirSorter -VirtualGL's -VirtualGL WAAFLE's WAAFLE WCRP @@ -1301,12 +1193,6 @@ Winnowmap Wise2's Wise2 XC -XHMM's -XHMM -XMDS2's -XMDS2 -XSD's -XSD XVFB XZ's XZ @@ -1387,8 +1273,6 @@ biomolecules biomolecule breseq's breseq -bsddb3's -bsddb3 bzip2's bzip2 c-ares's @@ -1436,14 +1320,10 @@ cutadapt's cutadapt cuteSV's cuteSV -cwltool's -cwltool cyvcf2's cyvcf2 dadi's dadi -dammit's -dammit datasets's datasets dataset @@ -1470,8 +1350,6 @@ easi's easi ecCodes's ecCodes -ectyper's -ectyper edlib's edlib eggnog-mapper's @@ -1497,8 +1375,6 @@ fastp's fastp fastq-tools's fastq-tools -fcGENE's -fcGENE fgbio's fgbio filesets @@ -1506,16 +1382,12 @@ fileset filesystems filesystem findable -fineRADstructure's -fineRADstructure fineSTRUCTURE's fineSTRUCTURE flatbuffers's flatbuffers flex's flex -fmlrc's -fmlrc fmt's fmt fontconfig's @@ -1528,14 +1400,10 @@ freetype's freetype funcx-endpoint's funcx-endpoint -fxtract's -fxtract g2clib's g2clib g2lib's g2lib -ga4gh's -ga4gh gcloud's gcloud geany's @@ -1565,8 +1433,6 @@ git's git glob globbing -globus-automate-client's -globus-automate-client globus-compute-endpoint's globus-compute-endpoint gmsh's @@ -1581,29 +1447,19 @@ googletest's googletest gperf's gperf -grive2's -grive2 -gsort's -gsort h5pp's h5pp haplocheck's haplocheck hapū -help2man's -help2man hifiasm's hifiasm hooks's hooks -hunspell's -hunspell hwloc's hwloc hyperthreaded 
hyperthreading -hypothesis's -hypothesis icc's icc iccifort's @@ -1635,8 +1491,6 @@ iompi's iompi ipyrad's ipyrad -ispc's -ispc iwi jbigkit's jbigkit @@ -1722,8 +1576,6 @@ libspatialite's libspatialite libtool's libtool -libunistring's -libunistring libunwind's libunwind libvdwxc's @@ -1742,6 +1594,8 @@ lighttpd's lighttpd likwid's likwid +linkers +linker localhost lockdown logs's @@ -1763,8 +1617,6 @@ mapDamage matlab-proxy's matlab-proxy md -meRanTK's -meRanTK medaka's medaka megalodon's @@ -1777,8 +1629,6 @@ miRDeep2's miRDeep2 microarchitecture mihi -mimalloc's -mimalloc miniBUSCO's miniBUSCO miniasm's @@ -1787,8 +1637,6 @@ minimap2's minimap2 miniprot's miniprot -mlpack's -mlpack modbam2bed's modbam2bed modkit's @@ -1797,8 +1645,6 @@ mosdepth's mosdepth mpcci's mpcci -mpifileutils's -mpifileutils muParser's muParser multithread @@ -1868,8 +1714,6 @@ parallel-fastq-dump's parallel-fastq-dump parallelisation parallelise -parasail's -parasail patchelf's patchelf pauvre's @@ -1934,18 +1778,12 @@ pycoQC's pycoQC pymol-open-source's pymol-open-source -pyspoa's -pyspoa qcat's qcat -rDock's -rDock randfold's randfold rasusa's rasusa -razers3's -razers3 rclone's rclone re2c's @@ -1957,22 +1795,14 @@ repos repo reproducibility requeued -rkcommon's -rkcommon rnaQUAST's rnaQUAST roadmap rollout rsync runtime -rust-fmlrc's -rust-fmlrc samblaster's samblaster -samclip's -samclip -savvy's -savvy sbt's sbt sc-RNA's @@ -1995,14 +1825,10 @@ skani's skani slow5tools's slow5tools -smafa's -smafa smoove's smoove snakemake's snakemake -snaphu's -snaphu snappy's snappy snp-sites's @@ -2038,8 +1864,6 @@ supercomputings supercomputing swarm's swarm -swissknife's -swissknife symlink tRNAscan-SE's tRNAscan-SE diff --git a/docs/assets/glossary/snippets.md b/docs/assets/glossary/snippets.md index 0373c8485..6258d1f58 100644 --- a/docs/assets/glossary/snippets.md +++ b/docs/assets/glossary/snippets.md @@ -25,8 +25,6 @@ annotate genetic variants detected from diverse genomes . 
annotate genetic variants detected from diverse genomes . *[ANSYS's]: A bundle of computer-aided engineering software including Fluent and CFX. *[ANSYS]: A bundle of computer-aided engineering software including Fluent and CFX. -*[ANTLR's]: ANother Tool for Language Recognition -*[ANTLR]: ANother Tool for Language Recognition *[ANTs's]: ANTs extracts information from complex datasets that include imaging. ANTs is useful for managing, interpreting and visualizing multidimensional data. *[ANTs]: ANTs extracts information from complex datasets that include imaging. ANTs is useful for managing, @@ -133,16 +131,10 @@ parallelizable LGA by processing ligand-receptor poses in parallel over multiple compute units. *[AutoDock_Vina's]: AutoDock Vina is an open-source program for doing molecular docking. *[AutoDock_Vina]: AutoDock Vina is an open-source program for doing molecular docking. -*[Autoconf-archive's]: A collection of more than 500 macros for GNU Autoconf -*[Autoconf-archive]: A collection of more than 500 macros for GNU Autoconf *[BBMap's]: BBMap short read aligner, and other bioinformatic tools. *[BBMap]: BBMap short read aligner, and other bioinformatic tools. *[BCFtools's]: Manipulate variant calls in the Variant Call Format (VCF) and its binary counterpart BCF. *[BCFtools]: Manipulate variant calls in the Variant Call Format (VCF) and its binary counterpart BCF. -*[BCL-Convert's]: Converts per cycle binary data output by Illumina sequencers containing basecall -files and quality scores to per read FASTQ files -*[BCL-Convert]: Converts per cycle binary data output by Illumina sequencers containing basecall -files and quality scores to per read FASTQ files *[BEAST's]: Bayesian MCMC phylogenetic analysis of molecular sequences for reconstructing phylogenies and testing evolutionary hypotheses. *[BEAST]: Bayesian MCMC phylogenetic analysis of molecular sequences for reconstructing @@ -187,9 +179,9 @@ BLAS-like dense linear algebra libraries. 
and the BOLT-REML algorithm for variance components analysis *[BOLT-LMM]: The BOLT-LMM algorithm for mixed model association testing, and the BOLT-REML algorithm for variance components analysis -*[BRAKER's]: BRAKER is a pipeline for fully automated prediction of protein coding genes with GeneMark-ES/ET +*[BRAKER's]: Pipeline for fully automated prediction of protein coding genes with GeneMark-ES/ET and AUGUSTUS in novel eukaryotic genomes. -*[BRAKER]: BRAKER is a pipeline for fully automated prediction of protein coding genes with GeneMark-ES/ET +*[BRAKER]: Pipeline for fully automated prediction of protein coding genes with GeneMark-ES/ET and AUGUSTUS in novel eukaryotic genomes. *[BUSCO's]: Assessing genome assembly and annotation completeness with Benchmarking Universal Single-Copy Orthologs *[BUSCO]: Assessing genome assembly and annotation completeness with Benchmarking Universal Single-Copy Orthologs @@ -211,10 +203,6 @@ and the BOLT-REML algorithm for variance components analysis using differences in allele frequencies between populations. *[BayesAss's]: Program for inference of recent immigration rates between populations using unlinked multilocus genotypes *[BayesAss]: Program for inference of recent immigration rates between populations using unlinked multilocus genotypes -*[Bazel's]: Bazel is a build tool that builds code quickly and reliably. -It is used to build the majority of Google's software. -*[Bazel]: Bazel is a build tool that builds code quickly and reliably. -It is used to build the majority of Google's software. *[Beagle's]: Package for phasing genotypes and for imputing ungenotyped markers. *[Beagle]: Package for phasing genotypes and for imputing ungenotyped markers. *[BiG-SCAPE's]: Constructs sequence similarity networks of Biosynthetic Gene Clusters (BGCs) and groups them into Gene Cluster Families (GCFs). 
@@ -247,12 +235,6 @@ determine cytosine methylation states into a deterministic LR or generalized LR (GLR) parser employing LALR(1) parser tables. *[Bison]: Bison is a general-purpose parser generator that converts an annotated context-free grammar into a deterministic LR or generalized LR (GLR) parser employing LALR(1) parser tables. -*[BlenderPy's]: Blender provides a pipeline for 3D modeling, rigging, animation, simulation, rendering, -compositing, motion tracking, video editing and 2D animation. -This particular build of Blender provides a Python package 'bpy' rather than the stand-alone application. -*[BlenderPy]: Blender provides a pipeline for 3D modeling, rigging, animation, simulation, rendering, -compositing, motion tracking, video editing and 2D animation. -This particular build of Blender provides a Python package 'bpy' rather than the stand-alone application. *[Boost's]: Boost provides free peer-reviewed portable C++ source libraries. *[Boost]: Boost provides free peer-reviewed portable C++ source libraries. *[Bowtie's]: Ultrafast, memory-efficient short read aligner. @@ -261,16 +243,10 @@ This particular build of Blender provides a Python package 'bpy' rather than the sequencing reads to long reference sequences. *[Bowtie2]: Ultrafast and memory-efficient tool for aligning sequencing reads to long reference sequences. -*[Bpipe's]: A platform for running big bioinformatics jobs that consist of a series of processing stages -*[Bpipe]: A platform for running big bioinformatics jobs that consist of a series of processing stages *[Bracken's]: Hghly accurate statistical method that computes the abundance of species in DNA sequences from a metagenomics sample. *[Bracken]: Hghly accurate statistical method that computes the abundance of species in DNA sequences from a metagenomics sample. 
-*[BreakSeq2's]: Nucleotide-resolution analysis of structural variants -*[BreakSeq2]: Nucleotide-resolution analysis of structural variants -*[CCL's]: Clozure CL (often called CCL for short) is a free Common Lisp implementation -*[CCL]: Clozure CL (often called CCL for short) is a free Common Lisp implementation *[CD-HIT's]: CD-HIT is a very widely used program for clustering and comparing protein or nucleotide sequences. *[CD-HIT]: CD-HIT is a very widely used program for clustering and @@ -319,8 +295,6 @@ coverage data in multiple samples and linkage data from paired end reads. *[CRABS]: Creating Reference databases for Amplicon-Based Sequencing. *[CRAMINO's]: A tool for quick quality assessment of cram and bam files, intended for long read sequencing *[CRAMINO]: A tool for quick quality assessment of cram and bam files, intended for long read sequencing -*[CTPL's]: C++ Thread Pool Library -*[CTPL]: C++ Thread Pool Library *[CUDA's]: CUDA (formerly Compute Unified Device Architecture) is a parallel computing platform and programming model created by NVIDIA and implemented by the graphics processing units (GPUs) that they produce. CUDA gives developers access @@ -329,16 +303,10 @@ coverage data in multiple samples and linkage data from paired end reads. computing platform and programming model created by NVIDIA and implemented by the graphics processing units (GPUs) that they produce. CUDA gives developers access to the virtual instruction set and memory of the parallel computational elements in CUDA GPUs. -*[CUnit's]: Automated testing framework for C. -*[CUnit]: Automated testing framework for C. *[Canu's]: Sequence assembler designed for high-noise single-molecule sequencing. *[Canu]: Sequence assembler designed for high-noise single-molecule sequencing. *[CapnProto's]: Fast data interchange format and capability-based RPC system. *[CapnProto]: Fast data interchange format and capability-based RPC system. 
-*[Catch2's]: A modern, C++-native, header-only, test framework for unit-tests, TDD and BDD - - using C++11, C++14, C++17 and later (or C++03 on the Catch1.x branch) -*[Catch2]: A modern, C++-native, header-only, test framework for unit-tests, TDD and BDD - - using C++11, C++14, C++17 and later (or C++03 on the Catch1.x branch) *[CellRanger's]: Cell Ranger is a set of analysis pipelines that process Chromium single-cell RNA-seq output to align reads, generate gene-cell matrices and perform clustering and gene expression analysis. @@ -379,8 +347,6 @@ coverage data in multiple samples and linkage data from paired end reads. can be seen via viewing Cladograms or Phylograms *[ClustalW2's]: ClustalW2 is a general purpose multiple sequence alignment program for DNA or proteins. *[ClustalW2]: ClustalW2 is a general purpose multiple sequence alignment program for DNA or proteins. -*[Corset's]: Clusters contigs and counts reads from de novo assembled transcriptomes. -*[Corset]: Clusters contigs and counts reads from de novo assembled transcriptomes. *[CoverM's]: DNA read coverage and relative abundance calculator focused on metagenomics applications *[CoverM]: DNA read coverage and relative abundance calculator focused on metagenomics applications *[CppUnit's]: C++ port of the JUnit framework for unit testing. @@ -425,15 +391,9 @@ coverage data in multiple samples and linkage data from paired end reads. *[DFT-D4]: Generally Applicable Atomic-Charge Dependent London Dispersion Correction. *[DIAMOND's]: Sequence aligner for protein and translated DNA searches *[DIAMOND]: Sequence aligner for protein and translated DNA searches -*[DISCOVARdenovo's]: Assembler suitable for large genomes based on Illumina reads of length 250 or longer. -*[DISCOVARdenovo]: Assembler suitable for large genomes based on Illumina reads of length 250 or longer. *[DOI]: A unique identifier that identifies digital objects. 
The object may change physical locations, but the DOI assigned to that object will never change. *[DRAM's]: Tool for annotating metagenomic assembled genomes and VirSorter identified viral contigs.. *[DRAM]: Tool for annotating metagenomic assembled genomes and VirSorter identified viral contigs.. -*[DaliLite's]: Tool set for simulating/evaluating SVs, merging and comparing SVs within and among samples, - and includes various methods to reformat or summarize SVs. -*[DaliLite]: Tool set for simulating/evaluating SVs, merging and comparing SVs within and among samples, - and includes various methods to reformat or summarize SVs. *[DeconSeq's]: A tool that can be used to automatically detect and efficiently remove sequence contaminations from genomic and metagenomic datasets. *[DeconSeq]: A tool that can be used to automatically detect and efficiently remove sequence contaminations @@ -484,8 +444,6 @@ package has a built-in plotting script and supports multiple file formats and qu *[EMBOSS]: EMBOSS is 'The European Molecular Biology Open Software Suite'. EMBOSS is a free Open Source software analysis package specially developed for the needs of the molecular biology (e.g. EMBnet) user community. -*[ENMTML's]: R package for integrated construction of Ecological Niche Models. -*[ENMTML]: R package for integrated construction of Ecological Niche Models. *[ESMF's]: The Earth System Modeling Framework (ESMF) is software for building and coupling weather, climate, and related models. *[ESMF]: The Earth System Modeling Framework (ESMF) is software for building and coupling weather, @@ -520,10 +478,6 @@ Also condatains smetana, carveme and memote . *[ExaML]: Exascale Maximum Likelihood for phylogenetic inference using MPI. 
*[ExpansionHunter's]: Tool for estimating repeat sizes *[ExpansionHunter]: Tool for estimating repeat sizes -*[Extrae's]: Extrae is capable of instrumenting applications based on MPI, OpenMP, pthreads, CUDA1, OpenCL1, and StarSs1 using different instrumentation approaches -*[Extrae]: Extrae is capable of instrumenting applications based on MPI, OpenMP, pthreads, CUDA1, OpenCL1, and StarSs1 using different instrumentation approaches -*[FALCON's]: Falcon: a set of tools for fast aligning long reads for consensus and assembly -*[FALCON]: Falcon: a set of tools for fast aligning long reads for consensus and assembly *[FASTX-Toolkit's]: Tools for Short-Reads FASTA/FASTQ files preprocessing. *[FASTX-Toolkit]: Tools for Short-Reads FASTA/FASTQ files preprocessing. *[FCM's]: FCM Build - A powerful build system for modern Fortran software applications. FCM Version Control - Wrappers to the Subversion version control system, usage conventions and processes for scientific software development. @@ -550,10 +504,6 @@ in one or more dimensions, of arbitrary input size, and of both real and complex *[FLTK]: FLTK is a cross-platform C++ GUI toolkit for UNIX/Linux (X11), Microsoft Windows, and MacOS X. FLTK provides modern GUI functionality without the bloat and supports 3D graphics via OpenGL and its built-in GLUT emulation. -*[FTGL's]: FTGL is a free open source library to enable developers to use arbitrary -fonts in their OpenGL (www.opengl.org) applications. -*[FTGL]: FTGL is a free open source library to enable developers to use arbitrary -fonts in their OpenGL (www.opengl.org) applications. *[FastANI's]: Tool for fast alignment-free computation of whole-genome Average Nucleotide Identity (ANI). 
*[FastANI]: Tool for fast alignment-free computation of @@ -638,12 +588,6 @@ FreeSurfer contains a fully automatic structural imaging stream for processing c *[GEMMA]: Genome-wide Efficient Mixed Model Association *[GEOS's]: GEOS (Geometry Engine - Open Source) is a C++ port of the Java Topology Suite (JTS) *[GEOS]: GEOS (Geometry Engine - Open Source) is a C++ port of the Java Topology Suite (JTS) -*[GLM's]: OpenGL Mathematics (GLM) is a header only C++ mathematics library for graphics software based on - the OpenGL Shading Language (GLSL) specifications. -*[GLM]: OpenGL Mathematics (GLM) is a header only C++ mathematics library for graphics software based on - the OpenGL Shading Language (GLSL) specifications. -*[GLPK's]: GNU Linear Programming Kit is intended for solving large-scale linear programming (LP), mixed integer programming (MIP), and other related problems. -*[GLPK]: GNU Linear Programming Kit is intended for solving large-scale linear programming (LP), mixed integer programming (MIP), and other related problems. *[GLib's]: GLib is one of the base libraries of the GTK+ project *[GLib]: GLib is one of the base libraries of the GTK+ project *[GMAP-GSNAP's]: GMAP: A Genomic Mapping and Alignment Program for mRNA and EST Sequences @@ -656,16 +600,6 @@ operating on signed integers, rational numbers, and floating point numbers. operating on signed integers, rational numbers, and floating point numbers. *[GOLD's]: A genetic algorithm for docking flexible ligands into protein binding sites *[GOLD]: A genetic algorithm for docking flexible ligands into protein binding sites -*[GObject-Introspection's]: GObject introspection is a middleware layer between C libraries - (using GObject) and language bindings. The C library can be scanned at - compile time and generate a metadata file, in addition to the actual - native C library. Then at runtime, language bindings can read this - metadata and automatically provide bindings to call into the C library. 
-*[GObject-Introspection]: GObject introspection is a middleware layer between C libraries - (using GObject) and language bindings. The C library can be scanned at - compile time and generate a metadata file, in addition to the actual - native C library. Then at runtime, language bindings can read this - metadata and automatically provide bindings to call into the C library. *[GPAW's]: GPAW is a density-functional theory (DFT) Python code based on the projector-augmented wave (PAW) method and the atomic simulation environment (ASE). It uses real-space uniform grids and multigrid methods or atom-centered basis-functions. @@ -711,11 +645,9 @@ This is a GPU enabled build, containing both MPI and threadMPI binaries. *[GTS's]: GTS stands for the GNU Triangulated Surface Library. It is an Open Source Free Software Library intended to provide a set of useful functions to deal with 3D surfaces meshed with interconnected triangles. - *[GTS]: GTS stands for the GNU Triangulated Surface Library. It is an Open Source Free Software Library intended to provide a set of useful functions to deal with 3D surfaces meshed with interconnected triangles. - *[GUIs]: A digital interface in which a user interacts with graphical components such as icons, buttons, and menus. *[GUI]: A digital interface in which a user interacts with graphical components such as icons, buttons, and menus. *[GUSHR's]: @@ -808,8 +740,6 @@ Assembly-free construction of UTRs from short read RNA-Seq data on the basis of because of the strength of its underlying mathematical models. In the past, this strength came at significant computational expense, but in the new HMMER3 project, HMMER is now essentially as fast as BLAST. -*[HOPS's]: Pipeline which focuses on screening MALT data for the presence of a user-specified list of target species. -*[HOPS]: Pipeline which focuses on screening MALT data for the presence of a user-specified list of target species. *[HPCs]: Like a regular computer, but larger. 
Primarily used for heating data centers. *[HPC]: Like a regular computer, but larger. Primarily used for heating data centers. *[HTSeq's]: HTSeq is a Python library to facilitate processing and analysis @@ -848,8 +778,6 @@ Assembly-free construction of UTRs from short read RNA-Seq data on the basis of *[IGV]: The Integrative Genomics Viewer (IGV) is a high-performance visualization tool for interactive exploration of large, integrated genomic datasets. It supports a wide variety of data types, including array-based and next-generation sequence data -*[IMPUTE's]: Genotype imputation and haplotype phasing. -*[IMPUTE]: Genotype imputation and haplotype phasing. *[IQ-TREE's]: Efficient phylogenomic software by maximum likelihood *[IQ-TREE]: Efficient phylogenomic software by maximum likelihood *[IRkernel's]: R packages for providing R kernel for Jupyter. @@ -862,18 +790,12 @@ Assembly-free construction of UTRs from short read RNA-Seq data on the basis of for RNA structure and sequence similarities. *[Infernal]: Infernal ('INFERence of RNA ALignment') is for searching DNA sequence databases for RNA structure and sequence similarities. -*[Inspector's]: Intel Inspector XE is an easy to use memory error checker and thread checker for serial - and parallel applications -*[Inspector]: Intel Inspector XE is an easy to use memory error checker and thread checker for serial - and parallel applications *[InterProScan's]: Sequence analysis application (nucleotide and protein sequences) that combines different protein signature recognition methods into one resource. *[InterProScan]: Sequence analysis application (nucleotide and protein sequences) that combines different protein signature recognition methods into one resource. *[JAGS's]: Just Another Gibbs Sampler - a program for the statistical analysis of Bayesian hierarchical models by Markov Chain Monte Carlo. 
*[JAGS]: Just Another Gibbs Sampler - a program for the statistical analysis of Bayesian hierarchical models by Markov Chain Monte Carlo. -*[JUnit's]: A programmer-oriented testing framework for Java. -*[JUnit]: A programmer-oriented testing framework for Java. *[JasPer's]: The JasPer Project is an open-source initiative to provide a free software-based reference implementation of the codec specified in the JPEG-2000 Part-1 standard. *[JasPer]: The JasPer Project is an open-source initiative to provide a free @@ -894,6 +816,12 @@ for RNA structure and sequence similarities. *[JsonCpp]: JsonCpp is a C++ library that allows manipulating JSON values, including serialization and deserialization to and from strings. It can also preserve existing comment in unserialization/serialization steps, making it a convenient format to store user input files. +*[Julia's]: A high-level, high-performance dynamic language for technical computing. + +This version was compiled from source with USE_INTEL_JITEVENTS=1 to enable profiling with VTune. +*[Julia]: A high-level, high-performance dynamic language for technical computing. + +This version was compiled from source with USE_INTEL_JITEVENTS=1 to enable profiling with VTune. *[JupyterLab's]: An extensible environment for interactive and reproducible computing, based on the Jupyter Notebook and Architecture. *[JupyterLab]: An extensible environment for interactive and reproducible computing, based on the Jupyter Notebook and Architecture. *[KAT's]: The K-mer Analysis Toolkit (KAT) contains a number of tools that analyse and compare K-mer spectra. @@ -932,8 +860,6 @@ sequencing reads from metagenomic whole genome sequencing experiments several Bioinformatics tools as well as from text and XML files. *[KronaTools]: Krona Tools is a set of scripts to create Krona charts from several Bioinformatics tools as well as from text and XML files. -*[KyotoCabinet's]: Library of routines for managing a database. 
-*[KyotoCabinet]: Library of routines for managing a database. *[LAME's]: LAME is a high quality MPEG Audio Layer III (MP3) encoder licensed under the LGPL. *[LAME]: LAME is a high quality MPEG Audio Layer III (MP3) encoder licensed under the LGPL. *[LAMMPS's]: LAMMPS is a classical molecular dynamics code, and an acronym @@ -1018,12 +944,8 @@ functionality. GNU M4 also has built-in functions for including files, running shell commands, doing arithmetic, etc. *[MAFFT's]: Multiple sequence alignment program offering a range of methods. *[MAFFT]: Multiple sequence alignment program offering a range of methods. -*[MAGMA's]: Tool for gene analysis and generalized gene-set analysis of GWAS data. -*[MAGMA]: Tool for gene analysis and generalized gene-set analysis of GWAS data. *[MAKER's]: Genome annotation pipeline *[MAKER]: Genome annotation pipeline -*[MATIO's]: matio is an C library for reading and writing Matlab MAT files. -*[MATIO]: matio is an C library for reading and writing Matlab MAT files. *[MATLAB's]: A high-level language and interactive environment for numerical computing. *[MATLAB]: A high-level language and interactive environment for numerical computing. *[MCL's]: The MCL algorithm is short for the Markov Cluster Algorithm, a fast @@ -1048,17 +970,11 @@ and producing fill reducing orderings for sparse matrices. The algorithms implem multilevel recursive-bisection, multilevel k-way, and multi-constraint partitioning schemes. *[MMseqs2's]: MMseqs2: ultra fast and sensitive search and clustering suite *[MMseqs2]: MMseqs2: ultra fast and sensitive search and clustering suite -*[MODFLOW's]: MODFLOW is the U.S. Geological Survey modular finite-difference flow model, which is a computer code that solves the groundwater flow equation. The program is used by hydrogeologists to simulate the flow of groundwater through aquifers. - -*[MODFLOW]: MODFLOW is the U.S. 
Geological Survey modular finite-difference flow model, which is a computer code that solves the groundwater flow equation. The program is used by hydrogeologists to simulate the flow of groundwater through aquifers. - *[MPFR's]: The MPFR library is a C library for multiple-precision floating-point computations with correct rounding. *[MPFR]: The MPFR library is a C library for multiple-precision floating-point computations with correct rounding. *[MPI]: A standardised message-passing standard designed to function on parallel computing architectures. -*[MSMC's]: Multiple Sequentially Markovian Coalescent, infers population size and gene flow from multiple genome sequences -*[MSMC]: Multiple Sequentially Markovian Coalescent, infers population size and gene flow from multiple genome sequences *[MUMPS's]: A parallel sparse direct solver *[MUMPS]: A parallel sparse direct solver *[MUMmer's]: MUMmer is a system for rapidly aligning entire genomes, @@ -1071,8 +987,6 @@ or some compromise between the two. *[MUSCLE]: MUSCLE is a program for creating multiple alignments of amino acid or nucleotide sequences. A range of options is provided that give you the choice of optimizing accuracy, speed, or some compromise between the two. -*[MUST's]: MUST detects usage errors of the Message Passing Interface (MPI) and reports them to the user. -*[MUST]: MUST detects usage errors of the Message Passing Interface (MPI) and reports them to the user. *[MaSuRCA's]: MaSuRCA is whole genome assembly software. It combines the efficiency of the de Bruijn graph and Overlap-Layout-Consensus (OLC) approaches. MaSuRCA can assemble data sets containing only short reads from Illumina sequencing or a mixture of short reads and long reads @@ -1081,16 +995,8 @@ or some compromise between the two. and Overlap-Layout-Consensus (OLC) approaches. 
MaSuRCA can assemble data sets containing only short reads from Illumina sequencing or a mixture of short reads and long reads (Sanger, 454, Pacbio and Nanopore). -*[Magma's]: Magma is a large, well-supported software package designed for computations in algebra, number theory, algebraic geometry and algebraic combinatorics. It provides a mathematically rigorous environment for defining and working with structures such as groups, rings, fields, modules, algebras, schemes, curves, graphs, designs, codes and many others. Magma also supports a number of databases designed to aid computational research in those areas of mathematics which are algebraic in nature. - -whatis([==[Homepage: http://magma.maths.usyd.edu.au/magma/ -*[Magma]: Magma is a large, well-supported software package designed for computations in algebra, number theory, algebraic geometry and algebraic combinatorics. It provides a mathematically rigorous environment for defining and working with structures such as groups, rings, fields, modules, algebras, schemes, curves, graphs, designs, codes and many others. Magma also supports a number of databases designed to aid computational research in those areas of mathematics which are algebraic in nature. - -whatis([==[Homepage: http://magma.maths.usyd.edu.au/magma/ *[Mamba's]: Mamba is a fast, robust, and cross-platform package manager. *[Mamba]: Mamba is a fast, robust, and cross-platform package manager. -*[MarkerMiner's]: Workflow for effective discovery of SCN loci in flowering plants angiosperms -*[MarkerMiner]: Workflow for effective discovery of SCN loci in flowering plants angiosperms *[Mash's]: Fast genome and metagenome distance estimation using MinHash *[Mash]: Fast genome and metagenome distance estimation using MinHash *[MashMap's]: Implements a fast and approximate algorithm for computing local alignment boundaries between long DNA sequences @@ -1213,18 +1119,6 @@ libmmgs and libmmg3d libraries. 
*[ModDotPlot]: Novel dot plot visualization tool used to view tandem repeats *[ModelTest-NG's]: Tool for selecting the best-fit model of evolution for DNA and protein alignments. *[ModelTest-NG]: Tool for selecting the best-fit model of evolution for DNA and protein alignments. -*[Molcas's]: Molcas is an ab initio quantum chemistry software package -developed by scientists to be used by scientists. The basic philosophy is is to -be able to treat general electronic structures for molecules consisting of -atoms from most of the periodic table. As such, the primary focus of the -package is on multiconfigurational methods with applications typically -connected to the treatment of highly degenerate states. -*[Molcas]: Molcas is an ab initio quantum chemistry software package -developed by scientists to be used by scientists. The basic philosophy is is to -be able to treat general electronic structures for molecules consisting of -atoms from most of the periodic table. As such, the primary focus of the -package is on multiconfigurational methods with applications typically -connected to the treatment of highly degenerate states. *[Molpro's]: Molpro is a complete system of ab initio programs for molecular electronic structure calculations. *[Molpro]: Molpro is a complete system of ab initio programs for molecular electronic structure calculations. *[Mono's]: An open source, cross-platform, implementation of C# and the CLR that is @@ -1307,12 +1201,6 @@ individuals fall into each of a set of user-defined hybrid categories. individuals fall into each of a set of user-defined hybrid categories. *[Newton-X's]: NX is a general-purpose program package for simulating the dynamics of electronically excited molecules and molecular assemblies. *[Newton-X]: NX is a general-purpose program package for simulating the dynamics of electronically excited molecules and molecular assemblies. 
-*[NextGenMap's]: NextGenMap is a flexible highly sensitive short read mapping tool that - handles much higher mismatch rates than comparable algorithms while still outperforming - them in terms of runtime. -*[NextGenMap]: NextGenMap is a flexible highly sensitive short read mapping tool that - handles much higher mismatch rates than comparable algorithms while still outperforming - them in terms of runtime. *[NextPolish2's]: a fast and efficient genome polishing tool for long-read assembly *[NextPolish2]: a fast and efficient genome polishing tool for long-read assembly *[Nextflow's]: Nextflow is a reactive workflow framework and a programming DSL @@ -1335,10 +1223,6 @@ application’s algorithm, help you select the largest opportunities to optimize any quantity of CPUs and GPUs *[OBITools's]: Manipulate various data and sequence files. *[OBITools]: Manipulate various data and sequence files. -*[OMA's]: Orthologous MAtrix project is a method and database for the inference - of orthologs among complete genomes -*[OMA]: Orthologous MAtrix project is a method and database for the inference - of orthologs among complete genomes *[OPARI2's]: source-to-source instrumentation tool for OpenMP and hybrid codes. It surrounds OpenMP directives and runtime library calls with calls to the POMP2 measurement interface. *[OPARI2]: source-to-source instrumentation tool for OpenMP and hybrid codes. @@ -1391,8 +1275,6 @@ interactively, enabling new insights into data exploration. and machine learning software library. OpenCV was built to provide a common infrastructure for computer vision applications and to accelerate the use of machine perception in the commercial products. -*[OpenFAST's]: Wind turbine multiphysics simulation tool -*[OpenFAST]: Wind turbine multiphysics simulation tool *[OpenFOAM's]: OpenFOAM is a free, open source CFD software package. 
OpenFOAM has an extensive range of features to solve anything from complex fluid flows involving chemical reactions, turbulence and heat transfer, @@ -1419,14 +1301,8 @@ interactively, enabling new insights into data exploration. read whole-slide images (also known as virtual slides). *[OpenSlide]: OpenSlide is a C library that provides a simple interface to read whole-slide images (also known as virtual slides). -*[OrfM's]: A simple and not slow open reading frame (ORF) caller. -*[OrfM]: A simple and not slow open reading frame (ORF) caller. -*[OrthoFiller's]: Identifies missing annotations for evolutionarily conserved genes. -*[OrthoFiller]: Identifies missing annotations for evolutionarily conserved genes. *[OrthoFinder's]: OrthoFinder is a fast, accurate and comprehensive platform for comparative genomics *[OrthoFinder]: OrthoFinder is a fast, accurate and comprehensive platform for comparative genomics -*[OrthoMCL's]: Genome-scale algorithm for grouping orthologous protein sequences. -*[OrthoMCL]: Genome-scale algorithm for grouping orthologous protein sequences. *[PALEOMIX's]: pipelines and tools designed to aid the rapid processing of High-Throughput Sequencing (HTS) data. *[PALEOMIX]: pipelines and tools designed to aid the rapid processing of High-Throughput Sequencing (HTS) data. *[PAML's]: PAML is a package of programs for phylogenetic @@ -1455,30 +1331,12 @@ read whole-slide images (also known as virtual slides). The PCRE library is a set of functions that implement regular expression pattern matching using the same syntax and semantics as Perl 5. -*[PDT's]: Program Database Toolkit (PDT) is a framework for analyzing source code written in several programming languages and for making rich program - knowledge accessible to developers of static and dynamic analysis tools. 
-*[PDT]: Program Database Toolkit (PDT) is a framework for analyzing source code written in several programming languages and for making rich program - knowledge accessible to developers of static and dynamic analysis tools. *[PEAR's]: Memory-efficient,fully parallelized and highly accurate pair-end read merger. *[PEAR]: Memory-efficient,fully parallelized and highly accurate pair-end read merger. -*[PEST++'s]: PEST++ is a software suite aimed at supporting - complex numerical models in the decision-support context. - Much focus has been devoted to supporting environmental models - (groundwater, surface water, etc) but these tools are readily - applicable to any computer model. - -*[PEST++]: PEST++ is a software suite aimed at supporting - complex numerical models in the decision-support context. - Much focus has been devoted to supporting environmental models - (groundwater, surface water, etc) but these tools are readily - applicable to any computer model. - *[PETSc's]: PETSc, pronounced PET-see (the S is silent), is a suite of data structures and routines for the scalable (parallel) solution of scientific applications modeled by partial differential equations. *[PETSc]: PETSc, pronounced PET-see (the S is silent), is a suite of data structures and routines for the scalable (parallel) solution of scientific applications modeled by partial differential equations. -*[PHASIUS's]: A tool to visualize phase block structure from (many) BAM or CRAM files together with BED annotation -*[PHASIUS]: A tool to visualize phase block structure from (many) BAM or CRAM files together with BED annotation *[PLINK's]: PLINK is a free, open-source whole genome association analysis toolset, designed to perform a range of basic, large-scale analyses in a computationally efficient manner. The focus of PLINK is purely on analysis of genotype/phenotype data, so there is no support for @@ -1566,12 +1424,6 @@ Ray tracing using the OSPRay library is also supported. 
*[Parallel]: Build and execute shell commands in parallel *[ParallelIO's]: A high-level Parallel I/O Library for structured grid applications *[ParallelIO]: A high-level Parallel I/O Library for structured grid applications -*[Peregrine's]: Genome assembler for long reads (length > 10kb, accuracy > 99%). -Based on Sparse HIereachical MimiMizER (SHIMMER) for fast read-to-read overlaping - -*[Peregrine]: Genome assembler for long reads (length > 10kb, accuracy > 99%). -Based on Sparse HIereachical MimiMizER (SHIMMER) for fast read-to-read overlaping - *[Perl's]: Larry Wall's Practical Extraction and Report Language *[Perl]: Larry Wall's Practical Extraction and Report Language *[PhyML's]: Phylogenetic estimation using Maximum Likelihood @@ -1608,8 +1460,6 @@ Based on Sparse HIereachical MimiMizER (SHIMMER) for fast read-to-read overlapin reference protein sequences. *[Proteinortho's]: Proteinortho is a tool to detect orthologous genes within different species. *[Proteinortho]: Proteinortho is a tool to detect orthologous genes within different species. -*[PyOpenGL's]: PyOpenGL is the most common cross platform Python binding to OpenGL and related APIs. -*[PyOpenGL]: PyOpenGL is the most common cross platform Python binding to OpenGL and related APIs. *[PyQt's]: PyQt5 is a set of Python bindings for v5 of the Qt application framework from The Qt Company. This bundle includes PyQtWebEngine, a set of Python bindings for The Qt Company’s Qt WebEngine framework. *[PyQt]: PyQt5 is a set of Python bindings for v5 of the Qt application framework from The Qt Company. @@ -1692,8 +1542,6 @@ Support for cross_match-like complexity adjusted scoring. Cross_match is Phil Gr Support for cross_match-like masklevel filtering.. *[RNAmmer's]: consistent and rapid annotation of ribosomal RNA genes. *[RNAmmer]: consistent and rapid annotation of ribosomal RNA genes. 
-*[ROCm's]: Platform for GPU Enabled HPC and UltraScale Computing -*[ROCm]: Platform for GPU Enabled HPC and UltraScale Computing *[ROOT's]: The ROOT system provides a set of OO frameworks with all the functionality needed to handle and analyze large amounts of data in a very efficient way. *[ROOT]: The ROOT system provides a set of OO frameworks with all the functionality @@ -1805,14 +1653,8 @@ for SNP array and high coverage sequencing data. NanoMethPhase - Phase long reads and CpG methylations from Oxford Nanopore Technologies. *[SNVoter-NanoMethPhase]: SNVoter - A top up tool to enhance SNV calling from Nanopore sequencing data & NanoMethPhase - Phase long reads and CpG methylations from Oxford Nanopore Technologies. -*[SOCI's]: Database access library for C++ that makes the illusion of embedding SQL queries in the - regular C++ code, staying entirely within the Standard C++. -*[SOCI]: Database access library for C++ that makes the illusion of embedding SQL queries in the - regular C++ code, staying entirely within the Standard C++. 
*[SPAdes's]: Genome assembler for single-cell and isolates data sets *[SPAdes]: Genome assembler for single-cell and isolates data sets -*[SPIDER's]: System for Processing Image Data from Electron microscopy and Related fields -*[SPIDER]: System for Processing Image Data from Electron microscopy and Related fields *[SQLite's]: SQLite: SQL Database Engine in a C Library *[SQLite]: SQLite: SQL Database Engine in a C Library *[SSAHA2's]: Pairwise sequence alignment program designed for the efficient mapping of sequencing @@ -1822,8 +1664,6 @@ NanoMethPhase - Phase long reads and CpG methylations from Oxford Nanopore Techn *[SSH]: A network communication protocol that enables two computers to communicate *[STAR's]: Fast universal RNA-seq aligner *[STAR]: Fast universal RNA-seq aligner -*[STAR-Fusion's]: Processes the output generated by the STAR aligner to map junction reads and spanning reads to a reference annotation set -*[STAR-Fusion]: Processes the output generated by the STAR aligner to map junction reads and spanning reads to a reference annotation set *[SUNDIALS's]: SUNDIALS: SUite of Nonlinear and DIfferential/ALgebraic Equation Solvers *[SUNDIALS]: SUNDIALS: SUite of Nonlinear and DIfferential/ALgebraic Equation Solvers *[SURVIVOR's]: Tool set for simulating/evaluating SVs, merging and comparing SVs within and among samples, @@ -1952,10 +1792,6 @@ genes between related species and to accurately distinguish orthologs from paral *[TensorFlow]: An open-source software library for Machine Intelligence *[TensorRT's]: NVIDIA TensorRT is a platform for high-performance deep learning inference *[TensorRT]: NVIDIA TensorRT is a platform for high-performance deep learning inference -*[Theano's]: Theano is a Python library that allows you to define, optimize, -and evaluate mathematical expressions involving multi-dimensional arrays efficiently. 
-*[Theano]: Theano is a Python library that allows you to define, optimize, -and evaluate mathematical expressions involving multi-dimensional arrays efficiently. *[Tk's]: Tk is an open source, cross-platform widget toolchain that provides a library of basic elements for building a graphical user interface (GUI) in many different programming languages. *[Tk]: Tk is an open source, cross-platform widget toolchain that provides a library of basic elements for @@ -2073,16 +1909,6 @@ and sorting. It also supports FASTQ file analysis, filtering, conversion and mer Performs chimera detection, clustering, full-length and prefix dereplication, rereplication, masking, all-vs-all pairwise global alignment, exact and global alignment searching, shuffling, subsampling and sorting. It also supports FASTQ file analysis, filtering, conversion and merging of paired-end reads. -*[VTK's]: The Visualization Toolkit (VTK) is an open-source, freely available software system for - 3D computer graphics, image processing and visualization. VTK consists of a C++ class library and several - interpreted interface layers including Tcl/Tk, Java, and Python. VTK supports a wide variety of visualization - algorithms including: scalar, vector, tensor, texture, and volumetric methods; and advanced modeling techniques - such as: implicit modeling, polygon reduction, mesh smoothing, cutting, contouring, and Delaunay triangulation. -*[VTK]: The Visualization Toolkit (VTK) is an open-source, freely available software system for - 3D computer graphics, image processing and visualization. VTK consists of a C++ class library and several - interpreted interface layers including Tcl/Tk, Java, and Python. VTK supports a wide variety of visualization - algorithms including: scalar, vector, tensor, texture, and volumetric methods; and advanced modeling techniques - such as: implicit modeling, polygon reduction, mesh smoothing, cutting, contouring, and Delaunay triangulation. 
*[VTune's]: Intel VTune Amplifier XE is the premier performance profiler for C, C++, C#, Fortran, Assembly and Java. *[VTune]: Intel VTune Amplifier XE is the premier performance profiler for C, C++, C#, Fortran, @@ -2107,12 +1933,6 @@ stand-alone programs for the prediction and comparison of RNA secondary structur *[VirHostMatcher]: Tools for computing various oligonucleotide frequency (ONF) based distance/dissimialrity measures. *[VirSorter's]: VirSorter: mining viral signal from microbial genomic data. *[VirSorter]: VirSorter: mining viral signal from microbial genomic data. -*[VirtualGL's]: VirtualGL is an open source toolkit that gives any Linux or -Unix remote display software the ability to run OpenGL applications with full -hardware acceleration. -*[VirtualGL]: VirtualGL is an open source toolkit that gives any Linux or -Unix remote display software the ability to run OpenGL applications with full -hardware acceleration. *[WAAFLE's]: Workflow to Annotate Assemblies and Find LGT Events. *[WAAFLE]: Workflow to Annotate Assemblies and Find LGT Events. *[WhatsHap's]: Tool for phasing genomic variants using DNA sequencing reads, also called read-based phasing or haplotype assembly. @@ -2123,18 +1943,6 @@ hardware acceleration. into superior minimizer sampling techniques. *[Wise2's]: Aligning proteins or protein HMMs to DNA *[Wise2]: Aligning proteins or protein HMMs to DNA -*[XHMM's]: Calls copy number variation (CNV) from normalized read-depth data from exome capture or other targeted sequencing experiments. -*[XHMM]: Calls copy number variation (CNV) from normalized read-depth data from exome capture or other targeted sequencing experiments. -*[XMDS2's]: Fast integrator of stochastic partial differential equations. -*[XMDS2]: Fast integrator of stochastic partial differential equations. -*[XSD's]: CodeSynthesis XSD is an open-source, cross-platform W3C XML Schema to C++ data binding compiler. 
- Provided with an XML instance specification (XML Schema), it generates C++ classes that represent the given vocabulary - as well as XML parsing and serialization code. You can then access the data stored in XML using types and functions - that semantically correspond to your application domain rather than dealing with the intricacies of reading and writing XML -*[XSD]: CodeSynthesis XSD is an open-source, cross-platform W3C XML Schema to C++ data binding compiler. - Provided with an XML instance specification (XML Schema), it generates C++ classes that represent the given vocabulary - as well as XML parsing and serialization code. You can then access the data stored in XML using types and functions - that semantically correspond to your application domain rather than dealing with the intricacies of reading and writing XML *[XVFB]: A display server implementing the X11 display server protocol, XVFB performs all graphical operations in virtual memory without showing any screen output. This allows applications that 'require' a GUI to run in a command line environment. Can be invoked with `xvfb-run`. @@ -2236,12 +2044,6 @@ phylogenetics packages. *[bioawk]: An extension to awk, adding the support of several common biological data formats *[breseq's]: breseq is a computational pipeline for the analysis of short-read re-sequencing data *[breseq]: breseq is a computational pipeline for the analysis of short-read re-sequencing data -*[bsddb3's]: bsddb3 is a nearly complete Python binding of the -Oracle/Sleepycat C API for the Database Environment, Database, Cursor, -Log Cursor, Sequence and Transaction objects. -*[bsddb3]: bsddb3 is a nearly complete Python binding of the -Oracle/Sleepycat C API for the Database Environment, Database, Cursor, -Log Cursor, Sequence and Transaction objects. *[bzip2's]: bzip2 is a freely available, patent free, high-quality data compressor. 
It typically compresses files to within 10% to 15% of the best available techniques (the PPM family of statistical compressors), whilst being around twice as fast at compression and six times faster at decompression. @@ -2312,14 +2114,10 @@ Log Cursor, Sequence and Transaction objects. example when sequencing microRNAs. *[cuteSV's]: Fast and scalable long-read-based SV detection *[cuteSV]: Fast and scalable long-read-based SV detection -*[cwltool's]: Common Workflow Language tool description reference implementation -*[cwltool]: Common Workflow Language tool description reference implementation *[cyvcf2's]: cython + htslib == fast VCF and BCF processing *[cyvcf2]: cython + htslib == fast VCF and BCF processing *[dadi's]: Diffusion Approximation for Demographic Inference *[dadi]: Diffusion Approximation for Demographic Inference -*[dammit's]: de novo transcriptome annotator.. -*[dammit]: de novo transcriptome annotator.. *[datasets's]: Tool to gather data from across NCBI databases *[datasets]: Tool to gather data from across NCBI databases *[deepTools's]: deepTools is a suite of python tools particularly developed for the efficient analysis of @@ -2352,8 +2150,6 @@ for preprocessing raw data and building your own custom reference database. *[ecCodes]: ecCodes is a package developed by ECMWF which provides an application programming interface and a set of tools for decoding and encoding messages in the following formats: WMO FM-92 GRIB edition 1 and edition 2, WMO FM-94 BUFR edition 3 and edition 4, WMO GTS abbreviated header (only decoding). -*[ectyper's]: Standalone versatile serotyping module for Escherichia coli.. -*[ectyper]: Standalone versatile serotyping module for Escherichia coli.. *[edlib's]: Lightweight, super fast library for sequence alignment using edit (Levenshtein) distance. *[edlib]: Lightweight, super fast library for sequence alignment using edit (Levenshtein) distance. 
*[eggnog-mapper's]: Tool for fast functional annotation of novel sequences (genes or proteins) @@ -2388,26 +2184,24 @@ such as publication, sequence, structure, gene, variation, expression, etc. uncommon tasks with FASTQ files. *[fastq-tools]: A collection of small and efficient programs for performing some common and uncommon tasks with FASTQ files. -*[fcGENE's]: Format converting tool for genotype Data. -*[fcGENE]: Format converting tool for genotype Data. *[fgbio's]: A set of tools to analyze genomic data with a focus on Next Generation Sequencing. *[fgbio]: A set of tools to analyze genomic data with a focus on Next Generation Sequencing. -*[fineRADstructure's]: A package for population structure inference from RAD-seq data -*[fineRADstructure]: A package for population structure inference from RAD-seq data *[fineSTRUCTURE's]: Population assignment using large numbers of densely sampled genomes, including both SNP chips and sequence dat *[fineSTRUCTURE]: Population assignment using large numbers of densely sampled genomes, including both SNP chips and sequence dat *[flatbuffers's]: FlatBuffers: Memory Efficient Serialization Library *[flatbuffers]: FlatBuffers: Memory Efficient Serialization Library -*[flex's]: Flex (Fast Lexical Analyzer) is a tool for generating scanners. A scanner, - sometimes called a tokenizer, is a program which recognizes lexical patterns in text. -*[flex]: Flex (Fast Lexical Analyzer) is a tool for generating scanners. A scanner, - sometimes called a tokenizer, is a program which recognizes lexical patterns in text. -*[fmlrc's]: Tool for performing hybrid correction of long read sequencing -using the BWT and FM-index of short-read sequencing data -*[fmlrc]: Tool for performing hybrid correction of long read sequencing -using the BWT and FM-index of short-read sequencing data -*[fmt's]: Formatting library providing a fast and safe alternative to C stdio and C++ iostreams. 
-*[fmt]: Formatting library providing a fast and safe alternative to C stdio and C++ iostreams. +*[flex's]: + Flex (Fast Lexical Analyzer) is a tool for generating scanners. A scanner, + sometimes called a tokenizer, is a program which recognizes lexical patterns + in text. + +*[flex]: + Flex (Fast Lexical Analyzer) is a tool for generating scanners. A scanner, + sometimes called a tokenizer, is a program which recognizes lexical patterns + in text. + +*[fmt's]: fmt (formerly cppformat) is an open-source formatting library. +*[fmt]: fmt (formerly cppformat) is an open-source formatting library. *[fontconfig's]: Fontconfig is a library designed to provide system-wide font configuration, customization and application access. @@ -2448,14 +2242,10 @@ using the BWT and FM-index of short-read sequencing data campus clusters, clouds, and supercomputers. A funcX endpoint is a persistent service launched by the user on a compute system to serve as a conduit for executing functions on that computer. -*[fxtract's]: Extract sequences from a fastx (fasta or fastq) file given a subsequence. -*[fxtract]: Extract sequences from a fastx (fasta or fastq) file given a subsequence. *[g2clib's]: Library contains GRIB2 encoder/decoder ('C' version). *[g2clib]: Library contains GRIB2 encoder/decoder ('C' version). *[g2lib's]: Library contains GRIB2 encoder/decoder and search/indexing routines. *[g2lib]: Library contains GRIB2 encoder/decoder and search/indexing routines. -*[ga4gh's]: A reference implementation of the GA4GH API -*[ga4gh]: A reference implementation of the GA4GH API *[gcloud's]: Libraries and tools for interacting with Google Cloud products and services. *[gcloud]: Libraries and tools for interacting with Google Cloud products and services. *[gemmforge's]: GPU-GEMM generator for the Discontinuous Galerkin method. @@ -2490,8 +2280,6 @@ the LZW compression algorithm was patented. to handle everything from small to very large projects with speed and efficiency. 
*[git]: Git is a free and open source distributed version control system designed to handle everything from small to very large projects with speed and efficiency. -*[globus-automate-client's]: Client for the Globus Flows service. -*[globus-automate-client]: Client for the Globus Flows service. *[globus-compute-endpoint's]: Globus Compute is a distributed Function as a Service (FaaS) platform that enables flexible, scalable, and high performance remote function execution. Unlike centralized FaaS platforms, Globus Compute allows users to execute functions on heterogeneous remote computers, from laptops to @@ -2532,22 +2320,12 @@ to handle everything from small to very large projects with speed and efficiency perfect, which means that the hash table has no collisions, and the hash table lookup needs a single string comparison only. -*[grive2's]: Command line tool for Google Drive. -*[grive2]: Command line tool for Google Drive. -*[gsort's]: Tool to sort genomic files according to a genomefile. -*[gsort]: Tool to sort genomic files according to a genomefile. *[h5pp's]: A simple C++17 wrapper for HDF5. *[h5pp]: A simple C++17 wrapper for HDF5. *[haplocheck's]: Detects in-sample contamination in mtDNA or WGS sequencing studies by analyzing the mitchondrial content *[haplocheck]: Detects in-sample contamination in mtDNA or WGS sequencing studies by analyzing the mitchondrial content -*[help2man's]: help2man produces simple manual pages from the '--help' and '--version' output of other commands. -*[help2man]: help2man produces simple manual pages from the '--help' and '--version' output of other commands. *[hifiasm's]: Hifiasm: a haplotype-resolved assembler for accurate Hifi reads. *[hifiasm]: Hifiasm: a haplotype-resolved assembler for accurate Hifi reads. -*[hunspell's]: Spell checker and morphological analyzer library and program designed for languages - with rich morphology and complex word compounding or character encoding. 
-*[hunspell]: Spell checker and morphological analyzer library and program designed for languages - with rich morphology and complex word compounding or character encoding. *[hwloc's]: The Portable Hardware Locality (hwloc) software package provides a portable abstraction (across OS, versions, architectures, ...) of the hierarchical topology of modern architectures, including NUMA memory nodes, sockets, shared caches, cores and simultaneous multithreading. It also gathers various @@ -2560,12 +2338,6 @@ to handle everything from small to very large projects with speed and efficiency system attributes such as cache and memory information as well as the locality of I/O devices such as network interfaces, InfiniBand HCAs or GPUs. It primarily aims at helping applications with gathering information about modern computing hardware so as to exploit it accordingly and efficiently. -*[hypothesis's]: Hypothesis is an advanced testing library for Python. It lets you write tests which are parametrized - by a source of examples, and then generates simple and comprehensible examples that make your tests fail. This lets - you find more bugs in your code with less work. -*[hypothesis]: Hypothesis is an advanced testing library for Python. It lets you write tests which are parametrized - by a source of examples, and then generates simple and comprehensible examples that make your tests fail. This lets - you find more bugs in your code with less work. *[icc's]: Intel C and C++ compilers *[icc]: Intel C and C++ compilers *[iccifort's]: Intel C, C++ & Fortran compilers @@ -2608,18 +2380,6 @@ to handle everything from small to very large projects with speed and efficiency data sets (e.g., RAD, ddRAD, GBS) for population genetic and phylogenetic studies. *[ipyrad]: ipyrad is an interactive toolkit for assembly and analysis of restriction-site associated genomic data sets (e.g., RAD, ddRAD, GBS) for population genetic and phylogenetic studies. 
-*[ispc's]: Intel SPMD Program Compilers; An open-source compiler for high-performance - SIMD programming on the CPU. ispc is a compiler for a variant of the C programming language, - with extensions for 'single program, multiple data' (SPMD) programming. - Under the SPMD model, the programmer writes a program that generally appears - to be a regular serial program, though the execution model is actually that - a number of program instances execute in parallel on the hardware. -*[ispc]: Intel SPMD Program Compilers; An open-source compiler for high-performance - SIMD programming on the CPU. ispc is a compiler for a variant of the C programming language, - with extensions for 'single program, multiple data' (SPMD) programming. - Under the SPMD model, the programmer writes a program that generally appears - to be a regular serial program, though the execution model is actually that - a number of program instances execute in parallel on the hardware. *[jbigkit's]: JBIG-KIT is a software implementation of the JBIG1 data compression standard *[jbigkit]: JBIG-KIT is a software implementation of the JBIG1 data compression standard *[jcvi's]: Collection of Python libraries to parse bioinformatics files, or perform computation related to assembly, annotation, and comparative genomics. @@ -2740,10 +2500,6 @@ compression and decompression. libjpeg is a library that implements JPEG image e behind a consistent, portable interface. *[libtool]: GNU libtool is a generic library support script. Libtool hides the complexity of using shared libraries behind a consistent, portable interface. -*[libunistring's]: This library provides functions for manipulating Unicode strings and for manipulating C strings - according to the Unicode standard. -*[libunistring]: This library provides functions for manipulating Unicode strings and for manipulating C strings - according to the Unicode standard. 
*[libunwind's]: Define a portable and efficient C programming API to determine the call-chain of a program. *[libunwind]: Define a portable and efficient C programming API to determine the call-chain of a program. *[libvdwxc's]: libvdwxc is a general library for evaluating energy and potential for @@ -2802,8 +2558,6 @@ DNA sequencing reads generated by Next-Generation Sequencing platforms. DNA sequencing reads generated by Next-Generation Sequencing platforms. *[matlab-proxy's]: Python package which enables you to launch MATLAB and access it from a web browser. *[matlab-proxy]: Python package which enables you to launch MATLAB and access it from a web browser. -*[meRanTK's]: High performance toolkit for complete analysis of methylated RNA data. -*[meRanTK]: High performance toolkit for complete analysis of methylated RNA data. *[medaka's]: Medaka is a tool to create a consensus sequence from nanopore sequencing data. *[medaka]: Medaka is a tool to create a consensus sequence from nanopore sequencing data. *[megalodon's]: Tool to extract high accuracy modified base and sequence variant calls from raw nanopore reads @@ -2814,8 +2568,6 @@ by anchoring the information rich basecalling neural network output to a referen *[metaWRAP]: Flexible pipeline for genome-resolved metagenomic data analysis. *[miRDeep2's]: Completely overhauled tool which discovers microRNA genes by analyzing sequenced RNAs *[miRDeep2]: Completely overhauled tool which discovers microRNA genes by analyzing sequenced RNAs -*[mimalloc's]: mimalloc is a general purpose allocator with excellent performance characteristics. -*[mimalloc]: mimalloc is a general purpose allocator with excellent performance characteristics. *[miniBUSCO's]: faster and more accurate reimplementation of BUSCO. *[miniBUSCO]: faster and more accurate reimplementation of BUSCO. *[miniasm's]: Fast OLC-based de novo assembler for noisy long reads. 
@@ -2834,8 +2586,6 @@ sequences from a few kilobases to ~100 megabases in length at an error rate ~15%. . *[miniprot's]: Aligns a protein sequence against a genome with affine gap penalty, splicing and frameshift.. *[miniprot]: Aligns a protein sequence against a genome with affine gap penalty, splicing and frameshift.. -*[mlpack's]: Fast, and flexible C++ machine learning library with bindings to other languages -*[mlpack]: Fast, and flexible C++ machine learning library with bindings to other languages *[modbam2bed's]: A program to aggregate modified base counts stored in a modified-base BAM file to a bedMethyl file. *[modbam2bed]: A program to aggregate modified base counts stored in a modified-base BAM file to a bedMethyl file. *[modkit's]: Tool for working with modified bases from Oxford Nanopore @@ -2844,8 +2594,6 @@ rate ~15%. . *[mosdepth]: Fast BAM/CRAM depth calculation for WGS, exome, or targeted sequencing *[mpcci's]: MpCCI is a vendor neutral and application independent interface for co-simulation. MpCCI offers advanced and proven features for multiphysics modelling. *[mpcci]: MpCCI is a vendor neutral and application independent interface for co-simulation. MpCCI offers advanced and proven features for multiphysics modelling. -*[mpifileutils's]: MPI-Based File Utilities For Distributed Systems -*[mpifileutils]: MPI-Based File Utilities For Distributed Systems *[muParser's]: muParser is an extensible high performance math expression parser library written in C++. It works by transforming a mathematical expression into bytecode and precalculating constant parts of the expression. 
@@ -2946,12 +2694,6 @@ and several bioinformatic post-processing features *[pandoc]: Almost universal document converter *[parallel-fastq-dump's]: parallel fastq-dump wrapper *[parallel-fastq-dump]: parallel fastq-dump wrapper -*[parasail's]: parasail is a SIMD C (C99) library containing implementations - of the Smith-Waterman (local), Needleman-Wunsch (global), and semi-global - pairwise sequence alignment algorithms. -*[parasail]: parasail is a SIMD C (C99) library containing implementations - of the Smith-Waterman (local), Needleman-Wunsch (global), and semi-global - pairwise sequence alignment algorithms. *[patchelf's]: PatchELF is a small utility to modify the dynamic linker and RPATH of ELF executables. *[patchelf]: PatchELF is a small utility to modify the dynamic linker and RPATH of ELF executables. *[pauvre's]: Tools for plotting Oxford Nanopore and other long-read data. @@ -3014,26 +2756,12 @@ for giant viruses and viruses that use alternative genetic codes. *[pycoQC]: Computes metrics and generates interactive QC plots for Oxford Nanopore technologies sequencing data. *[pymol-open-source's]: PyMOL (open source version) molecular visualization system. *[pymol-open-source]: PyMOL (open source version) molecular visualization system. -*[pyspoa's]: Python bindings to spoa. -*[pyspoa]: Python bindings to spoa. *[qcat's]: Command-line tool for demultiplexing Oxford Nanopore reads from FASTQ files *[qcat]: Command-line tool for demultiplexing Oxford Nanopore reads from FASTQ files -*[rDock's]: rDock is a fast and versatile Open Source docking program that -can be used to dock small molecules against proteins and nucleic acids. It is -designed for High Throughput Virtual Screening (HTVS) campaigns and Binding Mode -prediction studies. rDock is mainly written in C++ and accessory scripts and -programs are written in C++, perl or python languages. 
-*[rDock]: rDock is a fast and versatile Open Source docking program that -can be used to dock small molecules against proteins and nucleic acids. It is -designed for High Throughput Virtual Screening (HTVS) campaigns and Binding Mode -prediction studies. rDock is mainly written in C++ and accessory scripts and -programs are written in C++, perl or python languages. *[randfold's]: Minimum free energy of folding randomization test software *[randfold]: Minimum free energy of folding randomization test software *[rasusa's]: Randomly subsample sequencing reads to a specified coverage. *[rasusa]: Randomly subsample sequencing reads to a specified coverage. -*[razers3's]: Tool for mapping millions of short genomic reads onto a reference genome. -*[razers3]: Tool for mapping millions of short genomic reads onto a reference genome. *[rclone's]: Rclone is a command line program to sync files and directories to and from a variety of online storage services @@ -3050,16 +2778,8 @@ of conditional jumps and comparisons. fast lexers: at least as fast as their reasonably optimized hand-coded counterparts. Instead of using traditional table-driven approach, re2c encodes the generated finite state automata directly in the form of conditional jumps and comparisons. -*[rkcommon's]: -A common set of C++ infrastructure and CMake utilities used by various components of Intel® oneAPI Rendering Toolkit. - -*[rkcommon]: -A common set of C++ infrastructure and CMake utilities used by various components of Intel® oneAPI Rendering Toolkit. - *[rnaQUAST's]: Tool for evaluating RNA-Seq assemblies using reference genome and gene database *[rnaQUAST]: Tool for evaluating RNA-Seq assemblies using reference genome and gene database -*[rust-fmlrc's]: FM-index Long Read Corrector (Rust implementation) -*[rust-fmlrc]: FM-index Long Read Corrector (Rust implementation) *[samblaster's]: samblaster is a fast and flexible program for marking duplicates in read-id grouped paired-end SAM files. 
It can also optionally output discordant read pairs and/or split read mappings to separate SAM files, and/or unmapped/clipped reads to a separate FASTQ file. When marking duplicates, samblaster will require approximately 20MB of memory per 1M read pairs. @@ -3068,10 +2788,6 @@ reads to a separate FASTQ file. When marking duplicates, samblaster will require It can also optionally output discordant read pairs and/or split read mappings to separate SAM files, and/or unmapped/clipped reads to a separate FASTQ file. When marking duplicates, samblaster will require approximately 20MB of memory per 1M read pairs. -*[samclip's]: Filter SAM file for soft and hard clipped alignments. -*[samclip]: Filter SAM file for soft and hard clipped alignments. -*[savvy's]: Interface to various variant calling formats. -*[savvy]: Interface to various variant calling formats. *[sbt's]: sbt is a build tool for Scala, Java, and more. *[sbt]: sbt is a build tool for Scala, Java, and more. *[sc-RNA's]: Bioconductor bundle for single-cell RNA-Seq Data analysis @@ -3100,18 +2816,10 @@ reads to a separate FASTQ file. When marking duplicates, samblaster will require and manipulating data in SLOW5 format. *[slow5tools]: Toolkit for converting (FAST5 <-> SLOW5), compressing, viewing, indexing and manipulating data in SLOW5 format. -*[smafa's]: Smafa attempts to align or cluster pre-aligned biological sequences, handling sequences - which are all the same length. -*[smafa]: Smafa attempts to align or cluster pre-aligned biological sequences, handling sequences - which are all the same length. *[smoove's]: simplifies and speeds calling and genotyping SVs for short reads. *[smoove]: simplifies and speeds calling and genotyping SVs for short reads. *[snakemake's]: The Snakemake workflow management system is a tool to create reproducible and scalable data analyses. *[snakemake]: The Snakemake workflow management system is a tool to create reproducible and scalable data analyses. 
-*[snaphu's]: SNAPHU is an implementation of the Statistical-cost, Network-flow Algorithm for Phase Unwrapping - proposed by Chen and Zebker -*[snaphu]: SNAPHU is an implementation of the Statistical-cost, Network-flow Algorithm for Phase Unwrapping - proposed by Chen and Zebker *[snappy's]: Snappy is a compression/decompression library. It does not aim for maximum compression, or compatibility with any other compression library; instead, it aims for very high speeds and reasonable compression. @@ -3150,8 +2858,6 @@ perform quality-control on BAM/CRAM/BCF/VCF/GVCF The purpose of swarm is to provide a novel clustering algorithm that handles massive sets of amplicons. Results of traditional clustering algorithms are strongly input-order dependent, and rely on an arbitrary global clustering threshold. swarm results are resilient to input-order changes and rely on a small local linking threshold d, representing the maximum number of differences between two amplicons. *[swarm]: A robust and fast clustering method for amplicon-based studies. The purpose of swarm is to provide a novel clustering algorithm that handles massive sets of amplicons. Results of traditional clustering algorithms are strongly input-order dependent, and rely on an arbitrary global clustering threshold. swarm results are resilient to input-order changes and rely on a small local linking threshold d, representing the maximum number of differences between two amplicons. -*[swissknife's]: Perl module for reading and writing UniProtKB data in plain text format. -*[swissknife]: Perl module for reading and writing UniProtKB data in plain text format. 
*[tRNAscan-SE's]: Transfer RNA detection *[tRNAscan-SE]: Transfer RNA detection *[tabix's]: Generic indexer for TAB-delimited genome position files diff --git a/docs/assets/icons/iconmonstr-cloud-14.svg b/docs/assets/icons/iconmonstr-cloud-14.svg new file mode 100644 index 000000000..45c83a823 --- /dev/null +++ b/docs/assets/icons/iconmonstr-cloud-14.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/assets/icons/iconmonstr-cloud-15.svg b/docs/assets/icons/iconmonstr-cloud-15.svg new file mode 100644 index 000000000..ab66bc181 --- /dev/null +++ b/docs/assets/icons/iconmonstr-cloud-15.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/assets/icons/iconmonstr-git-4.svg b/docs/assets/icons/iconmonstr-git-4.svg new file mode 100644 index 000000000..9fc19ce1d --- /dev/null +++ b/docs/assets/icons/iconmonstr-git-4.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/assets/icons/iconmonstr-git-5.svg b/docs/assets/icons/iconmonstr-git-5.svg new file mode 100644 index 000000000..f87e14cdc --- /dev/null +++ b/docs/assets/icons/iconmonstr-git-5.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/assets/icons/iconmonstr-handshake-4.svg b/docs/assets/icons/iconmonstr-handshake-4.svg new file mode 100644 index 000000000..1509094eb --- /dev/null +++ b/docs/assets/icons/iconmonstr-handshake-4.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/assets/icons/iconmonstr-handshake-5.svg b/docs/assets/icons/iconmonstr-handshake-5.svg new file mode 100644 index 000000000..624439350 --- /dev/null +++ b/docs/assets/icons/iconmonstr-handshake-5.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/assets/images/5w-and-1h.png b/docs/assets/images/5w-and-1h.png new file mode 100644 index 000000000..7a3ed579a Binary files /dev/null and b/docs/assets/images/5w-and-1h.png differ diff --git a/docs/assets/images/Debugging.PNG b/docs/assets/images/Debugging.png similarity index 100% rename from 
docs/assets/images/Debugging.PNG rename to docs/assets/images/Debugging.png diff --git a/docs/assets/images/Debugging_0.PNG b/docs/assets/images/Debugging_0.png similarity index 100% rename from docs/assets/images/Debugging_0.PNG rename to docs/assets/images/Debugging_0.png diff --git a/docs/assets/images/Debugging_1.PNG b/docs/assets/images/Debugging_1.png similarity index 100% rename from docs/assets/images/Debugging_1.PNG rename to docs/assets/images/Debugging_1.png diff --git a/docs/assets/images/JupyterLab.PNG b/docs/assets/images/JupyterLab.png similarity index 100% rename from docs/assets/images/JupyterLab.PNG rename to docs/assets/images/JupyterLab.png diff --git a/docs/assets/images/JupyterLab_0.PNG b/docs/assets/images/JupyterLab_0.png similarity index 100% rename from docs/assets/images/JupyterLab_0.PNG rename to docs/assets/images/JupyterLab_0.png diff --git a/docs/assets/images/Profiler-ARM_MAP.PNG b/docs/assets/images/Profiler-ARM_MAP2.png similarity index 100% rename from docs/assets/images/Profiler-ARM_MAP.PNG rename to docs/assets/images/Profiler-ARM_MAP2.png diff --git a/docs/assets/images/app-cred-selected.png b/docs/assets/images/app-cred-selected.png new file mode 100644 index 000000000..a319085e3 Binary files /dev/null and b/docs/assets/images/app-cred-selected.png differ diff --git a/docs/assets/images/app-creds-overview.png b/docs/assets/images/app-creds-overview.png new file mode 100644 index 000000000..cdfa05dbe Binary files /dev/null and b/docs/assets/images/app-creds-overview.png differ diff --git a/docs/assets/images/commit-changes-github.png b/docs/assets/images/commit-changes-github.png new file mode 100644 index 000000000..08bd5f168 Binary files /dev/null and b/docs/assets/images/commit-changes-github.png differ diff --git a/docs/assets/images/compute-network-port-attached.png b/docs/assets/images/compute-network-port-attached.png new file mode 100644 index 000000000..46062f154 Binary files /dev/null and 
b/docs/assets/images/compute-network-port-attached.png differ diff --git a/docs/assets/images/compute-network-port-removed.png b/docs/assets/images/compute-network-port-removed.png new file mode 100644 index 000000000..2a3717102 Binary files /dev/null and b/docs/assets/images/compute-network-port-removed.png differ diff --git a/docs/assets/images/compute-overview.png b/docs/assets/images/compute-overview.png new file mode 100644 index 000000000..203fe0d91 Binary files /dev/null and b/docs/assets/images/compute-overview.png differ diff --git a/docs/assets/images/confirm-resize.png b/docs/assets/images/confirm-resize.png new file mode 100644 index 000000000..787c603d8 Binary files /dev/null and b/docs/assets/images/confirm-resize.png differ diff --git a/docs/assets/images/cyberduck-connection-dialog-rdc.png b/docs/assets/images/cyberduck-connection-dialog-rdc.png new file mode 100644 index 000000000..40ee57bc8 Binary files /dev/null and b/docs/assets/images/cyberduck-connection-dialog-rdc.png differ diff --git a/docs/assets/images/cyberduck-connection-dialog.png b/docs/assets/images/cyberduck-connection-dialog.png new file mode 100644 index 000000000..965009d9d Binary files /dev/null and b/docs/assets/images/cyberduck-connection-dialog.png differ diff --git a/docs/assets/images/cyberduck-container-view.png b/docs/assets/images/cyberduck-container-view.png new file mode 100644 index 000000000..65a3cdff4 Binary files /dev/null and b/docs/assets/images/cyberduck-container-view.png differ diff --git a/docs/assets/images/cyberduck-overview.png b/docs/assets/images/cyberduck-overview.png new file mode 100644 index 000000000..c3c48fe3c Binary files /dev/null and b/docs/assets/images/cyberduck-overview.png differ diff --git a/docs/assets/images/default-security-group-rules.png b/docs/assets/images/default-security-group-rules.png new file mode 100644 index 000000000..17b80861d Binary files /dev/null and b/docs/assets/images/default-security-group-rules.png differ diff --git 
a/docs/assets/images/edit-on-github.png b/docs/assets/images/edit-on-github.png new file mode 100644 index 000000000..ade379eaf Binary files /dev/null and b/docs/assets/images/edit-on-github.png differ diff --git a/docs/assets/images/ee-supporting-evidence.png b/docs/assets/images/ee-supporting-evidence.png new file mode 100644 index 000000000..767a26ca0 Binary files /dev/null and b/docs/assets/images/ee-supporting-evidence.png differ diff --git a/docs/assets/images/floating-ips.png b/docs/assets/images/floating-ips.png new file mode 100644 index 000000000..b180e2b3c Binary files /dev/null and b/docs/assets/images/floating-ips.png differ diff --git a/docs/assets/images/instance-action-menu.png b/docs/assets/images/instance-action-menu.png new file mode 100644 index 000000000..361a411d6 Binary files /dev/null and b/docs/assets/images/instance-action-menu.png differ diff --git a/docs/assets/images/instance-overview.png b/docs/assets/images/instance-overview.png new file mode 100644 index 000000000..e737d58ba Binary files /dev/null and b/docs/assets/images/instance-overview.png differ diff --git a/docs/assets/images/manage-security-groups-add-dialog.png b/docs/assets/images/manage-security-groups-add-dialog.png new file mode 100644 index 000000000..19b623bd5 Binary files /dev/null and b/docs/assets/images/manage-security-groups-add-dialog.png differ diff --git a/docs/assets/images/manage-security-groups-dialog.png b/docs/assets/images/manage-security-groups-dialog.png new file mode 100644 index 000000000..193b3b217 Binary files /dev/null and b/docs/assets/images/manage-security-groups-dialog.png differ diff --git a/docs/assets/images/nesi-logo.png b/docs/assets/images/nesi-logo.png new file mode 100644 index 000000000..81c1a8945 Binary files /dev/null and b/docs/assets/images/nesi-logo.png differ diff --git a/docs/assets/images/network-ports-added.png b/docs/assets/images/network-ports-added.png new file mode 100644 index 000000000..4936bdfbb Binary files /dev/null 
and b/docs/assets/images/network-ports-added.png differ diff --git a/docs/assets/images/network-ports-create-dialog.png b/docs/assets/images/network-ports-create-dialog.png new file mode 100644 index 000000000..b1aedb17c Binary files /dev/null and b/docs/assets/images/network-ports-create-dialog.png differ diff --git a/docs/assets/images/network-ports-overview-selected.png b/docs/assets/images/network-ports-overview-selected.png new file mode 100644 index 000000000..35061e7a7 Binary files /dev/null and b/docs/assets/images/network-ports-overview-selected.png differ diff --git a/docs/assets/images/network-ports-overview.png b/docs/assets/images/network-ports-overview.png new file mode 100644 index 000000000..45badc3a5 Binary files /dev/null and b/docs/assets/images/network-ports-overview.png differ diff --git a/docs/assets/images/networks-overview-selected.png b/docs/assets/images/networks-overview-selected.png new file mode 100644 index 000000000..fa5a35a7a Binary files /dev/null and b/docs/assets/images/networks-overview-selected.png differ diff --git a/docs/assets/images/networks-overview.png b/docs/assets/images/networks-overview.png new file mode 100644 index 000000000..f4a4ebcf5 Binary files /dev/null and b/docs/assets/images/networks-overview.png differ diff --git a/docs/assets/images/new-key-pair-download.png b/docs/assets/images/new-key-pair-download.png new file mode 100644 index 000000000..de7ada25e Binary files /dev/null and b/docs/assets/images/new-key-pair-download.png differ diff --git a/docs/assets/images/new-key-pair-filled.png b/docs/assets/images/new-key-pair-filled.png new file mode 100644 index 000000000..1d46edf80 Binary files /dev/null and b/docs/assets/images/new-key-pair-filled.png differ diff --git a/docs/assets/images/new-key-pair.png b/docs/assets/images/new-key-pair.png new file mode 100644 index 000000000..1981869cf Binary files /dev/null and b/docs/assets/images/new-key-pair.png differ diff --git 
a/docs/assets/images/new-security-group-rules.png b/docs/assets/images/new-security-group-rules.png new file mode 100644 index 000000000..7c340dd1c Binary files /dev/null and b/docs/assets/images/new-security-group-rules.png differ diff --git a/docs/assets/images/object-storage-container-overview.png b/docs/assets/images/object-storage-container-overview.png new file mode 100644 index 000000000..bdcf2acb5 Binary files /dev/null and b/docs/assets/images/object-storage-container-overview.png differ diff --git a/docs/assets/images/object-storage-overview.png b/docs/assets/images/object-storage-overview.png new file mode 100644 index 000000000..5c83f1309 Binary files /dev/null and b/docs/assets/images/object-storage-overview.png differ diff --git a/docs/assets/images/object-storage-upload-dialog.png b/docs/assets/images/object-storage-upload-dialog.png new file mode 100644 index 000000000..83627866e Binary files /dev/null and b/docs/assets/images/object-storage-upload-dialog.png differ diff --git a/docs/assets/images/project-selector.png b/docs/assets/images/project-selector.png new file mode 100644 index 000000000..8da991a5b Binary files /dev/null and b/docs/assets/images/project-selector.png differ diff --git a/docs/assets/images/propose-change-github.png b/docs/assets/images/propose-change-github.png new file mode 100644 index 000000000..6366c018b Binary files /dev/null and b/docs/assets/images/propose-change-github.png differ diff --git a/docs/assets/images/resize-instance-dialog.png b/docs/assets/images/resize-instance-dialog.png new file mode 100644 index 000000000..813417cec Binary files /dev/null and b/docs/assets/images/resize-instance-dialog.png differ diff --git a/docs/assets/images/security-group-add-custom-rule-new.png b/docs/assets/images/security-group-add-custom-rule-new.png new file mode 100644 index 000000000..b93e304e6 Binary files /dev/null and b/docs/assets/images/security-group-add-custom-rule-new.png differ diff --git 
a/docs/assets/images/security-group-add-rule-custom-example.png b/docs/assets/images/security-group-add-rule-custom-example.png new file mode 100644 index 000000000..bf1638e1c Binary files /dev/null and b/docs/assets/images/security-group-add-rule-custom-example.png differ diff --git a/docs/assets/images/security-group-add-rule-dialog.png b/docs/assets/images/security-group-add-rule-dialog.png new file mode 100644 index 000000000..013b25ce2 Binary files /dev/null and b/docs/assets/images/security-group-add-rule-dialog.png differ diff --git a/docs/assets/images/security-group-add-rule-new.png b/docs/assets/images/security-group-add-rule-new.png new file mode 100644 index 000000000..723667cbd Binary files /dev/null and b/docs/assets/images/security-group-add-rule-new.png differ diff --git a/docs/assets/images/security-group-add-rule.png b/docs/assets/images/security-group-add-rule.png new file mode 100644 index 000000000..9d825d6aa Binary files /dev/null and b/docs/assets/images/security-group-add-rule.png differ diff --git a/docs/assets/images/security-group-manage-rules.png b/docs/assets/images/security-group-manage-rules.png new file mode 100644 index 000000000..56d918f9a Binary files /dev/null and b/docs/assets/images/security-group-manage-rules.png differ diff --git a/docs/assets/images/security-group-predefined-rule-example.png b/docs/assets/images/security-group-predefined-rule-example.png new file mode 100644 index 000000000..0e3ba96a9 Binary files /dev/null and b/docs/assets/images/security-group-predefined-rule-example.png differ diff --git a/docs/assets/images/security-group-predefined-rules.png b/docs/assets/images/security-group-predefined-rules.png new file mode 100644 index 000000000..8aa042a04 Binary files /dev/null and b/docs/assets/images/security-group-predefined-rules.png differ diff --git a/docs/assets/images/security-groups-overview.png b/docs/assets/images/security-groups-overview.png new file mode 100644 index 000000000..747f10d58 Binary files 
/dev/null and b/docs/assets/images/security-groups-overview.png differ diff --git a/docs/assets/images/specific-network-view.png b/docs/assets/images/specific-network-view.png new file mode 100644 index 000000000..acd90111f Binary files /dev/null and b/docs/assets/images/specific-network-view.png differ diff --git a/docs/assets/images/topic-maze-solved.png b/docs/assets/images/topic-maze-solved.png new file mode 100644 index 000000000..8fc7422ba Binary files /dev/null and b/docs/assets/images/topic-maze-solved.png differ diff --git a/docs/assets/images/topic-maze.png b/docs/assets/images/topic-maze.png new file mode 100644 index 000000000..1670bb8c7 Binary files /dev/null and b/docs/assets/images/topic-maze.png differ diff --git a/docs/assets/images/user-menu.png b/docs/assets/images/user-menu.png new file mode 100644 index 000000000..4b3d020db Binary files /dev/null and b/docs/assets/images/user-menu.png differ diff --git a/docs/assets/images/volume-attached.png b/docs/assets/images/volume-attached.png new file mode 100644 index 000000000..2e40083d9 Binary files /dev/null and b/docs/assets/images/volume-attached.png differ diff --git a/docs/assets/module-list.json b/docs/assets/module-list.json index 89af5fb3f..ddee00663 100644 --- a/docs/assets/module-list.json +++ b/docs/assets/module-list.json @@ -3,17 +3,12 @@ "description": "Finite Element Analysis software for modeling, visualization and best-in-class implicit and explicit dynamics FEA.", "domains": [ "cae", - "engineering", - "mahuika", - "gpu", - "mpi", - "omp", - "fea" + "engineering" ], "extensions": [], "licence_type": "proprietary", "homepage": "http://www.simulia.com/products/abaqus_fea.html", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/ABAQUS", + "support": "", "versions": [ "2017", "2018", @@ -290,16 +285,15 @@ "homepage": "https://software.intel.com/intel-advisor-xe", "support": "", "versions": [ - "2020_update1", "2023.1.0" ], "admin_list": [], 
"network_licences": [], - "default": "2020_update1", + "default": "2023.1.0", "default_type": "latest", "last_updated": 1689143576, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/perf/Advisor/2020_update1.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/perf/Advisor/2023.1.0.lua", "force_hide": "False", "force_show": "False" }, @@ -349,7 +343,11 @@ "versions": [ "2017-GCC-7.4.0-serial" ], - "admin_list": [], + "admin_list": [ + { + "2017-GCC-7.4.0-serial": "Warning: AGE/2017-GCC-7.4.0-serial is old and marked for deletion. If you still need it, then please let us know." + } + ], "network_licences": [], "default": "2017-GCC-7.4.0-serial", "default_type": "latest", @@ -367,18 +365,19 @@ "extensions": [], "licence_type": "", "homepage": "https://github.com/deepmind/alphafold", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/AlphaFold", + "support": "", "versions": [ "2.3.2", - "3.0.0" + "3.0.0", + "3.0.1" ], "admin_list": [], "network_licences": [], - "default": "3.0.0", + "default": "3.0.1", "default_type": "latest", - "last_updated": 1733003523, + "last_updated": 1739762231, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/AlphaFold/3.0.0.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/AlphaFold/3.0.1.lua", "force_hide": "False", "force_show": "False" }, @@ -587,14 +586,12 @@ "description": "A bundle of computer-aided engineering software including Fluent and CFX.", "domains": [ "engineering", - "visualisation", - "mahuika", - "application" + "visualisation" ], "extensions": [], "licence_type": "proprietary", "homepage": "https://www.ansys.com", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/ANSYS", + "support": "", "versions": [ "18.1", "19.1", @@ -808,7 +805,11 @@ "1.10.9-Java-1.8.0_144", "1.10.9-Java-11.0.4" ], - "admin_list": [], + "admin_list": [ + { + "1.10.1-Java-1.8.0_144": "Warning: 
ant/1.10.1-Java-1.8.0_144 is old and marked for deletion. Please select a more recent version (try 'module spider ant') or let us know that you still need it." + } + ], "network_licences": [], "default": "1.10.9-Java-1.8.0_144", "default_type": "latest", @@ -847,40 +848,6 @@ "force_hide": "False", "force_show": "False" }, - "ANTLR": { - "description": "ANother Tool for Language Recognition", - "domains": [ - "lang", - "social_science" - ], - "extensions": [], - "licence_type": "", - "homepage": "http://www.antlr2.org", - "support": "", - "versions": [ - "2.7.7-GCC-11.3.0", - "2.7.7-GCC-7.4.0" - ], - "admin_list": [ - { - "2.7.7-gimkl-2017a": "Warning: ANTLR/2.7.7-gimkl-2017a is very old and will soon be deleted. If you still need it, then please let us know." - }, - { - "2.7.7-GCC-11.3.0": "Warning: ANTLR/2.7.7-GCC-11.3.0 is old and marked for deletion. If you still need it, then please let us know." - }, - { - "2.7.7-GCC-7.4.0": "Warning: ANTLR/2.7.7-GCC-7.4.0 is old and marked for deletion. If you still need it, then please let us know." - } - ], - "network_licences": [], - "default": "2.7.7-GCC-7.4.0", - "default_type": "latest", - "last_updated": 1688334609, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/lang/ANTLR/2.7.7-GCC-7.4.0.lua", - "force_hide": "False", - "force_show": "False" - }, "ANTs": { "description": "ANTs extracts information from complex datasets that include imaging. ANTs is useful for managing,\n interpreting and visualizing multidimensional data.", "domains": [ @@ -983,7 +950,11 @@ "versions": [ "4.0-gompi-2022a" ], - "admin_list": [], + "admin_list": [ + { + "4.0-gompi-2022a": "Warning: AOCL-FFTW/4.0-gompi-2022a has not been used for some time and so is marked for deletion. If you want it to remain then please let us know." 
+ } + ], "network_licences": [], "default": "4.0-gompi-2022a", "default_type": "latest", @@ -1372,7 +1343,11 @@ "versions": [ "2.5.1-GCCcore-9.2.0" ], - "admin_list": [], + "admin_list": [ + { + "2.5.1-GCCcore-9.2.0": "Warning: attr/2.5.1-GCCcore-9.2.0 is old and marked for deletion. If you still need it, then please let us know." + } + ], "network_licences": [], "default": "2.5.1-GCCcore-9.2.0", "default_type": "latest", @@ -1410,28 +1385,6 @@ "force_hide": "False", "force_show": "False" }, - "Autoconf-archive": { - "description": "A collection of more than 500 macros for GNU Autoconf", - "domains": [ - "devel" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://www.gnu.org/software/autoconf-archive", - "support": "", - "versions": [ - "2023.02.20" - ], - "admin_list": [], - "network_licences": [], - "default": "2023.02.20", - "default_type": "latest", - "last_updated": 1704337812, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/Autoconf-archive/2023.02.20.lua", - "force_hide": "False", - "force_show": "False" - }, "AutoDock-GPU": { "description": "OpenCL and Cuda accelerated version of AutoDock. 
It leverages its embarrasingly\nparallelizable LGA by processing ligand-receptor poses in parallel over\nmultiple compute units.", "domains": [ @@ -1600,10 +1553,6 @@ "homepage": "basilisk.fr", "support": "", "versions": [ - "20180226-gimpi-2018b", - "20190508-gimpi-2018b", - "20200620-gimpi-2020a", - "20220112-gimpi-2020a", "20220324-gimpi-2020a" ], "admin_list": [ @@ -1618,11 +1567,11 @@ } ], "network_licences": [], - "default": "20180226-gimpi-2018b", + "default": "20220324-gimpi-2020a", "default_type": "latest", "last_updated": 1648087186, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/tools/Basilisk/20180226-gimpi-2018b.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/tools/Basilisk/20220324-gimpi-2020a.lua", "force_hide": "False", "force_show": "False" }, @@ -1692,51 +1641,6 @@ "force_hide": "False", "force_show": "False" }, - "Bazel": { - "description": "Bazel is a build tool that builds code quickly and reliably.\nIt is used to build the majority of Google's software.", - "domains": [ - "devel" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://bazel.io/", - "support": "", - "versions": [ - "0.26.1-GCCcore-7.4.0", - "2.0.0-GCCcore-7.4.0", - "2.0.0-GCCcore-9.2.0", - "3.4.1-GCCcore-9.2.0", - "3.7.2-GCCcore-9.2.0" - ], - "admin_list": [ - { - "0.16.0-GCC-5.4.0": "Warning: Bazel/0.16.0-GCC-5.4.0 is very old and will soon be deleted. If you still need it, then please let us know." - }, - { - "0.26.1-GCCcore-7.4.0": "Warning: Bazel/0.26.1-GCCcore-7.4.0 is old and marked for deletion. If you still need it, then please let us know." - }, - { - "2.0.0-GCCcore-7.4.0": "Warning: Bazel/2.0.0-GCCcore-7.4.0 is old and marked for deletion. If you still need it, then please let us know." - }, - { - "2.0.0-GCCcore-9.2.0": "Warning: Bazel/2.0.0-GCCcore-9.2.0 is old and marked for deletion. If you still need it, then please let us know." 
- }, - { - "3.4.1-GCCcore-9.2.0": "Warning: Bazel/3.4.1-GCCcore-9.2.0 is old and marked for deletion. If you still need it, then please let us know." - }, - { - "3.7.2-GCCcore-9.2.0": "Warning: Bazel/3.7.2-GCCcore-9.2.0 is old and marked for deletion. If you still need it, then please let us know." - } - ], - "network_licences": [], - "default": "3.4.1-GCCcore-9.2.0", - "default_type": "latest", - "last_updated": 1625820014, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/Bazel/3.4.1-GCCcore-9.2.0.lua", - "force_hide": "False", - "force_show": "False" - }, "BBMap": { "description": "BBMap short read aligner, and other bioinformatic tools.", "domains": [ @@ -1806,33 +1710,6 @@ "force_hide": "False", "force_show": "False" }, - "BCL-Convert": { - "description": "Converts per cycle binary data output by Illumina sequencers containing basecall\nfiles and quality scores to per read FASTQ files", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://support.illumina.com/sequencing/sequencing_software/bcl-convert.html", - "support": "", - "versions": [ - "4.0.3", - "4.2.4" - ], - "admin_list": [ - { - "4.0.3": "Warning: BCL-Convert/4.0.3 is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "4.0.3", - "default_type": "latest", - "last_updated": 1700605569, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/BCL-Convert/4.0.3.lua", - "force_hide": "False", - "force_show": "False" - }, "bcl2fastq2": { "description": "bcl2fastq Conversion Software both demultiplexes data and converts BCL files generated by\n Illumina sequencing systems to standard FASTQ file formats for downstream analysis.", "domains": [ @@ -1927,7 +1804,6 @@ "versions": [ "1.10.4-gimkl-2017a-no-beagle", "2.5.2", - "2.6.3", "2.6.6", "2.7.0", "2.7.7" @@ -2034,6 +1910,9 @@ "admin_list": [ { "0.1.1-r16-intel-2017a": "Warning: BEEF/0.1.1-r16-intel-2017a is very old and will soon be removed. Please select a more recent version (try 'module spider BEEF')." + }, + { + "0.1.1-r16-intel-2018b": "Warning: BEEF/0.1.1-r16-intel-2018b is old and marked for deletion along with the rest of our intel-2018b software. Please select a more recent version (try 'module spider BEEF') or let us know that you still need it." } ], "network_licences": [], @@ -2099,7 +1978,6 @@ "homepage": "https://github.com/pmelsted/bifrost", "support": "", "versions": [ - "1.0.4-GCC-9.2.0", "1.3.5-GCC-12.3.0" ], "admin_list": [ @@ -2108,11 +1986,11 @@ } ], "network_licences": [], - "default": "1.0.4-GCC-9.2.0", + "default": "1.3.5-GCC-12.3.0", "default_type": "latest", "last_updated": 1724796822, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/Bifrost/1.0.4-GCC-9.2.0.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/Bifrost/1.3.5-GCC-12.3.0.lua", "force_hide": "False", "force_show": "False" }, @@ -2305,6 +2183,9 @@ "admin_list": [ { "3.3.2-GCCcore-7.4.0": "Warning: Bison/3.3.2-GCCcore-7.4.0 is old and marked for deletion. Please select a more recent version (try 'module spider Bison') or let us know that you still need it." 
+ }, + { + "3.0.4-GCC-5.4.0": "Warning: Bison/3.0.4-GCC-5.4.0 is old and marked for deletion. If you still need it, then please let us know." } ], "network_licences": [], @@ -2320,13 +2201,12 @@ "description": "Basic Local Alignment Search Tool, or BLAST, is an algorithm\n for comparing primary biological sequence information, such as the amino-acid\n sequences of different proteins or the nucleotides of DNA sequences.", "domains": [ "bio", - "biology", - "mahuika" + "biology" ], "extensions": [], "licence_type": "", "homepage": "http://blast.ncbi.nlm.nih.gov/", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/BLAST", + "support": "", "versions": [ "2.10.0-GCC-9.2.0", "2.12.0-GCC-9.2.0", @@ -2395,48 +2275,26 @@ "versions": [ "3.5-GCC-11.3.0", "3.5-GCC-9.2.0", - "3.5-gimkl-2018b" + "3.5-gimkl-2018b", + "3.7-GCC-12.3.0" ], "admin_list": [ { - "3.5-gimkl-2018b": "Warning: BLAT/3.5-gimkl-2018b is old and marked for deletion. Please select a more recent version (try 'module spider BLAT') or let us know that you still need it." + "3.5-GCC-9.2.0": "Warning: BLAT/3.5-GCC-9.2.0 is marked for deletion. Please select BLAT/3.5-GCC-11.3.0 or a more recent version (try 'module spider BLAT')." + }, + { + "3.5-gimkl-2018b": "Warning: BLAT/3.5-gimkl-2018b is marked for deletion. Please select the equivalent BLAT/3.5-GCC-11.3.0 or a more recent version (try 'module spider BLAT')." } ], "network_licences": [], "default": "3.5-gimkl-2018b", "default_type": "latest", - "last_updated": 1658125156, + "last_updated": 1738894538, "modulefile_text": "", "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/BLAT/3.5-gimkl-2018b.lua", "force_hide": "False", "force_show": "False" }, - "BlenderPy": { - "description": "Blender provides a pipeline for 3D modeling, rigging, animation, simulation, rendering, \ncompositing, motion tracking, video editing and 2D animation. 
\nThis particular build of Blender provides a Python package 'bpy' rather than the stand-alone application.", - "domains": [ - "vis" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://wiki.blender.org/wiki/Building_Blender/Other/BlenderAsPyModule", - "support": "", - "versions": [ - "2.93.1-gimkl-2020a-Python-3.9.5" - ], - "admin_list": [ - { - "2.93.1-gimkl-2020a-Python-3.9.5": "Warning: BlenderPy/2.93.1-gimkl-2020a-Python-3.9.5 is old and marked for deletion. If you still need it, then please let us know." - } - ], - "network_licences": [], - "default": "2.93.1-gimkl-2020a-Python-3.9.5", - "default_type": "latest", - "last_updated": 1628566955, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/BlenderPy/2.93.1-gimkl-2020a-Python-3.9.5.lua", - "force_hide": "False", - "force_show": "False" - }, "BLIS": { "description": "BLIS is a portable software framework for instantiating high-performance\nBLAS-like dense linear algebra libraries.", "domains": [ @@ -2497,12 +2355,8 @@ "1.61.0-GCCcore-7.4.0", "1.61.0-gimkl-2017a", "1.61.0-gimkl-2018b", - "1.61.0-intel-2018b", - "1.64.0-gimkl-2018b-Python-2.7.16", "1.69.0-GCCcore-7.4.0", "1.71.0-GCCcore-9.2.0", - "1.71.0-gimkl-2018b-Python-3.7.3", - "1.71.0-gimkl-2020a-Python-3.8.2", "1.76.0-intel-2022a", "1.77.0-GCC-11.3.0", "1.83.0-GCC-12.3.0" @@ -2516,6 +2370,12 @@ }, { "1.64.0-gimkl-2018b-Python-2.7.16": "Warning: Boost/1.64.0-gimkl-2018b-Python-2.7.16 is old and marked for deletion. Please select a more recent version (try 'module spider Boost') or let us know that you still need it." + }, + { + "1.55.0-gimkl-2018b": "Warning: Boost/1.55.0-gimkl-2018b is old and marked for deletion. Please select a more recent version (try 'module spider Boost') or let us know that you still need it." + }, + { + "1.61.0-gimkl-2017a": "Warning: Boost/1.61.0-gimkl-2017a is old and marked for deletion along with the rest of our gimkl-2017a software. 
Please select a more recent version (try 'module spider Boost') or let us know that you still need it." } ], "network_licences": [], @@ -2539,8 +2399,6 @@ "homepage": "http://bowtie-bio.sourceforge.net/index.shtml", "support": "", "versions": [ - "0.12.7", - "0.12.8", "1.2.0-gimkl-2017a", "1.2.2-GCC-7.4.0", "1.2.3-GCC-9.2.0", @@ -2553,14 +2411,17 @@ }, { "0.12.7": "Warning: Bowtie/0.12.7 is old and marked for deletion. Please select a more recent version (try 'module spider Bowtie') or let us know that you still need it." + }, + { + "1.2.2-GCC-7.4.0": "Warning: Bowtie/1.2.2-GCC-7.4.0 is old and marked for deletion. Please select a more recent version (try 'module spider Bowtie') or let us know that you still need it." } ], "network_licences": [], - "default": "0.12.8", + "default": "1.2.2-GCC-7.4.0", "default_type": "latest", "last_updated": 1737334417, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/Bowtie/0.12.8.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/Bowtie/1.2.2-GCC-7.4.0.lua", "force_hide": "False", "force_show": "False" }, @@ -2595,37 +2456,6 @@ "force_hide": "False", "force_show": "False" }, - "Bpipe": { - "description": "A platform for running big bioinformatics jobs that consist of a series of processing stages", - "domains": [ - "bio", - "biology" - ], - "extensions": [], - "licence_type": "", - "homepage": "http://docs.bpipe.org", - "support": "", - "versions": [ - "0.9.9.6", - "0.9.9.8" - ], - "admin_list": [ - { - "0.9.9.6": "Warning: Bpipe/0.9.9.6 is old and marked for deletion. If you still need it, then please let us know." - }, - { - "0.9.9.8": "Warning: Bpipe/0.9.9.8 is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "0.9.9.8", - "default_type": "latest", - "last_updated": 1623048067, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/Bpipe/0.9.9.8.lua", - "force_hide": "False", - "force_show": "False" - }, "Bracken": { "description": "Hghly accurate statistical method that computes the abundance of \nspecies in DNA sequences from a metagenomics sample.", "domains": [ @@ -2637,7 +2467,6 @@ "support": "", "versions": [ "2.6.0-GCCcore-9.2.0", - "2.6.2-GCCcore-9.2.0", "2.7-GCC-11.3.0" ], "admin_list": [ @@ -2658,20 +2487,18 @@ "force_show": "False" }, "BRAKER": { - "description": "BRAKER is a pipeline for fully automated prediction of protein coding genes with GeneMark-ES/ET\n and AUGUSTUS in novel eukaryotic genomes.", + "description": "Pipeline for fully automated prediction of protein coding genes with GeneMark-ES/ET\n and AUGUSTUS in novel eukaryotic genomes.", "domains": [ "bio" ], "extensions": [ - "File::HomeDir-1.006", - "Role::Tiny-2.001004", - "Sub::Quote-2.006006" + "Sub::Quote-2.006008", + "YAML::XS-0.86" ], "licence_type": "", "homepage": "https://github.com/Gaius-Augustus/BRAKER", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/BRAKER", + "support": "", "versions": [ - "2.1.6-gimkl-2020a-Perl-5.30.1-Python-3.8.2", "2.1.6-gimkl-2022a-Perl-5.34.1", "3.0.2-gimkl-2022a-Perl-5.34.1", "3.0.3-gimkl-2022a-Perl-5.34.1", @@ -2683,37 +2510,11 @@ } ], "network_licences": [], - "default": "2.1.6-gimkl-2020a-Perl-5.30.1-Python-3.8.2", + "default": "3.0.2-gimkl-2022a-Perl-5.34.1", "default_type": "latest", "last_updated": 1728962371, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/BRAKER/2.1.6-gimkl-2020a-Perl-5.30.1-Python-3.8.2.lua", - "force_hide": "False", - "force_show": "False" - }, - "BreakSeq2": { - "description": "Nucleotide-resolution analysis of structural variants", - "domains": [ - "bio" - ], - "extensions": [], - 
"licence_type": "", - "homepage": "http://bioinform.github.io/breakseq2/", - "support": "", - "versions": [ - "2.2-gimkl-2018b-Python-2.7.16" - ], - "admin_list": [ - { - "2.2-gimkl-2018b-Python-2.7.16": "Warning: BreakSeq2/2.2-gimkl-2018b-Python-2.7.16 is old and marked for deletion. If you still need it, then please let us know." - } - ], - "network_licences": [], - "default": "2.2-gimkl-2018b-Python-2.7.16", - "default_type": "latest", - "last_updated": 1575946515, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/BreakSeq2/2.2-gimkl-2018b-Python-2.7.16.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/BRAKER/3.0.2-gimkl-2022a-Perl-5.34.1.lua", "force_hide": "False", "force_show": "False" }, @@ -2727,8 +2528,6 @@ "homepage": "https://barricklab.org/breseq", "support": "", "versions": [ - "0.35.4-gimkl-2020a-R-4.0.1", - "0.36.1-gimkl-2020a-R-4.1.0", "0.38.1-gimkl-2022a-R-4.2.1", "0.38.3-gimkl-2022a-R-4.3.1" ], @@ -2741,37 +2540,11 @@ } ], "network_licences": [], - "default": "0.35.4-gimkl-2020a-R-4.0.1", + "default": "0.38.1-gimkl-2022a-R-4.2.1", "default_type": "latest", "last_updated": 1713742325, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/breseq/0.35.4-gimkl-2020a-R-4.0.1.lua", - "force_hide": "False", - "force_show": "False" - }, - "bsddb3": { - "description": "bsddb3 is a nearly complete Python binding of the\nOracle/Sleepycat C API for the Database Environment, Database, Cursor,\nLog Cursor, Sequence and Transaction objects.", - "domains": [ - "data" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://pypi.org/project/bsddb3/", - "support": "", - "versions": [ - "6.2.6-gimkl-2018b-Python-2.7.16" - ], - "admin_list": [ - { - "6.2.6-gimkl-2018b-Python-2.7.16": "Warning: bsddb3/6.2.6-gimkl-2018b-Python-2.7.16 is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "6.2.6-gimkl-2018b-Python-2.7.16", - "default_type": "latest", - "last_updated": 1593474230, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/bsddb3/6.2.6-gimkl-2018b-Python-2.7.16.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/breseq/0.38.1-gimkl-2022a-R-4.2.1.lua", "force_hide": "False", "force_show": "False" }, @@ -2816,7 +2589,6 @@ "homepage": "http://bio-bwa.sourceforge.net/", "support": "", "versions": [ - "0.7.15-gimkl-2017a", "0.7.17-GCC-11.3.0", "0.7.17-GCC-7.4.0", "0.7.17-GCC-9.2.0", @@ -2832,11 +2604,11 @@ } ], "network_licences": [], - "default": "0.7.15-gimkl-2017a", + "default": "0.7.17-GCC-9.2.0", "default_type": "latest", "last_updated": 1722569337, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/BWA/0.7.15-gimkl-2017a.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/BWA/0.7.17-GCC-9.2.0.lua", "force_hide": "False", "force_show": "False" }, @@ -2860,6 +2632,9 @@ "admin_list": [ { "1.0.6-intel-2017a": "Warning: bzip2/1.0.6-intel-2017a is very old, please select a more recent version (try 'module spider bzip2')." + }, + { + "1.0.6-intel-2018b": "Warning: bzip2/1.0.6-intel-2018b is old and marked for deletion along with the rest of our intel-2018b software. Please select a more recent version (try 'module spider bzip2') or let us know that you still need it." 
} ], "network_licences": [], @@ -2958,7 +2733,6 @@ "homepage": "https://capnproto.org", "support": "", "versions": [ - "0.10.2-GCC-11.3.0", "0.7.0-GCCcore-7.4.0", "0.8.0-GCCcore-9.2.0", "0.9.1-GCC-11.3.0", @@ -2978,58 +2752,6 @@ "force_hide": "False", "force_show": "False" }, - "Catch2": { - "description": "A modern, C++-native, header-only, test framework for unit-tests, TDD and BDD \n - using C++11, C++14, C++17 and later (or C++03 on the Catch1.x branch) ", - "domains": [ - "lib" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://github.com/catchorg/Catch2", - "support": "", - "versions": [ - "2.13.4" - ], - "admin_list": [ - { - "2.13.4": "Warning: Catch2/2.13.4 is old and marked for deletion. If you still need it, then please let us know." - } - ], - "network_licences": [], - "default": "2.13.4", - "default_type": "latest", - "last_updated": 1614819476, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/Catch2/2.13.4.lua", - "force_hide": "False", - "force_show": "False" - }, - "CCL": { - "description": "Clozure CL (often called CCL for short) is a free Common Lisp implementation ", - "domains": [ - "lang" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://ccl.clozure.com/", - "support": "", - "versions": [ - "1.12-GCCcore-9.2.0" - ], - "admin_list": [ - { - "1.12-GCCcore-9.2.0": "Warning: CCL/1.12-GCCcore-9.2.0 is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "1.12-GCCcore-9.2.0", - "default_type": "latest", - "last_updated": 1611309006, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/lang/CCL/1.12-GCCcore-9.2.0.lua", - "force_hide": "False", - "force_show": "False" - }, "CD-HIT": { "description": " CD-HIT is a very widely used program for clustering and\n comparing protein or nucleotide sequences.", "domains": [ @@ -3115,8 +2837,6 @@ "homepage": "https://support.10xgenomics.com/single-cell-gene-expression/software/pipelines/latest/what-is-cell-ranger", "support": "", "versions": [ - "4.0.0", - "6.0.1", "6.1.2", "7.1.0" ], @@ -3126,6 +2846,9 @@ }, { "6.0.1": "Warning: CellRanger/6.0.1 is old and marked for deletion. Please select a more recent version (try 'module spider CellRanger') or let us know that you still need it." + }, + { + "6.1.2": "Warning: CellRanger/6.1.2 is old and marked for deletion. Please select a more recent version (try 'module spider CellRanger') or let us know that you still need it." } ], "network_licences": [], @@ -3175,7 +2898,11 @@ "versions": [ "1.3.0" ], - "admin_list": [], + "admin_list": [ + { + "1.3.0": "Warning: Cereal/1.3.0 is old and marked for deletion. If you still need it, then please let us know." + } + ], "network_licences": [], "default": "1.3.0", "default_type": "latest", @@ -3195,7 +2922,6 @@ "homepage": "http://heasarc.gsfc.nasa.gov/fitsio/", "support": "", "versions": [ - "3.42-gimkl-2017a", "3.45-GCCcore-7.4.0" ], "admin_list": [ @@ -3232,7 +2958,10 @@ "admin_list": [ { "4.9.1-gimkl-2017a-Python-2.7.14": "Warning: CGAL/4.9.1-gimkl-2017a-Python-2.7.14 is very old, please select a more recent version (try 'module spider CGAL')." - } + }, + { + "4.9.1-gimkl-2017a": "Warning: CGAL/4.9.1-gimkl-2017a is old and marked for deletion along with the rest of our gimkl-2017a software. Please select a more recent version (try 'module spider CGAL') or let us know that you still need it." 
+ } ], "network_licences": [], "default": "4.13-GCCcore-7.4.0", @@ -3280,7 +3009,11 @@ "1.2.1-gimkl-2022a-Python-3.10.5", "1.2.3-foss-2023a-Python-3.11.6" ], - "admin_list": [], + "admin_list": [ + { + "1.0.13-gimkl-2018b-Python-2.7.16": "Warning: CheckM/1.0.13-gimkl-2018b-Python-2.7.16 is old and marked for deletion. Please select a more recent version (try 'module spider CheckM') or let us know that you still need it." + } + ], "network_licences": [], "default": "1.2.1-gimkl-2022a-Python-3.10.5", "default_type": "latest", @@ -3380,13 +3113,14 @@ "support": "", "versions": [ "0.2.0-GCC-11.3.0", - "0.5.0-GCC-11.3.0" + "0.5.0-GCC-11.3.0", + "0.9.0-GCC-12.3.0" ], "admin_list": [], "network_licences": [], "default": "0.5.0-GCC-11.3.0", "default_type": "latest", - "last_updated": 1681163621, + "last_updated": 1738887021, "modulefile_text": "", "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/chopper/0.5.0-GCC-11.3.0.lua", "force_hide": "False", @@ -3449,7 +3183,7 @@ "extensions": [], "licence_type": "", "homepage": "https://github.com/HKU-BAL/Clair3", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/Clair3", + "support": "", "versions": [ "0.1.12-Miniconda3", "1.0.0-Miniconda3", @@ -3676,16 +3410,12 @@ "domains": [ "chemistry", "engineering", - "physics", - "cae", - "multiphysics", - "cfd", - "fea" + "physics" ], "extensions": [], "licence_type": "proprietary", "homepage": "https://www.comsol.com/", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/COMSOL", + "support": "", "versions": [ "5.3", "5.4", @@ -3970,39 +3700,17 @@ "1.0.0-gimkl-2018b-Python-2.7.16", "1.1.0-gimkl-2020a-Python-3.8.2" ], - "admin_list": [], - "network_licences": [], - "default": "1.1.0-gimkl-2020a-Python-3.8.2", - "default_type": "latest", - "last_updated": 1612226345, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/CONCOCT/1.1.0-gimkl-2020a-Python-3.8.2.lua", - "force_hide": 
"False", - "force_show": "False" - }, - "Corset": { - "description": "Clusters contigs and counts reads from de novo assembled transcriptomes.", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://github.com/Oshlack/Corset/wiki", - "support": "", - "versions": [ - "1.09-GCC-9.2.0" - ], "admin_list": [ { - "1.09-GCC-9.2.0": "Warning: Corset/1.09-GCC-9.2.0 is old and marked for deletion. If you still need it, then please let us know." + "1.0.0-gimkl-2018b-Python-2.7.16": "Warning: CONCOCT/1.0.0-gimkl-2018b-Python-2.7.16 is old and marked for deletion. Please select a more recent version (try 'module spider CONCOCT') or let us know that you still need it." } ], "network_licences": [], - "default": "1.09-GCC-9.2.0", + "default": "1.1.0-gimkl-2020a-Python-3.8.2", "default_type": "latest", - "last_updated": 1608178178, + "last_updated": 1612226345, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/Corset/1.09-GCC-9.2.0.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/CONCOCT/1.1.0-gimkl-2020a-Python-3.8.2.lua", "force_hide": "False", "force_show": "False" }, @@ -4099,7 +3807,6 @@ "homepage": "http://freedesktop.org/wiki/Software/cppunit", "support": "", "versions": [ - "1.13.2-GCCcore-7.4.0", "1.15.1-GCCcore-11.3.0" ], "admin_list": [ @@ -4111,11 +3818,11 @@ } ], "network_licences": [], - "default": "1.13.2-GCCcore-7.4.0", + "default": "1.15.1-GCCcore-11.3.0", "default_type": "latest", "last_updated": 1652169062, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/tools/CppUnit/1.13.2-GCCcore-7.4.0.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/tools/CppUnit/1.15.1-GCCcore-11.3.0.lua", "force_hide": "False", "force_show": "False" }, @@ -4259,32 +3966,6 @@ "force_hide": "False", "force_show": "False" }, - "CTPL": { - "description": "C++ Thread Pool Library", - "domains": [ - "lib" - ], - "extensions": [], - "licence_type": "", - "homepage": 
"https://github.com/vit-vit/CTPL", - "support": "", - "versions": [ - "0.0.2" - ], - "admin_list": [ - { - "0.0.2": "Warning: CTPL/0.0.2 is old and marked for deletion. If you still need it, then please let us know." - } - ], - "network_licences": [], - "default": "0.0.2", - "default_type": "latest", - "last_updated": 1614816230, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/CTPL/0.0.2.lua", - "force_hide": "False", - "force_show": "False" - }, "CubeLib": { "description": "Cube general purpose C++ library component and command-line tools.\n", "domains": [ @@ -4295,8 +3976,7 @@ "homepage": "https://www.scalasca.org/software/cube-4.x/download.html", "support": "", "versions": [ - "4.4.4-GCCcore-9.2.0", - "4.6-GCCcore-9.2.0" + "4.4.4-GCCcore-9.2.0" ], "admin_list": [ { @@ -4306,7 +3986,7 @@ "network_licences": [], "default": "4.4.4-GCCcore-9.2.0", "default_type": "latest", - "last_updated": 1630287638, + "last_updated": 1612309768, "modulefile_text": "", "module_path": "/opt/nesi/CS400_centos7_bdw/modules/perf/CubeLib/4.4.4-GCCcore-9.2.0.lua", "force_hide": "False", @@ -4360,6 +4040,7 @@ "12.2.2", "12.3.0", "12.4.1", + "12.5.0", "12.6.3", "8.0.61", "9.0.176", @@ -4369,7 +4050,7 @@ "network_licences": [], "default": "11.0.2", "default_type": "latest", - "last_updated": 1733363565, + "last_updated": 1740036467, "modulefile_text": "", "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/CUDA/11.0.2.lua", "force_hide": "False", @@ -4385,13 +4066,8 @@ "homepage": "https://developer.nvidia.com/cudnn", "support": "", "versions": [ - "5.1", "6.0-CUDA-8.0.61", - "7.0.5-CUDA-9.0.176", - "7.4.2.24-CUDA-10.0.130", - "7.5.0.56-CUDA-10.0.130", "7.6.4.38-CUDA-10.1.243", - "7.6.5.32-CUDA-10.0.130", "7.6.5.32-CUDA-10.2.89", "8.0.2.39-CUDA-11.0.2", "8.0.5.39-CUDA-11.1.1", @@ -4402,6 +4078,7 @@ "8.8.0.121-CUDA-12.0.0", "8.9.7.29-CUDA-12.2.2", "8.9.7.29-CUDA-12.3.0", + "9.3.0.75-CUDA-12.5.0", "9.5.1.17-CUDA-12.6.3" ], "admin_list": [ @@ -4418,7 
+4095,7 @@ "network_licences": [], "default": "7.6.4.38-CUDA-10.1.243", "default_type": "latest", - "last_updated": 1733364248, + "last_updated": 1740036865, "modulefile_text": "", "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/cuDNN/7.6.4.38-CUDA-10.1.243.lua", "force_hide": "False", @@ -4451,32 +4128,6 @@ "force_hide": "False", "force_show": "False" }, - "CUnit": { - "description": "Automated testing framework for C.", - "domains": [ - "lang" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://sourceforge.net/projects/cunit/", - "support": "", - "versions": [ - "2.1-3-GCCcore-9.2.0" - ], - "admin_list": [ - { - "2.1-3-GCCcore-9.2.0": "Warning: CUnit/2.1-3-GCCcore-9.2.0 is old and marked for deletion. If you still need it, then please let us know." - } - ], - "network_licences": [], - "default": "2.1-3-GCCcore-9.2.0", - "default_type": "latest", - "last_updated": 1595898611, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/lang/CUnit/2.1-3-GCCcore-9.2.0.lua", - "force_hide": "False", - "force_show": "False" - }, "cURL": { "description": "\n libcurl is a free and easy-to-use client-side URL transfer library,\n supporting DICT, FILE, FTP, FTPS, Gopher, HTTP, HTTPS, IMAP, IMAPS, LDAP,\n LDAPS, POP3, POP3S, RTMP, RTSP, SCP, SFTP, SMTP, SMTPS, Telnet and TFTP.\n libcurl supports SSL certificates, HTTP POST, HTTP PUT, FTP uploading, HTTP\n form based upload, proxies, cookies, user+password authentication (Basic,\n Digest, NTLM, Negotiate, Kerberos), file transfer resume, http proxy tunneling\n and more.\n", "domains": [ @@ -4566,50 +4217,6 @@ "force_hide": "False", "force_show": "False" }, - "cwltool": { - "description": "Common Workflow Language tool description reference implementation", - "domains": [ - "bio" - ], - "extensions": [ - "bagit-1.7.0", - "CacheControl-0.12.6", - "coloredlogs-14.0", - "cwltool-3.0.20200317203547", - "docker-4.2.2", - "humanfriendly-8.1", - "isodate-0.6.0", - "mypy_extensions-0.4.3", 
- "pathlib2-2.3.5", - "prov-1.5.1", - "rdflib-4.2.2", - "rdflib-jsonld-0.4.0", - "ruamel.yaml-0.16.5", - "schema-salad-5.0.20200302192450", - "shellescape-3.4.1", - "toil-4.1.0", - "websocket-0.2.1" - ], - "licence_type": "", - "homepage": "https://github.com/common-workflow-language/cwltool", - "support": "", - "versions": [ - "3.0.20200317203547-gimkl-2020a-Python-3.8.2" - ], - "admin_list": [ - { - "3.0.20200317203547-gimkl-2020a-Python-3.8.2": "Warning: cwltool/3.0.20200317203547-gimkl-2020a-Python-3.8.2 is old and marked for deletion. If you still need it, then please let us know." - } - ], - "network_licences": [], - "default": "3.0.20200317203547-gimkl-2020a-Python-3.8.2", - "default_type": "latest", - "last_updated": 1594204307, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/cwltool/3.0.20200317203547-gimkl-2020a-Python-3.8.2.lua", - "force_hide": "False", - "force_show": "False" - }, "Cytoscape": { "description": "Cytoscape is an open source software platform for visualizing molecular interaction networks and\n biological pathways and integrating these networks with annotations, gene expression profiles and other state data.", "domains": [ @@ -4619,10 +4226,9 @@ ], "extensions": [], "licence_type": "", - "homepage": "http://cytoscape.org/", + "homepage": "https://cytoscape.org/", "support": "", "versions": [ - "2.7.0", "3.9.1" ], "admin_list": [ @@ -4631,11 +4237,11 @@ } ], "network_licences": [], - "default": "2.7.0", + "default": "3.9.1", "default_type": "latest", "last_updated": 1675913770, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/Cytoscape/2.7.0.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/Cytoscape/3.9.1.lua", "force_hide": "False", "force_show": "False" }, @@ -4701,7 +4307,11 @@ "1.2.0.1-gimkl-2020a-Python-3.8.2", "1.5.0-gimkl-2022a-Python-3.11.3" ], - "admin_list": [], + "admin_list": [ + { + "1.2.0.1-gimkl-2020a-Python-3.8.2": "Warning: 
D-Genies/1.2.0.1-gimkl-2020a-Python-3.8.2 is old and marked for deletion. Please select a more recent version (try 'module spider D-Genies') or let us know that you still need it." + } + ], "network_licences": [], "default": "1.5.0-gimkl-2022a-Python-3.11.3", "default_type": "latest", @@ -4742,58 +4352,6 @@ "force_hide": "False", "force_show": "False" }, - "DaliLite": { - "description": "Tool set for simulating/evaluating SVs, merging and comparing SVs within and among samples,\n and includes various methods to reformat or summarize SVs.", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "http://ekhidna2.biocenter.helsinki.fi/dali/", - "support": "", - "versions": [ - "5.0-iimpi-2020a" - ], - "admin_list": [ - { - "5.0-iimpi-2020a": "Warning: DaliLite/5.0-iimpi-2020a is old and marked for deletion. If you still need it, then please let us know." - } - ], - "network_licences": [], - "default": "5.0-iimpi-2020a", - "default_type": "latest", - "last_updated": 1630903960, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/DaliLite/5.0-iimpi-2020a.lua", - "force_hide": "False", - "force_show": "False" - }, - "dammit": { - "description": "de novo transcriptome annotator..", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "http://dib-lab.github.io/dammit/", - "support": "", - "versions": [ - "1.2" - ], - "admin_list": [ - { - "1.2": "Warning: dammit/1.2 is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "1.2", - "default_type": "latest", - "last_updated": 1613620708, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/dammit/1.2.lua", - "force_hide": "False", - "force_show": "False" - }, "DAS_Tool": { "description": "DAS Tool is an automated method that integrates the results of a flexible number of binning\n algorithms to calculate an optimized, non-redundant set of bins from a single assembly.", "domains": [ @@ -4852,7 +4410,11 @@ "6.2.23-GCCcore-7.4.0", "6.2.23-gimkl-2017a" ], - "admin_list": [], + "admin_list": [ + { + "6.2.23-gimkl-2017a": "Warning: DB/6.2.23-gimkl-2017a is old and marked for deletion along with the rest of our gimkl-2017a software. Please select a more recent version (try 'module spider DB') or let us know that you still need it." + } + ], "network_licences": [], "default": "6.2.23-GCCcore-7.4.0", "default_type": "latest", @@ -4873,8 +4435,7 @@ "support": "", "versions": [ "1.13.8-GCCcore-7.4.0", - "1.13.8-GCCcore-9.2.0", - "1.14.0-GCC-11.3.0" + "1.13.8-GCCcore-9.2.0" ], "admin_list": [ { @@ -4884,7 +4445,7 @@ "network_licences": [], "default": "1.13.8-GCCcore-7.4.0", "default_type": "latest", - "last_updated": 1660008643, + "last_updated": 1594169073, "modulefile_text": "", "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/DBus/1.13.8-GCCcore-7.4.0.lua", "force_hide": "False", @@ -4922,7 +4483,6 @@ "homepage": "http://www.mackenziemathislab.org/deeplabcut", "support": "", "versions": [ - "2.2.0.3", "2.2.0.6", "2.3.5" ], @@ -4990,17 +4550,12 @@ "Delft3D": { "description": "Integrated simulation of sediment transport and morphology, waves, water quality and ecology.", "domains": [ - "geo", - "hydrodynamics", - "morphodynamics", - "particle modelling", - "water quality testing", - "wave modelling" + "geo" ], "extensions": [], "licence_type": "", "homepage": "http://oss.deltares.nl/web/delft3d", - "support": 
"https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/Delft3D", + "support": "", "versions": [ "141732-intel-2022a", "64418-intel-2022a", @@ -5051,7 +4606,6 @@ "homepage": "https://github.com/dellytools/delly", "support": "", "versions": [ - "0.8.1", "1.1.3" ], "admin_list": [ @@ -5176,32 +4730,6 @@ "force_hide": "False", "force_show": "False" }, - "DISCOVARdenovo": { - "description": "Assembler suitable for large genomes based on Illumina reads of length 250 or longer.", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "http://www.broadinstitute.org/software/discovar/blog/", - "support": "", - "versions": [ - "52488" - ], - "admin_list": [ - { - "52488": "Warning: DISCOVARdenovo/52488 is old and marked for deletion. If you still need it, then please let us know." - } - ], - "network_licences": [], - "default": "52488", - "default_type": "latest", - "last_updated": 1614563505, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/DISCOVARdenovo/52488.lua", - "force_hide": "False", - "force_show": "False" - }, "Dorado": { "description": "High-performance, easy-to-use, open source basecaller for Oxford Nanopore reads.", "domains": [ @@ -5210,15 +4738,10 @@ "extensions": [], "licence_type": "", "homepage": "https://nanoporetech.com/products/minit", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/Dorado", + "support": "", "versions": [ - "0.2.1", - "0.2.4", - "0.3.0", - "0.3.1", "0.3.2", "0.3.4", - "0.3.4-rc2", "0.4.0", "0.4.1", "0.4.2", @@ -5231,13 +4754,24 @@ "0.7.3", "0.8.0", "0.8.3", - "0.9.0" + "0.9.0", + "0.9.1" + ], + "admin_list": [ + { + "0.4.0": "Warning: Dorado/0.4.0 is old and marked for deletion. Please select a more recent version (try 'module spider Dorado') or let us know that you still need it." + }, + { + "0.4.1": "Warning: Dorado/0.4.1 is old and marked for deletion. 
Please select a more recent version (try 'module spider Dorado') or let us know that you still need it." + }, + { + "0.4.2": "Warning: Dorado/0.4.2 is old and marked for deletion. Please select a more recent version (try 'module spider Dorado') or let us know that you still need it." + } ], - "admin_list": [], "network_licences": [], "default": "0.9.0", "default_type": "latest", - "last_updated": 1736196479, + "last_updated": 1739136364, "modulefile_text": "", "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/Dorado/0.9.0.lua", "force_hide": "False", @@ -5254,8 +4788,7 @@ "support": "", "versions": [ "3.1.5-GCCcore-7.4.0", - "3.1.5-GCCcore-9.2.0", - "3.2.1-GCC-11.3.0" + "3.1.5-GCCcore-9.2.0" ], "admin_list": [ { @@ -5263,11 +4796,11 @@ } ], "network_licences": [], - "default": "3.2.1-GCC-11.3.0", + "default": "3.1.5-GCCcore-7.4.0", "default_type": "latest", - "last_updated": 1660031344, + "last_updated": 1594169091, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/double-conversion/3.2.1-GCC-11.3.0.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/double-conversion/3.1.5-GCCcore-7.4.0.lua", "force_hide": "False", "force_show": "False" }, @@ -5383,7 +4916,11 @@ "versions": [ "1.1.3-gimpi-2020a" ], - "admin_list": [], + "admin_list": [ + { + "1.1.3-gimpi-2020a": "Warning: dtcmp/1.1.3-gimpi-2020a is old and marked for deletion. If you still need it, then please let us know." 
+ } + ], "network_licences": [], "default": "1.1.3-gimpi-2020a", "default_type": "latest", @@ -5525,30 +5062,6 @@ "force_hide": "False", "force_show": "False" }, - "ectyper": { - "description": "Standalone versatile serotyping module for Escherichia coli..", - "domains": [ - "bio" - ], - "extensions": [ - "ectyper-1.0.0rc1" - ], - "licence_type": "", - "homepage": "https://pypi.org/project/ectyper/", - "support": "", - "versions": [ - "1.0.0-gimkl-2022a-Python-3.10.5" - ], - "admin_list": [], - "network_licences": [], - "default": "1.0.0-gimkl-2022a-Python-3.10.5", - "default_type": "latest", - "last_updated": 1662853701, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/ectyper/1.0.0-gimkl-2022a-Python-3.10.5.lua", - "force_hide": "False", - "force_show": "False" - }, "edlib": { "description": "Lightweight, super fast library for sequence alignment using edit (Levenshtein) distance.", "domains": [ @@ -5839,64 +5352,6 @@ "force_hide": "False", "force_show": "False" }, - "ENMTML": { - "description": "R package for integrated construction of Ecological Niche Models.", - "domains": [ - "bio" - ], - "extensions": [ - "ade4-1.7-17", - "adehabitatHR-0.4.19", - "adehabitatHS-0.3.15", - "adehabitatLT-0.3.25", - "adehabitatMA-0.3.14", - "caret-6.0-88", - "CircStats-0.2-6", - "ENMTML-2021-06-24", - "fields-12.5", - "filehash-2.4-2", - "flexclust-1.4-0", - "geosphere-1.5-10", - "gower-0.2.2", - "ipred-0.9-11", - "kernlab-0.9-29", - "lava-1.6.9", - "lubridate-1.7.10", - "maps-3.3.0", - "maxlike-0.1-8", - "maxnet-0.1.4", - "ModelMetrics-1.2.2.2", - "modeltools-0.2-23", - "pgirmess-1.7.0", - "pROC-1.17.0.1", - "prodlim-2019.11.13", - "recipes-0.1.16", - "RStoolbox-0.2.6", - "spThin-0.2.0", - "SQUAREM-2021.1", - "timeDate-3043.102", - "usdm-1.1-18" - ], - "licence_type": "", - "homepage": "https://andrefaa.github.io/ENMTML", - "support": "", - "versions": [ - "2021-06-24-gimkl-2020a-R-4.1.0" - ], - "admin_list": [ - { - 
"2021-06-24-gimkl-2020a-R-4.1.0": "Warning: ENMTML/2021-06-24-gimkl-2020a-R-4.1.0 is old and marked for deletion. If you still need it, then please let us know." - } - ], - "network_licences": [], - "default": "2021-06-24-gimkl-2020a-R-4.1.0", - "default_type": "latest", - "last_updated": 1626993695, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/ENMTML/2021-06-24-gimkl-2020a-R-4.1.0.lua", - "force_hide": "False", - "force_show": "False" - }, "ensmallen": { "description": "C++ header-only library for numerical optimization", "domains": [ @@ -5909,7 +5364,11 @@ "versions": [ "2.17.0-gimkl-2020a" ], - "admin_list": [], + "admin_list": [ + { + "2.17.0-gimkl-2020a": "Warning: ensmallen/2.17.0-gimkl-2020a is old and marked for deletion. If you still need it, then please let us know." + } + ], "network_licences": [], "default": "2.17.0-gimkl-2020a", "default_type": "latest", @@ -5955,7 +5414,11 @@ "versions": [ "7.1.0r-intel-2018b" ], - "admin_list": [], + "admin_list": [ + { + "7.1.0r-intel-2018b": "Warning: ESMF/7.1.0r-intel-2018b is old and marked for deletion along with the rest of our intel-2018b software. If you still need ESMF, then please let us know." 
+ } + ], "network_licences": [], "default": "7.1.0r-intel-2018b", "default_type": "latest", @@ -6146,124 +5609,71 @@ "force_hide": "False", "force_show": "False" }, - "Extrae": { - "description": "Extrae is capable of instrumenting applications based on MPI, OpenMP, pthreads, CUDA1, OpenCL1, and StarSs1 using different instrumentation approaches", + "FastANI": { + "description": "Tool for fast alignment-free computation of\n whole-genome Average Nucleotide Identity (ANI).", "domains": [ - "perf" + "bio" ], "extensions": [], "licence_type": "", - "homepage": "http://www.bsc.es/computer-sciences/performance-tools", + "homepage": "http://www.iodbc.org/", "support": "", "versions": [ - "3.8.3-intel-2020a" + "1.1-gimkl-2018b", + "1.3-GCCcore-9.2.0", + "1.33-GCC-11.3.0", + "1.33-intel-2020a", + "1.33-intel-2022a" ], "admin_list": [ { - "3.8.3-intel-2020a": "Warning: Extrae/3.8.3-intel-2020a is old and marked for deletion. If you still need it, then please let us know." + "1.3-GCCcore-9.2.0": "Warning: FastANI/1.3-GCCcore-9.2.0 is old and marked for deletion. Please select a more recent version (try 'module spider FastANI') or let us know that you still need it." 
} ], "network_licences": [], - "default": "3.8.3-intel-2020a", + "default": "1.33-intel-2020a", "default_type": "latest", - "last_updated": 1611137241, + "last_updated": 1676431679, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/perf/Extrae/3.8.3-intel-2020a.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/FastANI/1.33-intel-2020a.lua", "force_hide": "False", "force_show": "False" }, - "FALCON": { - "description": "Falcon: a set of tools for fast aligning long reads for consensus and assembly", + "FastME": { + "description": "FastME: a comprehensive, accurate and fast distance-based phylogeny inference program.", "domains": [ - "bio", - "biology" - ], - "extensions": [ - "FALCON-1.8.8", - "networkx-1.10", - "pypeFLOW-20170504" + "bio" ], + "extensions": [], "licence_type": "", - "homepage": "https://github.com/PacificBiosciences/FALCON", + "homepage": "http://www.atgc-montpellier.fr/fastme/", "support": "", "versions": [ - "1.8.8-gimkl-2020a-Python-2.7.18" + "2.1.6.1-gimkl-2018b", + "2.1.6.2-GCC-9.2.0" ], "admin_list": [ { - "1.8.8-gimkl-2017a": "Warning: FALCON/1.8.8-gimkl-2017a is obsolete and will soon be removed, so please select a more recent version (try 'module spider FALCON')." + "2.1.5-gimkl-2017a": "Warning: FastME/2.1.5-gimkl-2017a is obsolete and will soon be removed, so please select a more recent version (try 'module spider FastME')." 
} ], "network_licences": [], - "default": "1.8.8-gimkl-2020a-Python-2.7.18", + "default": "2.1.6.1-gimkl-2018b", "default_type": "latest", - "last_updated": 1598946894, + "last_updated": 1623956670, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/FALCON/1.8.8-gimkl-2020a-Python-2.7.18.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/FastME/2.1.6.1-gimkl-2018b.lua", "force_hide": "False", "force_show": "False" }, - "FastANI": { - "description": "Tool for fast alignment-free computation of\n whole-genome Average Nucleotide Identity (ANI).", + "fastp": { + "description": "A tool designed to provide fast all-in-one preprocessing for FastQ files.", "domains": [ "bio" ], "extensions": [], "licence_type": "", - "homepage": "http://www.iodbc.org/", - "support": "", - "versions": [ - "1.1-gimkl-2018b", - "1.3-GCCcore-9.2.0", - "1.33-GCC-11.3.0", - "1.33-intel-2020a", - "1.33-intel-2022a" - ], - "admin_list": [], - "network_licences": [], - "default": "1.33-intel-2020a", - "default_type": "latest", - "last_updated": 1676431679, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/FastANI/1.33-intel-2020a.lua", - "force_hide": "False", - "force_show": "False" - }, - "FastME": { - "description": "FastME: a comprehensive, accurate and fast distance-based phylogeny inference program.", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "http://www.atgc-montpellier.fr/fastme/", - "support": "", - "versions": [ - "2.1.6.1-gimkl-2018b", - "2.1.6.2-GCC-9.2.0" - ], - "admin_list": [ - { - "2.1.5-gimkl-2017a": "Warning: FastME/2.1.5-gimkl-2017a is obsolete and will soon be removed, so please select a more recent version (try 'module spider FastME')." 
- } - ], - "network_licences": [], - "default": "2.1.6.1-gimkl-2018b", - "default_type": "latest", - "last_updated": 1623956670, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/FastME/2.1.6.1-gimkl-2018b.lua", - "force_hide": "False", - "force_show": "False" - }, - "fastp": { - "description": "A tool designed to provide fast all-in-one preprocessing for FastQ files.", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://github.com/OpenGene/fastp", + "homepage": "https://github.com/OpenGene/fastp", "support": "", "versions": [ "0.20.0-GCCcore-7.4.0", @@ -6442,32 +5852,6 @@ "force_hide": "False", "force_show": "False" }, - "fcGENE": { - "description": "Format converting tool for genotype Data.", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://sourceforge.net/projects/fcgene/", - "support": "", - "versions": [ - "1.0.7" - ], - "admin_list": [ - { - "1.0.7": "Warning: fcGENE/1.0.7 is old and marked for deletion. If you still need it, then please let us know." - } - ], - "network_licences": [], - "default": "1.0.7", - "default_type": "latest", - "last_updated": 1601421542, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/fcGENE/1.0.7.lua", - "force_hide": "False", - "force_show": "False" - }, "FCM": { "description": "FCM Build - A powerful build system for modern Fortran software applications. 
FCM Version Control - Wrappers to the Subversion version control system, usage conventions and processes for scientific software development.", "domains": [ @@ -6500,16 +5884,13 @@ "domains": [ "engineering", "phys", - "physics", - "mahuika" + "physics" ], "extensions": [], "licence_type": "", "homepage": "https://pages.nist.gov/fds-smv/", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/FDS", + "support": "", "versions": [ - "6.7.5-intel-2020a", - "6.7.7-intel-2020a", "6.7.9-intel-2022a" ], "admin_list": [ @@ -6530,11 +5911,11 @@ } ], "network_licences": [], - "default": "6.7.7-intel-2020a", + "default": "6.7.9-intel-2022a", "default_type": "latest", "last_updated": 1674774986, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/phys/FDS/6.7.7-intel-2020a.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/phys/FDS/6.7.9-intel-2022a.lua", "force_hide": "False", "force_show": "False" }, @@ -6553,7 +5934,11 @@ "4.2.2-GCCcore-9.2.0", "5.1.1-GCC-11.3.0" ], - "admin_list": [], + "admin_list": [ + { + "3.2.4-gimkl-2017a": "Warning: FFmpeg/3.2.4-gimkl-2017a is old and marked for deletion along with the rest of our gimkl-2017a software. Please select a more recent version (try 'module spider FFmpeg') or let us know that you still need it." + } + ], "network_licences": [], "default": "5.1.1-GCC-11.3.0", "default_type": "latest", @@ -6577,7 +5962,6 @@ "3.3.10-gompi-2022a", "3.3.10-intel-compilers-2023.2.1", "3.3.5-gimkl-2017a", - "3.3.8-gimpi-2018b", "3.3.8-gimpi-2020a", "3.3.9-iimpi-2022a" ], @@ -6587,6 +5971,9 @@ }, { "3.3.8-gimpi-2018b": "Warning: FFTW/3.3.8-gimpi-2018b is old and marked for deletion. Please select a more recent version (try 'module spider FFTW') or let us know that you still need it." + }, + { + "3.3.5-gimkl-2017a": "Warning: FFTW/3.3.5-gimkl-2017a is old and marked for deletion along with the rest of our gimkl-2017a software. 
Please select a more recent version (try 'module spider FFTW') or let us know that you still need it." } ], "network_licences": [], @@ -6767,33 +6154,6 @@ "force_hide": "False", "force_show": "False" }, - "fineRADstructure": { - "description": "A package for population structure inference from RAD-seq data", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "http://cichlid.gurdon.cam.ac.uk/fineRADstructure.html", - "support": "", - "versions": [ - "0.3.2r109-GCCcore-7.4.0", - "0.3.2r109-gimkl-2020a" - ], - "admin_list": [ - { - "0.3.2r109-GCCcore-7.4.0": "Warning: fineRADstructure/0.3.2r109-GCCcore-7.4.0 is old and marked for deletion. If you still need it, then please let us know." - } - ], - "network_licences": [], - "default": "0.3.2r109-GCCcore-7.4.0", - "default_type": "latest", - "last_updated": 1590394338, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/fineRADstructure/0.3.2r109-GCCcore-7.4.0.lua", - "force_hide": "False", - "force_show": "False" - }, "fineSTRUCTURE": { "description": "Population assignment using large numbers of densely sampled genomes, including both SNP chips and sequence dat", "domains": [ @@ -6829,8 +6189,7 @@ "homepage": "https://github.com/google/flatbuffers/", "support": "", "versions": [ - "1.12.0-GCCcore-9.2.0", - "2.0.0-GCCcore-9.2.0" + "1.12.0-GCCcore-9.2.0" ], "admin_list": [ { @@ -6838,16 +6197,16 @@ } ], "network_licences": [], - "default": "2.0.0-GCCcore-9.2.0", + "default": "1.12.0-GCCcore-9.2.0", "default_type": "latest", - "last_updated": 1644892425, + "last_updated": 1600931233, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/flatbuffers/2.0.0-GCCcore-9.2.0.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/flatbuffers/1.12.0-GCCcore-9.2.0.lua", "force_hide": "False", "force_show": "False" }, "flex": { - "description": "Flex (Fast Lexical Analyzer) is a tool for generating scanners. 
A scanner,\n sometimes called a tokenizer, is a program which recognizes lexical patterns in text.", + "description": "\n Flex (Fast Lexical Analyzer) is a tool for generating scanners. A scanner, \n sometimes called a tokenizer, is a program which recognizes lexical patterns\n in text.\n", "domains": [ "lang", "social_science" @@ -6857,7 +6216,6 @@ "homepage": "http://flex.sourceforge.net/", "support": "", "versions": [ - "2.6.0-GCC-5.4.0", "2.6.4-GCCcore-7.4.0" ], "admin_list": [ @@ -6866,11 +6224,11 @@ } ], "network_licences": [], - "default": "2.6.0-GCC-5.4.0", + "default": "2.6.4-GCCcore-7.4.0", "default_type": "latest", "last_updated": 1551840800, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/lang/flex/2.6.0-GCC-5.4.0.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/lang/flex/2.6.4-GCCcore-7.4.0.lua", "force_hide": "False", "force_show": "False" }, @@ -6882,7 +6240,7 @@ "extensions": [], "licence_type": "", "homepage": "https://gitlab.mpi-magdeburg.mpg.de/software/flexiblas-release", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/FlexiBLAS", + "support": "", "versions": [ "3.1.3-GCC-11.3.0", "3.3.1-GCC-12.3.0", @@ -6953,34 +6311,8 @@ "force_hide": "False", "force_show": "False" }, - "fmlrc": { - "description": " Tool for performing hybrid correction of long read sequencing \nusing the BWT and FM-index of short-read sequencing data", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://github.com/holtjma/fmlrc", - "support": "", - "versions": [ - "1.0.0-GCC-9.2.0" - ], - "admin_list": [ - { - "1.0.0-GCC-9.2.0": "Warning: fmlrc/1.0.0-GCC-9.2.0 is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "1.0.0-GCC-9.2.0", - "default_type": "latest", - "last_updated": 1598850552, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/fmlrc/1.0.0-GCC-9.2.0.lua", - "force_hide": "False", - "force_show": "False" - }, "fmt": { - "description": "Formatting library providing a fast and safe alternative to C stdio and C++ iostreams.", + "description": "fmt (formerly cppformat) is an open-source formatting library.", "domains": [ "lib" ], @@ -6989,16 +6321,15 @@ "homepage": "http://fmtlib.net/", "support": "", "versions": [ - "7.1.3-GCCcore-9.2.0", - "8.0.1" + "7.1.3-GCCcore-9.2.0" ], "admin_list": [], "network_licences": [], - "default": "8.0.1", + "default": "7.1.3-GCCcore-9.2.0", "default_type": "latest", "last_updated": 1625624582, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/fmt/8.0.1.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/fmt/7.1.3-GCCcore-9.2.0.lua", "force_hide": "False", "force_show": "False" }, @@ -7038,8 +6369,6 @@ "homepage": "https://developer.arm.com/products/software-development-tools/hpc/arm-forge", "support": "", "versions": [ - "19.0", - "20.0.2", "21.1.3", "21.1.3", "21.1.3", @@ -7126,6 +6455,9 @@ "admin_list": [ { "1.1.0-gimkl-2017a": "Warning: FreeBayes/1.1.0-gimkl-2017a is obsolete and will soon be removed, so please select a more recent version (try 'module spider FreeBayes')." + }, + { + "1.3.1-GCC-7.4.0": "Warning: FreeBayes/1.3.1-GCC-7.4.0 is old and marked for deletion. Please select a more recent version (try 'module spider FreeBayes') or let us know that you still need it." } ], "network_licences": [], @@ -7256,33 +6588,6 @@ "force_hide": "False", "force_show": "False" }, - "FTGL": { - "description": " FTGL is a free open source library to enable developers to use arbitrary\nfonts in their OpenGL (www.opengl.org) applications. 
", - "domains": [ - "vis", - "lib" - ], - "extensions": [], - "licence_type": "", - "homepage": "http://ftgl.sourceforge.net/docs/html/", - "support": "", - "versions": [ - "2.1.3-rc5-GCCcore-7.4.0" - ], - "admin_list": [ - { - "2.1.3-rc5-gimkl-2017a": "Warning: FTGL/2.1.3-rc5-gimkl-2017a is obsolete and will soon be removed, so please select a more recent version (try 'module spider FTGL')." - } - ], - "network_licences": [], - "default": "2.1.3-rc5-GCCcore-7.4.0", - "default_type": "latest", - "last_updated": 1552261246, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/FTGL/2.1.3-rc5-GCCcore-7.4.0.lua", - "force_hide": "False", - "force_show": "False" - }, "funcx-endpoint": { "description": "funcX is a distributed Function as a Service (FaaS) platform that enables flexible,\n scalable, and high performance remote function execution. Unlike centralized FaaS platforms,\n funcX allows users to execute functions on heterogeneous remote computers, from laptops to\n campus clusters, clouds, and supercomputers. 
A funcX endpoint is a persistent service\n launched by the user on a compute system to serve as a conduit for executing functions on\n that computer.", "domains": [ @@ -7291,39 +6596,37 @@ "extensions": [ "bcrypt-3.2.2", "click-8.1.3", - "cryptography-37.0.4", + "cryptography-39.0.2", "dill-0.3.5.1", "docutils-0.19", - "funcx-1.0.2", - "funcx_common-0.0.15", - "funcx_endpoint-1.0.2", - "globus_sdk-3.10.1", - "paramiko-2.11.0", - "parsl-1.3.0.dev0", - "pika-1.3.0", - "pydantic-1.9.2", - "PyJWT-2.4.0", + "funcx-1.0.11", + "funcx_common-0.0.24", + "funcx_endpoint-1.0.11", + "globus_sdk-3.17.0", + "paramiko-3.0.0", + "parsl-2023.1.23", + "pika-1.3.1", + "pydantic-1.10.6", + "PyJWT-2.6.0", "PyNaCl-1.5.0", - "python_daemon-2.3.1", - "pyzmq-23.2.1", + "python_daemon-2.3.2", + "pyzmq-23.2.0", "retry-0.9.2", "setproctitle-1.3.2", - "texttable-1.6.4", + "texttable-1.6.7", "typeguard-2.13.3", + "types_paramiko-3.0.0.4", + "types_requests-2.28.11.15", + "types_six-1.16.21.7", + "types_urllib3-1.26.25.8", + "typing_extensions-4.5.0", "websockets-10.3" ], "licence_type": "", "homepage": "https://funcx.readthedocs.io/en/latest/endpoints.html", "support": "", "versions": [ - "0.3.5-gimkl-2020a-Python-3.9.5", - "0.3.6-gimkl-2020a-Python-3.9.9", - "1.0.1-gimkl-2020a-Python-3.9.9", "1.0.11-gimkl-2022a-Python-3.10.5", - "1.0.2-gimkl-2020a-Python-3.9.9", - "1.0.4-gimkl-2020a-Python-3.9.9", - "1.0.5-gimkl-2020a-Python-3.9.9", - "1.0.6-gimkl-2020a-Python-3.9.9", "1.0.7-gimkl-2020a-Python-3.9.9" ], "admin_list": [ @@ -7347,33 +6650,11 @@ } ], "network_licences": [], - "default": "1.0.2-gimkl-2020a-Python-3.9.9", + "default": "1.0.11-gimkl-2022a-Python-3.10.5", "default_type": "latest", "last_updated": 1678416408, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/tools/funcx-endpoint/1.0.2-gimkl-2020a-Python-3.9.9.lua", - "force_hide": "False", - "force_show": "False" - }, - "fxtract": { - "description": "Extract sequences from a fastx (fasta or fastq) file given 
a subsequence.", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://github.com/ctSkennerton/fxtract", - "support": "", - "versions": [ - "2.4-GCC-11.3.0" - ], - "admin_list": [], - "network_licences": [], - "default": "2.4-GCC-11.3.0", - "default_type": "latest", - "last_updated": 1659934961, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/fxtract/2.4-GCC-11.3.0.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/tools/funcx-endpoint/1.0.11-gimkl-2022a-Python-3.10.5.lua", "force_hide": "False", "force_show": "False" }, @@ -7387,16 +6668,15 @@ "homepage": "http://www.nco.ncep.noaa.gov/pmb/codes/GRIB2/", "support": "", "versions": [ - "1.6.0-GCC-12.3.0", "1.6.0-GCCcore-7.4.0" ], "admin_list": [], "network_licences": [], - "default": "1.6.0-GCC-12.3.0", + "default": "1.6.0-GCCcore-7.4.0", "default_type": "latest", - "last_updated": 1717979450, + "last_updated": 1560743158, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/g2clib/1.6.0-GCC-12.3.0.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/g2clib/1.6.0-GCCcore-7.4.0.lua", "force_hide": "False", "force_show": "False" }, @@ -7410,48 +6690,19 @@ "homepage": "http://www.nco.ncep.noaa.gov/pmb/codes/GRIB2/", "support": "", "versions": [ - "3.1.0-intel-2018b", - "3.2.0-GCC-12.3.0" - ], - "admin_list": [], - "network_licences": [], - "default": "3.2.0-GCC-12.3.0", - "default_type": "latest", - "last_updated": 1717979472, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/g2lib/3.2.0-GCC-12.3.0.lua", - "force_hide": "False", - "force_show": "False" - }, - "ga4gh": { - "description": "A reference implementation of the GA4GH API", - "domains": [ - "bio" - ], - "extensions": [ - "ga4gh-server-0.3.6", - "htsget-0.2.4", - "humanize-0.5.1", - "Pygments-2.5.2", - "setuptools-32.3.1" - ], - "licence_type": "", - "homepage": 
"https://ga4gh-reference-implementation.readthedocs.io/en/stable/", - "support": "", - "versions": [ - "0.3.6-gimkl-2020a-Python-2.7.18" + "3.1.0-intel-2018b" ], "admin_list": [ { - "0.3.6-gimkl-2020a-Python-2.7.18": "Warning: ga4gh/0.3.6-gimkl-2020a-Python-2.7.18 is old and marked for deletion. If you still need it, then please let us know." + "3.1.0-intel-2018b": "Warning: g2lib/3.1.0-intel-2018b is old and marked for deletion along with the rest of our intel-2018b software. If you still need it, then please let us know." } ], "network_licences": [], - "default": "0.3.6-gimkl-2020a-Python-2.7.18", + "default": "3.1.0-intel-2018b", "default_type": "latest", - "last_updated": 1596412869, + "last_updated": 1560743374, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/ga4gh/0.3.6-gimkl-2020a-Python-2.7.18.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/g2lib/3.1.0-intel-2018b.lua", "force_hide": "False", "force_show": "False" }, @@ -7464,7 +6715,7 @@ "extensions": [], "licence_type": "", "homepage": "http://www.broadinstitute.org/gatk/", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/GATK", + "support": "", "versions": [ "3.5-Java-1.8.0_144", "3.5-Java-15.0.2", @@ -7503,18 +6754,13 @@ "description": "", "domains": [ "chem", - "chemistry", - "mahuika" + "chemistry" ], "extensions": [], "licence_type": "proprietary", "homepage": "", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/Gaussian", + "support": "", "versions": [ - "09-B.01", - "09-B.01", - "09-C.01", - "09-C.01", "09-D.01", "09-D.01" ], @@ -7527,11 +6773,11 @@ } ], "network_licences": [], - "default": "09-C.01", + "default": "09-D.01", "default_type": "latest", "last_updated": 1534202024, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/chem/Gaussian/09-C.01", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/chem/Gaussian/09-D.01", "force_hide": "False", 
"force_show": "False" }, @@ -7665,7 +6911,11 @@ "3.6.3-gimpi-2022a", "3.6.4-gompi-2023a" ], - "admin_list": [], + "admin_list": [ + { + "2.2.2-gimkl-2017a-GEOS-3.5.1": "Warning: GDAL/2.2.2-gimkl-2017a-GEOS-3.5.1 is old and marked for deletion along with the rest of our gimkl-2017a software. Please select a more recent version (try 'module spider GDAL') or let us know that you still need it." + } + ], "network_licences": [], "default": "3.2.1-intel-2022a", "default_type": "latest", @@ -7731,7 +6981,6 @@ "homepage": "https://github.com/genetics-statistics/GEMMA", "support": "", "versions": [ - "0.98.4-GCC-9.2.0", "0.98.5-GCC-12.3.0" ], "admin_list": [ @@ -7929,8 +7178,6 @@ "homepage": "http://www.gnu.org/software/gettext/", "support": "", "versions": [ - "0.19.8-gimkl-2017a", - "0.19.8-gimkl-2018b", "0.19.8.1", "0.19.8.1-GCCcore-7.4.0", "0.21" @@ -8132,7 +7379,6 @@ "homepage": "https://ccb.jhu.edu/software/glimmerhmm", "support": "", "versions": [ - "3.0.4-gimkl-2018b", "3.0.4c-GCC-11.3.0", "3.0.4c-GCC-9.2.0" ], @@ -8150,67 +7396,6 @@ "force_hide": "False", "force_show": "False" }, - "GLM": { - "description": "OpenGL Mathematics (GLM) is a header only C++ mathematics library for graphics software based on\n the OpenGL Shading Language (GLSL) specifications.", - "domains": [ - "lib" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://github.com/g-truc/glm", - "support": "", - "versions": [ - "0.9.9.8" - ], - "admin_list": [], - "network_licences": [], - "default": "0.9.9.8", - "default_type": "latest", - "last_updated": 1587447909, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/GLM/0.9.9.8.lua", - "force_hide": "False", - "force_show": "False" - }, - "globus-automate-client": { - "description": "Client for the Globus Flows service.", - "domains": [ - "tools" - ], - "extensions": [ - "arrow-1.2.3", - "colorama-0.4.6", - "cryptography-39.0.0", - "flit_core-3.8.0", - "globus-automate-client-0.16.1.post1", - 
"globus-sdk-3.15.1", - "graphviz-0.12", - "hatchling-1.10.0", - "jsonschema-3.2.0", - "PyJWT-2.6.0", - "PyYAML-5.4.1", - "semantic_version-2.10.0", - "setuptools-67.0.0", - "setuptools-rust-1.5.2", - "shellingham-1.5.0.post1", - "typer-0.4.2" - ], - "licence_type": "", - "homepage": "https://globus-automate-client.readthedocs.io/en/latest/quick_start.html", - "support": "", - "versions": [ - "0.16.1.post1-gimkl-2022a" - ], - "admin_list": [], - "network_licences": [], - "default": "0.16.1.post1-gimkl-2022a", - "default_type": "latest", - "last_updated": 1675156914, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/tools/globus-automate-client/0.16.1.post1-gimkl-2022a.lua", - "force_hide": "False", - "force_show": "False" - }, "globus-compute-endpoint": { "description": "Globus Compute is a distributed Function as a Service (FaaS) platform that enables flexible,\n scalable, and high performance remote function execution. Unlike centralized FaaS platforms,\n Globus Compute allows users to execute functions on heterogeneous remote computers, from laptops to\n campus clusters, clouds, and supercomputers. 
A Globus Compute endpoint is a persistent service\n launched by the user on a compute system to serve as a conduit for executing functions on\n that computer.", "domains": [ @@ -8286,28 +7471,6 @@ "force_hide": "False", "force_show": "False" }, - "GLPK": { - "description": "GNU Linear Programming Kit is intended for solving large-scale linear programming (LP), mixed integer programming (MIP), and other related problems.", - "domains": [ - "tools" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://www.gnu.org/software/glpk/", - "support": "", - "versions": [ - "5.0-GCCcore-11.3.0" - ], - "admin_list": [], - "network_licences": [], - "default": "5.0-GCCcore-11.3.0", - "default_type": "latest", - "last_updated": 1655859470, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/tools/GLPK/5.0-GCCcore-11.3.0.lua", - "force_hide": "False", - "force_show": "False" - }, "GMAP-GSNAP": { "description": "GMAP: A Genomic Mapping and Alignment Program for mRNA and EST Sequences\n GSNAP: Genomic Short-read Nucleotide Alignment Program", "domains": [ @@ -8346,7 +7509,6 @@ "homepage": "http://gmplib.org/", "support": "", "versions": [ - "6.1.2-GCC-5.4.0", "6.1.2-GCCcore-7.4.0", "6.1.2-GCCcore-9.2.0", "6.1.2-gimkl-2017a", @@ -8419,10 +7581,6 @@ "homepage": "http://www.golang.org", "support": "", "versions": [ - "1.11.5", - "1.13.6", - "1.15.6", - "1.17.3", "1.19.1", "1.21.3" ], @@ -8435,41 +7593,11 @@ } ], "network_licences": [], - "default": "1.11.5", + "default": "1.19.1", "default_type": "latest", "last_updated": 1698713461, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/Go/1.11.5.lua", - "force_hide": "False", - "force_show": "False" - }, - "GObject-Introspection": { - "description": "GObject introspection is a middleware layer between C libraries\n (using GObject) and language bindings. 
The C library can be scanned at\n compile time and generate a metadata file, in addition to the actual\n native C library. Then at runtime, language bindings can read this\n metadata and automatically provide bindings to call into the C library.", - "domains": [ - "devel" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://gi.readthedocs.io/en/latest/", - "support": "", - "versions": [ - "1.66.1-GCCcore-9.2.0", - "1.72.0-GCC-11.3.0" - ], - "admin_list": [ - { - "1.66.1-GCCcore-9.2.0": "Warning: GObject-Introspection/1.66.1-GCCcore-9.2.0 is old and marked for deletion. If you still need it, then please let us know." - }, - { - "1.72.0-GCC-11.3.0": "Warning: GObject-Introspection/1.72.0-GCC-11.3.0 is old and marked for deletion. If you still need it, then please let us know." - } - ], - "network_licences": [], - "default": "1.72.0-GCC-11.3.0", - "default_type": "latest", - "last_updated": 1657675725, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/GObject-Introspection/1.72.0-GCC-11.3.0.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/Go/1.19.1.lua", "force_hide": "False", "force_show": "False" }, @@ -8483,8 +7611,6 @@ "homepage": "https://www.ccdc.cam.ac.uk/solutions/csd-discovery/Components/Gold/", "support": "", "versions": [ - "2018", - "2020", "2022" ], "admin_list": [ @@ -8496,11 +7622,11 @@ } ], "network_licences": [], - "default": "2020", + "default": "2022", "default_type": "latest", "last_updated": 1655937311, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/GOLD/2020.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/GOLD/2022.lua", "force_hide": "False", "force_show": "False" }, @@ -8665,10 +7791,7 @@ ], "admin_list": [ { - "7.2.0-gimkl-2017a-Python-2.7.14": "Warning: GRASS/7.2.0-gimkl-2017a-Python-2.7.14 is old and marked for removal. 
Please use GRASS/7.6.1-gimkl-2018b-Python-2.7.16 (or newer) instead, or let us know if you still need this version." - }, - { - "7.6.0-gimkl-2017a-Python-2.7.14": "Warning: our gimkl/2017a toolchain is very old and will soon be removed, so please use GRASS/7.6.1-gimkl-2018b-Python-2.7.16 (or newer) instead." + "7.6.1-gimkl-2018b-Python-2.7.16": "Warning: GRASS/7.6.1-gimkl-2018b-Python-2.7.16 is very old. Please select a more recent version (try 'module spider GRASS'), or let us know that you still need this version." } ], "network_licences": [], @@ -8708,32 +7831,6 @@ "force_hide": "False", "force_show": "False" }, - "grive2": { - "description": "Command line tool for Google Drive.", - "domains": [ - "tools" - ], - "extensions": [], - "licence_type": "", - "homepage": "http://stnava.github.io/ANTs/", - "support": "", - "versions": [ - "0.5.1-GCCcore-9.2.0" - ], - "admin_list": [ - { - "0.5.1-GCCcore-9.2.0": "Warning: grive2/0.5.1-GCCcore-9.2.0 is old and marked for deletion. If you still need it, then please let us know." - } - ], - "network_licences": [], - "default": "0.5.1-GCCcore-9.2.0", - "default_type": "latest", - "last_updated": 1616030865, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/tools/grive2/0.5.1-GCCcore-9.2.0.lua", - "force_hide": "False", - "force_show": "False" - }, "GROMACS": { "description": "\nGROMACS is a versatile package to perform molecular dynamics,\n i.e. 
simulate the Newtonian equations of motion for systems with hundreds to millions of particles.\n\nThis is a GPU enabled build, containing both MPI and threadMPI binaries.\n", "domains": [ @@ -8744,7 +7841,7 @@ "extensions": [], "licence_type": "", "homepage": "http://www.gromacs.org", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/GROMACS", + "support": "", "versions": [ "2020.4-gimkl-2020a-cuda-11.3.1-hybrid-PLUMED-2.6.2", "2020.5-intel-2020a-cuda-11.0.2-hybrid", @@ -8797,6 +7894,9 @@ }, { "1.6-GCCcore-9.2.0": "Warning: GSL/1.6-GCCcore-9.2.0 is old and marked for deletion. Please select a more recent version (try 'module spider GSL') or let us know that you still need it." + }, + { + "2.3-gimkl-2017a": "Warning: GSL/2.3-gimkl-2017a is old and marked for deletion along with the rest of our gimkl-2017a software. Please select a more recent version (try 'module spider GSL') or let us know that you still need it." } ], "network_licences": [], @@ -8808,32 +7908,6 @@ "force_hide": "False", "force_show": "False" }, - "gsort": { - "description": "Tool to sort genomic files according to a genomefile.", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://github.com/brentp/gsort", - "support": "", - "versions": [ - "0.1.4" - ], - "admin_list": [ - { - "0.1.4": "Warning: gsort/0.1.4 is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "0.1.4", - "default_type": "latest", - "last_updated": 1685600597, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/gsort/0.1.4.lua", - "force_hide": "False", - "force_show": "False" - }, "GST-plugins-base": { "description": "GStreamer plug-ins and elements.", "domains": [ @@ -8888,8 +7962,6 @@ "homepage": "https://github.com/Ecogenomics/GTDBTk", "support": "", "versions": [ - "0.2.2-gimkl-2018b-Python-2.7.16", - "0.3.2-gimkl-2018b-Python-2.7.16", "1.2.0-gimkl-2020a-Python-3.8.2", "1.5.0-gimkl-2020a-Python-3.8.2", "2.1.0-gimkl-2020a-Python-3.9.9", @@ -8900,6 +7972,12 @@ "admin_list": [ { "0.3.2-gimkl-2018b-Python-2.7.16": "Warning: GTDB-Tk/0.3.2-gimkl-2018b-Python-2.7.16 is old and marked for deletion. Please select a more recent version (try 'module spider GTDB-Tk') or let us know that you still need it." + }, + { + "1.2.0-gimkl-2020a-Python-3.8.2": "Warning: GTDB-Tk/1.2.0-gimkl-2020a-Python-3.8.2 is old and marked for deletion. Please select a more recent version (try 'module spider GTDB-Tk') or let us know that you still need it." + }, + { + "1.5.0-gimkl-2020a-Python-3.8.2": "Warning: GTDB-Tk/1.5.0-gimkl-2020a-Python-3.8.2 is old and marked for deletion. Please select a more recent version (try 'module spider GTDB-Tk') or let us know that you still need it." 
} ], "network_licences": [], @@ -8934,7 +8012,7 @@ "force_show": "False" }, "GTS": { - "description": "GTS stands for the GNU Triangulated Surface Library.\n It is an Open Source Free Software Library intended to provide a set of useful\n functions to deal with 3D surfaces meshed with interconnected triangles.\n", + "description": "GTS stands for the GNU Triangulated Surface Library.\n It is an Open Source Free Software Library intended to provide a set of useful\n functions to deal with 3D surfaces meshed with interconnected triangles.", "domains": [ "vis" ], @@ -8943,9 +8021,7 @@ "homepage": "http://gts.sourceforge.net/", "support": "", "versions": [ - "0.7.6-GCC-11.3.0", - "0.7.6-GCCcore-9.2.0", - "121130-GCCcore-7.4.0" + "0.7.6-GCCcore-9.2.0" ], "admin_list": [ { @@ -8956,11 +8032,11 @@ } ], "network_licences": [], - "default": "121130-GCCcore-7.4.0", + "default": "0.7.6-GCCcore-9.2.0", "default_type": "latest", - "last_updated": 1657687444, + "last_updated": 1605145825, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/vis/GTS/121130-GCCcore-7.4.0.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/vis/GTS/0.7.6-GCCcore-9.2.0.lua", "force_hide": "False", "force_show": "False" }, @@ -9146,32 +8222,6 @@ "force_hide": "False", "force_show": "False" }, - "help2man": { - "description": "help2man produces simple manual pages from the '--help' and '--version' output of other commands.", - "domains": [ - "tools" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://www.gnu.org/software/help2man/", - "support": "", - "versions": [ - "1.47.8-GCCcore-7.4.0" - ], - "admin_list": [ - { - "1.47.8-GCCcore-7.4.0": "Warning: help2man/1.47.8-GCCcore-7.4.0 is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "1.47.8-GCCcore-7.4.0", - "default_type": "latest", - "last_updated": 1551840716, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/tools/help2man/1.47.8-GCCcore-7.4.0.lua", - "force_hide": "False", - "force_show": "False" - }, "hifiasm": { "description": "Hifiasm: a haplotype-resolved assembler for accurate Hifi reads.", "domains": [ @@ -9184,13 +8234,14 @@ "versions": [ "0.15.5-GCC-9.2.0", "0.19.5-GCC-11.3.0", - "0.19.7-GCC-11.3.0" + "0.19.7-GCC-11.3.0", + "0.24.0-GCC-12.3.0" ], "admin_list": [], "network_licences": [], "default": "0.19.7-GCC-11.3.0", "default_type": "latest", - "last_updated": 1698878247, + "last_updated": 1740208923, "modulefile_text": "", "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/hifiasm/0.19.7-GCC-11.3.0.lua", "force_hide": "False", @@ -9218,6 +8269,9 @@ }, { "2.0.5-gimkl-2017a": "Warning: HISAT2/2.0.5-gimkl-2017a is very old, please select a more recent version (try 'module spider HISAT2')." + }, + { + "2.1.0-gimkl-2018b": "Warning: HISAT2/2.1.0-gimkl-2018b is old and marked for deletion. Please select a more recent version (try 'module spider HISAT2') or let us know that you still need it." } ], "network_licences": [], @@ -9243,7 +8297,6 @@ "3.0-GCC-9.2.0", "3.1b2-gimkl-2017a", "3.1b2-gimkl-2018b", - "3.1b2-gimkl-2020a", "3.2.1-gimkl-2018b", "3.3-GCC-9.2.0", "3.3.2-GCC-11.3.0", @@ -9330,32 +8383,6 @@ "force_hide": "False", "force_show": "False" }, - "HOPS": { - "description": "Pipeline which focuses on screening MALT data for the presence of a user-specified list of target species.", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://github.com/rhuebler/HOPS", - "support": "", - "versions": [ - "0.33" - ], - "admin_list": [ - { - "0.33": "Warning: HOPS/0.33 is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "0.33", - "default_type": "latest", - "last_updated": 1614111797, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/HOPS/0.33.lua", - "force_hide": "False", - "force_show": "False" - }, "HpcGridRunner": { "description": "HPC GridRunner is a simple command-line interface to high throughput computing using a variety of different grid computing platforms, including LSF, SGE, SLURM, and PBS.", "domains": [ @@ -9426,7 +8453,14 @@ "1.8-gimkl-2018b", "1.9-GCC-7.4.0" ], - "admin_list": [], + "admin_list": [ + { + "1.3.2-gimkl-2017a": "Warning: HTSlib/1.3.2-gimkl-2017a is old and marked for deletion along with the rest of our gimkl-2017a software. Please select a more recent version (try 'module spider HTSlib') or let us know that you still need it." + }, + { + "1.8-gimkl-2017a": "Warning: HTSlib/1.8-gimkl-2017a is old and marked for deletion along with the rest of our gimkl-2017a software. Please select a more recent version (try 'module spider HTSlib') or let us know that you still need it." + } + ], "network_licences": [], "default": "1.12-GCCcore-9.2.0", "default_type": "latest", @@ -9458,32 +8492,6 @@ "force_hide": "False", "force_show": "False" }, - "hunspell": { - "description": "Spell checker and morphological analyzer library and program designed for languages\n with rich morphology and complex word compounding or character encoding.", - "domains": [ - "tools" - ], - "extensions": [], - "licence_type": "", - "homepage": "http://hunspell.github.io/", - "support": "", - "versions": [ - "1.7.0" - ], - "admin_list": [ - { - "1.7.0": "Warning: hunspell/1.7.0 is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "1.7.0", - "default_type": "latest", - "last_updated": 1625049081, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/tools/hunspell/1.7.0.lua", - "force_hide": "False", - "force_show": "False" - }, "hwloc": { "description": "The Portable Hardware Locality (hwloc) software package provides a portable abstraction\n (across OS, versions, architectures, ...) of the hierarchical topology of modern architectures, including\n NUMA memory nodes, sockets, shared caches, cores and simultaneous multithreading. It also gathers various\n system attributes such as cache and memory information as well as the locality of I/O devices such as\n network interfaces, InfiniBand HCAs or GPUs. It primarily aims at helping applications with gathering\n information about modern computing hardware so as to exploit it accordingly and efficiently.", "domains": [ @@ -9519,7 +8527,6 @@ "homepage": "https://github.com/mossmatters/HybPiper", "support": "", "versions": [ - "1.3.1-gimkl-2020a-Python-3.8.2", "2.0.1rc-Miniconda3" ], "admin_list": [ @@ -9536,39 +8543,6 @@ "force_hide": "False", "force_show": "False" }, - "hypothesis": { - "description": "Hypothesis is an advanced testing library for Python. It lets you write tests which are parametrized\n by a source of examples, and then generates simple and comprehensible examples that make your tests fail. This lets\n you find more bugs in your code with less work.", - "domains": [ - "tools" - ], - "extensions": [ - "exceptiongroup-1.1.1", - "expecttest-0.1.4", - "flit_core-3.8.0", - "flit_scm-1.7.0", - "hypothesis-6.68.2", - "setuptools_scm-7.1.0" - ], - "licence_type": "", - "homepage": "https://github.com/HypothesisWorks/hypothesis", - "support": "", - "versions": [ - "6.68.2-gimkl-2022a-Python-3.10.5" - ], - "admin_list": [ - { - "6.68.2-gimkl-2022a-Python-3.10.5": "Warning: hypothesis/6.68.2-gimkl-2022a-Python-3.10.5 is old and marked for deletion. 
If you still need it, then please let us know." - } - ], - "network_licences": [], - "default": "6.68.2-gimkl-2022a-Python-3.10.5", - "default_type": "latest", - "last_updated": 1679276710, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/tools/hypothesis/6.68.2-gimkl-2022a-Python-3.10.5.lua", - "force_hide": "False", - "force_show": "False" - }, "Hypre": { "description": "Hypre is a library for solving large, sparse linear systems of equations on massively\n parallel computers. The problems of interest arise in the simulation codes being developed at LLNL\n and elsewhere to study physical phenomena in the defense, environmental, energy, and biological sciences.", "domains": [ @@ -9582,7 +8556,6 @@ "2.18.2-gimkl-2018b", "2.21.0-gimkl-2022a", "2.24.0-intel-2022a", - "2.25.0-gimkl-2022a", "2.28.0-foss-2023a" ], "admin_list": [], @@ -9731,7 +8704,6 @@ "support": "", "versions": [ "2.16.1", - "2.4.17", "2.5.0", "2.9.4" ], @@ -9813,8 +8785,6 @@ "versions": [ "2017.6.256-gimpi-2017a", "2017.6.256-iimpi-2017a", - "2018.1.163-gimpi-2018b", - "2018.1.163-iimpi-2018b", "2018.4.274-gimpi-2018b", "2018.4.274-iimpi-2018b", "2020.0.166-gimpi-2020a", @@ -9823,11 +8793,11 @@ ], "admin_list": [], "network_licences": [], - "default": "2018.1.163-iimpi-2018b", + "default": "2018.4.274-gimpi-2018b", "default_type": "latest", "last_updated": 1655872252, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/imkl/2018.1.163-iimpi-2018b.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/imkl/2018.4.274-gimpi-2018b.lua", "force_hide": "False", "force_show": "False" }, @@ -9891,7 +8861,6 @@ "2018.1.163-iccifort-2018.1.163-GCC-7.4.0", "2018.4.274-GCC-7.4.0", "2019.6.166-GCC-9.2.0", - "2019.6.166-NVHPC-21.1-GCC-9.2.0-CUDA-11.2.0", "2019.6.166-NVHPC-21.7-GCC-9.2.0-CUDA-11.4.1", "2019.6.166-iccifort-2020.0.166", "2021.5.1-GCC-11.3.0", @@ -9911,29 +8880,6 @@ "force_hide": "False", "force_show": "False" }, - "IMPUTE": { - 
"description": "Genotype imputation and haplotype phasing.", - "domains": [ - "bio", - "biology" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://mathgen.stats.ox.ac.uk/impute/impute_v2.html", - "support": "", - "versions": [ - "2.3.2" - ], - "admin_list": [], - "network_licences": [], - "default": "2.3.2", - "default_type": "latest", - "last_updated": 1533595414, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/IMPUTE/2.3.2.lua", - "force_hide": "False", - "force_show": "False" - }, "Infernal": { "description": "Infernal ('INFERence of RNA ALignment') is for searching DNA sequence databases\nfor RNA structure and sequence similarities.", "domains": [ @@ -9987,28 +8933,6 @@ "force_hide": "False", "force_show": "False" }, - "Inspector": { - "description": "Intel Inspector XE is an easy to use memory error checker and thread checker for serial\n and parallel applications", - "domains": [ - "tools" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://software.intel.com/en-us/intel-inspector-xe", - "support": "", - "versions": [ - "2019_update5" - ], - "admin_list": [], - "network_licences": [], - "default": "2019_update5", - "default_type": "latest", - "last_updated": 1590638833, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/tools/Inspector/2019_update5.lua", - "force_hide": "False", - "force_show": "False" - }, "intel": { "description": "Intel Cluster Toolkit Compiler Edition provides Intel C/C++ and Fortran compilers, Intel MPI & Intel MKL.", "domains": [ @@ -10137,13 +9061,18 @@ "domains": [ "bio" ], - "extensions": [], + "extensions": [ + "editables-0.3", + "hatchling-1.10.0", + "ipyparallel-8.4.1", + "ipyrad-0.9.85", + "packaging-21.3", + "pathspec-0.10.1" + ], "licence_type": "", "homepage": "https://ipyrad.readthedocs.io", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/ipyrad", + "support": "", "versions": [ - 
"0.9.61-gimkl-2020a-Python-3.8.2", - "0.9.81-Miniconda3", "0.9.85-gimkl-2022a-Python-3.10.5" ], "admin_list": [ @@ -10155,11 +9084,11 @@ } ], "network_licences": [], - "default": "0.9.81-Miniconda3", + "default": "0.9.85-gimkl-2022a-Python-3.10.5", "default_type": "latest", "last_updated": 1664321075, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/ipyrad/0.9.81-Miniconda3.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/ipyrad/0.9.85-gimkl-2022a-Python-3.10.5.lua", "force_hide": "False", "force_show": "False" }, @@ -10188,28 +9117,6 @@ "force_hide": "False", "force_show": "False" }, - "IQmol": { - "description": "", - "domains": [ - "visualisation" - ], - "extensions": [], - "licence_type": "proprietary", - "homepage": "", - "support": "", - "versions": [ - "2.11" - ], - "admin_list": [], - "network_licences": [], - "default": "2.11", - "default_type": "latest", - "last_updated": 1554268984, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/IQmol/2.11.lua", - "force_hide": "False", - "force_show": "False" - }, "IRkernel": { "description": "R packages for providing R kernel for Jupyter.", "domains": [ @@ -10271,7 +9178,6 @@ "support": "", "versions": [ "2.30.0", - "2.30.0-GCCcore-9.2.0", "2.30.0-gimkl-2020a" ], "admin_list": [ @@ -10288,40 +9194,6 @@ "force_hide": "False", "force_show": "False" }, - "ispc": { - "description": "Intel SPMD Program Compilers; An open-source compiler for high-performance\n SIMD programming on the CPU. 
ispc is a compiler for a variant of the C programming language,\n with extensions for 'single program, multiple data' (SPMD) programming.\n Under the SPMD model, the programmer writes a program that generally appears\n to be a regular serial program, though the execution model is actually that\n a number of program instances execute in parallel on the hardware.", - "domains": [ - "compiler" - ], - "extensions": [], - "licence_type": "", - "homepage": "http://ispc.github.io/ , https://github.com/ispc/ispc/", - "support": "", - "versions": [ - "1.10.0", - "1.15.0", - "1.9.2-GCC-5.4.0" - ], - "admin_list": [ - { - "1.9.2-GCC-5.4.0": "Warning: ispc/1.9.2-GCC-5.4.0 is very old and will soon be deleted. If you still need it, then please let us know." - }, - { - "1.10.0": "Warning: ispc/1.10.0 is old and marked for deletion. If you still need it, then please let us know." - }, - { - "1.15.0": "Warning: ispc/1.15.0 is old and marked for deletion. If you still need it, then please let us know." - } - ], - "network_licences": [], - "default": "1.10.0", - "default_type": "latest", - "last_updated": 1615517338, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/ispc/1.10.0.lua", - "force_hide": "False", - "force_show": "False" - }, "JAGS": { "description": "Just Another Gibbs Sampler - a program for the statistical analysis of Bayesian hierarchical models by Markov Chain Monte Carlo.", "domains": [ @@ -10333,8 +9205,6 @@ "homepage": "http://mcmc-jags.sourceforge.net", "support": "", "versions": [ - "4.3.0-gimkl-2018b", - "4.3.0-gimkl-2018b-mt", "4.3.0-gimkl-2020a-mt", "4.3.1-gimkl-2022a-mt" ], @@ -10368,7 +9238,6 @@ "homepage": "http://www.ece.uvic.ca/~frodo/jasper/", "support": "", "versions": [ - "1.900.29-gimkl-2017a", "2.0.14-GCC-7.4.0", "2.0.14-GCCcore-7.4.0", "2.0.33-GCC-12.3.0" @@ -10391,14 +9260,12 @@ "description": "\n Java Platform, Standard Edition (Java SE) lets you develop and deploy\n Java applications on desktops and servers.\n", 
"domains": [ "lang", - "language", - "mahuika", - "general" + "language" ], "extensions": [], "licence_type": "", "homepage": "http://java.com/", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/Java", + "support": "", "versions": [ "1.7.0_51", "1.8.0_144", @@ -10455,7 +9322,11 @@ "versions": [ "1.0.6-gimkl-2020a-Python-3.8.2" ], - "admin_list": [], + "admin_list": [ + { + "1.0.6-gimkl-2020a-Python-3.8.2": "Warning: jcvi/1.0.6-gimkl-2020a-Python-3.8.2 is old and marked for deletion. If you still need it, then please let us know." + } + ], "network_licences": [], "default": "1.0.6-gimkl-2020a-Python-3.8.2", "default_type": "latest", @@ -10478,7 +9349,6 @@ "versions": [ "2.2.10-gimkl-2018b", "2.2.10-gimkl-2020a", - "2.2.6-gimkl-2017a", "2.3.0-GCC-11.3.0", "2.3.0-GCC-12.3.0", "2.3.0-gimkl-2020a" @@ -10510,11 +9380,9 @@ "homepage": "http://www.canonware.com/jemalloc", "support": "", "versions": [ - "3.6.0-gimkl-2017a", "5.2.0-gimkl-2018b", "5.2.1", "5.2.1-GCC-9.2.0", - "5.2.1-GCCcore-9.2.0", "5.3.0" ], "admin_list": [ @@ -10523,6 +9391,9 @@ }, { "5.2.1-GCCcore-9.2.0": "Warning: jemalloc/5.2.1-GCCcore-9.2.0 is old and marked for deletion. Please select a more recent version (try 'module spider jemalloc') or let us know that you still need it." + }, + { + "5.2.1": "Warning: jemalloc/5.2.1 is old and marked for deletion. Please select a more recent version (try 'module spider jemalloc') or let us know that you still need it." 
} ], "network_licences": [], @@ -10603,20 +9474,17 @@ "force_show": "False" }, "Julia": { - "description": "", + "description": "A high-level, high-performance dynamic language for technical computing.\n\nThis version was compiled from source with USE_INTEL_JITEVENTS=1 to enable profiling with VTune.", "domains": [ "lang" ], "extensions": [], "licence_type": "", - "homepage": "", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/Julia", + "homepage": "https://julialang.org/", + "support": "", "versions": [ - "0.6.4", - "1.0.0", - "1.1.0", + "1.11.3-GCC-12.3.0-VTune", "1.2.0-gimkl-2018b-VTune", - "1.4.1-GCC-9.2.0-VTune", "1.5.1-GCC-9.2.0-VTune", "1.6.0-GCC-9.2.0-VTune", "1.6.2-GCC-9.2.0-VTune", @@ -10639,44 +9507,17 @@ }, { "1.1.0": "Warning: Julia/1.1.0 is old and marked for deletion. Please select a more recent version (try 'module spider Julia') or let us know that you still need it." - } - ], - "network_licences": [], - "default": "1.1.0", - "default_type": "latest", - "last_updated": 1701639957, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/Julia/1.1.0.lua", - "force_hide": "False", - "force_show": "False" - }, - "JUnit": { - "description": "A programmer-oriented testing framework for Java.", - "domains": [ - "devel" - ], - "extensions": [], - "licence_type": "", - "homepage": "http://sourceforge.net/projects/junit", - "support": "", - "versions": [ - "4.12", - "4.12-Java-1.8.0_144" - ], - "admin_list": [ - { - "4.12": "Warning: JUnit/4.12 is old and marked for deletion. If you still need it, then please let us know." }, { - "4.12-Java-1.8.0_144": "Warning: JUnit/4.12-Java-1.8.0_144 is old and marked for deletion. If you still need it, then please let us know." + "1.2.0-gimkl-2018b-VTune": "Warning: Julia/1.2.0-gimkl-2018b-VTune is old and marked for deletion. Please select a more recent version (try 'module spider Julia') or let us know that you still need it." 
} ], "network_licences": [], - "default": "4.12-Java-1.8.0_144", + "default": "1.2.0-gimkl-2018b-VTune", "default_type": "latest", - "last_updated": 1533595414, + "last_updated": 1739153789, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/JUnit/4.12-Java-1.8.0_144.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/lang/Julia/1.2.0-gimkl-2018b-VTune.lua", "force_hide": "False", "force_show": "False" }, @@ -10769,19 +9610,9 @@ ], "licence_type": "", "homepage": "https://pypi.org/project/jupyterlab/", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/JupyterLab", - "versions": [ - "2.1.3-gimkl-2018b-Python-3.8.1", - "2.1.5-gimkl-2018b-Python-3.8.1", - "2.2.4-gimkl-2018b-Python-3.8.1", - "2021.5.0-gimkl-2020a-3.0.15", - "2021.8.2-gimkl-2020a-3.1.9", - "2021.9.0-gimkl-2020a-3.1.9", - "2022.2.0-gimkl-2020a-3.2.8", - "2022.5.0-gimkl-2020a-3.4.2", - "2022.6.0-gimkl-2020a-3.4.3", + "support": "", + "versions": [ "2022.7.0-gimkl-2020a-3.4.3", - "2022.8.0-gimkl-2020a-3.4.5", "2023.1.0-gimkl-2022a-3.5.3", "2023.11.0-gimkl-2022a-3.6.3", "2024.08.0-foss-2023a-4.2.4", @@ -10997,8 +9828,6 @@ "homepage": "https://bitbucket.org/genomicepidemiology/kma", "support": "", "versions": [ - "1.3.23-gimkl-2020a", - "1.4.1-gimkl-2020a", "1.4.15-GCC-12.3.0", "1.4.5-GCC-11.3.0" ], @@ -11029,7 +9858,6 @@ "homepage": "http://sun.aei.polsl.pl/kmc", "support": "", "versions": [ - "3.1.1-gimkl-2020a-Python-3.8.2", "3.1.2rc1-gimkl-2020a-Python-3.8.2" ], "admin_list": [ @@ -11103,7 +9931,6 @@ "support": "", "versions": [ "2.0.8-beta-GCC-9.2.0", - "2.0.8-beta-gimkl-2018b", "2.0.9-beta-GCC-9.2.0", "2.1.1-GCC-9.2.0", "2.1.2-GCC-11.3.0", @@ -11134,7 +9961,6 @@ "homepage": "https://github.com/marbl/Krona/wiki/KronaTools", "support": "", "versions": [ - "2.7.1-gimkl-2018b", "2.8.1-GCC-11.3.0-Perl-5.34.1" ], "admin_list": [], @@ -11147,45 +9973,19 @@ "force_hide": "False", "force_show": "False" }, - "KyotoCabinet": { - 
"description": "Library of routines for managing a database.", + "LAME": { + "description": "LAME is a high quality MPEG Audio Layer III (MP3) encoder licensed under the LGPL.", "domains": [ - "lib" + "data" ], "extensions": [], "licence_type": "", - "homepage": "https://fallabs.com/kyotocabinet", + "homepage": "http://lame.sourceforge.net/", "support": "", "versions": [ - "1.2.77-GCCcore-7.4.0" - ], - "admin_list": [ - { - "1.2.77-GCCcore-7.4.0": "Warning: KyotoCabinet/1.2.77-GCCcore-7.4.0 is old and marked for deletion. If you still need it, then please let us know." - } - ], - "network_licences": [], - "default": "1.2.77-GCCcore-7.4.0", - "default_type": "latest", - "last_updated": 1580536431, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/KyotoCabinet/1.2.77-GCCcore-7.4.0.lua", - "force_hide": "False", - "force_show": "False" - }, - "LAME": { - "description": "LAME is a high quality MPEG Audio Layer III (MP3) encoder licensed under the LGPL.", - "domains": [ - "data" - ], - "extensions": [], - "licence_type": "", - "homepage": "http://lame.sourceforge.net/", - "support": "", - "versions": [ - "3.100-GCC-11.3.0", - "3.100-GCCcore-7.4.0", - "3.100-GCCcore-9.2.0" + "3.100-GCC-11.3.0", + "3.100-GCCcore-7.4.0", + "3.100-GCCcore-9.2.0" ], "admin_list": [], "network_licences": [], @@ -11289,8 +10089,6 @@ "homepage": "http://wiki.dlang.org/LDC", "support": "", "versions": [ - "1.1.1", - "1.17.0", "1.26.0" ], "admin_list": [ @@ -11418,7 +10216,11 @@ "versions": [ "0.3-iimpi-2020a" ], - "admin_list": [], + "admin_list": [ + { + "0.3-iimpi-2020a": "Warning: libcircle/0.3-iimpi-2020a is old and marked for deletion. If you still need it, then please let us know." 
+ } + ], "network_licences": [], "default": "0.3-iimpi-2020a", "default_type": "latest", @@ -11460,8 +10262,6 @@ "homepage": "https://dri.freedesktop.org", "support": "", "versions": [ - "2.4.110", - "2.4.99", "2.4.99-GCCcore-9.2.0" ], "admin_list": [ @@ -11475,7 +10275,7 @@ "network_licences": [], "default": "2.4.99-GCCcore-9.2.0", "default_type": "latest", - "last_updated": 1662455078, + "last_updated": 1587706196, "modulefile_text": "", "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/libdrm/2.4.99-GCCcore-9.2.0.lua", "force_hide": "False", @@ -11557,13 +10357,16 @@ "homepage": "http://sourceware.org/libffi/", "support": "", "versions": [ - "3.2.1-GCC-5.4.0", "3.2.1-GCCcore-7.4.0", "3.2.1-GCCcore-9.2.0", "3.2.1-gimkl-2017a", "3.4.2" ], - "admin_list": [], + "admin_list": [ + { + "3.2.1-gimkl-2017a": "Warning: libffi/3.2.1-gimkl-2017a is old and marked for deletion along with the rest of our gimkl-2017a software. Please select a more recent version (try 'module spider libffi') or let us know that you still need it." + } + ], "network_licences": [], "default": "3.2.1-gimkl-2017a", "default_type": "latest", @@ -11583,8 +10386,7 @@ "homepage": "https://developer.amd.com/amd-cpu-libraries/blas-library/#libflame", "support": "", "versions": [ - "4.0-GCC-11.3.0-amd", - "5.2.0-GCC-11.3.0" + "4.0-GCC-11.3.0-amd" ], "admin_list": [], "network_licences": [], @@ -11634,7 +10436,11 @@ "2.3.3-GCCcore-11.3.0", "2.3.3-GCCcore-12.3.0" ], - "admin_list": [], + "admin_list": [ + { + "2.2.4-gimkl-2017a": "Warning: libgd/2.2.4-gimkl-2017a is old and marked for deletion along with the rest of our gimkl-2017a software. Please select a more recent version (try 'module spider libgd') or let us know that you still need it." 
+ } + ], "network_licences": [], "default": "2.3.3-GCCcore-12.3.0", "default_type": "latest", @@ -11728,21 +10534,23 @@ "homepage": "https://github.com/NVIDIA/libglvnd", "support": "", "versions": [ - "1.2.0", "1.2.0-GCC-7.4.0", "1.2.0-GCCcore-9.2.0" ], "admin_list": [ { "1.2.0": "Warning: libglvnd/1.2.0 is old and marked for deletion. Please select a more recent version (try 'module spider libglvnd') or let us know that you still need it." + }, + { + "1.2.0-GCC-7.4.0": "Warning: libglvnd/1.2.0-GCC-7.4.0 is old and marked for deletion. Please select a more recent version (try 'module spider libglvnd') or let us know that you still need it." } ], "network_licences": [], - "default": "1.2.0", + "default": "1.2.0-GCCcore-9.2.0", "default_type": "latest", - "last_updated": 1662453505, + "last_updated": 1587706225, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/libglvnd/1.2.0.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/libglvnd/1.2.0-GCCcore-9.2.0.lua", "force_hide": "False", "force_show": "False" }, @@ -11857,7 +10665,6 @@ "homepage": "https://github.com/evaleev/libint", "support": "", "versions": [ - "2.6.0-iimpi-2020a-lmax-6-cp2k", "2.6.0-iimpi-2022a-lmax-6-cp2k" ], "admin_list": [ @@ -11869,11 +10676,11 @@ } ], "network_licences": [], - "default": "2.6.0-iimpi-2020a-lmax-6-cp2k", + "default": "2.6.0-iimpi-2022a-lmax-6-cp2k", "default_type": "latest", "last_updated": 1654734196, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/Libint/2.6.0-iimpi-2020a-lmax-6-cp2k.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/Libint/2.6.0-iimpi-2022a-lmax-6-cp2k.lua", "force_hide": "False", "force_show": "False" }, @@ -12013,7 +10820,6 @@ "homepage": "http://cnswww.cns.cwru.edu/php/chet/readline/rltop.html", "support": "", "versions": [ - "6.3-GCC-5.4.0", "6.3-gimkl-2017a", "8.0-GCCcore-7.4.0", "8.0-GCCcore-9.2.0", @@ -12156,28 +10962,6 @@ "force_hide": "False", "force_show": 
"False" }, - "libunistring": { - "description": "This library provides functions for manipulating Unicode strings and for manipulating C strings\n according to the Unicode standard.", - "domains": [ - "lib" - ], - "extensions": [], - "licence_type": "", - "homepage": "http://www.gnu.org/software/libunistring/", - "support": "", - "versions": [ - "0.9.6-GCC-5.4.0" - ], - "admin_list": [], - "network_licences": [], - "default": "0.9.6-GCC-5.4.0", - "default_type": "latest", - "last_updated": 1533595414, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/libunistring/0.9.6-GCC-5.4.0.lua", - "force_hide": "False", - "force_show": "False" - }, "libunwind": { "description": "Define a portable and efficient C programming API to determine the call-chain of a program. ", "domains": [ @@ -12274,7 +11058,11 @@ "2.9.9-GCCcore-7.4.0", "2.9.9-intel-2018b" ], - "admin_list": [], + "admin_list": [ + { + "2.9.9-intel-2018b": "Warning: libxml2/2.9.9-intel-2018b is old and marked for deletion along with the rest of our intel-2018b software. Please select a more recent version (try 'module spider libxml2') or let us know that you still need it." + } + ], "network_licences": [], "default": "2.9.4-gimkl-2017a", "default_type": "latest", @@ -12301,7 +11089,11 @@ "1.1.34-GCCcore-9.2.0", "1.1.38-GCCcore-12.3.0" ], - "admin_list": [], + "admin_list": [ + { + "1.1.29-gimkl-2017a": "Warning: libxslt/1.1.29-gimkl-2017a is old and marked for deletion along with the rest of our gimkl-2017a software. Please select a more recent version (try 'module spider libxslt') or let us know that you still need it." 
+ } + ], "network_licences": [], "default": "1.1.29-gimkl-2018b", "default_type": "latest", @@ -12428,8 +11220,6 @@ "homepage": "https://github.com/RRZE-HPC/likwid", "support": "", "versions": [ - "5.1.0-GCCcore-9.2.0", - "5.2.0-GCCcore-9.2.0", "5.2.1-GCC-11.3.0" ], "admin_list": [ @@ -12441,11 +11231,11 @@ } ], "network_licences": [], - "default": "5.1.0-GCCcore-9.2.0", + "default": "5.2.1-GCC-11.3.0", "default_type": "latest", "last_updated": 1654065199, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/tools/likwid/5.1.0-GCCcore-9.2.0.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/tools/likwid/5.2.1-GCC-11.3.0.lua", "force_hide": "False", "force_show": "False" }, @@ -12466,9 +11256,6 @@ "admin_list": [ { "1.8.5-gimkl-2018b": "Warning: LINKS/1.8.5-gimkl-2018b is old and marked for deletion. Please select a more recent version (try 'module spider LINKS') or let us know that you still need it." - }, - { - "1.8.7-GCC-9.2.0": "Warning: LINKS/1.8.7-GCC-9.2.0 is old and marked for deletion. Please select a more recent version (try 'module spider LINKS') or let us know that you still need it." } ], "network_licences": [], @@ -12518,7 +11305,6 @@ "14.0.6-GCC-11.3.0-static", "14.0.6-GCC-12.3.0-static", "4.0.0-gimkl-2017a", - "6.0.1-GCC-5.4.0", "6.0.1-GCCcore-7.4.0", "9.0.0-GCCcore-9.2.0" ], @@ -12528,6 +11314,9 @@ }, { "6.0.1-GCC-5.4.0": "Warning: LLVM/6.0.1-GCC-5.4.0 is old and marked for deletion. Please select a more recent version (try 'module spider LLVM') or let us know that you still need it." + }, + { + "4.0.0-gimkl-2017a": "Warning: LLVM/4.0.0-gimkl-2017a is old and marked for deletion along with the rest of our gimkl-2017a software. Please select a more recent version (try 'module spider LLVM') or let us know that you still need it." 
} ], "network_licences": [], @@ -12757,12 +11546,14 @@ "homepage": "http://www.oberhumer.com/opensource/lzo/", "support": "", "versions": [ - "2.10-GCCcore-7.4.0", "2.10-GCCcore-9.2.0" ], "admin_list": [ { "2.10-GCCcore-7.4.0": "Warning: LZO/2.10-GCCcore-7.4.0 is old and marked for deletion. Please select a more recent version (try 'module spider LZO') or let us know that you still need it." + }, + { + "2.10-GCCcore-9.2.0": "Warning: LZO/2.10-GCCcore-9.2.0 is old and marked for deletion. If you still need it, then please let us know." } ], "network_licences": [], @@ -12849,36 +11640,6 @@ "force_hide": "False", "force_show": "False" }, - "MAGMA": { - "description": "Tool for gene analysis and generalized gene-set analysis of GWAS data.", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://ctg.cncr.nl/software/magma", - "support": "", - "versions": [ - "1.07b-GCCcore-9.2.0", - "1.08-GCCcore-9.2.0" - ], - "admin_list": [ - { - "1.07b-GCCcore-9.2.0": "Warning: MAGMA/1.07b-GCCcore-9.2.0 is old and marked for deletion. If you still need it, then please let us know." - }, - { - "1.08-GCCcore-9.2.0": "Warning: MAGMA/1.08-GCCcore-9.2.0 is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "1.08-GCCcore-9.2.0", - "default_type": "latest", - "last_updated": 1601935533, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/MAGMA/1.08-GCCcore-9.2.0.lua", - "force_hide": "False", - "force_show": "False" - }, "magma": { "description": "The MAGMA project aims to develop a dense linear algebra library similar to\n LAPACK but for heterogeneous/hybrid architectures, starting with current Multicore+GPU systems.", "domains": [ @@ -12889,7 +11650,6 @@ "homepage": "https://icl.cs.utk.edu/magma/", "support": "", "versions": [ - "2.5.4-gimkl-2020a-CUDA-11.1.1", "2.7.1-gimkl-2022a-CUDA-11.6.2" ], "admin_list": [ @@ -12898,35 +11658,11 @@ } ], "network_licences": [], - "default": "2.5.4-gimkl-2020a-CUDA-11.1.1", + "default": "2.7.1-gimkl-2022a-CUDA-11.6.2", "default_type": "latest", "last_updated": 1679279048, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/math/magma/2.5.4-gimkl-2020a-CUDA-11.1.1.lua", - "force_hide": "False", - "force_show": "False" - }, - "Magma": { - "description": "Magma is a large, well-supported software package designed for computations in algebra, number theory, algebraic geometry and algebraic combinatorics. It provides a mathematically rigorous environment for defining and working with structures such as groups, rings, fields, modules, algebras, schemes, curves, graphs, designs, codes and many others. Magma also supports a number of databases designed to aid computational research in those areas of mathematics which are algebraic in nature.\n \nwhatis([==[Homepage: http://magma.maths.usyd.edu.au/magma/", - "domains": [], - "extensions": [], - "licence_type": "", - "homepage": "", - "support": "", - "versions": [ - "Magma-2.26.9" - ], - "admin_list": [ - { - "Magma-2.26.9": "Warning: Magma/Magma-2.26.9 is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "Magma-2.26.9", - "default_type": "latest", - "last_updated": 1635992766, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/Magma/Magma-2.26.9.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/math/magma/2.7.1-gimkl-2022a-CUDA-11.6.2.lua", "force_hide": "False", "force_show": "False" }, @@ -12938,7 +11674,7 @@ "extensions": [], "licence_type": "", "homepage": "http://www.yandell-lab.org/software/maker.html", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/MAKER", + "support": "", "versions": [ "2.31.9-gimkl-2018b", "2.31.9-gimkl-2020a" @@ -13023,31 +11759,6 @@ "force_hide": "False", "force_show": "False" }, - "MarkerMiner": { - "description": "Workflow for effective discovery of SCN loci in flowering plants angiosperms", - "domains": [ - "bio" - ], - "extensions": [ - "docopt-0.4.0", - "mandrill-1.0.59" - ], - "licence_type": "", - "homepage": "https://bitbucket.org/srikarchamala/markerminer/", - "support": "", - "versions": [ - "1.0-gimkl-2020a-Python-2.7.18" - ], - "admin_list": [], - "network_licences": [], - "default": "1.0-gimkl-2020a-Python-2.7.18", - "default_type": "latest", - "last_updated": 1663661348, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/MarkerMiner/1.0-gimkl-2020a-Python-2.7.18.lua", - "force_hide": "False", - "force_show": "False" - }, "Mash": { "description": "Fast genome and metagenome distance estimation using MinHash", "domains": [ @@ -13138,10 +11849,6 @@ "homepage": "http://www.genome.umd.edu/masurca.html", "support": "", "versions": [ - "3.3.1-gimkl-2018b", - "3.3.4-gimkl-2018b", - "3.4.1-gimkl-2020a", - "4.0.5-gimkl-2020a", "4.0.9-gimkl-2020a", "4.1.0-GCC-11.3.0" ], @@ -13157,38 +11864,11 @@ } ], "network_licences": [], - "default": "4.0.5-gimkl-2020a", + "default": "4.0.9-gimkl-2020a", "default_type": "latest", "last_updated": 1701388068, "modulefile_text": "", - 
"module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/MaSuRCA/4.0.5-gimkl-2020a.lua", - "force_hide": "False", - "force_show": "False" - }, - "MATIO": { - "description": "matio is an C library for reading and writing Matlab MAT files.", - "domains": [ - "lib" - ], - "extensions": [], - "licence_type": "", - "homepage": "http://sourceforge.net/projects/matio/", - "support": "", - "versions": [ - "1.5.17-GCCcore-7.4.0", - "1.5.17-GCCcore-9.2.0" - ], - "admin_list": [ - { - "1.5.17-GCCcore-9.2.0": "Warning: MATIO/1.5.17-GCCcore-9.2.0 is old and marked for deletion. Please select a more recent version (try 'module spider MATIO') or let us know that you still need it." - } - ], - "network_licences": [], - "default": "1.5.17-GCCcore-7.4.0", - "default_type": "latest", - "last_updated": 1588239644, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/MATIO/1.5.17-GCCcore-7.4.0.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/MaSuRCA/4.0.9-gimkl-2020a.lua", "force_hide": "False", "force_show": "False" }, @@ -13198,14 +11878,12 @@ "devel", "engineering", "mathematics", - "visualisation", - "ml", - "matlab" + "visualisation" ], "extensions": [], "licence_type": "proprietary", "homepage": "http://www.mathworks.com/products/matlab", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/MATLAB", + "support": "", "versions": [ "2017b", "2018b", @@ -13437,7 +12115,7 @@ "default_type": "static", "last_updated": 1731286240, "modulefile_text": "", - "module_path": "/scale_wlg_persistent/filesets/opt_nesi/CS400_centos7_bdw/modules/all/MATLAB/2021b.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/MATLAB/2021b.lua", "force_hide": "False", "force_show": "False" }, @@ -13559,10 +12237,6 @@ "support": "", "versions": [ "2016b", - "2017b", - "2019a", - "2019b", - "2020a", "2020b", "2021a", "2021b", @@ -13598,11 +12272,11 @@ } ], "network_licences": [], - "default": "2019a", + "default": "2021b", 
"default_type": "latest", "last_updated": 1630639564, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/MCR/2019a.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/MCR/2021b.lua", "force_hide": "False", "force_show": "False" }, @@ -13617,7 +12291,6 @@ "support": "", "versions": [ "1.11.1-Miniconda3-22.11.1-1", - "1.4.3-Miniconda3-4.10.3", "1.6.0-Miniconda3-4.12.0", "1.7.2-Miniconda3-22.11.1-1", "1.8.0-Miniconda3-22.11.1-1" @@ -13642,16 +12315,15 @@ "homepage": "https://github.com/voutcn/megahit", "support": "", "versions": [ - "1.1.4-gimkl-2018b-Python-2.7.16", "1.2.9-gimkl-2022a-Python-3.10.5" ], "admin_list": [], "network_licences": [], - "default": "1.1.4-gimkl-2018b-Python-2.7.16", + "default": "1.2.9-gimkl-2022a-Python-3.10.5", "default_type": "latest", "last_updated": 1666126724, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/MEGAHIT/1.1.4-gimkl-2018b-Python-2.7.16.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/MEGAHIT/1.2.9-gimkl-2022a-Python-3.10.5.lua", "force_hide": "False", "force_show": "False" }, @@ -13691,7 +12363,6 @@ "homepage": "https://github.com/nanoporetech/megalodon", "support": "", "versions": [ - "2.3.1-gimkl-2020a-Python-3.8.2", "2.5.0-gimkl-2022a-Python-3.10.5" ], "admin_list": [], @@ -13704,32 +12375,6 @@ "force_hide": "False", "force_show": "False" }, - "meRanTK": { - "description": "High performance toolkit for complete analysis of methylated RNA data.", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "http://www.icbi.at/index.html", - "support": "", - "versions": [ - "1.1.1b" - ], - "admin_list": [ - { - "1.1.1b": "Warning: meRanTK/1.1.1b is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "1.1.1b", - "default_type": "latest", - "last_updated": 1574709709, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/meRanTK/1.1.1b.lua", - "force_hide": "False", - "force_show": "False" - }, "Merqury": { "description": "Evaluate genome assemblies with k-mers and more", "domains": [ @@ -13786,7 +12431,6 @@ "homepage": "https://mesonbuild.com", "support": "", "versions": [ - "0.53.2", "0.62.1" ], "admin_list": [ @@ -13795,11 +12439,11 @@ } ], "network_licences": [], - "default": "0.53.2", + "default": "0.62.1", "default_type": "latest", "last_updated": 1657674905, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/tools/Meson/0.53.2.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/tools/Meson/0.62.1.lua", "force_hide": "False", "force_show": "False" }, @@ -13911,7 +12555,6 @@ "support": "", "versions": [ "3.0.14-gimkl-2020a-Python-3.8.2", - "3.0.9-gimkl-2020a-Python-3.8.2", "4.0.4-gimkl-2022a-Python-3.10.5", "4.1.0-gimkl-2022a-Python-3.10.5" ], @@ -13951,33 +12594,6 @@ "force_hide": "False", "force_show": "False" }, - "Metashape": { - "description": "", - "domains": [], - "extensions": [], - "licence_type": "", - "homepage": "", - "support": "", - "versions": [ - "1.6" - ], - "admin_list": [ - { - "1.6": "Warning: Metashape/1.6 is old and marked for deletion. If you still need it, then please let us know." - }, - { - "1.6": "Warning: Metashape has not been used for a long time and so is marked for deletion. If you still need it then please let us know." 
- } - ], - "network_licences": [], - "default": "1.6", - "default_type": "latest", - "last_updated": 1595371272, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/Metashape/1.6.lua", - "force_hide": "False", - "force_show": "False" - }, "MetaSV": { "description": "Structural-variant caller", "domains": [ @@ -13994,7 +12610,11 @@ "versions": [ "0.5.4-gimkl-2018b-Python-2.7.16" ], - "admin_list": [], + "admin_list": [ + { + "0.5.4-gimkl-2018b-Python-2.7.16": "Warning: MetaSV/0.5.4-gimkl-2018b-Python-2.7.16 is old and marked for deletion. If you still need it, then please let us know." + } + ], "network_licences": [], "default": "0.5.4-gimkl-2018b-Python-2.7.16", "default_type": "latest", @@ -14037,7 +12657,6 @@ "homepage": "https://microbiology.se/software/metaxa2/", "support": "", "versions": [ - "2.2-gimkl-2020a", "2.2.2-gimkl-2020a", "2.2.3-gimkl-2022a" ], @@ -14070,7 +12689,6 @@ "support": "", "versions": [ "4.0.3-GCCcore-9.2.0", - "4.0.3-gimkl-2017a", "5.1.0-GCC-11.3.0", "5.1.0-GCC-12.3.0", "5.1.0-GCC-7.4.0", @@ -14089,6 +12707,12 @@ }, { "4.0.3-gimkl-2017a": "Warning: METIS/4.0.3-gimkl-2017a is old and marked for deletion. Please select a more recent version (try 'module spider METIS') or let us know that you still need it." + }, + { + "5.1.0-intel-2018b": "Warning: METIS/5.1.0-intel-2018b is old and marked for deletion along with the rest of our intel-2018b software. Please select a more recent version (try 'module spider METIS') or let us know that you still need it." + }, + { + "5.1.0-gimkl-2017a": "Warning: METIS/5.1.0-gimkl-2017a is old and marked for deletion along with the rest of our gimkl-2017a software. Please select a more recent version (try 'module spider METIS') or let us know that you still need it." 
} ], "network_licences": [], @@ -14100,32 +12724,6 @@ "force_hide": "False", "force_show": "False" }, - "mimalloc": { - "description": "mimalloc is a general purpose allocator with excellent performance characteristics.", - "domains": [ - "lib" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://microsoft.github.io/mimalloc/", - "support": "", - "versions": [ - "2.0.6-GCC-11.3.0" - ], - "admin_list": [ - { - "2.0.6-GCC-11.3.0": "Warning: mimalloc/2.0.6-GCC-11.3.0 is old and marked for deletion. If you still need it, then please let us know." - } - ], - "network_licences": [], - "default": "2.0.6-GCC-11.3.0", - "default_type": "latest", - "last_updated": 1657401936, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/mimalloc/2.0.6-GCC-11.3.0.lua", - "force_hide": "False", - "force_show": "False" - }, "MiMiC": { "description": " \nMiMiC: A Framework for Multiscale Modeling in Computational Chemistry\n\nThis package includes mimicpy\n", "domains": [ @@ -14223,7 +12821,7 @@ "extensions": [], "licence_type": "", "homepage": "https://www.continuum.io/anaconda-overview", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/Miniconda3", + "support": "", "versions": [ "22.11.1-1", "23.10.0-1", @@ -14423,32 +13021,6 @@ "force_hide": "False", "force_show": "False" }, - "mlpack": { - "description": "Fast, and flexible C++ machine learning library with bindings to other languages", - "domains": [ - "lib" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://uscilab.github.io/cereal/", - "support": "", - "versions": [ - "3.4.2-gimkl-2020a" - ], - "admin_list": [ - { - "3.4.2-gimkl-2020a": "Warning: mlpack/3.4.2-gimkl-2020a is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "3.4.2-gimkl-2020a", - "default_type": "latest", - "last_updated": 1629676354, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/mlpack/3.4.2-gimkl-2020a.lua", - "force_hide": "False", - "force_show": "False" - }, "Mmg": { "description": "\nMmg is an open source software for simplicial remeshing. It provides 3 applications and 4 libraries:\nthe mmg2d application and the libmmg2d library: adaptation and optimization of a two-dimensional\ntriangulation and generation of a triangulation from a set of points or from given boundary edges\nthe mmgs application and the libmmgs library: adaptation and optimization of a surface triangulation\nand isovalue discretization the mmg3d application and the libmmg3d library: adaptation and optimization\nof a tetrahedral mesh and implicit domain meshing the libmmg library gathering the libmmg2d,\nlibmmgs and libmmg3d libraries.", "domains": [ @@ -14573,32 +13145,6 @@ "force_hide": "False", "force_show": "False" }, - "MODFLOW": { - "description": "MODFLOW is the U.S. Geological Survey modular finite-difference flow model, which is a computer code that solves the groundwater flow equation. The program is used by hydrogeologists to simulate the flow of groundwater through aquifers.\n", - "domains": [ - "geo" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://github.com/MODFLOW-USGS/modflow6", - "support": "", - "versions": [ - "6.2.2-GCC-9.2.0" - ], - "admin_list": [ - { - "6.2.2-GCC-9.2.0": "Warning: MODFLOW/6.2.2-GCC-9.2.0 is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "6.2.2-GCC-9.2.0", - "default_type": "latest", - "last_updated": 1639114893, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/geo/MODFLOW/6.2.2-GCC-9.2.0.lua", - "force_hide": "False", - "force_show": "False" - }, "modkit": { "description": "Tool for working with modified bases from Oxford Nanopore", "domains": [ @@ -14622,46 +13168,17 @@ "force_hide": "False", "force_show": "False" }, - "Molcas": { - "description": "Molcas is an ab initio quantum chemistry software package\ndeveloped by scientists to be used by scientists. The basic philosophy is is to\nbe able to treat general electronic structures for molecules consisting of\natoms from most of the periodic table. As such, the primary focus of the\npackage is on multiconfigurational methods with applications typically\nconnected to the treatment of highly degenerate states.", - "domains": [ - "chem", - "chemistry" - ], - "extensions": [], - "licence_type": "", - "homepage": "http://www.molcas.org", - "support": "", - "versions": [ - "8.0-15.06.18_CentOS_6.6_x86_64" - ], - "admin_list": [ - { - "8.0-15.06.18_CentOS_6.6_x86_64": "Warning: Molcas/8.0-15.06.18_CentOS_6.6_x86_64 is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "8.0-15.06.18_CentOS_6.6_x86_64", - "default_type": "latest", - "last_updated": 1533595414, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/Molcas/8.0-15.06.18_CentOS_6.6_x86_64.lua", - "force_hide": "False", - "force_show": "False" - }, "Molpro": { "description": "Molpro is a complete system of ab initio programs for molecular electronic structure calculations.", "domains": [ "chem", - "chemistry", - "mahuika" + "chemistry" ], "extensions": [], "licence_type": "", "homepage": "https://molpro.net", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/Molpro", + "support": "", "versions": [ - "mpp-2015.1.25.linux_x86_64_openmp", "mpp-2019.2.2.linux_x86_64_openmp" ], "admin_list": [ @@ -14790,17 +13307,18 @@ "homepage": "http://www.mothur.org/", "support": "", "versions": [ - "1.41.0-gimkl-2018b-Python-2.7.16" + "1.41.0-gimkl-2018b-Python-2.7.16", + "1.48.2-foss-2023a" ], "admin_list": [ { - "1.39.5-gimkl-2017a": "Warning: Mothur/1.39.5-gimkl-2017a is very old, please select a more recent version (try 'module spider Mothur')." + "1.41.0-gimkl-2018b-Python-2.7.16": "Warning: Mothur/1.41.0-gimkl-2018b-Python-2.7.16 is old and marked for deletion. Please select a more recent version (try 'module spider Mothur') or let us know that you still need it." } ], "network_licences": [], "default": "1.41.0-gimkl-2018b-Python-2.7.16", "default_type": "latest", - "last_updated": 1565638628, + "last_updated": 1738894866, "modulefile_text": "", "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/Mothur/1.41.0-gimkl-2018b-Python-2.7.16.lua", "force_hide": "False", @@ -14843,7 +13361,11 @@ "4.1.0-GCC-11.3.0", "4.2.1-GCC-12.3.0" ], - "admin_list": [], + "admin_list": [ + { + "3.1.5-gimkl-2017a": "Warning: MPFR/3.1.5-gimkl-2017a is old and marked for deletion along with the rest of our gimkl-2017a software. 
Please select a more recent version (try 'module spider MPFR') or let us know that you still need it." + } + ], "network_licences": [], "default": "4.0.2-GCCcore-9.2.0", "default_type": "latest", @@ -14853,28 +13375,6 @@ "force_hide": "False", "force_show": "False" }, - "mpifileutils": { - "description": "MPI-Based File Utilities For Distributed Systems", - "domains": [ - "tools" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://hpc.github.io/mpifileutils/", - "support": "", - "versions": [ - "0.11-gimpi-2020a" - ], - "admin_list": [], - "network_licences": [], - "default": "0.11-gimpi-2020a", - "default_type": "latest", - "last_updated": 1635325081, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/tools/mpifileutils/0.11-gimpi-2020a.lua", - "force_hide": "False", - "force_show": "False" - }, "MrBayes": { "description": "MrBayes is a program for the Bayesian estimation of phylogeny.", "domains": [ @@ -14902,32 +13402,6 @@ "force_hide": "False", "force_show": "False" }, - "MSMC": { - "description": "Multiple Sequentially Markovian Coalescent, infers population size and gene flow from multiple genome sequences ", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://github.com/stschiff/msmc/blob/master/guide.md", - "support": "", - "versions": [ - "1.1.0-GCC-7.4.0" - ], - "admin_list": [ - { - "1.1.0-GCC-7.4.0": "Warning: MSMC/1.1.0-GCC-7.4.0 is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "1.1.0-GCC-7.4.0", - "default_type": "latest", - "last_updated": 1568006910, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/MSMC/1.1.0-GCC-7.4.0.lua", - "force_hide": "False", - "force_show": "False" - }, "MultiQC": { "description": "Aggregate results from bioinformatics analyses across many samples into a single \n report.\n MultiQC searches a given directory for analysis logs and compiles a HTML report. It's a general\n use tool, perfect for summarising the output from numerous bioinformatics tools.", "domains": [ @@ -15058,7 +13532,6 @@ "versions": [ "2.2.5-GCC-11.3.0", "2.2.5-GCCcore-7.4.0", - "2.2.5-gimkl-2017a", "2.2.5-gimkl-2020a" ], "admin_list": [], @@ -15081,43 +13554,17 @@ "licence_type": "", "homepage": "http://drive5.com/muscle/", "support": "", - "versions": [ - "3.8.1551", - "3.8.31" - ], - "admin_list": [], - "network_licences": [], - "default": "3.8.31", - "default_type": "latest", - "last_updated": 1616061012, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/MUSCLE/3.8.31.lua", - "force_hide": "False", - "force_show": "False" - }, - "MUST": { - "description": "MUST detects usage errors of the Message Passing Interface (MPI) and reports them to the user.", - "domains": [ - "perf" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://hpc.rwth-aachen.de/must/", - "support": "", - "versions": [ - "1.6-gimkl-2020a-Python-3.8.2" - ], - "admin_list": [ - { - "1.6-gimkl-2020a-Python-3.8.2": "Warning: MUST/1.6-gimkl-2020a-Python-3.8.2 is old and marked for deletion. If you still need it, then please let us know." 
- } + "versions": [ + "3.8.1551", + "3.8.31" ], + "admin_list": [], "network_licences": [], - "default": "1.6-gimkl-2020a-Python-3.8.2", + "default": "3.8.31", "default_type": "latest", - "last_updated": 1612341367, + "last_updated": 1616061012, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/perf/MUST/1.6-gimkl-2020a-Python-3.8.2.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/MUSCLE/3.8.31.lua", "force_hide": "False", "force_show": "False" }, @@ -15140,6 +13587,9 @@ "admin_list": [ { "2.12-gimkl-2017a-mpi": "Warning: NAMD/2.12-gimkl-2017a-mpi is old and will soon be removed, so please select a more recent version such as NAMD/2.14-gimkl-2022a-mpi." + }, + { + "2.12-gimkl-2017a-cuda": "Warning: NAMD/2.12-gimkl-2017a-cuda is old and marked for deletion along with the rest of our gimkl-2017a software. Please select a more recent version (try 'module spider NAMD') or let us know that you still need it." } ], "network_licences": [], @@ -15231,7 +13681,6 @@ "homepage": "https://github.com/wdecoster/nanoget", "support": "", "versions": [ - "1.12.1-gimkl-2020a-Python-3.8.2", "1.16.1-gimkl-2020a-Python-3.8.2", "1.18.1-gimkl-2022a-Python-3.10.5" ], @@ -15292,7 +13741,11 @@ "1.2.0-gimkl-2020a-Python-3.8.2", "1.2.1-gimkl-2022a-Python-3.10.5" ], - "admin_list": [], + "admin_list": [ + { + "0.23.1-gimkl-2020a-Python-3.8.2": "Warning: nanomath/0.23.1-gimkl-2020a-Python-3.8.2 is old and marked for deletion. Please select a more recent version (try 'module spider nanomath') or let us know that you still need it." 
+ } + ], "network_licences": [], "default": "1.2.1-gimkl-2022a-Python-3.10.5", "default_type": "latest", @@ -15340,9 +13793,6 @@ "homepage": "https://github.com/jts/nanopolish", "support": "", "versions": [ - "0.10.2-gimkl-2018b", - "0.11.1-gimkl-2018b", - "0.13.2-gimkl-2020a", "0.13.3-gimkl-2020a-Python-3.9.9", "0.14.0-foss-2023a-Python-3.11.6" ], @@ -15381,7 +13831,11 @@ "0.9.4-gimkl-2020a-Python-3.8.2", "0.9.4-gimkl-2022a-Python-3.10.5" ], - "admin_list": [], + "admin_list": [ + { + "0.9.4-gimkl-2020a-Python-3.8.2": "Warning: nanoQC/0.9.4-gimkl-2020a-Python-3.8.2 is old and marked for deletion. Please select a more recent version (try 'module spider nanoQC') or let us know that you still need it." + } + ], "network_licences": [], "default": "0.9.4-gimkl-2022a-Python-3.10.5", "default_type": "latest", @@ -15484,8 +13938,7 @@ "2.17.1-CUDA-12.0.0", "2.4.7-CUDA-10.1.243", "2.6.4-CUDA-10.2.89", - "2.7.8-CUDA-11.0.2", - "2.8.4-CUDA-11.2.0" + "2.7.8-CUDA-11.0.2" ], "admin_list": [ { @@ -15574,7 +14027,11 @@ "6.2-GCCcore-11.3.0", "6.4-GCCcore-12.3.0" ], - "admin_list": [], + "admin_list": [ + { + "6.0-GCC-5.4.0": "Warning: ncurses/6.0-GCC-5.4.0 is old and marked for deletion. Please select a more recent version (try 'module spider ncurses') or let us know that you still need it." + } + ], "network_licences": [], "default": "6.0-gimkl-2017a", "default_type": "latest", @@ -15700,7 +14157,11 @@ "4.8.1-iimpi-2022a", "4.9.2-gompi-2023a" ], - "admin_list": [], + "admin_list": [ + { + "4.4.1-intel-2017a": "Warning: netCDF/4.4.1-intel-2017a is very old and will soon be removed. Please select a more recent version such as netCDF/4.8.1-iimpi-2022a." + } + ], "network_licences": [], "default": "4.6.2-gimpi-2018b", "default_type": "latest", @@ -15759,6 +14220,9 @@ }, { "4.3.1-iimpi-2020a": "Warning: netCDF-C++4/4.3.1-iimpi-2020a is old and marked for deletion. Please select a more recent version (try 'module spider netCDF-C++4') or let us know that you still need it." 
+ }, + { + "4.3.0-iimpi-2018b": "Warning: netCDF-C++4/4.3.0-iimpi-2018b is old and marked for deletion along with the rest of our intel-2018b software. Please select a more recent version (try 'module spider netCDF-C++4') or let us know that you still need it." } ], "network_licences": [], @@ -15865,12 +14329,6 @@ "support": "", "versions": [ "19.04.0", - "19.07.0", - "19.10.0", - "20.04.1", - "20.10.0", - "21.01.1", - "21.02.0", "21.04.3", "21.10.6", "22.04.3", @@ -15909,32 +14367,6 @@ "force_hide": "False", "force_show": "False" }, - "NextGenMap": { - "description": "NextGenMap is a flexible highly sensitive short read mapping tool that\n handles much higher mismatch rates than comparable algorithms while still outperforming\n them in terms of runtime.", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "http://cibiv.github.io/NextGenMap/", - "support": "", - "versions": [ - "0.5.5-GCC-11.3.0" - ], - "admin_list": [ - { - "0.5.5-GCC-11.3.0": "Warning: NextGenMap/0.5.5-GCC-11.3.0 is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "0.5.5-GCC-11.3.0", - "default_type": "latest", - "last_updated": 1652221791, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/NextGenMap/0.5.5-GCC-11.3.0.lua", - "force_hide": "False", - "force_show": "False" - }, "NextPolish2": { "description": "a fast and efficient genome polishing tool for long-read assembly", "domains": [ @@ -15970,11 +14402,7 @@ "homepage": "https://github.com/ncbi/ngs", "support": "", "versions": [ - "1.2.5-gimkl-2017a", "1.3.0-gimkl-2018b", - "2.10.1-gimkl-2018b", - "2.10.5-GCCcore-9.2.0", - "2.10.5-gimkl-2018b", "2.10.5-gimkl-2020a", "2.11.2-GCC-11.3.0" ], @@ -15993,11 +14421,11 @@ } ], "network_licences": [], - "default": "1.2.5-gimkl-2017a", + "default": "2.11.2-GCC-11.3.0", "default_type": "latest", "last_updated": 1667101352, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/NGS/1.2.5-gimkl-2017a.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/NGS/2.11.2-GCC-11.3.0.lua", "force_hide": "False", "force_show": "False" }, @@ -16062,7 +14490,11 @@ "2.7.0-GCC-11.3.0", "2.7.1-GCC-12.3.0" ], - "admin_list": [], + "admin_list": [ + { + "2.4.2-gimkl-2017a": "Warning: NLopt/2.4.2-gimkl-2017a is old and marked for deletion along with the rest of our gimkl-2017a software. Please select a more recent version (try 'module spider NLopt') or let us know that you still need it." 
+ } + ], "network_licences": [], "default": "2.5.0-gimkl-2018b", "default_type": "latest", @@ -16085,8 +14517,6 @@ "versions": [ "12.17.0-GCCcore-7.4.0", "14.16.1-GCCcore-9.2.0", - "14.3.0-GCCcore-7.4.0", - "14.3.0-GCCcore-9.2.0", "16.15.1-GCCcore-11.3.0", "17.8.0-GCCcore-9.2.0", "18.18.2-GCCcore-11.3.0", @@ -16121,15 +14551,16 @@ "support": "", "versions": [ "7.5.0-iimpi-2022a", - "7.5.1-iimpi-2022a" + "7.5.1-iimpi-2022a", + "7.6.0-iimpi-2022a" ], "admin_list": [], "network_licences": [], - "default": "7.5.1-iimpi-2022a", + "default": "7.6.0-iimpi-2022a", "default_type": "latest", - "last_updated": 1681340744, + "last_updated": 1739764412, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/NONMEM/7.5.1-iimpi-2022a", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/NONMEM/7.6.0-iimpi-2022a", "force_hide": "False", "force_show": "False" }, @@ -16361,19 +14792,21 @@ "domains": [ "biology", "chem", - "chemistry", - "mahuika" + "chemistry" ], "extensions": [], "licence_type": "", "homepage": "https://nwchemgit.github.io/ ", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/NWChem", + "support": "", "versions": [ "6.8.1.revision133-gimkl-2018b-2018-06-14-Python-2.7.16" ], "admin_list": [ { "6.8.revision47-gimkl-2017a-2017-12-14-Python-2.7.14": "Warning: NWChem/6.8.revision47-gimkl-2017a-2017-12-14-Python-2.7.14 is old and will soon be removed. Please check for a newer version ('module spider NWChem') or let us know if you need a new version installed." + }, + { + "6.8.1.revision133-gimkl-2018b-2018-06-14-Python-2.7.16": "Warning: this NWChem environment module is old and marked for deletion. If you would like us to install a newer NWChem then please let us know." 
} ], "network_licences": [], @@ -16435,28 +14868,6 @@ "force_hide": "False", "force_show": "False" }, - "OMA": { - "description": "Orthologous MAtrix project is a method and database for the inference \n of orthologs among complete genomes", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://omabrowser.org/standalone/", - "support": "", - "versions": [ - "2.4.1" - ], - "admin_list": [], - "network_licences": [], - "default": "2.4.1", - "default_type": "latest", - "last_updated": 1594071005, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/OMA/2.4.1.lua", - "force_hide": "False", - "force_show": "False" - }, "ont-guppy-gpu": { "description": "Data processing toolkit that contains the Oxford Nanopore Technologies' basecalling algorithms,\nand several bioinformatic post-processing features", "domains": [ @@ -16465,14 +14876,10 @@ "extensions": [], "licence_type": "", "homepage": "https://nanoporetech.com/products/minit", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/ont-guppy-gpu", + "support": "", "versions": [ - "5.0.16", - "5.0.7", "6.0.1", - "6.1.2", "6.2.1", - "6.4.2", "6.4.6", "6.5.7" ], @@ -16555,7 +14962,6 @@ "0.3.20-GCC-11.3.0", "0.3.23-GCC-12.3.0", "0.3.23-intel-compilers-2023.2.1", - "0.3.6-GCC-7.4.0", "0.3.6-GCC-9.2.0", "0.3.9-GCC-9.2.0" ], @@ -16608,47 +15014,17 @@ "force_hide": "False", "force_show": "False" }, - "OpenFAST": { - "description": "Wind turbine multiphysics simulation tool", - "domains": [ - "cae", - "engineering", - "physics" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://openfast.readthedocs.io", - "support": "", - "versions": [ - "2.1.0-gimkl-2018b" - ], - "admin_list": [ - { - "2.1.0-gimkl-2018b": "Warning: OpenFAST/2.1.0-gimkl-2018b is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "2.1.0-gimkl-2018b", - "default_type": "latest", - "last_updated": 1564984538, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/cae/OpenFAST/2.1.0-gimkl-2018b.lua", - "force_hide": "False", - "force_show": "False" - }, "OpenFOAM": { "description": "OpenFOAM is a free, open source CFD software package.\n OpenFOAM has an extensive range of features to solve anything from complex fluid flows\n involving chemical reactions, turbulence and heat transfer,\n to solid dynamics and electromagnetics.", "domains": [ "cae", "chemistry", - "engineering", - "cfd", - "fea" + "engineering" ], "extensions": [], "licence_type": "", "homepage": "http://www.openfoam.com/", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/OpenFOAM", + "support": "", "versions": [ "10-gimkl-2022a", "2.4.0-gimkl-2018b", @@ -16669,6 +15045,12 @@ }, { "v1712-gimkl-2017a": "Warning: OpenFOAM/v1712-gimkl-2017a is very old and will soon be removed, so please select a more recent version (try 'module spider OpenFOAM')." + }, + { + "5.0-gimkl-2018b": "Warning: OpenFOAM/5.0-gimkl-2018b is old and will soon be removed, so please select a more recent version (try 'module spider OpenFOAM') or let us know that you still need it." + }, + { + "6-gimkl-2018b": "Warning: OpenFOAM/6-gimkl-2018b is old and will soon be removed, so please select a more recent version (try 'module spider OpenFOAM') or let us know that you still need it." 
} ], "network_licences": [], @@ -16748,14 +15130,12 @@ "domains": [ "cae", "earth_science", - "engineering", - "geo", - "earthquake" + "engineering" ], "extensions": [], "licence_type": "", "homepage": "http://opensees.berkeley.edu", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/OpenSees", + "support": "", "versions": [ "20220411-gimkl-2020a", "3.0.0-gimkl-2017a", @@ -16847,7 +15227,11 @@ "1.1.1k-GCCcore-12.3.0", "1.1.1k-GCCcore-9.2.0" ], - "admin_list": [], + "admin_list": [ + { + "1.1.0e-gimkl-2017a": "Warning: OpenSSL/1.1.0e-gimkl-2017a is old and marked for deletion along with the rest of our gimkl-2017a software. Please select a more recent version (try 'module spider OpenSSL') or let us know that you still need it." + } + ], "network_licences": [], "default": "1.1.0e-gimkl-2017a", "default_type": "latest", @@ -16861,13 +15245,12 @@ "description": "ORCA is a flexible, efficient and easy-to-use general purpose tool for quantum chemistry\n with specific emphasis on spectroscopic properties of open-shell molecules.\n It features a wide variety of standard quantum chemical methods ranging from semiempirical methods to DFT to single-\n and multireference correlated ab initio methods.\n It can also treat environmental and relativistic effects.", "domains": [ "chem", - "chemistry", - "mahuika" + "chemistry" ], "extensions": [], "licence_type": "", "homepage": "https://orcaforum.kofo.mpg.de", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/ORCA", + "support": "", "versions": [ "5.0.3-OpenMPI-4.1.1", "5.0.4-OpenMPI-4.1.5", @@ -16893,58 +15276,6 @@ "force_hide": "False", "force_show": "False" }, - "OrfM": { - "description": "A simple and not slow open reading frame (ORF) caller.", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://github.com/wwood/OrfM", - "support": "", - "versions": [ - "0.7.1-GCC-11.3.0" - ], - "admin_list": [ - { - 
"0.7.1-GCC-11.3.0": "Warning: OrfM/0.7.1-GCC-11.3.0 is old and marked for deletion. If you still need it, then please let us know." - } - ], - "network_licences": [], - "default": "0.7.1-GCC-11.3.0", - "default_type": "latest", - "last_updated": 1659935407, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/OrfM/0.7.1-GCC-11.3.0.lua", - "force_hide": "False", - "force_show": "False" - }, - "OrthoFiller": { - "description": "Identifies missing annotations for evolutionarily conserved genes.", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://github.com/mpdunne/orthofiller/", - "support": "", - "versions": [ - "1.1.4-gimkl-2018b-Python-2.7.16" - ], - "admin_list": [ - { - "1.1.4-gimkl-2018b-Python-2.7.16": "Warning: OrthoFiller/1.1.4-gimkl-2018b-Python-2.7.16 is old and marked for deletion. If you still need it, then please let us know." - } - ], - "network_licences": [], - "default": "1.1.4-gimkl-2018b-Python-2.7.16", - "default_type": "latest", - "last_updated": 1570070004, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/OrthoFiller/1.1.4-gimkl-2018b-Python-2.7.16.lua", - "force_hide": "False", - "force_show": "False" - }, "OrthoFinder": { "description": "OrthoFinder is a fast, accurate and comprehensive platform for comparative genomics", "domains": [ @@ -16968,32 +15299,6 @@ "force_hide": "False", "force_show": "False" }, - "OrthoMCL": { - "description": "Genome-scale algorithm for grouping orthologous protein sequences.", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "http://orthomcl.org/", - "support": "", - "versions": [ - "2.0.9-gimkl-2020a-Perl-5.30.1" - ], - "admin_list": [ - { - "2.0.9-gimkl-2020a-Perl-5.30.1": "Warning: OrthoMCL/2.0.9-gimkl-2020a-Perl-5.30.1 is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "2.0.9-gimkl-2020a-Perl-5.30.1", - "default_type": "latest", - "last_updated": 1642046510, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/OrthoMCL/2.0.9-gimkl-2020a-Perl-5.30.1.lua", - "force_hide": "False", - "force_show": "False" - }, "OSPRay": { "description": "\nOSPRay features interactive CPU rendering capabilities geared towards\nScientific Visualization applications. Advanced shading effects such\nas Ambient Occlusion, shadows, and transparency can be rendered\ninteractively, enabling new insights into data exploration.\n", "domains": [ @@ -17143,7 +15448,6 @@ "support": "", "versions": [ "4.10.7-GCC-12.3.0", - "4.9i-GCC-7.4.0", "4.9j-GCC-9.2.0" ], "admin_list": [ @@ -17263,6 +15567,9 @@ "admin_list": [ { "5.4.3-intel-2017a": "Warning: PAPI/5.4.3-intel-2017a is obsolete and will soon be removed, so please select a more recent version (try 'module spider PAPI')." + }, + { + "5.7.0-intel-2020a": "Warning: PAPI/5.7.0-intel-2020a is old and marked for deletion. Please select a more recent version (try 'module spider PAPI') or let us know that you still need it." 
} ], "network_licences": [], @@ -17310,8 +15617,6 @@ "homepage": "https://github.com/rvalieris/parallel-fastq-dump", "support": "", "versions": [ - "0.6.5-gimkl-2018b-Python-3.7.3", - "0.6.6-gimkl-2020a-Python-3.8.2", "0.6.7" ], "admin_list": [ @@ -17323,11 +15628,11 @@ } ], "network_licences": [], - "default": "0.6.6-gimkl-2020a-Python-3.8.2", + "default": "0.6.7", "default_type": "latest", "last_updated": 1719288036, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/parallel-fastq-dump/0.6.6-gimkl-2020a-Python-3.8.2.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/parallel-fastq-dump/0.6.7.lua", "force_hide": "False", "force_show": "False" }, @@ -17353,32 +15658,6 @@ "force_hide": "False", "force_show": "False" }, - "parasail": { - "description": "parasail is a SIMD C (C99) library containing implementations\n of the Smith-Waterman (local), Needleman-Wunsch (global), and semi-global\n pairwise sequence alignment algorithms. ", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://github.com/jeffdaily/parasail", - "support": "", - "versions": [ - "2.4.3" - ], - "admin_list": [ - { - "2.4.3": "Warning: parasail/2.4.3 is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "2.4.3", - "default_type": "latest", - "last_updated": 1617136939, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/parasail/2.4.3.lua", - "force_hide": "False", - "force_show": "False" - }, "ParaView": { "description": "\nParaView is a scientific parallel visualizer.\n\nThis version supports CPU-only rendering without\nX context using the OSMesa library, it does not\nsupport GPU rendering.\n\nUse the GALLIUM_DRIVER environment variable to choose\na software renderer, it is recommended to use\n\nGALLIUM_DRIVER=swr\n\nfor best performance.\n\nRay tracing using the OSPRay library is also supported.\n", "domains": [ @@ -17389,7 +15668,7 @@ "extensions": [], "licence_type": "", "homepage": "http://www.paraview.org", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/ParaView", + "support": "", "versions": [ "5.4.1-gimkl-2018b-Python-2.7.16", "5.4.1-gimpi-2018b", @@ -17547,28 +15826,6 @@ "force_hide": "False", "force_show": "False" }, - "PDT": { - "description": "Program Database Toolkit (PDT) is a framework for analyzing source code written in several programming languages and for making rich program\n knowledge accessible to developers of static and dynamic analysis tools. 
", - "domains": [ - "perf" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://www.cs.uoregon.edu/research/pdt/", - "support": "", - "versions": [ - "3.25.1-GCCcore-9.2.0" - ], - "admin_list": [], - "network_licences": [], - "default": "3.25.1-GCCcore-9.2.0", - "default_type": "latest", - "last_updated": 1611739026, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/perf/PDT/3.25.1-GCCcore-9.2.0.lua", - "force_hide": "False", - "force_show": "False" - }, "PEAR": { "description": "Memory-efficient,fully parallelized and highly accurate pair-end read merger.", "domains": [ @@ -17591,31 +15848,6 @@ "force_hide": "False", "force_show": "False" }, - "Peregrine": { - "description": "Genome assembler for long reads (length > 10kb, accuracy > 99%). \nBased on Sparse HIereachical MimiMizER (SHIMMER) for fast read-to-read overlaping\n", - "domains": [ - "bio" - ], - "extensions": [ - "cffi-1.14.0", - "pycparser-2.20" - ], - "licence_type": "", - "homepage": "https://github.com/cschin/Peregrine", - "support": "", - "versions": [ - "0.1.6.1-gimkl-2020a" - ], - "admin_list": [], - "network_licences": [], - "default": "0.1.6.1-gimkl-2020a", - "default_type": "latest", - "last_updated": 1588538378, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/Peregrine/0.1.6.1-gimkl-2020a.lua", - "force_hide": "False", - "force_show": "False" - }, "Perl": { "description": "Larry Wall's Practical Extraction and Report Language", "domains": [ @@ -17900,32 +16132,6 @@ "force_hide": "False", "force_show": "False" }, - "PEST++": { - "description": "PEST++ is a software suite aimed at supporting\n complex numerical models in the decision-support context.\n Much focus has been devoted to supporting environmental models\n (groundwater, surface water, etc) but these tools are readily\n applicable to any computer model.\n", - "domains": [ - "geo" - ], - "extensions": [], - "licence_type": "", - "homepage": 
"https://github.com/usgs/pestpp", - "support": "", - "versions": [ - "5.1.5-gimkl-2020a" - ], - "admin_list": [ - { - "5.1.5-gimkl-2020a": "Warning: PEST++/5.1.5-gimkl-2020a is old and marked for deletion. If you still need it, then please let us know." - } - ], - "network_licences": [], - "default": "5.1.5-gimkl-2020a", - "default_type": "latest", - "last_updated": 1639117257, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/geo/PEST++/5.1.5-gimkl-2020a.lua", - "force_hide": "False", - "force_show": "False" - }, "PETSc": { "description": "PETSc, pronounced PET-see (the S is silent), is a suite of data structures and routines for the\n scalable (parallel) solution of scientific applications modeled by partial differential equations.", "domains": [ @@ -17987,33 +16193,11 @@ ], "admin_list": [], "network_licences": [], - "default": "2021-Miniconda3", - "default_type": "latest", - "last_updated": 1686875203, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/pgge/2021-Miniconda3.lua", - "force_hide": "False", - "force_show": "False" - }, - "PHASIUS": { - "description": "A tool to visualize phase block structure from (many) BAM or CRAM files together with BED annotation", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://github.com/wdecoster/cramino", - "support": "", - "versions": [ - "0.1.0d-GCC-11.3.0" - ], - "admin_list": [], - "network_licences": [], - "default": "0.1.0d-GCC-11.3.0", + "default": "2021-Miniconda3", "default_type": "latest", - "last_updated": 1675045280, + "last_updated": 1686875203, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/PHASIUS/0.1.0d-GCC-11.3.0.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/pgge/2021-Miniconda3.lua", "force_hide": "False", "force_show": "False" }, @@ -18061,7 +16245,6 @@ "support": "", "versions": [ "3.0.1-gimkl-2020a-Python-3.8.2", - "3.0.2-gimkl-2020a-Python-3.9.9", 
"3.0.3-gimkl-2022a-Python-3.10.5" ], "admin_list": [ @@ -18117,7 +16300,11 @@ "versions": [ "1.3-gimkl-2022a" ], - "admin_list": [], + "admin_list": [ + { + "1.3-gimkl-2022a": "Warning: phyx/1.3-gimkl-2022a has not been used for some time and so is marked for deletion. If you want it to remain then please let us know." + } + ], "network_licences": [], "default": "1.3-gimkl-2022a", "default_type": "latest", @@ -18242,15 +16429,17 @@ "support": "", "versions": [ "1.09b6.16", - "2.00a2.3" + "2.00a2.3", + "2.00a5.14-GCC-12.3.0", + "2.00a6.9" ], "admin_list": [], "network_licences": [], - "default": "1.09b6.16", + "default": "2.00a5.14-GCC-12.3.0", "default_type": "latest", - "last_updated": 1651640298, + "last_updated": 1739826509, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/PLINK/1.09b6.16.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/PLINK/2.00a5.14-GCC-12.3.0.lua", "force_hide": "False", "force_show": "False" }, @@ -18267,9 +16456,7 @@ "support": "", "versions": [ "2.6.2-gimkl-2020a", - "2.6.2-intel-2020a", "2.7.2-intel-2022a", - "2.7.3-intel-2020a", "2.8.0-gimkl-2022a", "2.8.0-intel-2020a" ], @@ -18288,11 +16475,11 @@ } ], "network_licences": [], - "default": "2.7.3-intel-2020a", + "default": "2.8.0-gimkl-2022a", "default_type": "latest", "last_updated": 1664260151, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/PLUMED/2.7.3-intel-2020a.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/PLUMED/2.8.0-gimkl-2022a.lua", "force_hide": "False", "force_show": "False" }, @@ -18603,7 +16790,6 @@ "homepage": "http://www.vicbioinformatics.com/software.prokka.shtml", "support": "", "versions": [ - "1.13.4-gimkl-2018b", "1.14.5-GCC-11.3.0", "1.14.5-GCC-9.2.0" ], @@ -18684,11 +16870,9 @@ "support": "", "versions": [ "23.0-GCC-11.3.0", - "3.10.0-GCCcore-7.4.0", "3.12.1-gimkl-2018b", "3.12.2-GCCcore-9.2.0", - "3.14.0-GCCcore-9.2.0", - "3.17.3-GCCcore-9.2.0" + 
"3.14.0-GCCcore-9.2.0" ], "admin_list": [ { @@ -18699,6 +16883,9 @@ }, { "3.17.3-GCCcore-9.2.0": "Warning: protobuf/3.17.3-GCCcore-9.2.0 is old and marked for deletion. Please select a more recent version (try 'module spider protobuf') or let us know that you still need it." + }, + { + "3.12.2-GCCcore-9.2.0": "Warning: protobuf/3.12.2-GCCcore-9.2.0 is old and marked for deletion. Please select a more recent version (try 'module spider protobuf') or let us know that you still need it." } ], "network_licences": [], @@ -18767,7 +16954,11 @@ "3.70-GCCcore-7.4.0", "3.75-GCCcore-9.2.0" ], - "admin_list": [], + "admin_list": [ + { + "3.70-GCCcore-7.4.0": "Warning: pstoedit/3.70-GCCcore-7.4.0 is old and marked for deletion. Please select a more recent version (try 'module spider pstoedit') or let us know that you still need it." + } + ], "network_licences": [], "default": "3.75-GCCcore-9.2.0", "default_type": "latest", @@ -18899,15 +17090,13 @@ ], "extensions": [ "namedlist-1.8", - "pyani-0.2.10", - "SQLAlchemy-1.3.10", - "tqdm-4.60.0" + "pyani-0.2.12", + "SQLAlchemy-1.4.41" ], "licence_type": "", "homepage": "https://github.com/widdowquinn/pyani", "support": "", "versions": [ - "0.2.10-gimkl-2020a-Python-3.8.2", "0.2.12-gimkl-2022a-Python-3.10.5" ], "admin_list": [ @@ -18916,11 +17105,11 @@ } ], "network_licences": [], - "default": "0.2.10-gimkl-2020a-Python-3.8.2", + "default": "0.2.12-gimkl-2022a-Python-3.10.5", "default_type": "latest", "last_updated": 1663557881, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/pyani/0.2.10-gimkl-2020a-Python-3.8.2.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/pyani/0.2.12-gimkl-2022a-Python-3.10.5.lua", "force_hide": "False", "force_show": "False" }, @@ -18971,28 +17160,6 @@ "force_hide": "False", "force_show": "False" }, - "PyOpenGL": { - "description": "PyOpenGL is the most common cross platform Python binding to OpenGL and related APIs.", - "domains": [ - "vis" - ], - "extensions": 
[], - "licence_type": "", - "homepage": "http://pyopengl.sourceforge.net", - "support": "", - "versions": [ - "3.1.5-gimkl-2018b-Python-2.7.16" - ], - "admin_list": [], - "network_licences": [], - "default": "3.1.5-gimkl-2018b-Python-2.7.16", - "default_type": "latest", - "last_updated": 1593474213, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/vis/PyOpenGL/3.1.5-gimkl-2018b-Python-2.7.16.lua", - "force_hide": "False", - "force_show": "False" - }, "PyQt": { "description": "PyQt5 is a set of Python bindings for v5 of the Qt application framework from The Qt Company.\nThis bundle includes PyQtWebEngine, a set of Python bindings for The Qt Company\u2019s Qt WebEngine framework.", "domains": [ @@ -19004,7 +17171,6 @@ "support": "", "versions": [ "5.10.1-gimkl-2018b-Python-3.7.3", - "5.12.1-gimkl-2018b-Python-2.7.16", "5.12.1-gimkl-2020a-Python-3.8.2" ], "admin_list": [ @@ -19024,28 +17190,6 @@ "force_hide": "False", "force_show": "False" }, - "pyspoa": { - "description": "Python bindings to spoa.", - "domains": [ - "lib" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://github.com/nanoporetech/pyspoa", - "support": "", - "versions": [ - "0.0.8-gimkl-2018b-Python-3.8.1" - ], - "admin_list": [], - "network_licences": [], - "default": "0.0.8-gimkl-2018b-Python-3.8.1", - "default_type": "latest", - "last_updated": 1630492151, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/pyspoa/0.0.8-gimkl-2018b-Python-3.8.1.lua", - "force_hide": "False", - "force_show": "False" - }, "Python": { "description": "Python is a programming language that lets you work more quickly and integrate your systems more effectively.", "domains": [ @@ -19053,8 +17197,7 @@ "lang", "language", "machine_learning", - "visualisation", - "ml" + "visualisation" ], "extensions": [ "altair-4.2.0", @@ -19155,7 +17298,7 @@ ], "licence_type": "", "homepage": "http://python.org/", - "support": 
"https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/Python", + "support": "", "versions": [ "2.7.14-gimkl-2017a", "2.7.16-gimkl-2018b", @@ -19298,8 +17441,6 @@ "homepage": "", "support": "", "versions": [ - "4.4.2", - "4.4.2-mpi", "5.2.1-mpi", "5.3.2-mpi", "5.4.2-mpi", @@ -19333,13 +17474,6 @@ "homepage": "http://qiime2.org/", "support": "", "versions": [ - "2020.11", - "2020.2", - "2020.6", - "2020.8", - "2021.11", - "2021.2", - "2021.4", "2022.2", "2023.5", "2024.2-shotgun" @@ -19388,7 +17522,6 @@ "versions": [ "5.10.1-GCCcore-7.4.0", "5.11.2-GCCcore-7.4.0", - "5.12.3-GCCcore-7.4.0", "5.12.3-GCCcore-9.2.0", "5.13.2-GCCcore-9.2.0" ], @@ -19511,9 +17644,7 @@ "lang", "language", "machine_learning", - "visualisation", - "mahuika", - "R" + "visualisation" ], "extensions": [ "abind-1.4-5", @@ -19872,7 +18003,7 @@ ], "licence_type": "", "homepage": "http://www.r-project.org/", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/R", + "support": "", "versions": [ "3.5.3-gimkl-2018b", "3.6.1-gimkl-2018b", @@ -20311,7 +18442,6 @@ "support": "", "versions": [ "1.4.13-GCC-9.2.0", - "1.4.21-GCC-7.4.0-CUDA-11.2.0-hybrid", "1.4.21-GCC-9.2.0-CUDA-11.2.0-hybrid", "1.5.0-GCC-11.3.0" ], @@ -20341,7 +18471,11 @@ "versions": [ "2.2-gimkl-2018b-Python-2.7.16" ], - "admin_list": [], + "admin_list": [ + { + "2.2-gimkl-2018b-Python-2.7.16": "Warning: Ragout/2.2-gimkl-2018b-Python-2.7.16 is old and marked for deletion. If you still need it, then please let us know." 
+ } + ], "network_licences": [], "default": "2.2-gimkl-2018b-Python-2.7.16", "default_type": "latest", @@ -20471,7 +18605,6 @@ "homepage": "https://mmseqs.com", "support": "", "versions": [ - "1.4.0-GCC-9.2.0", "1.5.0-GCC-9.2.0" ], "admin_list": [ @@ -20480,11 +18613,11 @@ } ], "network_licences": [], - "default": "1.4.0-GCC-9.2.0", + "default": "1.5.0-GCC-9.2.0", "default_type": "latest", "last_updated": 1624138860, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/Raven/1.4.0-GCC-9.2.0.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/Raven/1.5.0-GCC-9.2.0.lua", "force_hide": "False", "force_show": "False" }, @@ -20492,13 +18625,12 @@ "description": "RAxML search algorithm for maximum likelihood based inference of phylogenetic trees.", "domains": [ "bio", - "biology", - "mahuika" + "biology" ], "extensions": [], "licence_type": "", "homepage": "https://github.com/stamatak/standard-RAxML", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/RAxML", + "support": "", "versions": [ "8.2.10-gimkl-2017a", "8.2.12-gimkl-2020a", @@ -20528,7 +18660,6 @@ "homepage": "https://github.com/amkozlov/raxml-ng", "support": "", "versions": [ - "1.1.0-gimkl-2020a", "1.1.0-gimkl-2022a" ], "admin_list": [ @@ -20545,32 +18676,6 @@ "force_hide": "False", "force_show": "False" }, - "razers3": { - "description": "Tool for mapping millions of short genomic reads onto a reference genome.", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://www.ncbi.nlm.nih.gov/pubmed/22923295", - "support": "", - "versions": [ - "3.5.8" - ], - "admin_list": [ - { - "3.5.8": "Warning: razers3/3.5.8 is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "3.5.8", - "default_type": "latest", - "last_updated": 1579496046, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/razers3/3.5.8.lua", - "force_hide": "False", - "force_show": "False" - }, "rclone": { "description": "\n Rclone is a command line program to sync files and directories to and from\n a variety of online storage services\n", "domains": [ @@ -20581,7 +18686,6 @@ "homepage": "https://rclone.org/", "support": "", "versions": [ - "1.42", "1.54.0", "1.57.0", "1.62.2" @@ -20622,33 +18726,6 @@ "force_hide": "False", "force_show": "False" }, - "rDock": { - "description": "rDock is a fast and versatile Open Source docking program that\ncan be used to dock small molecules against proteins and nucleic acids. It is\ndesigned for High Throughput Virtual Screening (HTVS) campaigns and Binding Mode\nprediction studies. rDock is mainly written in C++ and accessory scripts and\nprograms are written in C++, perl or python languages.", - "domains": [ - "bio", - "chemistry" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://sourceforge.net/projects/rdock/", - "support": "", - "versions": [ - "2013.1-gimkl-2018b" - ], - "admin_list": [ - { - "2013.1-gimkl-2018b": "Warning: rDock/2013.1-gimkl-2018b is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "2013.1-gimkl-2018b", - "default_type": "latest", - "last_updated": 1563247455, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/rDock/2013.1-gimkl-2018b.lua", - "force_hide": "False", - "force_show": "False" - }, "RDP-Classifier": { "description": "The RDP Classifier is a naive Bayesian classifier that can rapidly and accurately provides taxonomic\n assignments from domain to genus, with confidence estimates for each assignment.", "domains": [ @@ -20771,16 +18848,12 @@ "description": "RELION (for REgularised LIkelihood OptimisatioN, pronounce rely-on) \n is a stand-alone computer program that employs an empirical Bayesian \n approach to refinement of (multiple) 3D reconstructions or 2D class \n averages in electron cryo-microscopy (cryo-EM). ", "domains": [ "bio", - "biology", - "no_toc", - "no_lic", - "no_desc", - "no_ver" + "biology" ], "extensions": [], "licence_type": "", "homepage": "http://www2.mrc-lmb.cam.ac.uk/relion/index.php/Main_Page", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/Relion", + "support": "", "versions": [ "3.0beta-gimkl-2017a", "4.0.1-gimkl-2020a" @@ -21029,32 +19102,6 @@ "force_hide": "False", "force_show": "False" }, - "rkcommon": { - "description": "\nA common set of C++ infrastructure and CMake utilities used by various components of Intel\u00ae oneAPI Rendering Toolkit.\n", - "domains": [ - "vis" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://github.com/ospray/rkcommon", - "support": "", - "versions": [ - "1.6.0-GCCcore-9.2.0" - ], - "admin_list": [ - { - "1.6.0-GCCcore-9.2.0": "Warning: rkcommon/1.6.0-GCCcore-9.2.0 is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "1.6.0-GCCcore-9.2.0", - "default_type": "latest", - "last_updated": 1615521849, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/vis/rkcommon/1.6.0-GCCcore-9.2.0.lua", - "force_hide": "False", - "force_show": "False" - }, "RMBlast": { "description": "RMBlast supports RepeatMasker searches by adding a few necessary features to the stock NCBI blastn program. These include:\nSupport for custom matrices ( without KA-Statistics ).\nSupport for cross_match-like complexity adjusted scoring. Cross_match is Phil Green's seeded smith-waterman search algorithm.\nSupport for cross_match-like masklevel filtering..", "domains": [ @@ -21173,32 +19220,6 @@ "force_hide": "False", "force_show": "False" }, - "ROCm": { - "description": "Platform for GPU Enabled HPC and UltraScale Computing", - "domains": [ - "tools" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://github.com/RadeonOpenCompute", - "support": "", - "versions": [ - "4.2.0-GCC-9.2.0-amd" - ], - "admin_list": [ - { - "4.2.0-GCC-9.2.0-amd": "Warning: ROCm/4.2.0-GCC-9.2.0-amd is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "4.2.0-GCC-9.2.0-amd", - "default_type": "latest", - "last_updated": 1629885341, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/tools/ROCm/4.2.0-GCC-9.2.0-amd.lua", - "force_hide": "False", - "force_show": "False" - }, "ROOT": { "description": "The ROOT system provides a set of OO frameworks with all the functionality\n needed to handle and analyze large amounts of data in a very efficient way.", "domains": [ @@ -21258,7 +19279,6 @@ "homepage": "http://deweylab.biostat.wisc.edu/rsem/", "support": "", "versions": [ - "1.3.3-gimkl-2020a", "1.3.3-gimkl-2022a" ], "admin_list": [ @@ -21289,7 +19309,6 @@ "homepage": "http://www.rsgislib.org", "support": "", "versions": [ - "3.3.16-gimkl-2017a-Python-3.6.3", "3.6.14-gimkl-2018b-Python-3.7.3", "5.0.10-gimkl-2020a-Python-3.9.9", "5.0.10-gimkl-2022a-Python-3.10.5" @@ -21302,7 +19321,7 @@ "network_licences": [], "default": "5.0.10-gimkl-2022a-Python-3.10.5", "default_type": "latest", - "last_updated": 1705377459, + "last_updated": 1664244733, "modulefile_text": "", "module_path": "/opt/nesi/CS400_centos7_bdw/modules/geo/RSGISLib/5.0.10-gimkl-2022a-Python-3.10.5.lua", "force_hide": "False", @@ -21437,36 +19456,6 @@ "force_hide": "False", "force_show": "False" }, - "rust-fmlrc": { - "description": "FM-index Long Read Corrector (Rust implementation)", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://github.com/HudsonAlpha/rust-fmlrc", - "support": "", - "versions": [ - "0.1.5-GCCcore-9.2.0", - "0.1.7-GCC-11.3.0" - ], - "admin_list": [ - { - "0.1.5-GCCcore-9.2.0": "Warning: rust-fmlrc/0.1.5-GCCcore-9.2.0 is old and marked for deletion. If you still need it, then please let us know." - }, - { - "0.1.7-GCC-11.3.0": "Warning: rust-fmlrc/0.1.7-GCC-11.3.0 is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "0.1.7-GCC-11.3.0", - "default_type": "latest", - "last_updated": 1652242725, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/rust-fmlrc/0.1.7-GCC-11.3.0.lua", - "force_hide": "False", - "force_show": "False" - }, "SAGE": { "description": "Ppackage containing programs for use in the genetic analysis of\nfamily, pedigree and individual data.", "domains": [ @@ -21514,6 +19503,12 @@ }, { "0.11.3-gimkl-2017a": "Warning: Salmon/0.11.3-gimkl-2017a is very old and will soon be deleted. Please select a more recent version (try 'module spider Salmon')." + }, + { + "0.13.1-gimkl-2018b": "Warning: Salmon/0.13.1-gimkl-2018b is old and marked for deletion. Please select a more recent version (try 'module spider Salmon') or let us know that you still need it." + }, + { + "0.14.0-gimkl-2018b": "Warning: Salmon/0.14.0-gimkl-2018b is old and marked for deletion. Please select a more recent version (try 'module spider Salmon') or let us know that you still need it." 
} ], "network_licences": [], @@ -21536,7 +19531,6 @@ "homepage": "https://lomereiter.github.io/sambamba/", "support": "", "versions": [ - "0.7.1", "0.8.0" ], "admin_list": [ @@ -21548,11 +19542,11 @@ } ], "network_licences": [], - "default": "0.7.1", + "default": "0.8.0", "default_type": "latest", "last_updated": 1640120136, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/Sambamba/0.7.1.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/Sambamba/0.8.0.lua", "force_hide": "False", "force_show": "False" }, @@ -21584,28 +19578,6 @@ "force_hide": "False", "force_show": "False" }, - "samclip": { - "description": "Filter SAM file for soft and hard clipped alignments.", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://github.com/tseemann/samclip", - "support": "", - "versions": [ - "0.4.0-GCC-11.3.0" - ], - "admin_list": [], - "network_licences": [], - "default": "0.4.0-GCC-11.3.0", - "default_type": "latest", - "last_updated": 1670275023, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/samclip/0.4.0-GCC-11.3.0.lua", - "force_hide": "False", - "force_show": "False" - }, "SAMtools": { "description": "Samtools is a suite of programs for interacting with high-throughput sequencing data.\n SAMtools - Reading/writing/editing/indexing/viewing SAM/BAM/CRAM format", "domains": [ @@ -21659,48 +19631,6 @@ "force_hide": "False", "force_show": "False" }, - "SAS": { - "description": "SAS is a statistical software suite developed by SAS Institute for data management, advanced analytics, multivariate analysis, business intelligence, criminal investigation, and predictive analytics. 
- Homepage: https://www.sas.com/en_nz/home.html/", - "domains": [], - "extensions": [], - "licence_type": "", - "homepage": "", - "support": "", - "versions": [ - "9.4" - ], - "admin_list": [], - "network_licences": [], - "default": "9.4", - "default_type": "latest", - "last_updated": 1626409285, - "modulefile_text": "", - "module_path": "/opt/nesi/share/modules/all/SAS/9.4.lua", - "force_hide": "False", - "force_show": "False" - }, - "savvy": { - "description": "Interface to various variant calling formats.", - "domains": [ - "data" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://github.com/statgen/savvy", - "support": "", - "versions": [ - "2.1.0-GCC-11.3.0" - ], - "admin_list": [], - "network_licences": [], - "default": "2.1.0-GCC-11.3.0", - "default_type": "latest", - "last_updated": 1688018340, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/savvy/2.1.0-GCC-11.3.0.lua", - "force_hide": "False", - "force_show": "False" - }, "sbt": { "description": "sbt is a build tool for Scala, Java, and more.", "domains": [ @@ -21884,7 +19814,11 @@ "7.0.3-gimpi-2022a", "7.0.4-foss-2023a" ], - "admin_list": [], + "admin_list": [ + { + "6.0.4-gimkl-2017a": "Warning: SCOTCH/6.0.4-gimkl-2017a is old and marked for deletion along with the rest of our gimkl-2017a software. Please select a more recent version (try 'module spider SCOTCH') or let us know that you still need it." 
+ } + ], "network_licences": [], "default": "6.1.2-gimpi-2022a", "default_type": "latest", @@ -21980,8 +19914,6 @@ "4.3.10", "4.4.0-gimkl-2020a", "4.5.1-gimkl-2020a-Python-3.8.2", - "4.5.1-gimkl-2020a-Python-3.9.9", - "4.5.1-gimkl-2022a-Python-3.10.5", "4.5.1-gimkl-2022a-Python-3.11.3" ], "admin_list": [], @@ -22004,8 +19936,7 @@ "homepage": "https://github.com/seqan/seqan", "support": "", "versions": [ - "2.4.0-GCC-12.3.0", - "2.4.0-GCCcore-9.2.0" + "2.4.0-GCC-12.3.0" ], "admin_list": [ { @@ -22013,11 +19944,11 @@ } ], "network_licences": [], - "default": "2.4.0-GCCcore-9.2.0", + "default": "2.4.0-GCC-12.3.0", "default_type": "latest", "last_updated": 1721883682, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/SeqAn/2.4.0-GCCcore-9.2.0.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/SeqAn/2.4.0-GCC-12.3.0.lua", "force_hide": "False", "force_show": "False" }, @@ -22036,7 +19967,7 @@ ], "admin_list": [ { - "3.0.0": "Warning: SeqAn3/3.0.0 is old and marked for deletion. If you still need it, then please let us know." + "3.0.0": "Warning: SeqAn3/3.0.0 is old and marked for deletion. Please select a more recent version (try 'module spider SeqAn3') or let us know that you still need it." 
} ], "network_licences": [], @@ -22280,15 +20211,12 @@ "description": "Singularity is a portable application stack packaging and runtime utility.", "domains": [ "tools", - "base", - "containers", - "singularity", - "docker" + "base" ], "extensions": [], "licence_type": "", "homepage": "https://www.sylabs.io/docs/", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/Singularity", + "support": "", "versions": [ "3.10.0", "3.10.3", @@ -22364,8 +20292,7 @@ "homepage": "https://www.fz-juelich.de/ias/jsc/EN/Expertise/Support/Software/SIONlib/_node.html", "support": "", "versions": [ - "1.7.6-GCCcore-9.2.0-tools", - "1.7.7-GCC-11.3.0" + "1.7.6-GCCcore-9.2.0-tools" ], "admin_list": [ { @@ -22373,11 +20300,11 @@ } ], "network_licences": [], - "default": "1.7.7-GCC-11.3.0", + "default": "1.7.6-GCCcore-9.2.0-tools", "default_type": "latest", - "last_updated": 1674863632, + "last_updated": 1611739084, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/SIONlib/1.7.7-GCC-11.3.0.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/SIONlib/1.7.6-GCCcore-9.2.0-tools.lua", "force_hide": "False", "force_show": "False" }, @@ -22391,10 +20318,7 @@ "homepage": "http://www.riverbankcomputing.com/software/sip/", "support": "", "versions": [ - "4.19.15-gimkl-2018b-Python-3.7.3", - "4.19.15-gimkl-2020a-Python-3.8.2", - "4.19.8-gimkl-2018b-Python-3.7.3", - "4.19.8-gimkl-2020a-Python-3.8.2" + "4.19.8-gimkl-2018b-Python-3.7.3" ], "admin_list": [ { @@ -22408,11 +20332,11 @@ } ], "network_licences": [], - "default": "4.19.8-gimkl-2020a-Python-3.8.2", + "default": "4.19.8-gimkl-2018b-Python-3.7.3", "default_type": "latest", - "last_updated": 1592859945, + "last_updated": 1555988290, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/lang/SIP/4.19.8-gimkl-2020a-Python-3.8.2.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/lang/SIP/4.19.8-gimkl-2018b-Python-3.7.3.lua", "force_hide": 
"False", "force_show": "False" }, @@ -22546,28 +20470,6 @@ "force_hide": "False", "force_show": "False" }, - "smafa": { - "description": "Smafa attempts to align or cluster pre-aligned biological sequences, handling sequences\n which are all the same length.", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://github.com/wwood/smafa", - "support": "", - "versions": [ - "0.5.0-GCC-11.3.0" - ], - "admin_list": [], - "network_licences": [], - "default": "0.5.0-GCC-11.3.0", - "default_type": "latest", - "last_updated": 1659927648, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/smafa/0.5.0-GCC-11.3.0.lua", - "force_hide": "False", - "force_show": "False" - }, "smoove": { "description": "simplifies and speeds calling and genotyping SVs for short reads.", "domains": [ @@ -22648,20 +20550,9 @@ "homepage": "https://snakemake.readthedocs.io", "support": "", "versions": [ - "5.10.0-gimkl-2020a-Python-3.8.2", "5.19.3-gimkl-2020a-Python-3.8.2", - "5.32.0-gimkl-2020a-Python-3.8.2", - "5.5.0-gimkl-2018b-Python-3.7.3", - "6.13.1-gimkl-2020a-Python-3.8.2", - "6.8.0-gimkl-2020a-Python-3.8.2", - "7.0.1-gimkl-2020a-Python-3.9.9", - "7.19.1-gimkl-2022a-Python-3.10.5", - "7.26.0-gimkl-2022a-Python-3.11.3", - "7.30.1-gimkl-2022a-Python-3.11.3", - "7.32.0-gimkl-2022a-Python-3.11.3", "7.32.3-foss-2023a-Python-3.11.6", "7.32.3-gimkl-2022a-Python-3.11.3", - "7.6.2-gimkl-2020a-Python-3.9.9", "8.1.0-gimkl-2022a-Python-3.11.3" ], "admin_list": [ @@ -22688,49 +20579,23 @@ }, { "7.19.1-gimkl-2022a-Python-3.10.5": "Warning: snakemake/7.19.1-gimkl-2022a-Python-3.10.5 is old and marked for deletion. Please select a more recent version (try 'module spider snakemake') or let us know that you still need it" - }, - { - "7.26.0-gimkl-2022a-Python-3.11.3": "Warning: snakemake/7.26.0-gimkl-2022a-Python-3.11.3 is old and marked for deletion. 
Please select a more recent version (try 'module spider snakemake') or let us know that you still need it" - }, - { - "7.30.1-gimkl-2022a-Python-3.11.3": "Warning: snakemake/7.30.1-gimkl-2022a-Python-3.11.3 is old and marked for deletion. Please select a more recent version (try 'module spider snakemake') or let us know that you still need it" - }, - { - "7.32.0-gimkl-2022a-Python-3.11.3": "Warning: snakemake/7.32.0-gimkl-2022a-Python-3.11.3 is old and marked for deletion. Please select a more recent version (try 'module spider snakemake') or let us know that you still need it" - } - ], - "network_licences": [], - "default": "7.32.3-gimkl-2022a-Python-3.11.3", - "default_type": "static", - "last_updated": 1704940352, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/snakemake/7.32.3-gimkl-2022a-Python-3.11.3.lua", - "force_hide": "False", - "force_show": "False" - }, - "snaphu": { - "description": "SNAPHU is an implementation of the Statistical-cost, Network-flow Algorithm for Phase Unwrapping\n proposed by Chen and Zebker", - "domains": [ - "math" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://web.stanford.edu/group/radar/softwareandlinks/sw/snaphu/", - "support": "", - "versions": [ - "2.0.4-GCCcore-9.2.0" - ], - "admin_list": [ + }, + { + "7.26.0-gimkl-2022a-Python-3.11.3": "Warning: snakemake/7.26.0-gimkl-2022a-Python-3.11.3 is old and marked for deletion. Please select a more recent version (try 'module spider snakemake') or let us know that you still need it" + }, { - "2.0.4-GCCcore-9.2.0": "Warning: snaphu/2.0.4-GCCcore-9.2.0 is old and marked for deletion. If you still need it, then please let us know." + "7.30.1-gimkl-2022a-Python-3.11.3": "Warning: snakemake/7.30.1-gimkl-2022a-Python-3.11.3 is old and marked for deletion. 
Please select a more recent version (try 'module spider snakemake') or let us know that you still need it" + }, + { + "7.32.0-gimkl-2022a-Python-3.11.3": "Warning: snakemake/7.32.0-gimkl-2022a-Python-3.11.3 is old and marked for deletion. Please select a more recent version (try 'module spider snakemake') or let us know that you still need it" } ], "network_licences": [], - "default": "2.0.4-GCCcore-9.2.0", - "default_type": "latest", - "last_updated": 1606862298, + "default": "7.32.3-gimkl-2022a-Python-3.11.3", + "default_type": "static", + "last_updated": 1704940352, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/math/snaphu/2.0.4-GCCcore-9.2.0.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/snakemake/7.32.3-gimkl-2022a-Python-3.11.3.lua", "force_hide": "False", "force_show": "False" }, @@ -22815,10 +20680,9 @@ "extensions": [], "licence_type": "", "homepage": "https://pcingola.github.io/SnpEff/", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/snpEff", + "support": "", "versions": [ "4.2", - "4.3t", "5.0-Java-11.0.4" ], "admin_list": [ @@ -22857,36 +20721,6 @@ "force_hide": "False", "force_show": "False" }, - "SOCI": { - "description": "Database access library for C++ that makes the illusion of embedding SQL queries in the\n regular C++ code, staying entirely within the Standard C++.", - "domains": [ - "lang" - ], - "extensions": [], - "licence_type": "", - "homepage": "http://soci.sourceforge.net/", - "support": "", - "versions": [ - "4.0.1-GCC-9.2.0", - "4.0.2-GCC-9.2.0" - ], - "admin_list": [ - { - "4.0.1-GCC-9.2.0": "Warning: SOCI/4.0.1-GCC-9.2.0 is old and marked for deletion. If you still need it, then please let us know." - }, - { - "4.0.2-GCC-9.2.0": "Warning: SOCI/4.0.2-GCC-9.2.0 is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "4.0.2-GCC-9.2.0", - "default_type": "latest", - "last_updated": 1648111234, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/lang/SOCI/4.0.2-GCC-9.2.0.lua", - "force_hide": "False", - "force_show": "False" - }, "somalier": { "description": "extract informative sites, evaluate relatedness, and \nperform quality-control on BAM/CRAM/BCF/VCF/GVCF", "domains": [ @@ -23107,32 +20941,6 @@ "force_hide": "False", "force_show": "False" }, - "SPIDER": { - "description": "System for Processing Image Data from Electron microscopy and Related fields", - "domains": [ - "vis" - ], - "extensions": [], - "licence_type": "", - "homepage": "http://spider.wadsworth.org/spider_doc/spider/docs/spider.html", - "support": "", - "versions": [ - "22.02" - ], - "admin_list": [ - { - "22.02": "Warning: SPIDER/22.02 is old and marked for deletion. If you still need it, then please let us know." - } - ], - "network_licences": [], - "default": "22.02", - "default_type": "latest", - "last_updated": 1533595414, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/vis/SPIDER/22.02.lua", - "force_hide": "False", - "force_show": "False" - }, "splat": { "description": "", "domains": [ @@ -23166,13 +20974,15 @@ "homepage": "https://github.com/rvaser/spoa", "support": "", "versions": [ - "3.0.2-GCC-9.2.0", "4.0.7-GCC-7.4.0", "4.0.7-GCC-9.2.0" ], "admin_list": [ { "3.0.2-GCC-9.2.0": "Warning: spoa/3.0.2-GCC-9.2.0 is old and marked for deletion. Please select a more recent version (try 'module spider spoa') or let us know that you still need it." + }, + { + "4.0.7-GCC-7.4.0": "Warning: spoa/4.0.7-GCC-7.4.0 is old and marked for deletion. Please select a more recent version (try 'module spider spoa') or let us know that you still need it." 
} ], "network_licences": [], @@ -23245,9 +21055,7 @@ "homepage": "https://trace.ncbi.nlm.nih.gov/Traces/sra/sra.cgi?view=software", "support": "", "versions": [ - "2.10.9", "2.11.3", - "2.8.0", "2.9.6", "3.0.2" ], @@ -23404,32 +21212,6 @@ "force_hide": "False", "force_show": "False" }, - "STAR-Fusion": { - "description": "Processes the output generated by the STAR aligner to map junction reads and spanning reads to a reference annotation set", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://github.com/STAR-Fusion/STAR-Fusion/wiki", - "support": "", - "versions": [ - "1.6.0-gimkl-2018b" - ], - "admin_list": [ - { - "1.6.0-gimkl-2018b": "Warning: STAR-Fusion/1.6.0-gimkl-2018b is old and marked for deletion. If you still need it, then please let us know." - } - ], - "network_licences": [], - "default": "1.6.0-gimkl-2018b", - "default_type": "latest", - "last_updated": 1554872945, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/STAR-Fusion/1.6.0-gimkl-2018b.lua", - "force_hide": "False", - "force_show": "False" - }, "StringTie": { "description": "StringTie is a fast and highly efficient assembler of RNA-Seq alignments into potential transcripts.", "domains": [ @@ -23440,7 +21222,6 @@ "homepage": "http://ccb.jhu.edu/software/%(namelower)/", "support": "", "versions": [ - "1.3.5-gimkl-2018b", "2.2.0-gimkl-2022a", "2.2.0-gimkl-2022a-Python-3.11.3", "2.2.1-gimkl-2022a-Python-3.11.3", @@ -23580,7 +21361,17 @@ "5.8.1-gimkl-2022a-METIS-5.1.0", "7.8.3-foss-2023a" ], - "admin_list": [], + "admin_list": [ + { + "5.4.0-intel-2018b-METIS-5.1.0": "Warning: SuiteSparse/5.4.0-intel-2018b-METIS-5.1.0 is old and marked for deletion. Please select a more recent version (try 'module spider SuiteSparse') or let us know that you still need it." + }, + { + "5.6.0-intel-2020a-METIS-5.1.0": "Warning: SuiteSparse/5.6.0-intel-2020a-METIS-5.1.0 is old and marked for deletion. 
Please select a more recent version (try 'module spider SuiteSparse') or let us know that you still need it." + }, + { + "4.5.4-gimkl-2017a": "Warning: SuiteSparse/4.5.4-gimkl-2017a is old and marked for deletion along with the rest of our gimkl-2017a software. Please select a more recent version (try 'module spider SuiteSparse') or let us know that you still need it." + } + ], "network_licences": [], "default": "5.13.0-gimkl-2022a", "default_type": "latest", @@ -23601,9 +21392,6 @@ "homepage": "http://computation.llnl.gov/projects/sundials", "support": "", "versions": [ - "4.1.0-gimkl-2018b", - "5.5.0-gimkl-2020a", - "5.7.0-gimkl-2020a", "5.8.0-gimkl-2020a" ], "admin_list": [ @@ -23621,11 +21409,11 @@ } ], "network_licences": [], - "default": "4.1.0-gimkl-2018b", + "default": "5.8.0-gimkl-2020a", "default_type": "latest", "last_updated": 1636661583, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/math/SUNDIALS/4.1.0-gimkl-2018b.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/math/SUNDIALS/5.8.0-gimkl-2020a.lua", "force_hide": "False", "force_show": "False" }, @@ -23659,13 +21447,12 @@ "description": "Supernova is a software package for de novo assembly from Chromium Linked-Reads\n that are made from a single whole-genome library from an individual DNA source", "domains": [ "bio", - "biology", - "mahuika" + "biology" ], "extensions": [], "licence_type": "", "homepage": "https://support.10xgenomics.com/de-novo-assembly/software/overview/latest/welcome", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/Supernova", + "support": "", "versions": [ "2.1.1" ], @@ -23753,32 +21540,6 @@ "force_hide": "False", "force_show": "False" }, - "swissknife": { - "description": "Perl module for reading and writing UniProtKB data in plain text format.", - "domains": [ - "bio" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://sourceforge.net/projects/swissknife/", - "support": "", - 
"versions": [ - "1.80-GCC-9.2.0-Perl-5.30.1" - ], - "admin_list": [ - { - "1.80-GCC-9.2.0-Perl-5.30.1": "Warning: swissknife/1.80-GCC-9.2.0-Perl-5.30.1 is old and marked for deletion. If you still need it, then please let us know." - } - ], - "network_licences": [], - "default": "1.80-GCC-9.2.0-Perl-5.30.1", - "default_type": "latest", - "last_updated": 1632991451, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/swissknife/1.80-GCC-9.2.0-Perl-5.30.1.lua", - "force_hide": "False", - "force_show": "False" - }, "Szip": { "description": "Szip compression software, providing lossless compression of scientific data", "domains": [ @@ -23861,7 +21622,6 @@ "homepage": "https://github.com/intel/tbb", "support": "", "versions": [ - "2019_U1-gimkl-2017a", "2019_U4-GCCcore-7.4.0", "2019_U9-GCCcore-9.2.0", "2020.2-GCCcore-9.2.0", @@ -23928,7 +21688,11 @@ "8.6.6-gimkl-2017a", "8.6.9-GCCcore-7.4.0" ], - "admin_list": [], + "admin_list": [ + { + "8.6.6-gimkl-2017a": "Warning: Tcl/8.6.6-gimkl-2017a is old and marked for deletion along with the rest of our gimkl-2017a software. Please select a more recent version (try 'module spider Tcl') or let us know that you still need it." + } + ], "network_licences": [], "default": "8.5.19-gimkl-2017a", "default_type": "latest", @@ -23988,7 +21752,13 @@ ], "admin_list": [ { - "2.0.1-gimkl-2018b-Python-3.8.1": "Warning: TensorFlow/2.0.1-gimkl-2018b-Python-3.8.1 is quite old, so please select a more recent version (try 'module spider Tensorflow') or let us know that you still need it." + "2.0.1-gimkl-2018b-Python-3.8.1": "Warning: TensorFlow/2.0.1-gimkl-2018b-Python-3.8.1 is quite old and so will soon be removed. Please select a more recent version (try 'module spider Tensorflow') or let us know that you still need it." + }, + { + "2.2.2-gimkl-2018b-Python-3.8.1": "Warning: TensorFlow/2.2.2-gimkl-2018b-Python-3.8.1 is quite old and so will soon be removed. 
Please select a more recent version (try 'module spider Tensorflow') or let us know that you still need it." + }, + { + "2.2.3-gimkl-2018b-Python-3.8.1": "Warning: TensorFlow/2.2.3-gimkl-2018b-Python-3.8.1 is quite old and so will soon be removed. Please select a more recent version (try 'module spider Tensorflow') or let us know that you still need it." } ], "network_licences": [], @@ -24013,7 +21783,11 @@ "7.0.0.11-gimkl-2018b-Python-3.7.3", "8.6.1.6-gimkl-2022a-Python-3.11.3-CUDA-11.8.0" ], - "admin_list": [], + "admin_list": [ + { + "7.0.0.11-gimkl-2018b-Python-3.7.3": "Warning: TensorRT/7.0.0.11-gimkl-2018b-Python-3.7.3 is old and marked for deletion. Please select a more recent version (try 'module spider TensorRT') or let us know that you still need it." + } + ], "network_licences": [], "default": "7.0.0.11-gimkl-2018b-Python-3.7.3", "default_type": "latest", @@ -24047,35 +21821,6 @@ "force_hide": "False", "force_show": "False" }, - "Theano": { - "description": "Theano is a Python library that allows you to define, optimize,\nand evaluate mathematical expressions involving multi-dimensional arrays efficiently.", - "domains": [ - "math" - ], - "extensions": [ - "libgpuarray-0.7.6", - "Theano-1.0.5" - ], - "licence_type": "", - "homepage": "https://deeplearning.net/software/theano", - "support": "", - "versions": [ - "1.0.5-gimkl-2020a-Python-3.8.2" - ], - "admin_list": [ - { - "1.0.5-gimkl-2020a-Python-3.8.2": "Warning: Theano/1.0.5-gimkl-2020a-Python-3.8.2 is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "1.0.5-gimkl-2020a-Python-3.8.2", - "default_type": "latest", - "last_updated": 1608002216, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/math/Theano/1.0.5-gimkl-2020a-Python-3.8.2.lua", - "force_hide": "False", - "force_show": "False" - }, "Tk": { "description": "Tk is an open source, cross-platform widget toolchain that provides a library of basic elements for\n building a graphical user interface (GUI) in many different programming languages.", "domains": [ @@ -24092,7 +21837,11 @@ "8.6.6-gimkl-2017a", "8.6.9-GCCcore-7.4.0" ], - "admin_list": [], + "admin_list": [ + { + "8.6.6-gimkl-2017a": "Warning: Tk/8.6.6-gimkl-2017a is old and marked for deletion along with the rest of our gimkl-2017a software. Please select a more recent version (try 'module spider Tk') or let us know that you still need it." + } + ], "network_licences": [], "default": "8.6.9-GCCcore-7.4.0", "default_type": "latest", @@ -24347,7 +22096,7 @@ "extensions": [], "licence_type": "", "homepage": "http://trinityrnaseq.github.io", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/Trinity", + "support": "", "versions": [ "2.11.0-gimkl-2020a", "2.13.2-gimkl-2020a", @@ -24362,6 +22111,12 @@ }, { "2.8.4-gimkl-2017a": "Warning: Trinity/2.8.4-gimkl-2017a is very old, please select a more recent version (try 'module spider Trinity')." + }, + { + "2.8.4-gimkl-2018b": "Warning: Trinity/2.8.4-gimkl-2018b is old and marked for deletion. Please select a more recent version (try 'module spider Trinity') or let us know that you still need it." + }, + { + "2.8.5-gimkl-2018b": "Warning: Trinity/2.8.5-gimkl-2018b is old and marked for deletion. Please select a more recent version (try 'module spider Trinity') or let us know that you still need it." 
} ], "network_licences": [], @@ -24530,9 +22285,8 @@ "extensions": [], "licence_type": "", "homepage": "https://www.turbovnc.org", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/TurboVNC", + "support": "", "versions": [ - "2.2.3-GCC-7.4.0", "2.2.4-GCC-9.2.0" ], "admin_list": [ @@ -24660,7 +22414,6 @@ "homepage": "https://github.com/rrwick/Unicycler", "support": "", "versions": [ - "0.4.8-gimkl-2020a-Python-3.8.2", "0.4.9-gimkl-2020a-Python-3.8.2", "0.5.0-gimkl-2020a-Python-3.8.2" ], @@ -24673,11 +22426,11 @@ } ], "network_licences": [], - "default": "0.4.8-gimkl-2020a-Python-3.8.2", + "default": "0.4.9-gimkl-2020a-Python-3.8.2", "default_type": "latest", "last_updated": 1653294410, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/Unicycler/0.4.8-gimkl-2020a-Python-3.8.2.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/bio/Unicycler/0.4.9-gimkl-2020a-Python-3.8.2.lua", "force_hide": "False", "force_show": "False" }, @@ -24793,6 +22546,9 @@ "admin_list": [ { "3.13.0-gimkl-2017a": "Warning: Valgrind/3.13.0-gimkl-2017a is obsolete and will soon be removed, so please select a more recent version (try 'module spider Valgrind')." + }, + { + "3.14.0-GCC-7.4.0": "Warning: Valgrind/3.14.0-GCC-7.4.0 is old and marked for deletion. Please select a more recent version (try 'module spider Valgrind') or let us know that you still need it." 
} ], "network_licences": [], @@ -24831,15 +22587,12 @@ "domains": [ "base", "chem", - "chemistry", - "Density Functional Theory", - "Molecular Dynamics", - "Computational Chemistry" + "chemistry" ], "extensions": [], "licence_type": "proprietary", "homepage": "http://www.vasp.at", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/VASP", + "support": "", "versions": [ "4.6-gimkl-2020a", "5.4.4-gimkl-2022a", @@ -24926,7 +22679,6 @@ "homepage": "https://github.com/AndersenLab/VCF-kit", "support": "", "versions": [ - "0.1.6", "0.2.6-gimkl-2020a-Python-3.8.2" ], "admin_list": [ @@ -25220,7 +22972,7 @@ ], "licence_type": "", "homepage": "https://github.com/simroux/VirSorter", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/VirSorter", + "support": "", "versions": [ "1.0.6-gimkl-2020a-Perl-5.30.1", "2.1-gimkl-2020a-Python-3.8.2", @@ -25236,36 +22988,6 @@ "force_hide": "False", "force_show": "False" }, - "VirtualGL": { - "description": "VirtualGL is an open source toolkit that gives any Linux or \nUnix remote display software the ability to run OpenGL applications with full \nhardware acceleration.", - "domains": [ - "vis" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://virtualgl.org/", - "support": "", - "versions": [ - "2.6.2-GCCcore-7.4.0", - "2.6.2-gimkl-2020a" - ], - "admin_list": [ - { - "2.6.2-GCCcore-7.4.0": "Warning: VirtualGL/2.6.2-GCCcore-7.4.0 is old and marked for deletion. If you still need it, then please let us know." - }, - { - "2.6.2-gimkl-2020a": "Warning: VirtualGL/2.6.2-gimkl-2020a is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "2.6.2-gimkl-2020a", - "default_type": "latest", - "last_updated": 1588141913, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/vis/VirtualGL/2.6.2-gimkl-2020a.lua", - "force_hide": "False", - "force_show": "False" - }, "VMD": { "description": "VMD is a molecular visualization program for displaying, animating, and analyzing large biomolecular\n systems using 3-D graphics and built-in scripting.", "domains": [ @@ -25318,33 +23040,6 @@ "force_hide": "False", "force_show": "False" }, - "VTK": { - "description": "The Visualization Toolkit (VTK) is an open-source, freely available software system for\n 3D computer graphics, image processing and visualization. VTK consists of a C++ class library and several\n interpreted interface layers including Tcl/Tk, Java, and Python. VTK supports a wide variety of visualization\n algorithms including: scalar, vector, tensor, texture, and volumetric methods; and advanced modeling techniques\n such as: implicit modeling, polygon reduction, mesh smoothing, cutting, contouring, and Delaunay triangulation.", - "domains": [ - "vis", - "visualisation" - ], - "extensions": [], - "licence_type": "", - "homepage": "http://www.vtk.org", - "support": "", - "versions": [ - "7.1.1-gimkl-2018b-Python-2.7.16" - ], - "admin_list": [ - { - "7.1.1-gimkl-2018b-Python-2.7.16": "Warning: VTK/7.1.1-gimkl-2018b-Python-2.7.16 is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "7.1.1-gimkl-2018b-Python-2.7.16", - "default_type": "latest", - "last_updated": 1573607162, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/vis/VTK/7.1.1-gimkl-2018b-Python-2.7.16.lua", - "force_hide": "False", - "force_show": "False" - }, "VTune": { "description": "Intel VTune Amplifier XE is the premier performance profiler for C, C++, C#, Fortran,\n Assembly and Java.", "domains": [ @@ -25353,9 +23048,8 @@ "extensions": [], "licence_type": "", "homepage": "https://software.intel.com/en-us/vtune", - "support": "https://www.docs.nesi.org.nz/Scientific_Computing/Supported_Applications/VTune", + "support": "", "versions": [ - "2019_update4", "2019_update8", "2023.1.0" ], @@ -25365,11 +23059,11 @@ } ], "network_licences": [], - "default": "2019_update4", + "default": "2019_update8", "default_type": "latest", "last_updated": 1686195115, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/tools/VTune/2019_update4.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/tools/VTune/2019_update8.lua", "force_hide": "False", "force_show": "False" }, @@ -25454,7 +23148,6 @@ "homepage": "https://whatshap.readthedocs.io/en/latest/index.html", "support": "", "versions": [ - "1.1-gimkl-2020a", "1.6-gimkl-2022a-Python-3.10.5" ], "admin_list": [ @@ -25481,7 +23174,6 @@ "homepage": "https://pypi.python.org/pypi/wheel", "support": "", "versions": [ - "0.31.1-gimkl-2018b-Python-2.7.16", "0.31.1-gimkl-2020a-Python-2.7.18" ], "admin_list": [ @@ -25490,11 +23182,11 @@ } ], "network_licences": [], - "default": "0.31.1-gimkl-2018b-Python-2.7.16", + "default": "0.31.1-gimkl-2020a-Python-2.7.18", "default_type": "latest", "last_updated": 1596159578, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/tools/wheel/0.31.1-gimkl-2018b-Python-2.7.16.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/tools/wheel/0.31.1-gimkl-2020a-Python-2.7.18.lua", 
"force_hide": "False", "force_show": "False" }, @@ -25650,9 +23342,7 @@ "homepage": "http://xerces.apache.org/xerces-c/", "support": "", "versions": [ - "3.1.1-GCCcore-9.2.0", - "3.2.2-GCCcore-7.4.0", - "3.2.3-GCCcore-9.2.0" + "3.2.2-GCCcore-7.4.0" ], "admin_list": [ { @@ -25663,37 +23353,11 @@ } ], "network_licences": [], - "default": "3.1.1-GCCcore-9.2.0", - "default_type": "latest", - "last_updated": 1618223935, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/Xerces-C++/3.1.1-GCCcore-9.2.0.lua", - "force_hide": "False", - "force_show": "False" - }, - "XHMM": { - "description": "Calls copy number variation (CNV) from normalized read-depth data from exome capture or other targeted sequencing experiments.", - "domains": [ - "math" - ], - "extensions": [], - "licence_type": "", - "homepage": "https://atgu.mgh.harvard.edu/xhmm", - "support": "", - "versions": [ - "1.0-gimkl-2018b" - ], - "admin_list": [ - { - "1.0-gimkl-2018b": "Warning: XHMM/1.0-gimkl-2018b is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "1.0-gimkl-2018b", + "default": "3.2.2-GCCcore-7.4.0", "default_type": "latest", - "last_updated": 1569195933, + "last_updated": 1554875425, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/math/XHMM/1.0-gimkl-2018b.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/Xerces-C++/3.2.2-GCCcore-7.4.0.lua", "force_hide": "False", "force_show": "False" }, @@ -25707,7 +23371,6 @@ "homepage": "https://xkbcommon.org/", "support": "", "versions": [ - "0.10.0", "0.10.0-GCCcore-9.2.0" ], "admin_list": [ @@ -25718,68 +23381,12 @@ "network_licences": [], "default": "0.10.0-GCCcore-9.2.0", "default_type": "latest", - "last_updated": 1660004819, + "last_updated": 1594164681, "modulefile_text": "", "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/xkbcommon/0.10.0-GCCcore-9.2.0.lua", "force_hide": "False", "force_show": "False" }, - "XMDS2": { - "description": " Fast integrator of stochastic partial differential equations.", - "domains": [ - "math" - ], - "extensions": [ - "Cheetah3-3.2.6", - "Markdown-3.3.4", - "xmds2-beta3.0.0" - ], - "licence_type": "", - "homepage": "http://www.xmds.org/", - "support": "", - "versions": [ - "3.0.0-beta-gimkl-2020a-Python-3.8.2" - ], - "admin_list": [ - { - "3.0.0-beta-gimkl-2020a-Python-3.8.2": "Warning: XMDS2/3.0.0-beta-gimkl-2020a-Python-3.8.2 is old and marked for deletion. If you still need it, then please let us know." 
- } - ], - "network_licences": [], - "default": "3.0.0-beta-gimkl-2020a-Python-3.8.2", - "default_type": "latest", - "last_updated": 1625044688, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/math/XMDS2/3.0.0-beta-gimkl-2020a-Python-3.8.2.lua", - "force_hide": "False", - "force_show": "False" - }, - "XSD": { - "description": "CodeSynthesis XSD is an open-source, cross-platform W3C XML Schema to C++ data binding compiler.\n Provided with an XML instance specification (XML Schema), it generates C++ classes that represent the given vocabulary\n as well as XML parsing and serialization code. You can then access the data stored in XML using types and functions\n that semantically correspond to your application domain rather than dealing with the intricacies of reading and writing XML", - "domains": [ - "devel" - ], - "extensions": [], - "licence_type": "", - "homepage": "http://www.codesynthesis.com/products/xsd/", - "support": "", - "versions": [ - "4.0.0-GCCcore-7.4.0" - ], - "admin_list": [ - { - "4.0.0-GCCcore-7.4.0": "Warning: XSD/4.0.0-GCCcore-7.4.0 is old and marked for deletion. If you still need it, then please let us know." - } - ], - "network_licences": [], - "default": "4.0.0-GCCcore-7.4.0", - "default_type": "latest", - "last_updated": 1574127035, - "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/all/XSD/4.0.0-GCCcore-7.4.0.lua", - "force_hide": "False", - "force_show": "False" - }, "xtb": { "description": " xtb - An extended tight-binding semi-empirical program package. ", "domains": [ @@ -25864,7 +23471,11 @@ "versions": [ "2.1.0-GCCcore-9.2.0" ], - "admin_list": [], + "admin_list": [ + { + "2.1.0-GCCcore-9.2.0": "Warning: yajl/2.1.0-GCCcore-9.2.0 is old and marked for deletion. If you still need it, then please let us know." 
+ } + ], "network_licences": [], "default": "2.1.0-GCCcore-9.2.0", "default_type": "latest", @@ -25907,7 +23518,6 @@ "homepage": "https://github.com/jbeder/yaml-cpp", "support": "", "versions": [ - "0.6.2-GCCcore-7.4.0", "0.6.3-GCCcore-9.2.0", "0.8.0-GCC-12.3.0" ], @@ -25917,11 +23527,11 @@ } ], "network_licences": [], - "default": "0.6.2-GCCcore-7.4.0", + "default": "0.6.3-GCCcore-9.2.0", "default_type": "latest", "last_updated": 1710206334, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/cae/yaml-cpp/0.6.2-GCCcore-7.4.0.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/cae/yaml-cpp/0.6.3-GCCcore-9.2.0.lua", "force_hide": "False", "force_show": "False" }, @@ -25936,7 +23546,6 @@ "homepage": "http://www.tortall.net/projects/yasm/", "support": "", "versions": [ - "1.3.0-GCCcore-7.4.0", "1.3.0-gimkl-2017a" ], "admin_list": [ @@ -25945,11 +23554,11 @@ } ], "network_licences": [], - "default": "1.3.0-GCCcore-7.4.0", + "default": "1.3.0-gimkl-2017a", "default_type": "latest", - "last_updated": 1551767787, + "last_updated": 1533595414, "modulefile_text": "", - "module_path": "/opt/nesi/CS400_centos7_bdw/modules/lang/Yasm/1.3.0-GCCcore-7.4.0.lua", + "module_path": "/opt/nesi/CS400_centos7_bdw/modules/lang/Yasm/1.3.0-gimkl-2017a.lua", "force_hide": "False", "force_show": "False" }, @@ -26009,7 +23618,6 @@ "support": "", "versions": [ "4.3.2-GCCcore-9.2.0", - "4.3.2-gimkl-2018b", "4.3.4-GCCcore-11.3.0", "4.3.5-GCCcore-12.3.0" ], diff --git a/docs/assets/stylesheets/theme.css b/docs/assets/stylesheets/theme.css index 144f8e45e..731195d5b 100644 --- a/docs/assets/stylesheets/theme.css +++ b/docs/assets/stylesheets/theme.css @@ -124,3 +124,51 @@ height: 40px; margin: 10px; } + +[dir=ltr] .md-tabs__list { + margin-left: 2rem; +} + +.md-tabs__link { + margin-top: -0.1rem; +} +.md-tabs__item { + height: 1rem; + padding-left: 0.1rem; + padding-right: 0.1rem; +} +.md-tabs { + background-color: var(--md-primary-bg-color); +} +.md-tabs__link { + 
background: var(--md-primary-fg-color); + border: sold 1px var(--md-primary-fg-color); + padding-bottom: 0.4em; + padding-left: 0.4em; + padding-right: 0.4em; + border-bottom-left-radius: 6px; + border-bottom-right-radius: 6px; +} + +/* For icons on home pages */ +div.grid img { + vertical-align: middle; + margin-right: 1em; + max-width: 20%; +} + +/* Allow x3 grids */ +/* https://github.com/squidfunk/mkdocs-material/issues/3018#issuecomment-1316559519 */ + +.grid.cards.md-grid-three{ + grid-template-columns: repeat(auto-fit, minmax(10rem, 1fr)) +} +.grid.cards.md-grid-four { + grid-template-columns: repeat(auto-fit, minmax(8rem, 1fr)); +} + + + +label#__nav_1_label, label#__nav_2_label, label#__nav_3_label, label#__nav_4_label{ + display: none; +} diff --git a/docs/cards-help.yaml b/docs/cards-help.yaml deleted file mode 100644 index 959c5a83e..000000000 --- a/docs/cards-help.yaml +++ /dev/null @@ -1,12 +0,0 @@ - -- title: Introductory Workshops - content: We offer introductory workshops for new HPC users on most weeks of the year. - url: Getting_Started/Getting_Help/Introductory_Material.md - -- title: Online Office Hours - content: Drop-in sessions over Zoom. Bring your problems or questions and get one-on-one support. - url: Getting_Started/Getting_Help/Weekly_Online_Office_Hours.md - -- title: Consultancy service - content: Scientific and HPC-focussed computational support across a range of domains. - url: Getting_Started/Getting_Help/Consultancy.md diff --git a/docs/cards-platform-refresh.yaml b/docs/cards-platform-refresh.yaml deleted file mode 100644 index a227a4adc..000000000 --- a/docs/cards-platform-refresh.yaml +++ /dev/null @@ -1,14 +0,0 @@ -- title: Online Office Hours - content: Drop-in sessions over Zoom. Bring your problems or questions and get one-on-one support. 
- url: Getting_Started/Getting_Help/Weekly_Online_Office_Hours.md - image: ./assets/icons/material/video-call.svg - -- title: Frequently Asked Questions - content: Answers to some of your questions about what's happening. - url: General/FAQs/Common_questions_about_the_platform_refresh.md - image: ./assets/icons/material/help.svg - -- title: Latest Updates - content: Quick status updates and links to documentation that will help you prepare for migration. - url: General/Announcements/platform_refresh_updates.md - image: ./assets/icons/material/update.svg diff --git a/docs/cards-quickstart.yaml b/docs/cards-quickstart.yaml deleted file mode 100644 index 8942ace31..000000000 --- a/docs/cards-quickstart.yaml +++ /dev/null @@ -1,14 +0,0 @@ -- title: NeSI Accounts - content: How to get started with NeSI accounts and projects. - url: Getting_Started/Accounts-Projects_and_Allocations/Creating_a_NeSI_Account_Profile.md - image: ./assets/icons/material/account-details.svg - -- title: Cluster Access - content: Recommended clients for Linux, Mac, and Windows users. - url: Getting_Started/Accessing_the_HPCs/Choosing_and_Configuring_Software_for_Connecting_to_the_Clusters.md - image: ./assets/icons/material/compass.svg - -- title: SSH Config - content: Already familiar with using remote resources? - url: Scientific_Computing/Terminal_Setup/Standard_Terminal_Setup.md - image: ./assets/icons/material/cog-transfer-outline.svg diff --git a/docs/cards-services.yaml b/docs/cards-services.yaml deleted file mode 100644 index 8d57b3cb5..000000000 --- a/docs/cards-services.yaml +++ /dev/null @@ -1,14 +0,0 @@ -- title: Jupyter Notebooks - content: Connect to NeSI through your web browser. - url: Scientific_Computing/Interactive_computing_using_Jupyter/Jupyter_on_NeSI.md - image: ./assets/icons/logomark-notext-blackbody-blackmoons.svg - -- title: File Transfer - content: Getting your data on and off the cluster. 
- url: Getting_Started/Next_Steps/Moving_files_to_and_from_the_cluster.md - image: ./assets/icons/material/file-arrow-up-down.svg - -- title: Job Scheduler - content: How to use our job scheduler, Slurm. - url: Getting_Started/Next_Steps/Submitting_your_first_job.md#slurm - image: ./assets/icons/material/tray-full.svg diff --git a/docs/index.md b/docs/index.md index 296a596b5..08348ddb2 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,24 +1,32 @@ --- -template: main.html -hide: toc +created_at: 2025-02-04 +hide: + - toc + - nav +title: "" --- -# NeSI Support Documentation -Technical documentation for the NeSI High Performance Computing platform. +
-## Platform Refresh +- ![](../assets/icons/material/account-details.svg) [__Accounts and Services__](Access/) -[cards cols=3 (./docs/cards-platform-refresh.yaml)] + --- + Accounts n stuff -## Quickstart +- ![](../assets/icons/material/compass.svg) [__High Performance Computing__](High_Performance_Computing/) -[cards cols=3 (./docs/cards-quickstart.yaml)] + --- + Hatch Pea Sea Services. -## Services +- ![](../assets/icons/material/cog-transfer-outline.svg) [__Research Developer Cloud__](Researcher_Developer_Cloud/) -[cards cols=3 (./docs/cards-services.yaml)] + --- + Develop research on [the cloud](Researcher_Developer_Cloud/). -## Help +- ![](../assets/icons/material/cog-transfer-outline.svg) [__Training__](Capability_&_Skills/) -[cards cols=3 (./docs/cards-help.yaml)] + --- + [Train'n](Capability_&_Skills/) + +
diff --git a/mkdocs.yml b/mkdocs.yml index 197173739..fcbfdf5f8 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -28,6 +28,7 @@ theme: - navigation.instant.progress - navigation.instant.prefetch # Start loading page on link hover. Insiders only - navigation.top # 'Return to top' button + - navigation.tabs # - navigation.prune # Reduces site size. ALSO BREAKS NAV BEHAVIOR - navigation.tracking # Put anchor in URL - navigation.path # Breadcrumbs. Insiders only @@ -59,9 +60,8 @@ markdown_extensions: # - docs/assets/glossary/snippets.md # Causes massive slowdown. Also, bad. - attr_list - abbr - - neoteroi.cards - - neoteroi.timeline - footnotes + - md_in_html - def_list extra: analytics: @@ -70,6 +70,7 @@ extra: plugins: - search - open-in-new-tab + - autolinks - rss: categories: - tags @@ -82,8 +83,8 @@ plugins: feeds_filenames: rss_created: page_creation.xml rss_updated: page_update.xml - - redirects: - map_file: docs/redirect_map.yml + # - redirects: + # map_file: docs/redirect_map.yml - awesome-pages: filename: .pages.yml # - git-authors diff --git a/overrides/partials/glossary.html b/overrides/partials/glossary.html index d0e5d31ee..9e6506cd3 100644 --- a/overrides/partials/glossary.html +++ b/overrides/partials/glossary.html @@ -48,10 +48,6 @@ A bundle of computer-aided engineering software including Fluent and CFX. -## ANTLR: - -ANother Tool for Language Recognition - ## ANTs: ANTs extracts information from complex datasets that include imaging. ANTs is useful for managing, @@ -183,10 +179,6 @@ AutoDock Vina is an open-source program for doing molecular docking. -## Autoconf-archive: - -A collection of more than 500 macros for GNU Autoconf - ## BBMap: BBMap short read aligner, and other bioinformatic tools. @@ -195,11 +187,6 @@ Manipulate variant calls in the Variant Call Format (VCF) and its binary counterpart BCF. 
-## BCL-Convert: - -Converts per cycle binary data output by Illumina sequencers containing basecall -files and quality scores to per read FASTQ files - ## BEAST: Bayesian MCMC phylogenetic analysis of molecular sequences for reconstructing @@ -254,7 +241,7 @@ ## BRAKER: -BRAKER is a pipeline for fully automated prediction of protein coding genes with GeneMark-ES/ET +Pipeline for fully automated prediction of protein coding genes with GeneMark-ES/ET and AUGUSTUS in novel eukaryotic genomes. ## BUSCO: @@ -291,11 +278,6 @@ Program for inference of recent immigration rates between populations using unlinked multilocus genotypes -## Bazel: - -Bazel is a build tool that builds code quickly and reliably. -It is used to build the majority of Google's software. - ## Beagle: Package for phasing genotypes and for imputing ungenotyped markers. @@ -336,12 +318,6 @@ Bison is a general-purpose parser generator that converts an annotated context-free grammar into a deterministic LR or generalized LR (GLR) parser employing LALR(1) parser tables. -## BlenderPy: - -Blender provides a pipeline for 3D modeling, rigging, animation, simulation, rendering, -compositing, motion tracking, video editing and 2D animation. -This particular build of Blender provides a Python package 'bpy' rather than the stand-alone application. - ## Boost: Boost provides free peer-reviewed portable C++ source libraries. @@ -355,23 +331,11 @@ Ultrafast and memory-efficient tool for aligning sequencing reads to long reference sequences. -## Bpipe: - -A platform for running big bioinformatics jobs that consist of a series of processing stages - ## Bracken: Hghly accurate statistical method that computes the abundance of species in DNA sequences from a metagenomics sample. 
-## BreakSeq2: - -Nucleotide-resolution analysis of structural variants - -## CCL: - -Clozure CL (often called CCL for short) is a free Common Lisp implementation - ## CD-HIT: CD-HIT is a very widely used program for clustering and @@ -437,10 +401,6 @@ A tool for quick quality assessment of cram and bam files, intended for long read sequencing -## CTPL: - -C++ Thread Pool Library - ## CUDA: CUDA (formerly Compute Unified Device Architecture) is a parallel @@ -448,10 +408,6 @@ graphics processing units (GPUs) that they produce. CUDA gives developers access to the virtual instruction set and memory of the parallel computational elements in CUDA GPUs. -## CUnit: - -Automated testing framework for C. - ## Canu: Sequence assembler designed for high-noise single-molecule sequencing. @@ -460,11 +416,6 @@ Fast data interchange format and capability-based RPC system. -## Catch2: - -A modern, C++-native, header-only, test framework for unit-tests, TDD and BDD - - using C++11, C++14, C++17 and later (or C++03 on the Catch1.x branch) - ## CellRanger: Cell Ranger is a set of analysis pipelines that process Chromium @@ -521,10 +472,6 @@ ClustalW2 is a general purpose multiple sequence alignment program for DNA or proteins. -## Corset: - -Clusters contigs and counts reads from de novo assembled transcriptomes. - ## CoverM: DNA read coverage and relative abundance calculator focused on metagenomics applications @@ -583,10 +530,6 @@ Sequence aligner for protein and translated DNA searches -## DISCOVARdenovo: - -Assembler suitable for large genomes based on Illumina reads of length 250 or longer. - ## DOI: A unique identifier that identifies digital objects. The object may change physical locations, but the DOI assigned to that object will never change. @@ -595,11 +538,6 @@ Tool for annotating metagenomic assembled genomes and VirSorter identified viral contigs.. 
-## DaliLite: - -Tool set for simulating/evaluating SVs, merging and comparing SVs within and among samples, - and includes various methods to reformat or summarize SVs. - ## DeconSeq: A tool that can be used to automatically detect and efficiently remove sequence contaminations @@ -664,10 +602,6 @@ EMBOSS is a free Open Source software analysis package specially developed for the needs of the molecular biology (e.g. EMBnet) user community. -## ENMTML: - -R package for integrated construction of Ecological Niche Models. - ## ESMF: The Earth System Modeling Framework (ESMF) is software for building and coupling weather, @@ -712,14 +646,6 @@ Tool for estimating repeat sizes -## Extrae: - -Extrae is capable of instrumenting applications based on MPI, OpenMP, pthreads, CUDA1, OpenCL1, and StarSs1 using different instrumentation approaches - -## FALCON: - -Falcon: a set of tools for fast aligning long reads for consensus and assembly - ## FASTX-Toolkit: Tools for Short-Reads FASTA/FASTQ files preprocessing. @@ -757,11 +683,6 @@ and MacOS X. FLTK provides modern GUI functionality without the bloat and supports 3D graphics via OpenGL and its built-in GLUT emulation. -## FTGL: - - FTGL is a free open source library to enable developers to use arbitrary -fonts in their OpenGL (www.opengl.org) applications. - ## FastANI: Tool for fast alignment-free computation of @@ -876,15 +797,6 @@ GEOS (Geometry Engine - Open Source) is a C++ port of the Java Topology Suite (JTS) -## GLM: - -OpenGL Mathematics (GLM) is a header only C++ mathematics library for graphics software based on - the OpenGL Shading Language (GLSL) specifications. - -## GLPK: - -GNU Linear Programming Kit is intended for solving large-scale linear programming (LP), mixed integer programming (MIP), and other related problems. 
- ## GLib: GLib is one of the base libraries of the GTK+ project @@ -903,14 +815,6 @@ A genetic algorithm for docking flexible ligands into protein binding sites -## GObject-Introspection: - -GObject introspection is a middleware layer between C libraries - (using GObject) and language bindings. The C library can be scanned at - compile time and generate a metadata file, in addition to the actual - native C library. Then at runtime, language bindings can read this - metadata and automatically provide bindings to call into the C library. - ## GPAW: GPAW is a density-functional theory (DFT) Python code based on the projector-augmented wave (PAW) @@ -968,7 +872,6 @@ It is an Open Source Free Software Library intended to provide a set of useful functions to deal with 3D surfaces meshed with interconnected triangles. - ## GUI: A digital interface in which a user interacts with graphical components such as icons, buttons, and menus. @@ -1063,10 +966,6 @@ came at significant computational expense, but in the new HMMER3 project, HMMER is now essentially as fast as BLAST. -## HOPS: - -Pipeline which focuses on screening MALT data for the presence of a user-specified list of target species. - ## HPC: Like a regular computer, but larger. Primarily used for heating data centers. @@ -1119,10 +1018,6 @@ tool for interactive exploration of large, integrated genomic datasets. It supports a wide variety of data types, including array-based and next-generation sequence data -## IMPUTE: - -Genotype imputation and haplotype phasing. - ## IQ-TREE: Efficient phylogenomic software by maximum likelihood @@ -1144,11 +1039,6 @@ Infernal ('INFERence of RNA ALignment') is for searching DNA sequence databases for RNA structure and sequence similarities. 
-## Inspector: - -Intel Inspector XE is an easy to use memory error checker and thread checker for serial - and parallel applications - ## InterProScan: Sequence analysis application (nucleotide and protein sequences) that combines @@ -1158,10 +1048,6 @@ Just Another Gibbs Sampler - a program for the statistical analysis of Bayesian hierarchical models by Markov Chain Monte Carlo. -## JUnit: - -A programmer-oriented testing framework for Java. - ## JasPer: The JasPer Project is an open-source initiative to provide a free @@ -1184,6 +1070,12 @@ including serialization and deserialization to and from strings. It can also preserve existing comment in unserialization/serialization steps, making it a convenient format to store user input files. +## Julia: + +A high-level, high-performance dynamic language for technical computing. + +This version was compiled from source with USE_INTEL_JITEVENTS=1 to enable profiling with VTune. + ## JupyterLab: An extensible environment for interactive and reproducible computing, based on the Jupyter Notebook and Architecture. @@ -1233,10 +1125,6 @@ Krona Tools is a set of scripts to create Krona charts from several Bioinformatics tools as well as from text and XML files. -## KyotoCabinet: - -Library of routines for managing a database. - ## LAME: LAME is a high quality MPEG Audio Layer III (MP3) encoder licensed under the LGPL. @@ -1339,18 +1227,10 @@ Multiple sequence alignment program offering a range of methods. -## MAGMA: - -Tool for gene analysis and generalized gene-set analysis of GWAS data. - ## MAKER: Genome annotation pipeline -## MATIO: - -matio is an C library for reading and writing Matlab MAT files. - ## MATLAB: A high-level language and interactive environment for numerical computing. @@ -1384,11 +1264,6 @@ MMseqs2: ultra fast and sensitive search and clustering suite -## MODFLOW: - -MODFLOW is the U.S. 
Geological Survey modular finite-difference flow model, which is a computer code that solves the groundwater flow equation. The program is used by hydrogeologists to simulate the flow of groundwater through aquifers. - - ## MPFR: The MPFR library is a C library for multiple-precision @@ -1398,10 +1273,6 @@ A standardised message-passing standard designed to function on parallel computing architectures. -## MSMC: - -Multiple Sequentially Markovian Coalescent, infers population size and gene flow from multiple genome sequences - ## MUMPS: A parallel sparse direct solver @@ -1417,10 +1288,6 @@ sequences. A range of options is provided that give you the choice of optimizing accuracy, speed, or some compromise between the two. -## MUST: - -MUST detects usage errors of the Message Passing Interface (MPI) and reports them to the user. - ## MaSuRCA: MaSuRCA is whole genome assembly software. It combines the efficiency of the de Bruijn graph @@ -1428,20 +1295,10 @@ only short reads from Illumina sequencing or a mixture of short reads and long reads (Sanger, 454, Pacbio and Nanopore). -## Magma: - -Magma is a large, well-supported software package designed for computations in algebra, number theory, algebraic geometry and algebraic combinatorics. It provides a mathematically rigorous environment for defining and working with structures such as groups, rings, fields, modules, algebras, schemes, curves, graphs, designs, codes and many others. Magma also supports a number of databases designed to aid computational research in those areas of mathematics which are algebraic in nature. - -whatis([==[Homepage: http://magma.maths.usyd.edu.au/magma/ - ## Mamba: Mamba is a fast, robust, and cross-platform package manager. 
-## MarkerMiner: - -Workflow for effective discovery of SCN loci in flowering plants angiosperms - ## Mash: Fast genome and metagenome distance estimation using MinHash @@ -1578,15 +1435,6 @@ Tool for selecting the best-fit model of evolution for DNA and protein alignments. -## Molcas: - -Molcas is an ab initio quantum chemistry software package -developed by scientists to be used by scientists. The basic philosophy is is to -be able to treat general electronic structures for molecules consisting of -atoms from most of the periodic table. As such, the primary focus of the -package is on multiconfigurational methods with applications typically -connected to the treatment of highly degenerate states. - ## Molpro: Molpro is a complete system of ab initio programs for molecular electronic structure calculations. @@ -1706,12 +1554,6 @@ NX is a general-purpose program package for simulating the dynamics of electronically excited molecules and molecular assemblies. -## NextGenMap: - -NextGenMap is a flexible highly sensitive short read mapping tool that - handles much higher mismatch rates than comparable algorithms while still outperforming - them in terms of runtime. - ## NextPolish2: a fast and efficient genome polishing tool for long-read assembly @@ -1744,11 +1586,6 @@ Manipulate various data and sequence files. -## OMA: - -Orthologous MAtrix project is a method and database for the inference - of orthologs among complete genomes - ## OPARI2: source-to-source instrumentation tool for OpenMP and hybrid codes. @@ -1806,10 +1643,6 @@ a common infrastructure for computer vision applications and to accelerate the use of machine perception in the commercial products. -## OpenFAST: - -Wind turbine multiphysics simulation tool - ## OpenFOAM: OpenFOAM is a free, open source CFD software package. @@ -1844,22 +1677,10 @@ OpenSlide is a C library that provides a simple interface to read whole-slide images (also known as virtual slides). 
-## OrfM: - -A simple and not slow open reading frame (ORF) caller. - -## OrthoFiller: - -Identifies missing annotations for evolutionarily conserved genes. - ## OrthoFinder: OrthoFinder is a fast, accurate and comprehensive platform for comparative genomics -## OrthoMCL: - -Genome-scale algorithm for grouping orthologous protein sequences. - ## PALEOMIX: pipelines and tools designed to aid the rapid processing of High-Throughput Sequencing (HTS) data. @@ -1889,33 +1710,15 @@ and semantics as Perl 5. -## PDT: - -Program Database Toolkit (PDT) is a framework for analyzing source code written in several programming languages and for making rich program - knowledge accessible to developers of static and dynamic analysis tools. - ## PEAR: Memory-efficient,fully parallelized and highly accurate pair-end read merger. -## PEST++: - -PEST++ is a software suite aimed at supporting - complex numerical models in the decision-support context. - Much focus has been devoted to supporting environmental models - (groundwater, surface water, etc) but these tools are readily - applicable to any computer model. - - ## PETSc: PETSc, pronounced PET-see (the S is silent), is a suite of data structures and routines for the scalable (parallel) solution of scientific applications modeled by partial differential equations. -## PHASIUS: - -A tool to visualize phase block structure from (many) BAM or CRAM files together with BED annotation - ## PLINK: PLINK is a free, open-source whole genome association analysis toolset, @@ -1996,12 +1799,6 @@ A high-level Parallel I/O Library for structured grid applications -## Peregrine: - -Genome assembler for long reads (length > 10kb, accuracy > 99%). -Based on Sparse HIereachical MimiMizER (SHIMMER) for fast read-to-read overlaping - - ## Perl: Larry Wall's Practical Extraction and Report Language @@ -2053,10 +1850,6 @@ Proteinortho is a tool to detect orthologous genes within different species. 
-## PyOpenGL: - -PyOpenGL is the most common cross platform Python binding to OpenGL and related APIs. - ## PyQt: PyQt5 is a set of Python bindings for v5 of the Qt application framework from The Qt Company. @@ -2167,10 +1960,6 @@ consistent and rapid annotation of ribosomal RNA genes. -## ROCm: - -Platform for GPU Enabled HPC and UltraScale Computing - ## ROOT: The ROOT system provides a set of OO frameworks with all the functionality @@ -2326,19 +2115,10 @@ SNVoter - A top up tool to enhance SNV calling from Nanopore sequencing data & NanoMethPhase - Phase long reads and CpG methylations from Oxford Nanopore Technologies. -## SOCI: - -Database access library for C++ that makes the illusion of embedding SQL queries in the - regular C++ code, staying entirely within the Standard C++. - ## SPAdes: Genome assembler for single-cell and isolates data sets -## SPIDER: - -System for Processing Image Data from Electron microscopy and Related fields - ## SQLite: SQLite: SQL Database Engine in a C Library @@ -2356,10 +2136,6 @@ Fast universal RNA-seq aligner -## STAR-Fusion: - -Processes the output generated by the STAR aligner to map junction reads and spanning reads to a reference annotation set - ## SUNDIALS: SUNDIALS: SUite of Nonlinear and DIfferential/ALgebraic Equation Solvers @@ -2544,11 +2320,6 @@ NVIDIA TensorRT is a platform for high-performance deep learning inference -## Theano: - -Theano is a Python library that allows you to define, optimize, -and evaluate mathematical expressions involving multi-dimensional arrays efficiently. - ## Tk: Tk is an open source, cross-platform widget toolchain that provides a library of basic elements for @@ -2680,14 +2451,6 @@ all-vs-all pairwise global alignment, exact and global alignment searching, shuffling, subsampling and sorting. It also supports FASTQ file analysis, filtering, conversion and merging of paired-end reads. 
-## VTK: - -The Visualization Toolkit (VTK) is an open-source, freely available software system for - 3D computer graphics, image processing and visualization. VTK consists of a C++ class library and several - interpreted interface layers including Tcl/Tk, Java, and Python. VTK supports a wide variety of visualization - algorithms including: scalar, vector, tensor, texture, and volumetric methods; and advanced modeling techniques - such as: implicit modeling, polygon reduction, mesh smoothing, cutting, contouring, and Delaunay triangulation. - ## VTune: Intel VTune Amplifier XE is the premier performance profiler for C, C++, C#, Fortran, @@ -2727,12 +2490,6 @@ VirSorter: mining viral signal from microbial genomic data. -## VirtualGL: - -VirtualGL is an open source toolkit that gives any Linux or -Unix remote display software the ability to run OpenGL applications with full -hardware acceleration. - ## WAAFLE: Workflow to Annotate Assemblies and Find LGT Events. @@ -2750,21 +2507,6 @@ Aligning proteins or protein HMMs to DNA -## XHMM: - -Calls copy number variation (CNV) from normalized read-depth data from exome capture or other targeted sequencing experiments. - -## XMDS2: - - Fast integrator of stochastic partial differential equations. - -## XSD: - -CodeSynthesis XSD is an open-source, cross-platform W3C XML Schema to C++ data binding compiler. - Provided with an XML instance specification (XML Schema), it generates C++ classes that represent the given vocabulary - as well as XML parsing and serialization code. You can then access the data stored in XML using types and functions - that semantically correspond to your application domain rather than dealing with the intricacies of reading and writing XML - ## XVFB: A display server implementing the X11 display server protocol, XVFB performs all graphical operations in virtual memory without showing any screen output. 
@@ -2901,12 +2643,6 @@ breseq is a computational pipeline for the analysis of short-read re-sequencing data -## bsddb3: - -bsddb3 is a nearly complete Python binding of the -Oracle/Sleepycat C API for the Database Environment, Database, Cursor, -Log Cursor, Sequence and Transaction objects. - ## bzip2: bzip2 is a freely available, patent free, high-quality data compressor. It typically @@ -2993,10 +2729,6 @@ Fast and scalable long-read-based SV detection -## cwltool: - -Common Workflow Language tool description reference implementation - ## cyvcf2: cython + htslib == fast VCF and BCF processing @@ -3005,10 +2737,6 @@ Diffusion Approximation for Demographic Inference -## dammit: - -de novo transcriptome annotator.. - ## datasets: Tool to gather data from across NCBI databases @@ -3058,10 +2786,6 @@ a set of tools for decoding and encoding messages in the following formats: WMO FM-92 GRIB edition 1 and edition 2, WMO FM-94 BUFR edition 3 and edition 4, WMO GTS abbreviated header (only decoding). -## ectyper: - -Standalone versatile serotyping module for Escherichia coli.. - ## edlib: Lightweight, super fast library for sequence alignment using edit (Levenshtein) distance. @@ -3109,18 +2833,10 @@ A collection of small and efficient programs for performing some common and uncommon tasks with FASTQ files. -## fcGENE: - -Format converting tool for genotype Data. - ## fgbio: A set of tools to analyze genomic data with a focus on Next Generation Sequencing. -## fineRADstructure: - -A package for population structure inference from RAD-seq data - ## fineSTRUCTURE: Population assignment using large numbers of densely sampled genomes, including both SNP chips and sequence dat @@ -3131,17 +2847,15 @@ ## flex: -Flex (Fast Lexical Analyzer) is a tool for generating scanners. A scanner, - sometimes called a tokenizer, is a program which recognizes lexical patterns in text. -## fmlrc: + Flex (Fast Lexical Analyzer) is a tool for generating scanners. 
A scanner, + sometimes called a tokenizer, is a program which recognizes lexical patterns + in text. - Tool for performing hybrid correction of long read sequencing -using the BWT and FM-index of short-read sequencing data ## fmt: -Formatting library providing a fast and safe alternative to C stdio and C++ iostreams. +fmt (formerly cppformat) is an open-source formatting library. ## fontconfig: @@ -3178,10 +2892,6 @@ launched by the user on a compute system to serve as a conduit for executing functions on that computer. -## fxtract: - -Extract sequences from a fastx (fasta or fastq) file given a subsequence. - ## g2clib: Library contains GRIB2 encoder/decoder ('C' version). @@ -3190,10 +2900,6 @@ Library contains GRIB2 encoder/decoder and search/indexing routines. -## ga4gh: - -A reference implementation of the GA4GH API - ## gcloud: Libraries and tools for interacting with Google Cloud products and services. @@ -3241,10 +2947,6 @@ Git is a free and open source distributed version control system designed to handle everything from small to very large projects with speed and efficiency. -## globus-automate-client: - -Client for the Globus Flows service. - ## globus-compute-endpoint: Globus Compute is a distributed Function as a Service (FaaS) platform that enables flexible, @@ -3286,14 +2988,6 @@ table lookup needs a single string comparison only. -## grive2: - -Command line tool for Google Drive. - -## gsort: - -Tool to sort genomic files according to a genomefile. - ## h5pp: A simple C++17 wrapper for HDF5. @@ -3302,19 +2996,10 @@ Detects in-sample contamination in mtDNA or WGS sequencing studies by analyzing the mitchondrial content -## help2man: - -help2man produces simple manual pages from the '--help' and '--version' output of other commands. - ## hifiasm: Hifiasm: a haplotype-resolved assembler for accurate Hifi reads. 
-## hunspell: - -Spell checker and morphological analyzer library and program designed for languages - with rich morphology and complex word compounding or character encoding. - ## hwloc: The Portable Hardware Locality (hwloc) software package provides a portable abstraction @@ -3324,12 +3009,6 @@ network interfaces, InfiniBand HCAs or GPUs. It primarily aims at helping applications with gathering information about modern computing hardware so as to exploit it accordingly and efficiently. -## hypothesis: - -Hypothesis is an advanced testing library for Python. It lets you write tests which are parametrized - by a source of examples, and then generates simple and comprehensible examples that make your tests fail. This lets - you find more bugs in your code with less work. - ## icc: Intel C and C++ compilers @@ -3390,15 +3069,6 @@ ipyrad is an interactive toolkit for assembly and analysis of restriction-site associated genomic data sets (e.g., RAD, ddRAD, GBS) for population genetic and phylogenetic studies. -## ispc: - -Intel SPMD Program Compilers; An open-source compiler for high-performance - SIMD programming on the CPU. ispc is a compiler for a variant of the C programming language, - with extensions for 'single program, multiple data' (SPMD) programming. - Under the SPMD model, the programmer writes a program that generally appears - to be a regular serial program, though the execution model is actually that - a number of program instances execute in parallel on the hardware. - ## jbigkit: JBIG-KIT is a software implementation of the JBIG1 data compression standard @@ -3576,11 +3246,6 @@ GNU libtool is a generic library support script. Libtool hides the complexity of using shared libraries behind a consistent, portable interface. -## libunistring: - -This library provides functions for manipulating Unicode strings and for manipulating C strings - according to the Unicode standard. 
- ## libunwind: Define a portable and efficient C programming API to determine the call-chain of a program. @@ -3661,10 +3326,6 @@ Python package which enables you to launch MATLAB and access it from a web browser. -## meRanTK: - -High performance toolkit for complete analysis of methylated RNA data. - ## medaka: Medaka is a tool to create a consensus sequence from nanopore sequencing data. @@ -3682,10 +3343,6 @@ Completely overhauled tool which discovers microRNA genes by analyzing sequenced RNAs -## mimalloc: - -mimalloc is a general purpose allocator with excellent performance characteristics. - ## miniBUSCO: faster and more accurate reimplementation of BUSCO. @@ -3707,10 +3364,6 @@ Aligns a protein sequence against a genome with affine gap penalty, splicing and frameshift.. -## mlpack: - -Fast, and flexible C++ machine learning library with bindings to other languages - ## modbam2bed: A program to aggregate modified base counts stored in a modified-base BAM file to a bedMethyl file. @@ -3727,10 +3380,6 @@ MpCCI is a vendor neutral and application independent interface for co-simulation. MpCCI offers advanced and proven features for multiphysics modelling. -## mpifileutils: - -MPI-Based File Utilities For Distributed Systems - ## muParser: muParser is an extensible high performance math expression @@ -3856,12 +3505,6 @@ parallel fastq-dump wrapper -## parasail: - -parasail is a SIMD C (C99) library containing implementations - of the Smith-Waterman (local), Needleman-Wunsch (global), and semi-global - pairwise sequence alignment algorithms. - ## patchelf: PatchELF is a small utility to modify the dynamic linker and RPATH of ELF executables. @@ -3968,22 +3611,10 @@ PyMOL (open source version) molecular visualization system. -## pyspoa: - -Python bindings to spoa. 
- ## qcat: Command-line tool for demultiplexing Oxford Nanopore reads from FASTQ files -## rDock: - -rDock is a fast and versatile Open Source docking program that -can be used to dock small molecules against proteins and nucleic acids. It is -designed for High Throughput Virtual Screening (HTVS) campaigns and Binding Mode -prediction studies. rDock is mainly written in C++ and accessory scripts and -programs are written in C++, perl or python languages. - ## randfold: Minimum free energy of folding randomization test software @@ -3992,10 +3623,6 @@ Randomly subsample sequencing reads to a specified coverage. -## razers3: - -Tool for mapping millions of short genomic reads onto a reference genome. - ## rclone: @@ -4010,20 +3637,10 @@ traditional table-driven approach, re2c encodes the generated finite state automata directly in the form of conditional jumps and comparisons. -## rkcommon: - - -A common set of C++ infrastructure and CMake utilities used by various components of Intel® oneAPI Rendering Toolkit. - - ## rnaQUAST: Tool for evaluating RNA-Seq assemblies using reference genome and gene database -## rust-fmlrc: - -FM-index Long Read Corrector (Rust implementation) - ## samblaster: samblaster is a fast and flexible program for marking duplicates in read-id grouped paired-end SAM files. @@ -4031,14 +3648,6 @@ reads to a separate FASTQ file. When marking duplicates, samblaster will require approximately 20MB of memory per 1M read pairs. -## samclip: - -Filter SAM file for soft and hard clipped alignments. - -## savvy: - -Interface to various variant calling formats. - ## sbt: sbt is a build tool for Scala, Java, and more. @@ -4083,11 +3692,6 @@ Toolkit for converting (FAST5 <-> SLOW5), compressing, viewing, indexing and manipulating data in SLOW5 format. -## smafa: - -Smafa attempts to align or cluster pre-aligned biological sequences, handling sequences - which are all the same length. 
- ## smoove: simplifies and speeds calling and genotyping SVs for short reads. @@ -4096,11 +3700,6 @@ The Snakemake workflow management system is a tool to create reproducible and scalable data analyses. -## snaphu: - -SNAPHU is an implementation of the Statistical-cost, Network-flow Algorithm for Phase Unwrapping - proposed by Chen and Zebker - ## snappy: Snappy is a compression/decompression library. It does not aim @@ -4153,10 +3752,6 @@ A robust and fast clustering method for amplicon-based studies. The purpose of swarm is to provide a novel clustering algorithm that handles massive sets of amplicons. Results of traditional clustering algorithms are strongly input-order dependent, and rely on an arbitrary global clustering threshold. swarm results are resilient to input-order changes and rely on a small local linking threshold d, representing the maximum number of differences between two amplicons. -## swissknife: - -Perl module for reading and writing UniProtKB data in plain text format. 
- ## tRNAscan-SE: Transfer RNA detection diff --git a/requirements.in b/requirements.in index b33c54863..1460d6bef 100644 --- a/requirements.in +++ b/requirements.in @@ -8,6 +8,7 @@ mkdocs-simple-hooks mkdocs-rss-plugin mkdocs-macros-plugin mkdocs-rss-plugin +mkdocs-autolinks-plugin mkdocs-open-in-new-tab mkdocs-git-authors-plugin mkdocs-git-revision-date-localized-plugin @@ -15,7 +16,6 @@ mkdocs-redirects @ git+https://github.com/CallumWalley/mkdocs-redirects.git@map_ mkdocs-awesome-pages-plugin # checkers -neoteroi-mkdocs codespell proselint linkcheckmd diff --git a/requirements.txt b/requirements.txt index 37da42baf..b12add660 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,11 +2,11 @@ # This file is autogenerated by pip-compile with Python 3.12 # by the following command: # -# pip-compile --allow-unsafe +# pip-compile requirements.in # -aiohappyeyeballs==2.4.4 +aiohappyeyeballs==2.4.6 # via aiohttp -aiohttp==3.11.11 +aiohttp==3.11.12 # via linkcheckmd aiosignal==1.3.2 # via aiohttp @@ -18,7 +18,7 @@ babel==2.17.0 # via # mkdocs-git-revision-date-localized-plugin # mkdocs-material -beautifulsoup4==4.13.0 +beautifulsoup4==4.13.3 # via pyspelling bracex==2.5.post1 # via wcmatch @@ -47,7 +47,7 @@ codespell==2.4.1 # via -r requirements.in colorama==0.4.6 # via mkdocs-material -cryptography==44.0.0 +cryptography==44.0.1 # via pyjwt deprecated==1.2.18 # via pygithub @@ -99,7 +99,7 @@ jinja2==3.1.5 # neoteroi-mkdocs linkcheckmd==1.4.0 # via -r requirements.in -lxml==5.3.0 +lxml==5.3.1 # via pyspelling markdown==3.7 # via @@ -123,6 +123,7 @@ mergedeep==1.3.4 mkdocs==1.6.1 # via # -r requirements.in + # mkdocs-autolinks-plugin # mkdocs-awesome-pages-plugin # mkdocs-bootstrap4 # mkdocs-git-authors-plugin @@ -136,6 +137,8 @@ mkdocs==1.6.1 # mkdocs-section-index # mkdocs-simple-hooks # neoteroi-mkdocs +mkdocs-autolinks-plugin==0.7.1 + # via -r requirements.in mkdocs-awesome-pages-plugin==2.10.1 # via -r requirements.in mkdocs-bootstrap4==0.1.5 @@ -150,7 +153,7 @@ 
mkdocs-git-revision-date-localized-plugin==1.3.0 # via -r requirements.in mkdocs-macros-plugin==1.3.7 # via -r requirements.in -mkdocs-material==9.6.1 +mkdocs-material==9.6.5 # via -r requirements.in mkdocs-material-extensions==1.3.1 # via mkdocs-material @@ -191,7 +194,7 @@ pip-tools==7.4.1 # via -r requirements.in platformdirs==4.3.6 # via mkdocs-get-deps -propcache==0.2.1 +propcache==0.3.0 # via # aiohttp # yarl @@ -199,7 +202,7 @@ proselint==0.14.0 # via -r requirements.in pycparser==2.22 # via cffi -pygithub==2.5.0 +pygithub==2.6.0 # via mkdocs-git-committers-plugin pygments==2.19.1 # via @@ -287,7 +290,5 @@ yarl==1.18.3 # via aiohttp # The following packages are considered to be unsafe in a requirements file: -pip==25.0 - # via pip-tools -setuptools==75.8.0 - # via pip-tools +# pip +# setuptools