#!/bin/bash -l
# Battle notes — SLURM job script for training; also sets the HuggingFace
# cache folder. (The shebang MUST be the first line of the file; the stray
# note text that used to sit above it has been moved into this comment.)
#
# NOTE(review): the '-l' (login shell) flag makes conda available on PATH;
# it is honored by 'sbatch script.sh' but lost if run as 'bash script.sh'.
#SBATCH --job-name=train
# partition: the queue the job is submitted to
#SBATCH --partition=gpu
# nodes: number of compute nodes
#SBATCH --nodes=1
# ntasks: number of CPU cores (max on this partition is 34)
#SBATCH --ntasks=5
# gres: number and type of GPUs (1 or 2)
#SBATCH --gres=gpu:v100:1
# walltime: the maximum allowed time depends on the partition
#SBATCH --time=3-00:00:00
#SBATCH --export=ALL
#SBATCH --mem=256G

# Abort the job on any command failure or broken pipeline, instead of
# silently continuing in the wrong environment. '-u' is deliberately
# omitted: 'conda activate' scripts may reference unset variables.
set -eo pipefail

# Activate the project conda environment by prefix path.
conda activate /group/pmc010/sli/llm_ner

# Point HuggingFace caches/temporary files at group storage (avoids filling
# the $HOME quota on the compute node).
export HF_HOME=/group/pmc010/sli/LLM_NER/LLM-Eva/cache_here/

# Note: SLURM_JOBID is a unique number for every job.

# Training entry point. The original line read 'SCRIPT=train.pyLast updated'
# — "Last updated" was page chrome fused in by copy/paste; fixed to the
# intended value.
SCRIPT=train.py