Skip to content
Open
Show file tree
Hide file tree
Changes from 12 commits
Commits
Show all changes
37 commits
Select commit Hold shift + click to select a range
7f1b7d7
adding modules for downloading and running gguf modules
toniher Mar 25, 2026
9031f48
adding docker support
toniher Mar 25, 2026
04cd556
allow custom HF_HOME cache input and other fixes
toniher Mar 26, 2026
537a891
several test fixes
toniher Mar 26, 2026
20b5d26
Upgrade problem with versions and test
toniher Mar 26, 2026
c9997f5
fix precommit linting
toniher Mar 26, 2026
5035e82
fix yaml for prettier
toniher Mar 26, 2026
2c796de
fix retrieval of version for huggingface
toniher Mar 26, 2026
4f64bac
Merge branch 'nf-core:master' into llamacpp
toniher Mar 26, 2026
ff7039c
importing nextflow.config from HF_DOWNLOAD
toniher Mar 26, 2026
7a3f8fc
Merge branch 'llamacpp' of github.com:biocorecrg/nf-core-modules into…
toniher Mar 26, 2026
fb1768c
adding hf_cache for setup as well
toniher Mar 26, 2026
ac7f44c
moving HF_DOWNLOAD to HUGGINGFACE_DOWNLOAD https://nf-co.re/docs/guid…
toniher Apr 4, 2026
26168b7
Update modules/nf-core/huggingface/download/tests/main.nf.test
toniher Apr 4, 2026
6dfff97
more detail and naming of Hugging Face
toniher Apr 4, 2026
2d171ca
Merge remote-tracking branch 'upstream/master' into llamacpp
toniher Apr 4, 2026
ee04a27
Merge branch 'llamacpp' of github.com:biocorecrg/nf-core-modules into…
toniher Apr 4, 2026
3021074
linting modules using
toniher Apr 4, 2026
4630e5a
generate files on the fly
toniher Apr 4, 2026
53c7826
rmed data files for tests
toniher Apr 4, 2026
4156db0
upgrade tests to work on the fly and updated snaps
toniher Apr 4, 2026
421603e
upgrading tests - adding new smollm3
toniher Apr 4, 2026
af5eadd
Update modules/nf-core/llamacpp-python/run/main.nf
toniher Apr 8, 2026
02d3f6d
Update modules/nf-core/llamacpp-python/run/main.nf
toniher Apr 8, 2026
297f503
Update modules/nf-core/llamacpp-python/run/main.nf
toniher Apr 8, 2026
ac61a56
Moving name of the module, script name and adapting tests and stubs
toniher Apr 8, 2026
ca4721b
update tests
toniher Apr 8, 2026
8fdffea
Merge branch 'nf-core:master' into llamacpp
toniher Apr 8, 2026
9a0d9f3
Merge branch 'llamacpp' of github.com:biocorecrg/nf-core-modules into…
toniher Apr 8, 2026
d65181d
update task.accelerator
toniher Apr 9, 2026
cd3d1b9
moving all assertions into the same snapshot
toniher Apr 9, 2026
fb81365
removed unneeded nextflow.config for test
toniher Apr 11, 2026
6f2affa
Merge branch 'nf-core:master' into llamacpp
toniher Apr 11, 2026
dfadccb
addressing some comments, such as cache_dir
toniher Apr 13, 2026
1dbb3e9
lint didn't like, so out
toniher Apr 13, 2026
bfe3bf9
script moved to work as template and corresponding changes to version…
toniher Apr 14, 2026
0d594f5
Merge branch 'nf-core:master' into llamacpp
toniher Apr 14, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions modules/nf-core/huggingface/download/environment.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/modules/environment-schema.json
# Conda environment for the huggingface/download module.
channels:
- conda-forge
dependencies:
# huggingface_hub ships the `hf` CLI invoked in main.nf; the pin must stay in
# sync with the container tag used by the process (1.6.0).
- conda-forge::huggingface_hub=1.6.0
30 changes: 30 additions & 0 deletions modules/nf-core/huggingface/download/main.nf
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
// Download a single file from a Hugging Face Hub repository using the `hf`
// CLI provided by huggingface_hub (see environment.yml / container below).
process HF_DOWNLOAD {
    tag "$meta.id"
    label 'process_medium'

    conda "${moduleDir}/environment.yml"
    container "community.wave.seqera.io/library/huggingface_hub:1.6.0--c106a7f9664ca39b"

    input:
    // meta    : sample metadata map, e.g. [ id:'sample1' ]
    // hf_repo : Hugging Face repository id, e.g. "ggml-org/gemma-3-1b-it-GGUF"
    // hf_file : name of the file inside the repository to fetch
    // hf_home : cache directory exported as HF_HOME; falsy to use the default
    tuple val(meta), val(hf_repo), val(hf_file), val(hf_home)

    output:
    // The downloaded file is staged into the task work dir (--local-dir $PWD),
    // so it can be captured by name here.
    tuple val(meta), path(hf_file), emit: output
    // Tool version published onto the `versions` topic via an eval output.
    tuple val("${task.process}"), val("huggingface_hub"), eval("hf --version 2>&1 | tail -n1 | awk '{print \$NF}'"), topic: versions, emit: versions_huggingface_hub

    when:
    task.ext.when == null || task.ext.when

    script:
    // Default the cache to a directory under the pipeline project dir when the
    // caller does not supply one. NOTE(review): projectDir may be read-only in
    // some deployments — confirm this default is intended.
    def hf_home_resolved = hf_home ?: "${workflow.projectDir}/hf_cache"
    """
    export HF_HOME="${hf_home_resolved}"
    export HF_HUB_CACHE=\$HF_HOME
    hf download ${hf_repo} ${hf_file} --local-dir \$PWD
    """

    stub:
    // Create an empty placeholder so the `output` channel shape matches.
    """
    touch ${hf_file}
    """
}
66 changes: 66 additions & 0 deletions modules/nf-core/huggingface/download/meta.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
# nf-core meta.yml for the huggingface/download module.
# NOTE(review): the name must match the module path (huggingface/download),
# hence huggingface_download rather than the old hf_download alias.
name: huggingface_download
description: Tool for downloading models from HuggingFace
keywords:
  - llm
  - llama
  - ai
tools:
  - huggingface_hub:
      description: "HuggingFace Hub CLI interface"
      homepage: "https://huggingface.co/docs/huggingface_hub/guides/cli"
      licence:
        - "MIT"
      identifier: ""
input:
  - - meta:
        type: map
        description: |
          Groovy Map containing sample information
          e.g. `[ id:'sample1' ]`
    - hf_repo:
        type: string
        description: HuggingFace repository
    - hf_file:
        type: string
        description: HuggingFace GGUF file
    - hf_home:
        type: string
        description: HuggingFace default cache directory
output:
  output:
    - - meta:
          type: map
          description: |
            Groovy Map containing sample information
            e.g. `[ id:'sample1' ]`
      - hf_file:
          type: file
          description: Downloaded HuggingFace GGUF file
          ontologies: []
  versions_huggingface_hub:
    - - ${task.process}:
          type: string
          description: The name of the process
      - huggingface_hub:
          type: string
          description: The name of the tool
      - hf --version 2>&1 | tail -n1 | awk '{print \$NF}':
          type: eval
          description: The expression to obtain the version of the tool
topics:
  versions:
    - - ${task.process}:
          type: string
          description: The name of the process
      - huggingface_hub:
          type: string
          description: The name of the tool
      - hf --version 2>&1 | tail -n1 | awk '{print \$NF}':
          type: eval
          description: The expression to obtain the version of the tool
authors:
  - "@toniher"
  - "@lucacozzuto"
maintainers:
  - "@toniher"
  - "@lucacozzuto"
66 changes: 66 additions & 0 deletions modules/nf-core/huggingface/download/tests/main.nf.test
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
// nf-test suite for the HF_DOWNLOAD process.
nextflow_process {

    name "Test Process HF_DOWNLOAD"
    script "../main.nf"
    process "HF_DOWNLOAD"

    tag "modules"
    tag "modules_nfcore"
    tag "huggingface"
    tag "huggingface/download"

    // Real download of a small GGUF model from the Hugging Face Hub.
    // NOTE(review): requires network access at test time.
    test("download gguf file") {

        when {
            process {
                """
                input[0] = [
                    [ id:'test_model' ],
                    "ggml-org/gemma-3-1b-it-GGUF",
                    "gemma-3-1b-it-Q4_K_M.gguf",
                    "./hf_cache"
                ]
                """
            }
        }

        then {
            assertAll(
                { assert process.success },
                { assert process.out.output.size() == 1 },
                { assert process.out.output[0][0] == [ id:'test_model' ] },
                // Check name and non-emptiness only: the model payload is too
                // large to snapshot byte-for-byte.
                { assert file(process.out.output[0][1]).name == "gemma-3-1b-it-Q4_K_M.gguf" },
                { assert file(process.out.output[0][1]).size() > 0 },
                // Snapshot only the versions channel(s).
                { assert snapshot(process.out.findAll { key, val -> key.startsWith('versions') }).match() }
            )
        }
    }

    test("stub - download gguf file") {

        options "-stub"

        when {
            process {
                """
                input[0] = [
                    [ id:'test_model' ],
                    "ggml-org/gemma-3-1b-it-GGUF",
                    "gemma-3-1b-it-Q4_K_M.gguf",
                    "./hf_cache"
                ]
                """
            }
        }

        then {
            assertAll(
                { assert process.success },
                { assert process.out.output.size() == 1 },
                { assert process.out.output[0][0] == [ id:'test_model' ] },
                { assert file(process.out.output[0][1]).name == "gemma-3-1b-it-Q4_K_M.gguf" },
                // The stub only touches the file, so no size check here.
                { assert snapshot(process.out.findAll { key, val -> key.startsWith('versions') }).match() }
            )
        }
    }
}
38 changes: 38 additions & 0 deletions modules/nf-core/huggingface/download/tests/main.nf.test.snap
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
{
"stub - download gguf file": {
"content": [
{
"versions_huggingface_hub": [
[
"HF_DOWNLOAD",
"huggingface_hub",
"1.6.0"
]
]
}
],
"timestamp": "2026-03-26T08:39:57.919278809",
"meta": {
"nf-test": "0.9.5",
"nextflow": "25.10.4"
}
},
"download gguf file": {
"content": [
{
"versions_huggingface_hub": [
[
"HF_DOWNLOAD",
"huggingface_hub",
"1.6.0"
]
]
}
],
"timestamp": "2026-03-26T08:38:24.630341776",
"meta": {
"nf-test": "0.9.5",
"nextflow": "25.10.4"
}
}
}
7 changes: 7 additions & 0 deletions modules/nf-core/huggingface/download/tests/nextflow.config
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
// Make scripts bundled with modules available on $PATH inside tasks.
nextflow.enable.moduleBinaries = true

process {
    withName: 'HF_DOWNLOAD' {
        // Under the docker profile, bind-mount the shared hf_cache directory
        // into the container so the HF_HOME cache persists across tasks;
        // empty string (no extra options) for all other profiles.
        containerOptions = { workflow.profile.contains('docker') ? "--volume ${projectDir}/hf_cache:${projectDir}/hf_cache" : '' }
    }
}
5 changes: 5 additions & 0 deletions modules/nf-core/llamacpp-python/run/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# CUDA-enabled image providing llama-cpp-python with GPU support for the
# nf-core llamacpp-python/run module.
FROM nvidia/cuda:12.4.1-devel-ubuntu22.04

# --no-install-recommends keeps the image small; removing the apt lists in the
# same layer avoids baking stale package metadata into the image.
RUN apt-get update \
    && apt-get install -y --no-install-recommends python3 python3-pip \
    && rm -rf /var/lib/apt/lists/*

# Install the CUDA 12.4 build of llama-cpp-python from the project's wheel
# index; --no-cache-dir keeps the pip download cache out of the layer.
RUN pip3 install --no-cache-dir llama-cpp-python \
    --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cu124
7 changes: 7 additions & 0 deletions modules/nf-core/llamacpp-python/run/environment.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
---
# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/modules/environment-schema.json
# Conda environment for the llamacpp-python/run module (CPU build; GPU runs
# use the CUDA container selected in main.nf instead).
channels:
- conda-forge
# NOTE(review): no bioconda package is listed below — confirm the bioconda
# channel is actually needed here.
- bioconda
dependencies:
- conda-forge::llama-cpp-python=0.3.16
36 changes: 36 additions & 0 deletions modules/nf-core/llamacpp-python/run/main.nf
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
// Run LLM inference with llama.cpp via the llama-cpp-python wrapper, using
// the module's bundled llamacpp-python.py script (requires moduleBinaries).
process LLAMACPP_PYTHON_RUN {
    tag "$meta.id"
    label 'process_medium'
    label 'process_gpu'

    conda "${moduleDir}/environment.yml"
    // GPU runs need the CUDA-enabled image; CPU runs use the Wave build that
    // matches the conda pin (0.3.16).
    container "${ task.ext.use_gpu ? 'quay.io/nf-core/llama-cpp-python:0.1.9' : 'community.wave.seqera.io/library/llama-cpp-python:0.3.16--b351398cd0ea7fc5' }"

    input:
    // meta        : sample metadata map, e.g. [ id:'sample1' ]
    // prompt_file : file containing the messages/prompt for the model
    // gguf_model  : GGUF model weights loaded by llama.cpp
    tuple val(meta), path(prompt_file), path(gguf_model)

    output:
    tuple val(meta), path("output.txt"), emit: output
    // Tool version published onto the `versions` topic via an eval output.
    tuple val("${task.process}"), val("llama-cpp-python"), eval("python3 -c 'import llama_cpp; print(llama_cpp.__version__)'"), topic: versions, emit: versions_llama_cpp_python

    when:
    task.ext.when == null || task.ext.when

    script:
    // Extra command-line arguments can be supplied via task.ext.args.
    // Fix: the previous version also assigned an undeclared, unused `prefix`
    // variable (leaked into the binding); it is removed because the output
    // file name is fixed to output.txt.
    def args = task.ext.args ?: ''
    """
    llamacpp-python.py \\
        --model ${gguf_model} \\
        --messages ${prompt_file} \\
        --output output.txt \\
        ${args}
    """

    stub:
    // Create an empty placeholder so the `output` channel shape matches.
    """
    touch output.txt
    """
}
69 changes: 69 additions & 0 deletions modules/nf-core/llamacpp-python/run/meta.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
# nf-core meta.yml for the llamacpp-python/run module.
name: llamacpp_python_run
description: Python wrapper for running locally-hosted LLM with llama.cpp
keywords:
  - llm
  - llama
  - ai
tools:
  - llama-cpp-python:
      description: "Python wrapper for llama.cpp LLM inference tool"
      homepage: "https://llama-cpp-python.readthedocs.io/en/latest/"
      licence:
        - "MIT"
      identifier: ""
input:
  - - meta:
        type: map
        description: |
          Groovy Map containing sample information
          e.g. `[ id:'sample1' ]`
    - prompt_file:
        type: file
        description: |
          Prompt file
          Structure: [ val(meta), path(prompt_file), path(gguf_model) ]
        ontologies: []
    - gguf_model:
        type: file
        description: |
          GGUF model
          Structure: [ val(meta), path(prompt_file), path(gguf_model) ]
        ontologies: []
output:
  output:
    - - meta:
          type: map
          description: |
            Groovy Map containing sample information
            e.g. `[ id:'sample1' ]`
      - "output.txt":
          type: file
          description: File with the output of LLM inference request
          ontologies: []
  versions_llama_cpp_python:
    - - ${task.process}:
          type: string
          description: The name of the process
      - llama-cpp-python:
          type: string
          description: The name of the tool
      - python3 -c 'import llama_cpp; print(llama_cpp.__version__)':
          type: eval
          description: The expression to obtain the version of the tool
topics:
  versions:
    - - ${task.process}:
          type: string
          description: The name of the process
      - llama-cpp-python:
          type: string
          description: The name of the tool
      - python3 -c 'import llama_cpp; print(llama_cpp.__version__)':
          type: eval
          description: The expression to obtain the version of the tool
authors:
  - "@toniher"
  - "@lucacozzuto"
maintainers:
  - "@toniher"
  - "@lucacozzuto"
Loading
Loading