Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 2 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -21,10 +21,9 @@

## Introduction

**nf-core/deepmodeloptim** augments your bio data towards an optimal task-specific training set.

Methods in deep learning are vastly equivalent (see neural scaling laws paper), most of the performance is driven by the training data.
**nf-core/deepmodeloptim** augments your bio data towards an optimal task-specific training set.

Methods in deep learning are vastly equivalent (see neural scaling laws paper), most of the performance is driven by the training data.

<picture>
<source media="(prefers-color-scheme: dark)" srcset="assets/metromap.png">
Expand Down
6 changes: 5 additions & 1 deletion modules/local/custom/modify_model_config/main.nf
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,11 @@ process CUSTOM_MODIFY_MODEL_CONFIG {
meta_updated = meta + ["n_trials": "${n_trials}"]
"""
# substitute the line containing n_trials in the config file with n_trials: \${n_trials}
awk -v n_trials=${n_trials} '/n_trials: [0-9]+/ {gsub(/n_trials: [0-9]+/, "n_trials: " n_trials)}1' ${config} > ${prefix}.yaml
if [ "${n_trials}" = "[]" ]; then
cp "${config}" "${prefix}.yaml"
else
awk -v n_trials="${n_trials}" '/n_trials: [0-9]+/ {gsub(/n_trials: [0-9]+/, "n_trials: " n_trials)}1' "${config}" > "${prefix}.yaml"
fi

cat <<-END_VERSIONS > versions.yml
"${task.process}":
Expand Down
2 changes: 1 addition & 1 deletion modules/local/stimulus/split_yaml/main.nf
Original file line number Diff line number Diff line change
Expand Up @@ -38,4 +38,4 @@ process STIMULUS_SPLIT_YAML {
stimulus: \$(stimulus -v | cut -d ' ' -f 3)
END_VERSIONS
"""
}
}
8 changes: 4 additions & 4 deletions subworkflows/local/split_data_config_unified/main.nf
Original file line number Diff line number Diff line change
Expand Up @@ -26,13 +26,13 @@ workflow SPLIT_DATA_CONFIG_UNIFIED_WF {
// Process split configs - transpose and add split_id to meta
ch_split_configs = STIMULUS_SPLIT_YAML.out.split_config
.transpose()
.map { meta, yaml ->
.map { meta, yaml ->
// Extract split info from descriptive filename
def split_id = yaml.baseName.replaceAll(/.*_([^_]+_[^_]+)_split$/, '$1')
[ meta + [split_id: split_id], yaml]
[ meta + [split_id: split_id], yaml]
}

// Process transform configs - transpose and add transform_id to meta
// Process transform configs - transpose and add transform_id to meta
ch_transform_configs = STIMULUS_SPLIT_YAML.out.transform_config
.transpose()
.map { meta, yaml ->
Expand All @@ -55,4 +55,4 @@ workflow SPLIT_DATA_CONFIG_UNIFIED_WF {
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
THE END
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
*/
2 changes: 1 addition & 1 deletion subworkflows/local/transform_csv/main.nf
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ workflow TRANSFORM_CSV_WF {
data: item.data
config: item.config
}

// run stimulus transform
STIMULUS_TRANSFORM_CSV(
ch_input.data,
Expand Down
18 changes: 11 additions & 7 deletions subworkflows/local/utils_nfcore_deepmodeloptim_pipeline/main.nf
Original file line number Diff line number Diff line change
Expand Up @@ -110,12 +110,16 @@ workflow PIPELINE_INITIALISATION {
//

range = validate_range(params.tune_trials_range)
val_tune_trials_range = Channel.from(range)
.map { rangeStr ->
def (min, max, step) = rangeStr.tokenize(',')*.toInteger()
(min..max).step(step).toList()
}
.flatten()
if (range) {
val_tune_trials_range = Channel.from(range)
.map { rangeStr ->
def (min, max, step) = rangeStr.tokenize(',')*.toInteger()
(min..max).step(step).toList()
}
.flatten()
} else {
val_tune_trials_range = []
}
//
// Create the channels for the number of replicates
//
Expand Down Expand Up @@ -217,7 +221,7 @@ def validateInputSamplesheet(input) {
def validate_range(range) {

if (range == null) {
return "1,1,1"
return range
}
def (min, max, step) = range.tokenize(',')*.toInteger()
if (min > max) {
Expand Down
Loading