linzipeng committed 6 years ago
parent
commit
5cef954d30
6 changed files with 204 additions and 0 deletions
  1. +12 -0 inputs
  2. +35 -0 tasks/Call_meth.wdl
  3. +34 -0 tasks/Dedup.wdl
  4. +29 -0 tasks/Mapping.wdl
  5. +34 -0 tasks/Trim.wdl
  6. +60 -0 workflow.wdl

+12 -0 inputs

@@ -0,0 +1,12 @@
{
    "{{ project_name }}.fasta": "GRCh38.d1.vd1.fa",
    "{{ project_name }}.ref_dir": "oss://pgx-reference-data/GRCh38.d1.vd1/",
    "{{ project_name }}.fastq_1": "{{ read1 }}",
    "{{ project_name }}.cluster_config": "{{ cluster if cluster != '' else 'OnDemand ecs.sn2ne.2xlarge img-ubuntu-vpc' }}",
    "{{ project_name }}.docker": "registry.cn-shanghai.aliyuncs.com/pgx-docker-registry/sentieon-genomics:v2018.08.01",
    "{{ project_name }}.sample": "{{ sample_name }}",
    "{{ project_name }}.disk_size": "{{ disk_size }}",
    "{{ project_name }}.regions": "{{ regions }}",
    "{{ project_name }}.fastq_2": "{{ read2 }}"
}
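
Once the Jinja variables are filled in by the submission layer, this renders to plain Cromwell inputs JSON. A hypothetical rendering for a project named WGBS_test is sketched below; every value except the two reference fields and the docker image is a made-up placeholder. Two things worth noting: the read files must follow the ${sample}_R1/_R2 naming that the tasks' output declarations assume, and regions is supplied by the template but never declared in workflow.wdl, so it goes unused.

    {
        "WGBS_test.fasta": "GRCh38.d1.vd1.fa",
        "WGBS_test.ref_dir": "oss://pgx-reference-data/GRCh38.d1.vd1/",
        "WGBS_test.fastq_1": "oss://my-bucket/sample1_R1.fastq",
        "WGBS_test.cluster_config": "OnDemand ecs.sn2ne.2xlarge img-ubuntu-vpc",
        "WGBS_test.docker": "registry.cn-shanghai.aliyuncs.com/pgx-docker-registry/sentieon-genomics:v2018.08.01",
        "WGBS_test.sample": "sample1",
        "WGBS_test.disk_size": "100",
        "WGBS_test.regions": "",
        "WGBS_test.fastq_2": "oss://my-bucket/sample1_R2.fastq"
    }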


+35 -0 tasks/Call_meth.wdl

@@ -0,0 +1,35 @@
task Haplotyper {
    # Runs the Bismark methylation extractor on the deduplicated BAM.
    # (The task keeps the name Haplotyper even though it does not call variants.)
    File ref_dir
    File Dedup_bam
    String sample
    String docker
    String cluster_config
    String disk_size

    command <<<
        set -o pipefail
        set -e
        bismark_methylation_extractor -p --no_overlap --comprehensive --report --bedGraph --genome_folder ${ref_dir} ${Dedup_bam}
    >>>

    runtime {
        docker: docker
        cluster: cluster_config
        systemDisk: "cloud_ssd 40"
        dataDisk: "cloud_ssd " + disk_size + " /cromwell_root/"
    }

    output {
        # Bismark derives these names from the input BAM's basename, so the
        # .deduplicated infix added by the Dedup step must be kept, and the
        # raw reads are assumed to be named ${sample}_R1/_R2.
        File bedGraph = "${sample}_R1_val_1_bismark_bt2_pe.deduplicated.bedGraph.gz"
        File cov = "${sample}_R1_val_1_bismark_bt2_pe.deduplicated.bismark.cov.gz"
        File M_bias = "${sample}_R1_val_1_bismark_bt2_pe.deduplicated.M-bias.txt"
        File splitting_report = "${sample}_R1_val_1_bismark_bt2_pe.deduplicated_splitting_report.txt"
        File CHG_context = "CHG_context_${sample}_R1_val_1_bismark_bt2_pe.deduplicated.txt"
        File CHH_context = "CHH_context_${sample}_R1_val_1_bismark_bt2_pe.deduplicated.txt"
        File CpG_context = "CpG_context_${sample}_R1_val_1_bismark_bt2_pe.deduplicated.txt"
    }
}
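
Of these outputs, the bgzipped coverage file is usually the first one to inspect. A minimal check, assuming the hypothetical sample name sample1 from the inputs example above; each line of Bismark's .cov format lists chromosome, start, end, methylation percentage, methylated count, and unmethylated count:

    # Peek at the first few methylation calls:
    # <chrom> <start> <end> <%meth> <count_methylated> <count_unmethylated>
    zcat sample1_R1_val_1_bismark_bt2_pe.deduplicated.bismark.cov.gz | head -n 5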



+34 -0 tasks/Dedup.wdl

@@ -0,0 +1,34 @@
task Dedup {
    # Removes duplicate alignments from the paired-end Bismark BAM.
    String sample
    File unsorted_bam
    String docker
    String cluster_config
    String disk_size

    command <<<
        set -o pipefail
        set -e
        deduplicate_bismark -p --bam ${unsorted_bam}
    >>>

    runtime {
        docker: docker
        cluster: cluster_config
        systemDisk: "cloud_ssd 40"
        dataDisk: "cloud_ssd " + disk_size + " /cromwell_root/"
    }

    output {
        File Dedup_bam = "${sample}_R1_val_1_bismark_bt2_pe.deduplicated.bam"
    }
}







+29 -0 tasks/Mapping.wdl

@@ -0,0 +1,29 @@
task mapping {
    # Aligns the trimmed read pairs against the bisulfite-converted
    # reference with Bismark/Bowtie2.
    File ref_dir
    File trim_read1
    File trim_read2
    String sample
    String docker
    String cluster_config
    String disk_size

    command <<<
        set -o pipefail
        set -e
        nt=$(nproc)
        # $nt (not ${nt}) so that draft-2 WDL does not try to interpolate
        # the shell variable as a workflow placeholder.
        bismark --bowtie2 -p $nt --bam ${ref_dir} -1 ${trim_read1} -2 ${trim_read2}
    >>>

    runtime {
        docker: docker
        cluster: cluster_config
        systemDisk: "cloud_ssd 40"
        dataDisk: "cloud_ssd " + disk_size + " /cromwell_root/"
    }

    output {
        File unsorted_bam = "${sample}_R1_val_1_bismark_bt2_pe.bam"
    }
}

+34 -0 tasks/Trim.wdl

@@ -0,0 +1,34 @@
task TNseq {
    # Trims adapters and hard-clips 4 bp from both ends of each read
    # with Trim Galore.
    File fastq_1
    File fastq_2
    String docker
    String cluster_config
    String sample
    String disk_size

    command <<<
        set -o pipefail
        set -e
        trim_galore --clip_R1 4 --clip_R2 4 --three_prime_clip_R1 4 --three_prime_clip_R2 4 --paired ${fastq_1} ${fastq_2}
    >>>

    runtime {
        docker: docker
        cluster: cluster_config
        systemDisk: "cloud_ssd 40"
        dataDisk: "cloud_ssd " + disk_size + " /cromwell_root/"
    }

    output {
        # Trim Galore derives output names from the input basenames, so the
        # raw reads are assumed to be uncompressed files named
        # ${sample}_R1.fastq / ${sample}_R2.fastq.
        File trim_read1 = "${sample}_R1_val_1.fq"
        File trim_read2 = "${sample}_R2_val_2.fq"
    }
}



+60 -0 workflow.wdl

@@ -0,0 +1,60 @@
import "./tasks/Trim.wdl" as Trim
import "./tasks/Mapping.wdl" as Meapping
import "./tasks/Dedup.wdl" as Dedup
import "./tasks/Call_meth.wdl" as Call_meth


workflow {{ project_name }} {

File fastq_1
File fastq_2

String sample
String docker

String disk_size
String cluster_config

File ref_dir



call BQSR.BQSR as BQSR {
input:
sample=sample,
ref_dir=ref_dir,
fastq_1=fastq_1,
fastq_2=fastq_2,
docker=docker,
disk_size=disk_size,
cluster_config=cluster_config
}
call mapping.mapping as mapping {
input:
sample=sample,
ref_dir=ref_dir,
trim_read1=trim_read1,
trim_read2=trim_read2,
docker=docker,
disk_size=disk_size,
cluster_config=cluster_config
}
call Dedup.Dedup as Dedup {
input:
unsorted_bam=mapping_R1_val_1_bismark_bt2_pe.bam,
sample=sample,
docker=docker,
disk_size=disk_size,
cluster_config=cluster_config
}
call Haplotyper.Haplotyper as Haplotyper {
input:
ref_dir=ref_dir,
Dedup_bam=Dedup_R1_val_1_bismark_bt2_pe.deduplicated.bam,
sample=sample,
docker=docker,
disk_size=disk_size,
cluster_config=cluster_config
}
}
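
For a quick sanity check outside the Alibaba Cloud batch environment, the rendered workflow can be run against a stock Cromwell jar. A minimal sketch, assuming a rendered workflow.wdl and the inputs file sit in the current directory; cromwell.jar stands in for whatever release is installed, and the production backend configuration is not shown:

    # Hypothetical local smoke test; the OSS paths in `inputs` would need
    # to be reachable from the chosen Cromwell backend.
    java -jar cromwell.jar run workflow.wdl --inputs inputs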
