
first commit

master
qingwangchen, 3 years ago
Commit ee9b360ac1
5 files changed, 161 additions and 0 deletions
  1. README.md (+57, -0)
  2. defaults (+6, -0)
  3. inputs (+12, -0)
  4. tasks/spladder_quantification.wdl (+55, -0)
  5. workflow.wdl (+31, -0)

README.md (+57, -0)

@@ -0,0 +1,57 @@
# README.md

> Author: Qingwang Chen
>
> Email: [qwch20@fudan.edu.cn](mailto:qwch20@fudan.edu.cn)
>
> Last Updated: 22/08/2021

#### Brief Introduction

With the merged splice graph at hand, we can now quantify the nodes and edges of the graph based on the alignment data. Each sample's quantification is performed independently.
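Concretely, this app wraps SplAdder's build step in single-quantification mode (see tasks/spladder_quantification.wdl). A rough sketch of the underlying call, with placeholder file names:

```
# Sketch of the core SplAdder call this app wraps; all paths are placeholders
$ spladder build -o sample_A/spladder_out \
    --annotation Homo_sapiens.GRCh38.103.gtf \
    --bams sample_A.bam \
    --confidence 2 \
    --merge-strat merge_graphs \
    --readlen 150 \
    --quantify-graph --qmode single \
    --no-extract-ase
```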

#### Requirements

- choppy
- Ali-Cloud
- Linux

```
# Activate the choppy environment
$ source activate choppy (open-choppy-env)

# First-time installation
$ choppy install chenqingwang/SplAdder-quantification-single
# Subsequent installations (force re-install)
$ choppy install chenqingwang/SplAdder-quantification-single -f

# List installed apps
$ choppy apps
```

#### Quick Start

```
# Generate the samples.csv template
$ choppy samples chenqingwang/SplAdder-quantification-single-latest > samples.csv
# Generate a samples.csv template without the default parameters
$ choppy samples --no-default chenqingwang/SplAdder-quantification-single-latest > samples.csv

# Submit the batch of tasks
$ choppy batch chenqingwang/SplAdder-quantification-single-latest samples.csv -p Your_project_name -l Your_label

# Check task status
$ choppy query -L Your_label | grep "status"

# Search for failed tasks
$ choppy search -s Failed -p Your_project_name -u chenqingwang --short-format

# Location of the result files
# oss://choppy-cromwell-result/test-choppy/Your_project_name/
```
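
The generated samples.csv contains one column per workflow input declared in the `inputs` template below. A minimal sketch of a filled-in file, assuming a single sample and placeholder OSS paths:

```
$ head -2 samples.csv
sample_id,bam,bai,pickle,merge_graphs,samples
sample_A,oss://your-bucket/sample_A.bam,oss://your-bucket/sample_A.bam.bai,oss://your-bucket/pickle/,oss://your-bucket/merge_graphs.pickle,oss://your-bucket/samples.txt
```

Inputs covered by the `defaults` file (reference_gtf_file, spladder_docker, spladder_cluster, disk_size) are filled automatically unless the template was generated with --no-default.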

#### Description
```
# samples: the file names to be analyzed, written one per line; upload this
# file to AliCloud OSS before submitting.
```
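
For instance, a minimal samples file could look like this (sample names here are hypothetical; they must match the pickle files produced by the merging step):

```
$ cat samples.txt
sample_A
sample_B
sample_C
```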

defaults (+6, -0)

@@ -0,0 +1,6 @@
{
  "reference_gtf_file": "oss://pgx-reference-data/reference/spladder/SplAdder/data/reference/Homo_sapiens.GRCh38.103.gtf",
  "spladder_docker": "registry.cn-shanghai.aliyuncs.com/pgx-docker-registry/spladder:v2.4.2",
  "spladder_cluster": "OnDemand bcs.a2.xlarge img-ubuntu-vpc",
  "disk_size": "200"
}

inputs (+12, -0)

@@ -0,0 +1,12 @@
{
  "{{ project_name }}.sample_id": "{{ sample_id }}",
  "{{ project_name }}.bam": "{{ bam }}",
  "{{ project_name }}.bai": "{{ bai }}",
  "{{ project_name }}.pickle": "{{ pickle }}",
  "{{ project_name }}.merge_graphs": "{{ merge_graphs }}",
  "{{ project_name }}.reference_gtf_file": "{{ reference_gtf_file }}",
  "{{ project_name }}.samples": "{{ samples }}",
  "{{ project_name }}.spladder_docker": "{{ spladder_docker }}",
  "{{ project_name }}.spladder_cluster": "{{ spladder_cluster }}",
  "{{ project_name }}.disk_size": "{{ disk_size }}"
}

tasks/spladder_quantification.wdl (+55, -0)

@@ -0,0 +1,55 @@
task spladder_quantification {
  String sample_id
  File bam
  File bai
  File pickle
  File merge_graphs
  File reference_gtf_file
  File samples

  String docker
  String cluster
  String disk_size

  command <<<
    set -o pipefail
    set -e

    # Stage the per-sample pickle files and the merged graph where SplAdder
    # expects them; the pickle input localizes as a directory of .pickle files
    mkdir -p ${sample_id}/spladder_out/spladder
    ln -s ${pickle}/*.pickle ${sample_id}/spladder_out/spladder/
    # Record which staged pickle files match the sample list
    cat ${samples} | while read a; do ls ${sample_id}/spladder_out/spladder/ | grep "$a"; done >> pickle.txt
    ln -s ${merge_graphs} ${sample_id}/spladder_out/spladder/

    # Quantify the merged graph against this sample's alignments,
    # using all available cores
    nt=$(nproc)
    spladder build -o ${sample_id}/spladder_out \
      --annotation ${reference_gtf_file} \
      --bams ${bam} \
      --confidence 2 \
      --merge-strat merge_graphs \
      --readlen 150 \
      --parallel $nt \
      --quantify-graph --qmode single \
      --no-extract-ase
    # Snapshot the working directory tree for debugging
    find . -depth > fileList.txt
  >>>

  runtime {
    docker: docker
    cluster: cluster
    systemDisk: "cloud_ssd 40"
    dataDisk: "cloud_ssd " + disk_size + " /cromwell_root/"
  }

  output {
    File fileList = "fileList.txt"
    File pickle_txt = "pickle.txt"
    # glob() is required to resolve the wildcard in the output path
    Array[File] count_hdf5 = glob("${sample_id}/spladder_out/spladder/*.count.hdf5")
  }
}

workflow.wdl (+31, -0)

@@ -0,0 +1,31 @@
import "./tasks/spladder_quantification.wdl" as spladder_quantification


workflow {{ project_name }} {
  String sample_id
  File bam
  File bai
  File pickle
  File merge_graphs
  File reference_gtf_file
  File samples

  String spladder_docker
  String spladder_cluster
  String disk_size

  call spladder_quantification.spladder_quantification as spladder_quantification {
    input:
      reference_gtf_file=reference_gtf_file,
      sample_id=sample_id,
      bam=bam,
      bai=bai,
      pickle=pickle,
      merge_graphs=merge_graphs,
      samples=samples,
      docker=spladder_docker,
      cluster=spladder_cluster,
      disk_size=disk_size
  }
}
