#!/usr/bin/env python3
"""
Convert MAF file to VCF format with deduplication.
This script converts cancerhotspots.v2.maf.gz to VCF format.
Duplicate positions are merged and tumor types are aggregated.
"""
import gzip
import argparse
from collections import defaultdict
def parse_args():
    """Parse and return the command-line arguments (input MAF, output VCF)."""
    cli = argparse.ArgumentParser(
        description='Convert MAF to VCF format with deduplication')
    cli.add_argument('input_maf', help='Input MAF file (can be .gz compressed)')
    cli.add_argument('output_vcf', help='Output VCF file')
    return cli.parse_args()
def get_maf_columns(header_line):
    """Map each tab-separated column name in *header_line* to its 0-based index.

    If a column name appears more than once, the last occurrence wins.
    """
    col_map = {}
    for idx, name in enumerate(header_line.strip().split('\t')):
        col_map[name] = idx
    return col_map
def maf_to_vcf(maf_file, vcf_file):
    """Convert MAF file to VCF format with deduplication.

    Streams *maf_file* (plain text or .gz), merges records that share the
    same (chromosome, start, ref, alt) key while aggregating tumor-type
    counts and selected annotation columns, then writes the merged records
    out through write_vcf().

    Args:
        maf_file: Path to the input MAF file; a '.gz' suffix triggers
            transparent gzip decompression.
        vcf_file: Path of the VCF file to write.

    Raises:
        ValueError: If the MAF header lacks required columns, or the file
            contains no header line at all.
    """
    # Columns that must exist to build a VCF record.
    required_cols = [
        'Chromosome', 'Start_Position', 'End_Position',
        'Reference_Allele', 'Tumor_Seq_Allele1', 'Tumor_Seq_Allele2'
    ]
    # Additional columns aggregated into the VCF INFO field.
    info_cols = [
        'FILTER', 'TUMORTYPE', 'judgement',
        'oncotree_organtype',
        'Variant_Classification', 'Variant_Type',
        't_depth', 't_ref_count', 't_alt_count',
    ]
    # Deduplicated records: (chrom, pos, ref, alt) -> aggregated info dict.
    records_dict = {}
    # col_map doubles as the "header seen yet?" sentinel; the original
    # used a separate first_line flag and could hit a NameError at the
    # write_vcf() call if the file had no header line at all.
    col_map = None
    total_records = 0
    skipped_records = 0
    # Context manager guarantees the handle is closed even if parsing
    # raises (the original leaked the handle on any exception).
    opener = gzip.open if maf_file.endswith('.gz') else open
    with opener(maf_file, 'rt') as maf_handle:
        for line in maf_handle:
            line = line.strip()
            if not line:
                continue
            # Skip comment lines.
            if line.startswith('#'):
                continue
            # First non-comment line is the column header.
            if col_map is None:
                col_map = get_maf_columns(line)
                missing_cols = [c for c in required_cols if c not in col_map]
                if missing_cols:
                    raise ValueError(f"Missing required columns: {missing_cols}")
                continue
            fields = line.split('\t')
            try:
                chrom = fields[col_map['Chromosome']]
                start = int(fields[col_map['Start_Position']])
                # End_Position is parsed only for validation: a malformed
                # value makes the record skip, matching the original flow.
                int(fields[col_map['End_Position']])
                ref = fields[col_map['Reference_Allele']]
                alt1 = fields[col_map['Tumor_Seq_Allele1']]
                alt2 = fields[col_map['Tumor_Seq_Allele2']]
                # Skip records without a usable chromosome or reference.
                if not chrom or chrom == '.' or not ref or ref == '.':
                    skipped_records += 1
                    continue
                # Collect candidate ALT alleles that differ from REF.
                alts = []
                if alt1 and alt1 != '.' and alt1 != ref:
                    alts.append(alt1)
                if alt2 and alt2 != '.' and alt2 != ref and alt2 != alt1:
                    alts.append(alt2)
                if not alts:
                    skipped_records += 1
                    continue
                alt = alts[0]  # first ALT defines the dedup key
                key = (chrom, start, ref, alt)
                tumortype = ''
                if 'TUMORTYPE' in col_map:
                    tumortype = fields[col_map['TUMORTYPE']].strip()
                record = records_dict.get(key)
                if record is None:
                    # Initialize a fresh aggregate for this position.
                    record = records_dict[key] = {
                        'chrom': chrom,
                        'pos': start,
                        'ref': ref,
                        'alt': alt,
                        'tumortype_counts': defaultdict(int),
                        'FILTER': [],
                        'judgement': set(),
                        'oncotree_organtype': set(),
                        'Variant_Classification': set(),
                        'Variant_Type': set(),
                        't_depth': [],
                        't_ref_count': [],
                        't_alt_count': [],
                    }
                if tumortype:
                    record['tumortype_counts'][tumortype] += 1
                # Aggregate the remaining INFO columns: lists keep order,
                # sets keep unique values, count columns collect ints.
                for col in info_cols:
                    if col not in col_map:
                        continue
                    val = fields[col_map[col]]
                    if not val or val == '.':
                        continue
                    if col == 'FILTER':
                        record[col].append(val)
                    elif col in ('judgement', 'oncotree_organtype',
                                 'Variant_Classification', 'Variant_Type'):
                        record[col].add(val)
                    elif col in ('t_depth', 't_ref_count', 't_alt_count'):
                        try:
                            record[col].append(int(val))
                        except ValueError:
                            pass  # non-numeric counts are silently dropped
                total_records += 1
                if total_records % 500000 == 0:
                    print(f"Processed {total_records:,} records, {len(records_dict):,} unique positions...")
            except (IndexError, ValueError):
                # Short rows or unparsable coordinates: skip the record.
                skipped_records += 1
    if col_map is None:
        raise ValueError("MAF file contains no header line")
    print(f"\nParsing complete!")
    print(f"Total input records: {total_records:,}")
    print(f"Skipped records: {skipped_records:,}")
    print(f"Unique positions: {len(records_dict):,}")
    # Write VCF output
    write_vcf(records_dict, vcf_file, col_map, info_cols)
def write_vcf(records_dict, vcf_file, col_map, info_cols):
    """Write deduplicated records to *vcf_file* in chromosome/position order.

    Args:
        records_dict: Mapping of (chrom, pos, ref, alt) -> aggregated data
            as produced by maf_to_vcf().
        vcf_file: Output VCF path.
        col_map: MAF column index map (kept for interface parity; unused here).
        info_cols: INFO column names, forwarded to build_vcf_header().
    """
    # Rank for non-numeric chromosome names; MT and M share a slot,
    # anything else unrecognized sorts last.
    special_rank = {'X': 23, 'Y': 24, 'MT': 25, 'M': 25}

    def chrom_order(item):
        chrom, pos, _ref, _alt = item[0]
        if chrom in special_rank:
            return (special_rank[chrom], pos)
        try:
            return (int(chrom), pos)
        except ValueError:
            return (26, pos)

    ordered = sorted(records_dict.items(), key=chrom_order)
    with open(vcf_file, 'w') as out:
        out.write(build_vcf_header(info_cols))
        for _key, rec in ordered:
            # Summarize tumor types as name:count pairs, most frequent first,
            # ties broken alphabetically.
            counts = rec['tumortype_counts']
            if counts:
                ranked = sorted(counts.items(), key=lambda kv: (-kv[1], kv[0]))
                tumortype_str = '|'.join(f"{name}:{n}" for name, n in ranked)
            else:
                tumortype_str = '.'
            info = [f"TUMORTYPE={tumortype_str}"]
            # Only the first observed FILTER value is reported.
            if rec['FILTER']:
                info.append(f"FILTER={rec['FILTER'][0]}")
            # Set-valued annotations are emitted sorted, comma-joined.
            for field in ('judgement', 'oncotree_organtype',
                          'Variant_Classification', 'Variant_Type'):
                if rec[field]:
                    info.append(f"{field}={','.join(sorted(rec[field]))}")
            # Count columns report the (upper) median of collected values.
            for field in ('t_depth', 't_ref_count', 't_alt_count'):
                values = rec[field]
                if values:
                    info.append(f"{field}={sorted(values)[len(values) // 2]}")
            out.write(
                f"{rec['chrom']}\t{rec['pos']}\t.\t{rec['ref']}\t{rec['alt']}"
                f"\t.\tPASS\t{';'.join(info)}\n"
            )
    print(f"VCF file written: {vcf_file}")
def build_vcf_header(info_cols):
    """Return the VCF header (meta lines plus column line) as one string.

    Args:
        info_cols: Accepted for interface parity; the header content is fixed.
    """
    lines = [
        "##fileformat=VCFv4.2",
        "##source=cancerhotspots_maf2vcf",
    ]
    # (ID, Type, Description) triples for every INFO field we emit.
    info_defs = [
        ('TUMORTYPE', 'String', 'Tumor type counts: tumor_type:count|tumor_type:count'),
        ('FILTER', 'String', 'Filter status'),
        ('judgement', 'String', 'Hotspot judgement'),
        ('oncotree_organtype', 'String', 'Oncotree organ type'),
        ('Variant_Classification', 'String', 'Variant classification from MAF'),
        ('Variant_Type', 'String', 'Variant type (SNP, DEL, INS, etc.)'),
        ('t_depth', 'Integer', 'Tumor sequencing depth (median)'),
        ('t_ref_count', 'Integer', 'Tumor reference allele count (median)'),
        ('t_alt_count', 'Integer', 'Tumor alternate allele count (median)'),
    ]
    for ident, vtype, desc in info_defs:
        lines.append(
            f'##INFO=<ID={ident},Number=1,Type={vtype},Description="{desc}">'
        )
    lines.append("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO")
    return '\n'.join(lines) + '\n'
def main():
    """CLI entry point: parse arguments and run the MAF-to-VCF conversion."""
    args = parse_args()
    print(f"Converting {args.input_maf} to VCF format...")
    print(f"Output: {args.output_vcf}")
    maf_to_vcf(args.input_maf, args.output_vcf)


if __name__ == '__main__':
    # Hoisting the script body into main() keeps the module importable
    # without side effects and makes the entry point reusable/testable.
    main()