-
Notifications
You must be signed in to change notification settings - Fork 4
Expand file tree
/
Copy pathmain.py
More file actions
1544 lines (1294 loc) · 56.5 KB
/
main.py
File metadata and controls
1544 lines (1294 loc) · 56.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
"""ENCODE Project MCP Server.
Exposes ENCODE REST API as Claude-compatible tools for searching experiments,
listing files, and downloading genomics data.
All data stays local. Only connects to encodeproject.org over HTTPS.
No telemetry, no analytics, no data sent elsewhere.
"""
from __future__ import annotations
import asyncio
import json
import logging
from contextlib import asynccontextmanager
from typing import Any, Literal
from mcp.server.fastmcp import FastMCP
from mcp.types import ToolAnnotations
from encode_connector.client.auth import CredentialManager
from encode_connector.client.constants import (
ASSAY_TITLES,
BIOSAMPLE_CLASSIFICATIONS,
ORGAN_SLIMS,
)
from encode_connector.client.downloader import FileDownloader
from encode_connector.client.encode_client import EncodeClient
from encode_connector.client.models import _human_size
from encode_connector.client.tracker import (
ExperimentTracker,
parse_encode_pipelines,
parse_encode_publications,
)
from encode_connector.client.validation import (
check_filter_value,
clamp_limit,
validate_accession,
validate_data_export_format,
validate_encode_path,
validate_export_format,
validate_organize_by,
validate_reference_type,
)
# Module-level logger; standard one-logger-per-module convention.
logger = logging.getLogger(__name__)
# --- Safety annotations for MCP tools ---
# These ToolAnnotations instances are attached to the @mcp.tool registrations
# below so MCP clients can reason about each tool's side effects (read-only?
# destructive? touches the network?) before invoking it.
# Read-only tools that query the ENCODE API (network access, no local writes)
_READONLY_API = ToolAnnotations(
    readOnlyHint=True,
    destructiveHint=False,
    idempotentHint=True,
    openWorldHint=True,  # reaches an external service (encodeproject.org)
)
# Read-only tools that read from local tracker DB only (no network)
_READONLY_LOCAL = ToolAnnotations(
    readOnlyHint=True,
    destructiveHint=False,
    idempotentHint=True,
    openWorldHint=False,
)
# Tools that write to local tracker DB (no network)
_WRITE_LOCAL = ToolAnnotations(
    readOnlyHint=False,
    destructiveHint=False,
    idempotentHint=True,
    openWorldHint=False,
)
# Tools that write to local tracker DB AND query ENCODE API (network)
_WRITE_LOCAL_API = ToolAnnotations(
    readOnlyHint=False,
    destructiveHint=False,
    idempotentHint=True,
    openWorldHint=True,
)
# Tools that download files to disk (network + disk writes)
_DOWNLOAD = ToolAnnotations(
    readOnlyHint=False,
    destructiveHint=False,
    idempotentHint=True,
    openWorldHint=True,
)
# Credential management (can clear credentials = destructive)
_CREDENTIAL_MGMT = ToolAnnotations(
    readOnlyHint=False,
    destructiveHint=True,  # the "clear" action removes stored credentials
    idempotentHint=True,
    openWorldHint=False,
)
def _validate_filters(
    assay_title: str | None = None,
    organ: str | None = None,
    biosample_type: str | None = None,
) -> list[str]:
    """Check common filter values against the known ENCODE vocabularies.

    Each provided (truthy) value is run through ``check_filter_value``
    against its vocabulary; any resulting warning strings are collected.
    An empty list means every supplied value was recognized.
    """
    # (value, known vocabulary, field name shown in the warning)
    checks = (
        (assay_title, ASSAY_TITLES, "assay_title"),
        (organ, ORGAN_SLIMS, "organ"),
        (biosample_type, BIOSAMPLE_CLASSIFICATIONS, "biosample_type"),
    )
    warnings: list[str] = []
    for value, vocabulary, field_name in checks:
        if not value:
            continue
        warning = check_filter_value(value, vocabulary, field_name)
        if warning:
            warnings.append(warning)
    return warnings
# Global client instances (managed via lifespan)
# All of these stay None until lifespan() runs; tools access them through
# _get_client()/_get_downloader(), which raise if the server was not started.
_client: EncodeClient | None = None
_downloader: FileDownloader | None = None
_credential_manager = CredentialManager()
_tracker: ExperimentTracker | None = None
# Serializes access to _client, including its replacement when credentials
# are stored or cleared (see encode_manage_credentials).
_client_lock: asyncio.Lock | None = None
def _get_client_lock() -> asyncio.Lock:
    """Return the shared client lock, creating it lazily on first use.

    Normally the lock is created eagerly by ``lifespan``; this fallback
    exists so callers outside that flow still get a lock in the current
    event loop.
    """
    global _client_lock
    if _client_lock is not None:
        return _client_lock
    _client_lock = asyncio.Lock()
    return _client_lock
@asynccontextmanager
async def lifespan(server: FastMCP):
    """Manage client lifecycle.

    Creates the shared HTTP client, file downloader, and experiment tracker
    when the MCP server starts, and closes the client and tracker when it
    shuts down.
    """
    global _client, _downloader, _tracker, _client_lock
    # Initialize lock eagerly in lifespan so it's always bound to the
    # correct event loop, rather than lazy-creating in _get_client_lock()
    _client_lock = asyncio.Lock()
    _client = EncodeClient(credential_manager=_credential_manager)
    _downloader = FileDownloader(credential_manager=_credential_manager)
    _tracker = ExperimentTracker()
    try:
        yield
    finally:
        # Teardown only closes the client (async) and tracker (sync);
        # no close is performed on the downloader here.
        if _client:
            await _client.close()
        if _tracker:
            _tracker.close()
# The FastMCP server instance. Tool functions below register themselves via
# @mcp.tool; the `instructions` string is surfaced to MCP clients as the
# server description.
mcp = FastMCP(
    "ENCODE Project",
    instructions=(
        "Query and download genomics data from the ENCODE Project (encodeproject.org). "
        "Search experiments by assay type, organism, organ, biosample, target, and more. "
        "List and download files (FASTQ, BAM, BED, bigWig, etc.). "
        "All data stays local - no telemetry or external data sharing."
    ),
    lifespan=lifespan,
)
async def _get_client() -> EncodeClient:
    """Return the shared EncodeClient, failing fast if lifespan has not run."""
    async with _get_client_lock():
        client = _client
        if client is None:
            raise RuntimeError("ENCODE client not initialized")
        return client
def _get_downloader() -> FileDownloader:
    """Return the shared FileDownloader, failing fast if lifespan has not run."""
    downloader = _downloader
    if downloader is None:
        raise RuntimeError("File downloader not initialized")
    return downloader
def _serialize(obj: Any) -> Any:
    """Recursively convert Pydantic models (and containers of them) to plain data.

    Anything exposing ``model_dump()`` is dumped as-is; dicts and lists are
    walked recursively; every other value passes through unchanged.
    """
    if hasattr(obj, "model_dump"):
        return obj.model_dump()
    if isinstance(obj, dict):
        return {key: _serialize(value) for key, value in obj.items()}
    if isinstance(obj, list):
        return [_serialize(element) for element in obj]
    return obj
# ======================================================================
# Tool 1: Search Experiments
# ======================================================================
@mcp.tool(annotations=_READONLY_API, title="Search ENCODE Experiments")
async def encode_search_experiments(
    assay_title: str | None = None,
    organism: str = "Homo sapiens",
    organ: str | None = None,
    biosample_type: str | None = None,
    biosample_term_name: str | None = None,
    target: str | None = None,
    status: str = "released",
    lab: str | None = None,
    award: str | None = None,
    assembly: str | None = None,
    replication_type: str | None = None,
    life_stage: str | None = None,
    sex: str | None = None,
    treatment: str | None = None,
    genetic_modification: str | None = None,
    perturbed: bool | None = None,
    search_term: str | None = None,
    date_released_from: str | None = None,
    date_released_to: str | None = None,
    limit: int = 25,
    offset: int = 0,
) -> str:
    """Search ENCODE experiments with comprehensive filters.

    Examples:
    - Find all Histone ChIP-seq on human pancreas tissue:
      assay_title="Histone ChIP-seq", organ="pancreas", biosample_type="tissue"
    - Find ATAC-seq on human brain:
      assay_title="ATAC-seq", organ="brain"
    - Find RNA-seq on GM12878 cell line:
      assay_title="total RNA-seq", biosample_term_name="GM12878"
    - Find ChIP-seq targeting H3K27me3:
      assay_title="Histone ChIP-seq", target="H3K27me3"
    - Find all mouse liver experiments:
      organism="Mus musculus", organ="liver"
    - Free text search:
      search_term="CRISPR screen pancreatic"

    Common assay_title values: "Histone ChIP-seq", "TF ChIP-seq", "ATAC-seq",
    "DNase-seq", "total RNA-seq", "polyA plus RNA-seq", "WGBS", "intact Hi-C",
    "CUT&RUN", "CUT&Tag", "STARR-seq", "MPRA", "eCLIP", "CRISPR screen"
    Common organ values: "pancreas", "liver", "brain", "heart", "kidney",
    "lung", "intestine", "skin of body", "blood", "spleen", "thymus"
    biosample_type values: "tissue", "cell line", "primary cell",
    "in vitro differentiated cells", "organoid"

    WHEN TO USE: Use as the primary entry point when users want to find experiments.
    Start with encode_get_facets if unsure what filters to use.
    RELATED TOOLS: encode_get_facets, encode_get_metadata, encode_search_files

    Args:
        assay_title: Assay type (e.g., "Histone ChIP-seq", "ATAC-seq", "total RNA-seq")
        organism: Species (default: "Homo sapiens"). Also: "Mus musculus"
        organ: Organ/tissue system (e.g., "pancreas", "brain", "liver")
        biosample_type: Sample classification ("tissue", "cell line", "primary cell", "organoid")
        biosample_term_name: Specific cell/tissue name (e.g., "GM12878", "HepG2", "pancreas")
        target: ChIP/CUT&RUN target (e.g., "H3K27me3", "CTCF", "p300")
        status: Data status (default: "released"). Also: "archived", "revoked"
        lab: Submitting lab name
        award: Funding project
        assembly: Genome assembly (e.g., "GRCh38", "mm10")
        replication_type: "isogenic", "anisogenic", or "unreplicated"
        life_stage: "embryonic", "postnatal", "child", "adult"
        sex: "male", "female", "mixed"
        treatment: Treatment name if perturbation experiment
        genetic_modification: Modification type ("CRISPR", "RNAi")
        perturbed: True for perturbation experiments only
        search_term: Free text search across all fields
        date_released_from: Start date (YYYY-MM-DD) for date range filter
        date_released_to: End date (YYYY-MM-DD) for date range filter
        limit: Max results to return (default 25, use larger for comprehensive searches)
        offset: Skip first N results (for pagination)

    Returns:
        JSON with experiment results, total count, and pagination info.
    """
    # NOTE: this docstring doubles as the MCP tool description shown to
    # clients, so keep it in sync with the actual parameters.
    client = await _get_client()
    # Normalize the requested page size through the shared validation helper.
    limit = clamp_limit(limit)
    # Soft validation: unrecognized filter values produce warnings, not errors.
    filter_warnings = _validate_filters(assay_title, organ, biosample_type)
    result = await client.search_experiments(
        assay_title=assay_title,
        organism=organism,
        organ=organ,
        biosample_type=biosample_type,
        biosample_term_name=biosample_term_name,
        target=target,
        status=status,
        lab=lab,
        award=award,
        assembly=assembly,
        replication_type=replication_type,
        life_stage=life_stage,
        sex=sex,
        treatment=treatment,
        genetic_modification=genetic_modification,
        perturbed=perturbed,
        search_term=search_term,
        date_released_from=date_released_from,
        date_released_to=date_released_to,
        limit=limit,
        offset=offset,
    )
    total = result.get("total", 0)
    serialized = _serialize(result)
    # Pagination hints so the caller can page through results with offset.
    serialized["has_more"] = total > (offset + limit)
    serialized["next_offset"] = offset + limit if total > (offset + limit) else None
    if filter_warnings:
        serialized["filter_warnings"] = filter_warnings
    if not result.get("results"):
        # Empty result: steer the caller toward facet discovery instead of
        # blind retries with the same filters.
        serialized["suggestion"] = (
            "Try broadening your search filters. Use encode_get_facets to see what data is available for your criteria."
        )
    return json.dumps(serialized, indent=2)
# ======================================================================
# Tool 2: Get Experiment Details
# ======================================================================
@mcp.tool(annotations=_READONLY_API, title="Get Experiment Details")
async def encode_get_experiment(accession: str) -> str:
    """Get full details for a specific ENCODE experiment by accession ID.

    Returns complete experiment metadata including all associated files,
    quality metrics, controls, replicate information, and audit status.

    WHEN TO USE: Use when you have a specific accession and need full details
    including files, quality metrics, and audit status.
    RELATED TOOLS: encode_list_files, encode_track_experiment, encode_compare_experiments

    Args:
        accession: ENCODE experiment accession (e.g., "ENCSR133RZO", "ENCSR000AKS")

    Returns:
        JSON with full experiment details and file listing.
    """
    # Reject malformed accessions before any network round-trip.
    validate_accession(accession)
    client = await _get_client()
    result = await client.get_experiment(accession)
    return json.dumps(_serialize(result), indent=2)
# ======================================================================
# Tool 3: List Files for Experiment
# ======================================================================
@mcp.tool(annotations=_READONLY_API, title="List Experiment Files")
async def encode_list_files(
    experiment_accession: str,
    file_format: str | None = None,
    file_type: str | None = None,
    output_type: str | None = None,
    output_category: str | None = None,
    assembly: str | None = None,
    status: str | None = None,
    preferred_default: bool | None = None,
    limit: int = 200,
) -> str:
    """List all files for a specific ENCODE experiment, with optional filters.

    Examples:
    - All BED files: experiment_accession="ENCSR133RZO", file_format="bed"
    - FASTQs only: experiment_accession="ENCSR133RZO", file_format="fastq"
    - Signal tracks: experiment_accession="ENCSR133RZO", output_category="signal"
    - Default/recommended files: preferred_default=True
    - Peaks from GRCh38: file_format="bed", output_type="IDR thresholded peaks", assembly="GRCh38"

    Common file_format values: "fastq", "bam", "bed", "bigWig", "bigBed", "tsv", "hic"
    Common output_type values: "reads", "alignments", "signal of unique reads",
    "signal of all reads", "fold change over control", "IDR thresholded peaks",
    "pseudoreplicated peaks", "replicated peaks", "gene quantifications",
    "transcript quantifications", "contact matrix"

    WHEN TO USE: Use to browse files within a known experiment. Use encode_search_files
    instead to find files across experiments.
    RELATED TOOLS: encode_search_files, encode_get_file_info, encode_download_files

    Args:
        experiment_accession: ENCODE experiment accession (e.g., "ENCSR133RZO")
        file_format: Filter by format ("fastq", "bam", "bed", "bigWig", "bigBed", etc.)
        file_type: Filter by specific type ("bed narrowPeak", "bed broadPeak", etc.)
        output_type: Filter by output type ("reads", "peaks", "signal", etc.)
        output_category: Filter by category ("raw data", "alignment", "signal", "annotation")
        assembly: Filter by genome assembly ("GRCh38", "hg19", "mm10")
        status: Filter by status ("released", "archived", "in progress")
        preferred_default: If True, return only default/recommended files
        limit: Max files to return (default 200)

    Returns:
        JSON list of files with accession, format, size, download URL, and metadata.
    """
    # Validate inputs before touching the network.
    validate_accession(experiment_accession)
    limit = clamp_limit(limit)
    client = await _get_client()
    # Pure pass-through to the client; filtering happens server-side/in client.
    results = await client.list_files(
        experiment_accession=experiment_accession,
        file_format=file_format,
        file_type=file_type,
        output_type=output_type,
        output_category=output_category,
        assembly=assembly,
        status=status,
        preferred_default=preferred_default,
        limit=limit,
    )
    return json.dumps(_serialize(results), indent=2)
# ======================================================================
# Tool 4: Search Files Across Experiments
# ======================================================================
@mcp.tool(annotations=_READONLY_API, title="Search Files Across Experiments")
async def encode_search_files(
    file_format: str | None = None,
    file_type: str | None = None,
    output_type: str | None = None,
    output_category: str | None = None,
    assembly: str | None = None,
    assay_title: str | None = None,
    organism: str | None = None,
    organ: str | None = None,
    biosample_type: str | None = None,
    target: str | None = None,
    status: str = "released",
    preferred_default: bool | None = None,
    search_term: str | None = None,
    limit: int = 25,
    offset: int = 0,
) -> str:
    """Search files across ALL experiments with combined experiment + file filters.

    This is powerful for finding specific file types across many experiments.

    Examples:
    - All BED files from human pancreas ChIP-seq:
      file_format="bed", assay_title="Histone ChIP-seq", organ="pancreas"
    - FASTQs from mouse liver RNA-seq:
      file_format="fastq", assay_title="total RNA-seq", organ="liver", organism="Mus musculus"
    - All IDR peak files for H3K27me3:
      output_type="IDR thresholded peaks", target="H3K27me3"
    - BigWig signal tracks from ATAC-seq on brain tissue:
      file_format="bigWig", assay_title="ATAC-seq", organ="brain", biosample_type="tissue"

    WHEN TO USE: Use to find specific file types across ALL experiments. More powerful
    than encode_list_files for cross-experiment file discovery.
    RELATED TOOLS: encode_list_files, encode_batch_download, encode_get_file_info

    Args:
        file_format: File format ("fastq", "bam", "bed", "bigWig", etc.)
        file_type: Specific file type ("bed narrowPeak", "bed broadPeak", etc.)
        output_type: Output type ("reads", "peaks", "signal", etc.)
        output_category: Output category ("raw data", "alignment", "signal", "annotation")
        assembly: Genome assembly ("GRCh38", "hg19", "mm10")
        assay_title: Filter by assay type of parent experiment
        organism: Filter by organism of parent experiment
        organ: Filter by organ of parent experiment
        biosample_type: Filter by biosample type ("tissue", "cell line", etc.)
        target: Filter by ChIP/CUT&RUN target
        status: File status (default: "released")
        preferred_default: If True, only default/recommended files
        search_term: Free text search
        limit: Max results (default 25)
        offset: Skip first N results (pagination)

    Returns:
        JSON with file results, total count, and pagination info.
    """
    client = await _get_client()
    # Normalize the requested page size through the shared validation helper.
    limit = clamp_limit(limit)
    # Soft validation: unrecognized filter values produce warnings, not errors.
    filter_warnings = _validate_filters(assay_title, organ, biosample_type)
    result = await client.search_files(
        file_format=file_format,
        file_type=file_type,
        output_type=output_type,
        output_category=output_category,
        assembly=assembly,
        assay_title=assay_title,
        organism=organism,
        organ=organ,
        biosample_type=biosample_type,
        target=target,
        status=status,
        preferred_default=preferred_default,
        search_term=search_term,
        limit=limit,
        offset=offset,
    )
    total = result.get("total", 0)
    serialized = _serialize(result)
    # Pagination hints so the caller can page through results with offset.
    serialized["has_more"] = total > (offset + limit)
    serialized["next_offset"] = offset + limit if total > (offset + limit) else None
    if filter_warnings:
        serialized["filter_warnings"] = filter_warnings
    if not result.get("results"):
        # Empty result: point the caller at the metadata tool for valid values.
        serialized["suggestion"] = (
            "Verify assembly and file_format values. Use encode_get_metadata('file_formats') to see valid options."
        )
    return json.dumps(serialized, indent=2)
# ======================================================================
# Tool 5: Download Files
# ======================================================================
@mcp.tool(annotations=_DOWNLOAD, title="Download ENCODE Files")
async def encode_download_files(
    file_accessions: list[str],
    download_dir: str,
    organize_by: Literal["flat", "experiment", "format", "experiment_format"] = "flat",
    verify_md5: bool = True,
) -> str:
    """Download specific ENCODE files by accession to a local directory.

    Downloads files from ENCODE to your local machine. Supports MD5 verification,
    concurrent downloads, and skip-if-already-downloaded.

    WHEN TO USE: Use for downloading specific files by accession. For bulk downloads,
    prefer encode_batch_download.
    RELATED TOOLS: encode_batch_download, encode_search_files, encode_log_derived_file

    Args:
        file_accessions: List of file accessions to download (e.g., ["ENCFF635JIA", "ENCFF388RZD"])
        download_dir: Local directory path to save files (e.g., "./data/encode")
        organize_by: How to organize downloaded files:
            - "flat": All files in download_dir (default)
            - "experiment": download_dir/ENCSR.../filename
            - "format": download_dir/bed/filename
            - "experiment_format": download_dir/ENCSR.../bed/filename
        verify_md5: Verify file integrity with MD5 checksum (default True)

    Returns:
        JSON with download results for each file (path, size, success/error, MD5 status).
    """
    validate_organize_by(organize_by)
    # Validate every accession up front so a single bad one fails the call
    # before any network work starts.
    for acc in file_accessions:
        validate_accession(acc)
    client = await _get_client()
    downloader = _get_downloader()
    # Get file info for each accession
    # Metadata lookups that fail are recorded as per-file errors rather than
    # aborting the whole batch.
    file_infos = []
    errors = []
    for acc in file_accessions:
        try:
            info = await client.get_file_info(acc)
            file_infos.append(info)
        except Exception as e:
            errors.append({"accession": acc, "error": str(e)})
    # Download all files
    results = await downloader.download_batch(file_infos, download_dir, organize_by, verify_md5)
    output = {
        "downloaded": _serialize(results),
        "errors": errors,
        "summary": {
            "total_requested": len(file_accessions),
            "successful": sum(1 for r in results if r.success),
            # Failures = downloads that errored plus accessions whose metadata
            # lookup failed above.
            "failed": sum(1 for r in results if not r.success) + len(errors),
            "total_size": sum(r.file_size for r in results if r.success),
            "total_size_human": _human_size(sum(r.file_size for r in results if r.success)),
        },
    }
    return json.dumps(output, indent=2)
# ======================================================================
# Tool 6: Get Metadata / Filter Values
# ======================================================================
@mcp.tool(annotations=_READONLY_API, title="Get Filter Values")
async def encode_get_metadata(
    metadata_type: Literal[
        "assays",
        "organisms",
        "organs",
        "biosample_types",
        "file_formats",
        "output_types",
        "output_categories",
        "assemblies",
        "life_stages",
        "replication_types",
        "statuses",
        "file_statuses",
    ],
) -> str:
    """Get available filter values for ENCODE searches.

    Use this to discover valid values for search parameters.

    WHEN TO USE: Use to discover valid filter values before searching. Helps prevent
    typos in assay_title, organ, biosample_type etc.
    RELATED TOOLS: encode_get_facets, encode_search_experiments

    Args:
        metadata_type: Type of metadata to retrieve. Options:
            - "assays": Available assay types (Histone ChIP-seq, ATAC-seq, total RNA-seq, etc.)
            - "organisms": Available organisms (Homo sapiens, Mus musculus, etc.)
            - "organs": Available organ/tissue systems (pancreas, brain, liver, etc.)
            - "biosample_types": Biosample classifications (tissue, cell line, primary cell, etc.)
            - "file_formats": File format types (fastq, bam, bed, bigWig, etc.)
            - "output_types": Output data types (reads, peaks, signal, etc.)
            - "output_categories": Output categories (raw data, alignment, signal, etc.)
            - "assemblies": Genome assemblies (GRCh38, hg19, mm10, etc.)
            - "life_stages": Life stages (embryonic, adult, child, etc.)
            - "replication_types": Replication types (isogenic, anisogenic, unreplicated)
            - "statuses": Experiment statuses (released, archived, etc.)
            - "file_statuses": File statuses (released, archived, in progress, etc.)

    Returns:
        JSON list of valid values for the specified metadata type.
    """
    client = await _get_client()
    try:
        # NOTE: get_metadata is called synchronously here (no await) — the
        # client exposes it as a plain method.
        values = client.get_metadata(metadata_type)
        return json.dumps({"metadata_type": metadata_type, "values": values, "count": len(values)}, indent=2)
    except ValueError as e:
        # Unknown metadata_type is reported as a structured error, not raised.
        return json.dumps({"error": str(e)}, indent=2)
# ======================================================================
# Tool 7: Batch Download from Search
# ======================================================================
@mcp.tool(annotations=_DOWNLOAD, title="Batch Search and Download")
async def encode_batch_download(
    download_dir: str,
    file_format: str | None = None,
    output_type: str | None = None,
    output_category: str | None = None,
    assembly: str | None = None,
    assay_title: str | None = None,
    organism: str = "Homo sapiens",
    organ: str | None = None,
    biosample_type: str | None = None,
    target: str | None = None,
    preferred_default: bool | None = None,
    organize_by: Literal["flat", "experiment", "format", "experiment_format"] = "experiment",
    verify_md5: bool = True,
    limit: int = 100,
    dry_run: bool = True,
) -> str:
    """Search for files and download them all in batch.

    First searches for files matching the criteria, then downloads them.
    By default runs in dry_run mode to preview what would be downloaded.
    Set dry_run=False to actually download.

    WHEN TO USE: Use for searching and downloading files in one step. Always use
    dry_run=True first to preview. For specific file accessions, use encode_download_files.
    RELATED TOOLS: encode_download_files, encode_search_files

    Examples:
    - Download all BED files from human pancreas ChIP-seq:
      file_format="bed", assay_title="Histone ChIP-seq", organ="pancreas",
      download_dir="/data/encode", dry_run=False
    - Preview FASTQ downloads for mouse brain RNA-seq:
      file_format="fastq", assay_title="total RNA-seq", organ="brain",
      organism="Mus musculus", download_dir="/data/encode"
    - Download IDR peaks for H3K27me3 in GRCh38:
      output_type="IDR thresholded peaks", target="H3K27me3", assembly="GRCh38",
      download_dir="/data/encode", dry_run=False

    Args:
        download_dir: Local directory to save files
        file_format: File format filter ("fastq", "bam", "bed", "bigWig", etc.)
        output_type: Output type filter ("reads", "peaks", "signal", etc.)
        output_category: Output category ("raw data", "alignment", "annotation", etc.)
        assembly: Genome assembly ("GRCh38", "mm10", etc.)
        assay_title: Assay type ("Histone ChIP-seq", "ATAC-seq", "total RNA-seq", etc.)
        organism: Organism (default: "Homo sapiens")
        organ: Organ/tissue ("pancreas", "brain", "liver", etc.)
        biosample_type: Biosample type ("tissue", "cell line", "primary cell", etc.)
        target: ChIP/CUT&RUN target ("H3K27me3", "CTCF", etc.)
        preferred_default: If True, only download default/recommended files
        organize_by: File organization ("flat", "experiment", "format", "experiment_format")
        verify_md5: Verify downloads with MD5 checksums (default True)
        limit: Max files to download (default 100, safety limit)
        dry_run: If True (default), only preview what would be downloaded. Set False to download.

    Returns:
        JSON with download preview (dry_run=True) or download results (dry_run=False).
    """
    client = await _get_client()
    downloader = _get_downloader()
    validate_organize_by(organize_by)
    limit = clamp_limit(limit)
    # Soft validation: unrecognized filter values produce warnings, not errors.
    filter_warnings = _validate_filters(assay_title, organ, biosample_type)
    # Search for files
    # Only released files are ever downloaded by this tool (status fixed below).
    search_result = await client.search_files(
        file_format=file_format,
        output_type=output_type,
        output_category=output_category,
        assembly=assembly,
        assay_title=assay_title,
        organism=organism,
        organ=organ,
        biosample_type=biosample_type,
        target=target,
        status="released",
        preferred_default=preferred_default,
        limit=limit,
    )
    files = search_result["results"]
    if not files:
        empty_result = {
            "message": "No files found matching the search criteria.",
            "total": 0,
            "has_more": False,
            "next_offset": None,
            "suggestion": "Try broadening your search filters. Use encode_get_facets to see what data is available for your criteria.",
        }
        if filter_warnings:
            empty_result["filter_warnings"] = filter_warnings
        return json.dumps(empty_result, indent=2)
    if dry_run:
        # Preview mode
        search_total = search_result["total"]
        preview = downloader.preview_downloads(files, download_dir, organize_by)
        preview["message"] = (
            f"Found {preview['file_count']} files ({preview['total_size_human']}). Set dry_run=False to download."
        )
        preview["search_total"] = search_total
        # NOTE(review): this tool takes no offset parameter, so has_more/
        # next_offset are informational only — confirm intended usage.
        preview["has_more"] = search_total > limit
        preview["next_offset"] = limit if search_total > limit else None
        if filter_warnings:
            preview["filter_warnings"] = filter_warnings
        return json.dumps(_serialize(preview), indent=2)
    # Actually download
    results = await downloader.download_batch(files, download_dir, organize_by, verify_md5)
    search_total = search_result["total"]
    output = {
        "downloaded": _serialize(results),
        "summary": {
            "total_found": search_total,
            "total_downloaded": len(results),
            "successful": sum(1 for r in results if r.success),
            "failed": sum(1 for r in results if not r.success),
            "total_size": sum(r.file_size for r in results if r.success),
            "total_size_human": _human_size(sum(r.file_size for r in results if r.success)),
        },
        "has_more": search_total > limit,
        "next_offset": limit if search_total > limit else None,
    }
    if filter_warnings:
        output["filter_warnings"] = filter_warnings
    return json.dumps(output, indent=2)
# ======================================================================
# Tool 8: Store/Manage Credentials
# ======================================================================
@mcp.tool(annotations=_CREDENTIAL_MGMT, title="Manage API Credentials")
async def encode_manage_credentials(
    action: Literal["store", "check", "clear"],
    access_key: str | None = None,
    secret_key: str | None = None,
) -> str:
    """Manage ENCODE API credentials for accessing restricted/unreleased data.

    Most ENCODE data is public and requires no authentication.
    Credentials are only needed for unreleased or restricted datasets.
    Credentials are stored securely in your OS keyring (macOS Keychain,
    Linux Secret Service, Windows Credential Locker) and never in plaintext.

    WHEN TO USE: Use only for accessing unreleased/restricted ENCODE data.
    Public data requires no authentication.
    RELATED TOOLS: encode_search_experiments

    Args:
        action: What to do:
            - "store": Save new credentials (requires access_key and secret_key)
            - "check": Check if credentials are configured
            - "clear": Remove stored credentials
        access_key: Your ENCODE access key (only for action="store")
        secret_key: Your ENCODE secret key (only for action="store")

    Returns:
        JSON with action result.
    """

    async def _reset_client() -> None:
        # Recreate the shared client under the lock so it picks up the current
        # credential state. Declaring `global` here (instead of mid-branch in
        # the caller, as before) removes the used-before-def type ignores.
        global _client
        async with _get_client_lock():
            if _client:
                await _client.close()
            _client = EncodeClient(credential_manager=_credential_manager)

    if action == "store":
        if not access_key or not secret_key:
            return json.dumps(
                {
                    "error": "Both access_key and secret_key are required to store credentials.",
                    "help": "Get your access key pair from your ENCODE profile at https://www.encodeproject.org/",
                },
                indent=2,
            )
        location = _credential_manager.store_credentials(access_key, secret_key)
        # Reset client to pick up new credentials
        await _reset_client()
        return json.dumps(
            {
                "success": True,
                "message": f"Credentials stored securely in: {location}",
                "note": "Credentials are encrypted and never stored in plaintext.",
            },
            indent=2,
        )
    if action == "check":
        has_creds = _credential_manager.has_credentials
        return json.dumps(
            {
                "credentials_configured": has_creds,
                "message": (
                    "Credentials are configured. You can access restricted data."
                    if has_creds
                    else "No credentials configured. You can still access all public ENCODE data. "
                    "Use action='store' with your ENCODE access key pair to access restricted data."
                ),
            },
            indent=2,
        )
    if action == "clear":
        _credential_manager.clear_credentials()
        # Reset client so the cleared credentials take effect immediately.
        await _reset_client()
        return json.dumps(
            {
                "success": True,
                "message": "All stored credentials have been removed.",
            },
            indent=2,
        )
    # Unreachable when called through MCP (Literal-validated), but kept as a
    # defensive fallback for direct calls.
    return json.dumps(
        {
            "error": f"Unknown action: {action}. Use 'store', 'check', or 'clear'.",
        },
        indent=2,
    )
# ======================================================================
# Tool 9: Get Live Facets (Dynamic Filter Discovery)
# ======================================================================
@mcp.tool(annotations=_READONLY_API, title="Explore Available Data")
async def encode_get_facets(
    search_type: str = "Experiment",
    assay_title: str | None = None,
    organism: str | None = None,
    organ: str | None = None,
    biosample_type: str | None = None,
) -> str:
    """Get live filter counts from ENCODE to discover what data is available.

    Returns faceted counts showing how many experiments/files exist for each
    filter value. Useful for exploring what's available before searching.

    WHEN TO USE: Use to explore what data exists before searching. Shows counts
    per filter value. Best first step for unknown datasets.

    RELATED TOOLS: encode_get_metadata, encode_search_experiments

    Examples:
        - What assays are available for pancreas?
          organ="pancreas"
        - What organs have Histone ChIP-seq data?
          assay_title="Histone ChIP-seq"
        - What targets are available for mouse brain ChIP-seq?
          assay_title="Histone ChIP-seq", organism="Mus musculus", organ="brain"

    Args:
        search_type: Object type ("Experiment" or "File")
        assay_title: Pre-filter by assay type
        organism: Pre-filter by organism
        organ: Pre-filter by organ
        biosample_type: Pre-filter by biosample type

    Returns:
        JSON with facet names and their term counts.
    """
    client = await _get_client()
    filter_warnings = _validate_filters(assay_title, organ, biosample_type)

    # Map each provided argument onto its ENCODE query field, skipping blanks.
    field_values = (
        ("assay_title", assay_title),
        ("replicates.library.biosample.donor.organism.scientific_name", organism),
        ("biosample_ontology.organ_slims", organ),
        ("biosample_ontology.classification", biosample_type),
    )
    filters = {field: value for field, value in field_values if value}

    facets = await client.search_facets(search_type=search_type, **filters)

    # Simplify output: drop facets with an unwieldy number of terms (> 200)
    # and cap the rest at 50 terms apiece.
    result: dict = {
        field: terms[:50] for field, terms in facets.items() if len(terms) <= 200
    }
    if filter_warnings:
        result["filter_warnings"] = filter_warnings
    return json.dumps(result, indent=2)
# ======================================================================
# Tool 10: Get File Info
# ======================================================================
@mcp.tool(annotations=_READONLY_API, title="Get File Details")
async def encode_get_file_info(accession: str) -> str:
    """Get detailed information about a specific ENCODE file.

    WHEN TO USE: Use when you need detailed metadata for a specific file
    (size, md5, assembly, biological replicate info).

    RELATED TOOLS: encode_download_files, encode_list_files

    Args:
        accession: File accession ID (e.g., "ENCFF635JIA")

    Returns:
        JSON with file metadata including format, size, download URL, MD5, assembly, etc.
    """
    # Reject malformed accessions before touching the network.
    validate_accession(accession)
    encode_client = await _get_client()
    file_record = await encode_client.get_file_info(accession)
    return json.dumps(_serialize(file_record), indent=2)
# ======================================================================
# Tracker helper
# ======================================================================
def _get_tracker() -> ExperimentTracker:
    """Return the module-level experiment tracker, failing fast if unset."""
    tracker = _tracker
    if tracker is None:
        raise RuntimeError("Experiment tracker not initialized")
    return tracker
# ======================================================================
# Tool 11: Track Experiment
# ======================================================================
@mcp.tool(annotations=_WRITE_LOCAL_API, title="Track Experiment Locally")
async def encode_track_experiment(
accession: str,
fetch_publications: bool = True,
fetch_pipelines: bool = True,
notes: str = "",
) -> str:
"""Track an ENCODE experiment locally with its publications, methods, and pipeline info.
Fetches full experiment metadata from ENCODE and stores it in a local SQLite
database along with any associated publications (PMIDs, DOIs, authors, journal)
and pipeline/analysis information (software versions, methods).
This is like adding an experiment to your "library" - similar to Endnote for papers.
WHEN TO USE: Use to save an experiment to your local library with publications
and pipeline info. Required before compare or citations.
RELATED TOOLS: encode_compare_experiments, encode_get_citations, encode_export_data
Args:
accession: ENCODE experiment accession (e.g., "ENCSR133RZO")
fetch_publications: Also fetch and store publications/citations (default True)
fetch_pipelines: Also fetch and store pipeline/analysis info (default True)
notes: Optional notes to attach to this experiment
Returns:
JSON with tracking result including publications and pipeline info found.