Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
39 changes: 21 additions & 18 deletions app/api/crud.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
from . import sparql_models
from . import utility as util
from .env_settings import settings
from .models import QueryModel, SessionResponse
from .models import DataElementURI, QueryModel, SessionResponse

ALL_SUBJECT_ATTRIBUTES = list(SessionResponse.model_fields.keys()) + [
"dataset_uuid",
Expand Down Expand Up @@ -415,7 +415,8 @@ async def get_terms(
dict
Dictionary where the key is the Neurobagel class and the value is a list of dictionaries
corresponding to the available (i.e. used) instances of that class in the graph. Each instance dictionary
has two items: the 'TermURL' and the human-readable 'Label' for the term.
contains the 'TermURL' and the human-readable 'Label' for the term, and may include additional
metadata fields (e.g., 'abbreviation', 'data_type' for imaging modalities) when available.
"""
db_results = await post_query_to_graph(
util.create_terms_query(data_element_URI)
Expand All @@ -424,7 +425,7 @@ async def get_terms(
if std_trm_vocab is None:
std_trm_vocab = []

term_label_dicts = []
term_metadata = []
for result in db_results:
term_url = result["termURL"]
# First, check whether the found instance of the standardized variable contains a recognized namespace
Expand All @@ -443,29 +444,31 @@ async def get_terms(
),
[],
)
term_label = next(
(
term["name"]
for term in namespace_terms
if term["id"] == term_id
),
matched_term = next(
(term for term in namespace_terms if term["id"] == term_id),
None,
)
term_label_dicts.append(
{
"TermURL": util.replace_namespace_uri_with_prefix(
term_url
),
"Label": term_label,
}
)
term_entry = {
"TermURL": util.replace_namespace_uri_with_prefix(term_url),
"Label": matched_term.get("name") if matched_term else None,
}
if data_element_URI == DataElementURI.image.value:
term_entry["Abbreviation"] = (
matched_term.get("abbreviation", None)
if matched_term
else None
)
term_entry["DataType"] = (
matched_term.get("data_type") if matched_term else None
)
term_metadata.append(term_entry)
else:
warnings.warn(
f"The controlled term {term_url} was found in the graph but does not come from a vocabulary recognized by Neurobagel."
"This term will be ignored."
)

term_instances = {data_element_URI: term_label_dicts}
term_instances = {data_element_URI: term_metadata}

return term_instances

Expand Down
1 change: 1 addition & 0 deletions app/api/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -142,6 +142,7 @@ class DataElementURI(str, Enum):

assessment = "nb:Assessment"
diagnosis = "nb:Diagnosis"
image = "nb:Image"


class StandardizedTermVocabularyNamespace(BaseModel):
Expand Down
22 changes: 22 additions & 0 deletions app/api/routers/imaging_modalities.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
from fastapi import APIRouter

from ..models import DataElementURI, StandardizedTermVocabularyResponse
from . import route_factory

# Every route in this module serves the imaging-modality (nb:Image) data element.
_IMAGE_URI = DataElementURI.image.value

router = APIRouter(prefix="/imaging-modalities", tags=["imaging-modalities"])

# GET /imaging-modalities: instances of imaging modalities found in the graph.
router.add_api_route(
    path="",
    endpoint=route_factory.create_get_instances_handler(
        data_element_uri=_IMAGE_URI
    ),
    methods=["GET"],
)
# GET /imaging-modalities/vocab: the standardized term vocabulary for imaging modalities.
router.add_api_route(
    path="/vocab",
    endpoint=route_factory.create_get_vocab_handler(
        data_element_uri=_IMAGE_URI
    ),
    methods=["GET"],
    response_model=StandardizedTermVocabularyResponse,
)
17 changes: 17 additions & 0 deletions app/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
attributes,
datasets,
diagnoses,
imaging_modalities,
pipelines,
query,
subjects,
Expand Down Expand Up @@ -104,6 +105,21 @@ def fetch_vocabularies(config_name: str) -> dict:
)
all_std_trm_vocabs[var_uri] = std_trm_vocab

# The imaging modalities vocab is not configurable but is still an external file we need to fetch.
# Since it is not configurable across communities, the vocab file is not listed in config.json under a standardized variable.
# So, for now we always fetch it from the Neurobagel config directory.
# TODO revisit the prefix for this specific variable once we support custom standardized variables.
imaging_vocab_uri = f"{std_var_config['namespace_prefix']}:Image"
imaging_vocab_url = util.create_gh_raw_content_url(
env_settings.NEUROBAGEL_CONFIG_REPO,
"configs/Neurobagel/imaging_modalities.json",
)
imaging_vocab = util.request_data(
imaging_vocab_url,
f"Failed to fetch standardized term vocabulary for {imaging_vocab_uri}.",
)
all_std_trm_vocabs[imaging_vocab_uri] = imaging_vocab

return all_std_trm_vocabs


Expand Down Expand Up @@ -282,6 +298,7 @@ def overridden_redoc(request: Request):
app.include_router(attributes.router)
app.include_router(assessments.router)
app.include_router(diagnoses.router)
app.include_router(imaging_modalities.router)
app.include_router(pipelines.router)

# Automatically start uvicorn server on execution of main.py
Expand Down
102 changes: 102 additions & 0 deletions tests/test_attribute_factory_routes.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
import pytest

from app.api import env_settings
from app.api.models import DataElementURI


def test_get_instances_endpoint_with_vocab_lookup(
Expand Down Expand Up @@ -88,6 +89,90 @@ async def mock_httpx_post(self, **kwargs):
}


def test_get_imaging_modalities_with_vocab_lookup(
    test_app,
    monkeypatch,
    disable_auth,
    mock_context,
):
    """
    Given a GET request to /imaging-modalities, test that the endpoint returns graph instances
    with labels and imaging-specific metadata (abbreviation, data_type) from the vocabulary.
    """
    # Vocabulary fixture: two recognized nidm terms with imaging-specific metadata.
    imaging_vocab = [
        {
            "namespace_prefix": "nidm",
            "namespace_url": "http://purl.org/nidash/nidm#",
            "vocabulary_name": "Test vocabulary of imaging modalities",
            "version": "1.0.0",
            "terms": [
                {
                    "id": "T1Weighted",
                    "name": "T1-weighted image",
                    "abbreviation": "T1w",
                    "data_type": "anat",
                },
                {
                    "id": "FlowWeighted",
                    "name": "Blood-Oxygen-Level Dependent image",
                    "abbreviation": "bold",
                    "data_type": "func",
                },
            ],
        }
    ]
    monkeypatch.setattr(
        env_settings,
        "ALL_VOCABS",
        {DataElementURI.image.value: imaging_vocab},
    )

    # Canned SPARQL response: both vocabulary terms appear as used instances in the graph.
    graph_bindings = [
        {
            "termURL": {
                "type": "uri",
                "value": f"http://purl.org/nidash/nidm#{term_id}",
            }
        }
        for term_id in ("T1Weighted", "FlowWeighted")
    ]
    mock_graph_response = {
        "head": {"vars": ["termURL"]},
        "results": {"bindings": graph_bindings},
    }

    async def mock_httpx_post(self, **kwargs):
        # Stand in for the graph store so no real SPARQL endpoint is needed.
        return httpx.Response(status_code=200, json=mock_graph_response)

    monkeypatch.setattr(httpx.AsyncClient, "post", mock_httpx_post)

    response = test_app.get("/imaging-modalities")

    expected_instances = [
        {
            "TermURL": "nidm:T1Weighted",
            "Label": "T1-weighted image",
            "Abbreviation": "T1w",
            "DataType": "anat",
        },
        {
            "TermURL": "nidm:FlowWeighted",
            "Label": "Blood-Oxygen-Level Dependent image",
            "Abbreviation": "bold",
            "DataType": "func",
        },
    ]
    assert response.json() == {"nb:Image": expected_instances}


def test_get_instances_endpoint_without_vocab_lookup(
test_app,
monkeypatch,
Expand Down Expand Up @@ -146,6 +231,7 @@ async def mock_httpx_post(self, **kwargs):
[
("assessments", "nb:Assessment"),
("diagnoses", "nb:Diagnosis"),
("imaging-modalities", "nb:Image"),
],
)
def test_get_vocab_endpoint(
Expand Down Expand Up @@ -190,6 +276,22 @@ def test_get_vocab_endpoint(
],
},
],
"nb:Image": [
{
"namespace_prefix": "nidm",
"namespace_url": "http://purl.org/nidash/nidm#",
"vocabulary_name": "Test vocab of imaging modalities",
"version": "1.0.0",
"terms": [
{
"id": "T1Weighted",
"name": "T1-weighted image",
"Abbreviation": "T1w",
"DataType": "anat",
}
],
}
],
}

monkeypatch.setattr(env_settings, "ALL_VOCABS", mock_all_vocabs)
Expand Down