@@ -1,4 +1,5 @@
"""Lightning strategy for single XPU devic."""
"""Lightning strategy for single XPU device."""

# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
@@ -63,7 +63,6 @@ def train_model(model, dataset, cfg, distributed=False, validate=False, timestamp
    )
    # The specific dataloader settings
    train_loader_cfg = {**loader_cfg, **cfg.data.get("train_dataloader", {})}

    data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset]

    fp16_cfg = cfg.get("fp16_", None)
@@ -0,0 +1,5 @@
"""Test for otx.algorithms.anomaly.adapters.anomalib.accelerators"""

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
@@ -0,0 +1,50 @@
"""Test for otx.algorithms.anomaly.adapters.anomalib.accelerators.xpu"""

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

import pytest
import torch
from otx.algorithms.anomaly.adapters.anomalib.accelerators import XPUAccelerator
from otx.algorithms.common.utils import is_xpu_available


@pytest.mark.skipif(not is_xpu_available(), reason="XPU is not available")
class TestXPUAccelerator:
    @pytest.fixture
    def accelerator(self):
        return XPUAccelerator()

    def test_setup_device(self, accelerator):
        device = torch.device("xpu")
        accelerator.setup_device(device)

    def test_parse_devices(self, accelerator):
        devices = [1, 2, 3]
        parsed_devices = accelerator.parse_devices(devices)
        assert isinstance(parsed_devices, list)
        assert parsed_devices == devices

    def test_get_parallel_devices(self, accelerator):
        devices = [1, 2, 3]
        parallel_devices = accelerator.get_parallel_devices(devices)
        assert isinstance(parallel_devices, list)
        assert parallel_devices == [torch.device("xpu", idx) for idx in devices]

    def test_auto_device_count(self, accelerator):
        count = accelerator.auto_device_count()
        assert isinstance(count, int)

    def test_is_available(self, accelerator):
        available = accelerator.is_available()
        assert isinstance(available, bool)
        assert available == is_xpu_available()

    def test_get_device_stats(self, accelerator):
        device = torch.device("xpu")
        stats = accelerator.get_device_stats(device)
        assert isinstance(stats, dict)

    def test_teardown(self, accelerator):
        accelerator.teardown()
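
Usage sketch (illustrative, not part of this diff): an accelerator instance
like the one under test can be passed straight to a Lightning Trainer. This
assumes the standard pytorch_lightning custom-accelerator interface; the
Trainer arguments below are assumptions, not taken from this PR.

import pytorch_lightning as pl
from otx.algorithms.anomaly.adapters.anomalib.accelerators import XPUAccelerator

# Hypothetical wiring: hand the custom accelerator to the Trainer directly.
trainer = pl.Trainer(accelerator=XPUAccelerator(), devices=1)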
@@ -0,0 +1,5 @@
"""Test for otx.algorithms.anomaly.adapters.anomalib.plugins"""

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
@@ -0,0 +1,58 @@
"""Test for otx.algorithms.anomaly.adapters.anomalib.plugins.xpu_precision"""

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

import pytest
import torch
from torch.optim import Optimizer
from otx.algorithms.anomaly.adapters.anomalib.plugins.xpu_precision import MixedPrecisionXPUPlugin


class TestMixedPrecisionXPUPlugin:
    @pytest.fixture
    def plugin(self):
        return MixedPrecisionXPUPlugin()

    def test_init(self, plugin):
        assert plugin.scaler is None

    def test_pre_backward(self, plugin, mocker):
        tensor = torch.zeros(1)
        module = mocker.MagicMock()
        output = plugin.pre_backward(tensor, module)
        assert output == tensor

    def test_optimizer_step_no_scaler(self, plugin, mocker):
        optimizer = mocker.MagicMock(Optimizer)
        model = mocker.MagicMock()
        optimizer_idx = 0
        closure = mocker.MagicMock()
        kwargs = {}
        mock_optimizer_step = mocker.patch(
            "otx.algorithms.anomaly.adapters.anomalib.plugins.xpu_precision.PrecisionPlugin.optimizer_step"
        )
        out = plugin.optimizer_step(optimizer, model, optimizer_idx, closure, **kwargs)
        assert isinstance(out, mocker.MagicMock)
        mock_optimizer_step.assert_called_once()

    def test_optimizer_step_with_scaler(self, plugin, mocker):
        optimizer = mocker.MagicMock(Optimizer)
        model = mocker.MagicMock()
        optimizer_idx = 0
        closure = mocker.MagicMock()
        plugin.scaler = mocker.MagicMock()
        kwargs = {}
        out = plugin.optimizer_step(optimizer, model, optimizer_idx, closure, **kwargs)
        assert isinstance(out, mocker.MagicMock)

    def test_clip_gradients(self, plugin, mocker):
        optimizer = mocker.MagicMock(Optimizer)
        clip_val = 0.1
        gradient_clip_algorithm = "norm"
        mock_clip_gradients = mocker.patch(
            "otx.algorithms.anomaly.adapters.anomalib.plugins.xpu_precision.PrecisionPlugin.clip_gradients"
        )
        plugin.clip_gradients(optimizer, clip_val, gradient_clip_algorithm)
        mock_clip_gradients.assert_called_once()
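
For reference, a sketch of the scaler path that test_optimizer_step_with_scaler
exercises. This mirrors the torch.cuda.amp.GradScaler convention; whether the
XPU plugin follows it exactly is an assumption, and scaled_step is a
hypothetical helper, not an API from this PR.

import torch


def scaled_step(scaler: torch.cuda.amp.GradScaler, optimizer: torch.optim.Optimizer, closure):
    # Run the closure (forward + scaled backward), then step via the scaler.
    result = closure()
    scaler.unscale_(optimizer)  # expose true gradients, e.g. for clipping
    scaler.step(optimizer)      # skipped internally if inf/NaN grads are found
    scaler.update()
    return result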
@@ -0,0 +1,5 @@
"""Test for otx.algorithms.anomaly.adapters.anomalib.strategies"""

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
@@ -0,0 +1,33 @@
"""Tests the XPU strategy."""

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import pytest
import torch
import pytorch_lightning as pl
from otx.algorithms.anomaly.adapters.anomalib.strategies.xpu_single import SingleXPUStrategy
from otx.algorithms.common.utils.utils import is_xpu_available


@pytest.mark.skipif(not is_xpu_available(), reason="XPU is not available")
class TestSingleXPUStrategy:
    def test_init(self):
        strategy = SingleXPUStrategy(device="xpu:0")
        assert strategy._root_device.type == "xpu"
        assert strategy.accelerator is None

    def test_is_distributed(self):
        strategy = SingleXPUStrategy(device="xpu:0")
        assert not strategy.is_distributed

    def test_setup_optimizers(self):
        strategy = SingleXPUStrategy(device="xpu:0")
        trainer = pl.Trainer()
        # Create mock optimizers and models for testing
        model = torch.nn.Linear(10, 2)
        strategy._optimizers = [torch.optim.Adam(model.parameters(), lr=0.001)]
        strategy._model = model
        trainer.model = model
        strategy.setup_optimizers(trainer)
        assert len(strategy.optimizers) == 1
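
Usage sketch (illustrative, not part of this diff): a single-device strategy
like this one is normally combined with the precision plugin tested earlier
when building a Trainer; the exact argument combination is an assumption.

import pytorch_lightning as pl
from otx.algorithms.anomaly.adapters.anomalib.plugins.xpu_precision import MixedPrecisionXPUPlugin
from otx.algorithms.anomaly.adapters.anomalib.strategies.xpu_single import SingleXPUStrategy

# Hypothetical wiring: single-XPU strategy plus the mixed-precision plugin.
trainer = pl.Trainer(strategy=SingleXPUStrategy(device="xpu:0"), plugins=[MixedPrecisionXPUPlugin()])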
@@ -0,0 +1,5 @@
"""Test for otx.algorithms.classification.adapters.mmcls.api"""

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
118 changes: 118 additions & 0 deletions tests/unit/algorithms/classification/adapters/mmcls/api/test_train.py
@@ -0,0 +1,118 @@
"""Test for otx.algorithms.classification.adapters.mmcls.apis.train"""

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

import pytest
from unittest import mock
from otx.algorithms.classification.adapters.mmcls.apis.train import train_model
import mmcv
import torch
from otx.algorithms.common.utils.utils import is_xpu_available


class TestTrainModel:
    @pytest.fixture
    def mock_modules(self, mocker):
        # Patch every heavy dependency of train_model so the tests only
        # exercise its control flow. (The original diff patched
        # build_dataloader five times; one patch is sufficient.)
        mocker.patch(
            "otx.algorithms.classification.adapters.mmcls.apis.train.build_dataloader", return_value=mock.MagicMock()
        )
        mocker.patch(
            "otx.algorithms.classification.adapters.mmcls.apis.train.get_root_logger", return_value=mock.MagicMock()
        )
        mocker.patch(
            "otx.algorithms.classification.adapters.mmcls.apis.train.wrap_distributed_model",
            return_value=mock.MagicMock(),
        )
        mocker.patch(
            "otx.algorithms.classification.adapters.mmcls.apis.train.wrap_non_distributed_model",
            return_value=mock.MagicMock(),
        )
        mocker.patch(
            "otx.algorithms.classification.adapters.mmcls.apis.train.build_optimizer", return_value=mock.MagicMock()
        )
        mocker.patch(
            "otx.algorithms.classification.adapters.mmcls.apis.train.build_runner", return_value=mock.MagicMock()
        )
        mocker.patch(
            "otx.algorithms.classification.adapters.mmcls.apis.train.build_dataset", return_value=mock.MagicMock()
        )
        mocker.patch(
            "otx.algorithms.classification.adapters.mmcls.apis.train.DistEvalHook", return_value=mock.MagicMock()
        )
        mocker.patch("otx.algorithms.classification.adapters.mmcls.apis.train.EvalHook", return_value=mock.MagicMock())

    @pytest.fixture
    def mmcv_cfg(self):
        return mmcv.Config(
            {
                "gpu_ids": [0],
                "seed": 42,
                "data": mock.MagicMock(),
                "device": "cpu",
                "optimizer": "SGD",
                "optimizer_config": {},
                "total_epochs": 1,
                "work_dir": "test",
                "lr_config": {},
                "checkpoint_config": {},
                "log_config": {},
                "resume_from": False,
                "load_from": "",
                "workflow": "",
            }
        )

    @pytest.fixture
    def model(self):
        return mock.MagicMock()

    @pytest.fixture
    def dataset(self):
        return mock.MagicMock()

    def test_train_model_single_dataset_no_validation(self, mock_modules, mmcv_cfg, model, dataset):
        # The fixture is requested only for its patching side effects
        _ = mock_modules
        # Call the function
        train_model(model, dataset, mmcv_cfg, validate=False)

    def test_train_model_multiple_datasets_distributed_training(self, mock_modules, mmcv_cfg, model, dataset):
        # The fixture is requested only for its patching side effects
        _ = mock_modules
        # Call the function
        train_model(model, [dataset, dataset], mmcv_cfg, distributed=True, validate=True)

    @pytest.mark.skipif(is_xpu_available() or not torch.cuda.is_available(), reason="cuda is not available")
    def test_train_model_specific_timestamp_and_cuda_device(self, mock_modules, mmcv_cfg, model, dataset):
        # The fixture is requested only for its patching side effects
        _ = mock_modules
        timestamp = "2024-01-01"
        device = "cuda"
        mmcv_cfg.device = "cuda"
        meta = {"info": "some_info"}
        # Call the function
        train_model(model, dataset, mmcv_cfg, timestamp=timestamp, device=device, meta=meta)

    @pytest.mark.skipif(not is_xpu_available(), reason="xpu is not available")
    def test_train_model_xpu_device(self, mock_modules, mmcv_cfg, model, dataset):
        # The fixture is requested only for its patching side effects
        _ = mock_modules
        device = "xpu"
        mmcv_cfg.device = "xpu"

        # Call the function
        train_model(model, dataset, mmcv_cfg, device=device)
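
For orientation (reconstructed from the patch targets above, not verbatim
implementation code), the pieces mocked by mock_modules are exercised inside
train_model roughly in this order:

# build_dataloader(...)           -> one train loader per dataset
# wrap_non_distributed_model(...) -> or wrap_distributed_model when distributed=True
# build_optimizer(model, cfg.optimizer)
# build_runner(...)               -> runner that drives the training epochs
# DistEvalHook / EvalHook         -> registered only when validate=True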
@@ -0,0 +1,20 @@
"""Test for XPU optimizer hook"""

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import pytest
from otx.algorithms.common.utils.utils import is_xpu_available


@pytest.mark.skipif(not is_xpu_available(), reason="XPU is not available")
def test_init():
    from otx.algorithms.common.adapters.mmcv.hooks.xpu_optimizer_hook import BFp16XPUOptimizerHook
    from otx.algorithms.common.adapters.torch.amp import XPUGradScaler

    hook = BFp16XPUOptimizerHook(grad_clip=None, coalesce=True, bucket_size_mb=-1, loss_scale=512.0, distributed=True)
    assert hook.coalesce is True  # Check coalesce is True
    assert hook.bucket_size_mb == -1  # Check bucket size is -1
    assert hook._scale_update_param == 512.0  # Check scale update param ("is" would compare identity, not value)
    assert hook.distributed is True  # Check distributed is True
    assert isinstance(hook.loss_scaler, XPUGradScaler)
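
Usage sketch (assumption - the config wiring is not shown in this PR):
mmcv-style optimizer hooks are conventionally selected from the training
config, so a recipe would enable this hook roughly as follows.

# Hypothetical config snippet; values mirror the test above.
optimizer_config = dict(
    type="BFp16XPUOptimizerHook",
    grad_clip=None,
    loss_scale=512.0,
    distributed=True,
)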
6 changes: 6 additions & 0 deletions tests/unit/algorithms/common/adapters/mmcv/test_configurer.py
@@ -1,3 +1,9 @@
"""Test for otx.algorithms.common.adapters.mmcv.configurer"""

# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

import pytest
from mmcv.utils import Config
from otx.algorithms.common.adapters.mmcv import configurer