Merged
10 changes: 5 additions & 5 deletions examples/30_extended/tasks_tutorial.py
@@ -196,11 +196,11 @@
     # Error code for 'task already exists'
     if e.code == 614:
         # Lookup task
-        tasks = openml.tasks.list_tasks(data_id=128, output_format='dataframe').to_numpy()
-        tasks = tasks[tasks[:, 4] == "Supervised Classification"]
-        tasks = tasks[tasks[:, 6] == "10-fold Crossvalidation"]
-        tasks = tasks[tasks[:, 19] == "predictive_accuracy"]
-        task_id = tasks[0][0]
+        tasks = openml.tasks.list_tasks(data_id=128, output_format='dataframe')
+        tasks = tasks.query('task_type == "Supervised Classification" '
+                            'and estimation_procedure == "10-fold Crossvalidation" '
+                            'and evaluation_measures == "predictive_accuracy"')
+        task_id = tasks.loc[:, "tid"].values[0]
         print("Task already exists. Task ID is", task_id)

 # reverting to prod server
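As a side note on the dataframe-based lookup above: here is a minimal sketch of the same filter written with plain boolean masks instead of `DataFrame.query` (column names are taken from the new code; whether a task for `data_id=128` exists depends on the server state):

```python
import openml

# Look up an existing task by its properties instead of positional numpy indexing.
tasks = openml.tasks.list_tasks(data_id=128, output_format="dataframe")
mask = (
    (tasks["task_type"] == "Supervised Classification")
    & (tasks["estimation_procedure"] == "10-fold Crossvalidation")
    & (tasks["evaluation_measures"] == "predictive_accuracy")
)
task_id = tasks.loc[mask, "tid"].iloc[0]
print("Task already exists. Task ID is", task_id)
```

Either form avoids the hard-coded column positions of the old `.to_numpy()` version, so the lookup keeps working if the column order of the task listing changes.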
3 changes: 2 additions & 1 deletion tests/test_evaluations/test_evaluation_functions.py
@@ -149,8 +149,9 @@ def test_evaluation_list_per_fold(self):
         self.assertIsNone(evaluations[run_id].values)

     def test_evaluation_list_sort(self):
+        openml.config.server = self.production_server
         size = 10
-        task_id = 115
+        task_id = 6
         # Get all evaluations of the task
         unsorted_eval = openml.evaluations.list_evaluations(
             "predictive_accuracy", offset=0, task=[task_id])
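For context, a small sketch of the sorted counterpart this test compares against; the `sort_order` argument and the production-server URL are assumptions here, not part of this diff:

```python
import openml

# The test now points at the production server via self.production_server;
# the URL below is the usual production endpoint and is an assumption here.
openml.config.server = "https://www.openml.org/api/v1/xml"
task_id = 6

# Unsorted listing, as in the test above.
unsorted_eval = openml.evaluations.list_evaluations(
    "predictive_accuracy", offset=0, task=[task_id])

# Hypothetical sorted listing; sort_order="desc" is assumed to be supported.
sorted_eval = openml.evaluations.list_evaluations(
    "predictive_accuracy", offset=0, task=[task_id], sort_order="desc")
```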
9 changes: 7 additions & 2 deletions tests/test_runs/test_run_functions.py
@@ -79,7 +79,7 @@ def _wait_for_processed_run(self, run_id, max_waiting_time_seconds):
             if len(run.evaluations) > 0:
                 return
             else:
-                time.sleep(10)
+                time.sleep(3)
         raise RuntimeError('Could not find any evaluations! Please check whether run {} was '
                            'evaluated correctly on the server'.format(run_id))

@@ -1120,8 +1120,13 @@ def test_get_run(self):
         )

     def _check_run(self, run):
+        # This tests that the API returns seven entries for each run
+        # Check out https://openml.org/api/v1/xml/run/list/flow/1154
+        # They are run_id, task_id, task_type_id, setup_id, flow_id, uploader, upload_time
+        # error_message and run_details exist, too, but are not used so far. We need to update
+        # this check once they are used!
         self.assertIsInstance(run, dict)
-        self.assertEqual(len(run), 7)
+        assert len(run) == 7, str(run)
Member:
Can you please explain in a comment why 7? It is not clear from the (direct) context.

Collaborator (Author):
That's the number of entries per run returned by the API: https://openml.org/api/v1/xml/run/list/flow/1154

Collaborator (Author):
I just pushed a commit explaining this. I will merge once the tests are through.

Member:
Good. I am not sure whether we should really check this, as it is flexible for extension, but in those cases we can adjust the unit tests as well.


     def test_get_runs_list(self):
         # TODO: comes from live, no such lists on test
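Following the review thread above, a quick sketch of how one might inspect those seven per-run entries (flow 1154 comes from the API example linked in the thread; the expected names follow the comment added in this PR and may change if the listing is extended):

```python
import openml

# List a single run of flow 1154 and look at the per-run entries.
runs = openml.runs.list_runs(flow=[1154], size=1)
run = next(iter(runs.values()))

# Per the comment added in this PR, these should be: run_id, task_id,
# task_type_id, setup_id, flow_id, uploader and upload_time.
print(sorted(run))
assert len(run) == 7, run
```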
4 changes: 2 additions & 2 deletions tests/test_utils/test_utils.py
@@ -75,14 +75,14 @@ def test_list_all_for_setups(self):
         self.assertEqual(len(setups), required_size)

     def test_list_all_for_runs(self):
-        required_size = 48
+        required_size = 21
         runs = openml.runs.list_runs(batch_size=self._batch_size, size=required_size)

         # might not be on test server after reset, please rerun test at least once if fails
         self.assertEqual(len(runs), required_size)

     def test_list_all_for_evaluations(self):
-        required_size = 57
+        required_size = 22
         # TODO apparently list_evaluations function does not support kwargs
         evaluations = openml.evaluations.list_evaluations(function='predictive_accuracy',
                                                           size=required_size)