1 change: 1 addition & 0 deletions doc/progress.rst
@@ -19,6 +19,7 @@ Changelog
* MAINT #897: Dropping support for Python 3.5.
* ADD #894: Support caching of datasets using feather format as an option.
* ADD #945: PEP 561 compliance for distributing Type information
+ * MAINT #371: ``list_evaluations`` default ``size`` changed from ``None`` to ``10_000``.

0.10.2
~~~~~~
2 changes: 1 addition & 1 deletion examples/40_paper/2018_ida_strang_example.py
@@ -47,7 +47,7 @@

# Downloads all evaluation records related to this study
evaluations = openml.evaluations.list_evaluations(
- measure, flows=flow_ids, study=study_id, output_format="dataframe"
+ measure, size=None, flows=flow_ids, study=study_id, output_format="dataframe"
)
# gives us a table with columns data_id, flow1_value, flow2_value
evaluations = evaluations.pivot(index="data_id", columns="flow_id", values="value").dropna()
7 changes: 4 additions & 3 deletions openml/evaluations/functions.py
@@ -16,7 +16,7 @@
def list_evaluations(
function: str,
offset: Optional[int] = None,
- size: Optional[int] = None,
+ size: Optional[int] = 10000,
tasks: Optional[List[Union[str, int]]] = None,
setups: Optional[List[Union[str, int]]] = None,
flows: Optional[List[Union[str, int]]] = None,
@@ -38,8 +38,9 @@ def list_evaluations(
the evaluation function. e.g., predictive_accuracy
offset : int, optional
the number of runs to skip, starting from the first
- size : int, optional
-     the maximum number of runs to show
+ size : int, default 10000
+     The maximum number of runs to show.
+     If set to ``None``, it returns all the results.

tasks : list[int,str], optional
the list of task IDs
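With this default, ``list_evaluations`` returns at most 10,000 evaluation records unless the caller opts out. A minimal usage sketch of the updated signature (the measure name and task id below are illustrative placeholders, not part of this change):

import openml

# With the new default, at most 10,000 evaluation records are returned.
evals = openml.evaluations.list_evaluations(
    "predictive_accuracy", tasks=[6], output_format="dataframe"
)

# Passing size=None restores the previous behaviour and fetches all matching records.
all_evals = openml.evaluations.list_evaluations(
    "predictive_accuracy", size=None, tasks=[6], output_format="dataframe"
)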
20 changes: 14 additions & 6 deletions tests/test_evaluations/test_evaluation_functions.py
@@ -41,7 +41,9 @@ def test_evaluation_list_filter_task(self):

task_id = 7312

- evaluations = openml.evaluations.list_evaluations("predictive_accuracy", tasks=[task_id])
+ evaluations = openml.evaluations.list_evaluations(
+     "predictive_accuracy", size=110, tasks=[task_id]
+ )

self.assertGreater(len(evaluations), 100)
for run_id in evaluations.keys():
@@ -56,7 +58,7 @@ def test_evaluation_list_filter_uploader_ID_16(self):

uploader_id = 16
evaluations = openml.evaluations.list_evaluations(
"predictive_accuracy", uploaders=[uploader_id], output_format="dataframe"
"predictive_accuracy", size=60, uploaders=[uploader_id], output_format="dataframe"
)
self.assertEqual(evaluations["uploader"].unique(), [uploader_id])

@@ -66,7 +68,9 @@ def test_evaluation_list_filter_uploader_ID_10(self):
openml.config.server = self.production_server

setup_id = 10
- evaluations = openml.evaluations.list_evaluations("predictive_accuracy", setups=[setup_id])
+ evaluations = openml.evaluations.list_evaluations(
+     "predictive_accuracy", size=60, setups=[setup_id]
+ )

self.assertGreater(len(evaluations), 50)
for run_id in evaluations.keys():
@@ -81,7 +85,9 @@ def test_evaluation_list_filter_flow(self):

flow_id = 100

- evaluations = openml.evaluations.list_evaluations("predictive_accuracy", flows=[flow_id])
+ evaluations = openml.evaluations.list_evaluations(
+     "predictive_accuracy", size=10, flows=[flow_id]
+ )

self.assertGreater(len(evaluations), 2)
for run_id in evaluations.keys():
@@ -96,7 +102,9 @@ def test_evaluation_list_filter_run(self):

run_id = 12

- evaluations = openml.evaluations.list_evaluations("predictive_accuracy", runs=[run_id])
+ evaluations = openml.evaluations.list_evaluations(
+     "predictive_accuracy", size=2, runs=[run_id]
+ )

self.assertEqual(len(evaluations), 1)
for run_id in evaluations.keys():
@@ -164,7 +172,7 @@ def test_evaluation_list_sort(self):
task_id = 6
# Get all evaluations of the task
unsorted_eval = openml.evaluations.list_evaluations(
"predictive_accuracy", offset=0, tasks=[task_id]
"predictive_accuracy", size=None, offset=0, tasks=[task_id]
)
# Get top 10 evaluations of the same task
sorted_eval = openml.evaluations.list_evaluations(
4 changes: 3 additions & 1 deletion tests/test_study/test_study_functions.py
@@ -152,7 +152,9 @@ def test_publish_study(self):
self.assertSetEqual(set(run_ids), set(study_downloaded.runs))

# test whether the list evaluation function also handles study data fine
- run_ids = openml.evaluations.list_evaluations("predictive_accuracy", study=study.id)
+ run_ids = openml.evaluations.list_evaluations(
+     "predictive_accuracy", size=None, study=study.id
+ )
self.assertSetEqual(set(run_ids), set(study_downloaded.runs))

# attach more runs