Compare

Qrels

Bases: object

Qrels, or query relevance judgments, stores the ground truth for conducting evaluations.

The preferred way of creating a Qrels instance is by converting a Python dictionary, as follows:

qrels_dict = {
    "q_1": {
        "d_1": 1,
        "d_2": 2,
    },
    "q_2": {
        "d_3": 2,
        "d_2": 1,
        "d_5": 3,
    },
}

qrels = Qrels(qrels_dict, name="MSMARCO")

qrels = Qrels()  # Creates an empty Qrels with no name
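
A Qrels can also be built incrementally with the add_score and add methods documented below. A minimal sketch, assuming the query and document IDs are purely illustrative:

qrels = Qrels(name="MSMARCO")

# Add a single (doc_id, score) pair to a query
qrels.add_score(q_id="q_1", doc_id="d_1", score=1)

# Add a whole query at once
qrels.add(q_id="q_2", doc_ids=["d_3", "d_2", "d_5"], scores=[2, 1, 3])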
Source code in ranx/data_structures/qrels.py
class Qrels(object):
    """`Qrels`, or _query relevance judgments_, stores the ground truth for conducting evaluations.

    The preferred way of creating a `Qrels` instance is by converting a Python dictionary as follows:

    ```python
    qrels_dict = {
        "q_1": {
            "d_1": 1,
            "d_2": 2,
        },
        "q_2": {
            "d_3": 2,
            "d_2": 1,
            "d_5": 3,
        },
    }

    qrels = Qrels(qrels_dict, name="MSMARCO")

    qrels = Qrels()  # Creates an empty Qrels with no name
    ```
    """

    def __init__(self, qrels: Dict[str, Dict[str, int]] = None, name: str = None):
        if qrels is None:
            self.qrels = TypedDict.empty(
                key_type=types.unicode_type,
                value_type=types.DictType(types.unicode_type, types.int64),
            )
            self.sorted = False
        else:
            # Query IDs
            q_ids = list(qrels.keys())
            q_ids = TypedList(q_ids)

            # Doc IDs
            doc_ids = [list(doc.keys()) for doc in qrels.values()]
            max_len = max(len(y) for x in doc_ids for y in x)
            dtype = f"<U{max_len}"
            doc_ids = TypedList([np.array(x, dtype=dtype) for x in doc_ids])

            # Scores
            scores = [list(doc.values()) for doc in qrels.values()]
            scores = TypedList([np.array(x, dtype=int) for x in scores])

            self.qrels = create_and_sort(q_ids, doc_ids, scores)
            self.sorted = True

        self.name = name

    def keys(self):
        """Returns query ids. Used internally."""
        return self.qrels.keys()

    def add_score(self, q_id: str, doc_id: str, score: int):
        """Add a (doc_id, score) pair to a query (or, change its value if it already exists).

        Args:
            q_id (str): Query ID
            doc_id (str): Document ID
            score (int): Relevance score judgment
        """
        if self.qrels.get(q_id) is None:
            self.qrels[q_id] = TypedDict.empty(
                key_type=types.unicode_type,
                value_type=types.int64,
            )
        self.qrels[q_id][doc_id] = int(score)
        self.sorted = False

    def add(self, q_id: str, doc_ids: List[str], scores: List[int]):
        """Add a query and its relevant documents with the associated relevance score judgment.

        Args:
            q_id (str): Query ID
            doc_ids (List[str]): List of Document IDs
            scores (List[int]): List of relevance score judgments
        """
        self.add_multi([q_id], [doc_ids], [scores])

    def add_multi(
        self,
        q_ids: List[str],
        doc_ids: List[List[str]],
        scores: List[List[int]],
    ):
        """Add multiple queries at once.

        Args:
            q_ids (List[str]): List of Query IDs
            doc_ids (List[List[str]]): List of list of Document IDs
            scores (List[List[int]]): List of list of relevance score judgments
        """
        q_ids = TypedList(q_ids)
        doc_ids = TypedList([TypedList(x) for x in doc_ids])
        scores = TypedList([TypedList(map(int, x)) for x in scores])

        self.qrels = add_and_sort(self.qrels, q_ids, doc_ids, scores)
        self.sorted = True

    def set_relevance_level(self, rel_lvl: int = 1):
        """Sets relevance level."""
        self.qrels = _set_relevance_level(self.qrels, rel_lvl)

    def get_query_ids(self):
        """Returns query ids."""
        return list(self.qrels.keys())

    def get_doc_ids_and_scores(self):
        """Returns doc ids and relevance judgments."""
        return list(self.qrels.values())

    # Sort in place
    def sort(self):
        """Sort. Used internally."""
        self.qrels = sort_dict_by_key(self.qrels)
        self.qrels = sort_dict_of_dict_by_value(self.qrels)
        self.sorted = True

    def to_typed_list(self):
        """Convert Qrels to Numba Typed List. Used internally."""
        if not self.sorted:
            self.sort()
        return to_typed_list(self.qrels)

    def to_dict(self) -> Dict[str, Dict[str, int]]:
        """Convert Qrels to Python dictionary.

        Returns:
            Dict[str, Dict[str, int]]: Qrels as Python dictionary
        """
        d = defaultdict(dict)
        for q_id in self.keys():
            d[q_id] = dict(self[q_id])
        return d

    def to_dataframe(self) -> pd.DataFrame:
        """Convert Qrels to Pandas DataFrame with the following columns: `q_id`, `doc_id`, and `score`.

        Returns:
            pandas.DataFrame: Qrels as Pandas DataFrame.
        """
        data = {"q_id": [], "doc_id": [], "score": []}

        for q_id in self.qrels:
            for doc_id in self.qrels[q_id]:
                data["q_id"].append(q_id)
                data["doc_id"].append(doc_id)
                data["score"].append(self.qrels[q_id][doc_id])

        return pd.DataFrame.from_dict(data)

    def save(self, path: str = "qrels.json", kind: str = None) -> None:
        """Write `qrels` to `path` as JSON file, TREC qrels format, or Parquet file. File type is automatically inferred form the filename extension: ".json" -> "json", ".trec" -> "trec", ".txt" -> "trec", ".parq" -> "parquet", ".parquet" -> "parquet". Use the "kind" argument to override this behavior.

        Args:
            path (str, optional): Saving path. Defaults to "qrels.json".
            kind (str, optional): Kind of file to save, must be either "json" or "trec". If None, it will be automatically inferred from the filename extension.
        """
        # Infer file extension -------------------------------------------------
        kind = get_file_kind(path, kind)

        # Save Qrels -----------------------------------------------------------
        if kind == "json":
            with open(path, "wb") as f:
                f.write(orjson.dumps(self.to_dict(), option=orjson.OPT_INDENT_2))
        elif kind == "parquet":
            self.to_dataframe().to_parquet(path, index=False)
        else:
            with open(path, "w") as f:
                for i, q_id in enumerate(self.qrels.keys()):
                    for j, doc_id in enumerate(self.qrels[q_id].keys()):
                        score = self.qrels[q_id][doc_id]
                        f.write(f"{q_id} 0 {doc_id} {score}")

                        if (
                            i != len(self.qrels.keys()) - 1
                            or j != len(self.qrels[q_id].keys()) - 1
                        ):
                            f.write("\n")

    @staticmethod
    def from_dict(d: Dict[str, Dict[str, int]]):
        """Convert a Python dictionary in form of {q_id: {doc_id: score}} to ranx.Qrels.

        Args:
            d (Dict[str, Dict[str, int]]): Qrels as Python dictionary

        Returns:
            Qrels: ranx.Qrels
        """
        # Query IDs
        q_ids = list(d.keys())
        q_ids = TypedList(q_ids)

        # Doc IDs
        doc_ids = [list(doc.keys()) for doc in d.values()]
        max_len = max(len(y) for x in doc_ids for y in x)
        dtype = f"<U{max_len}"
        doc_ids = TypedList([np.array(x, dtype=dtype) for x in doc_ids])

        # Scores
        scores = [list(doc.values()) for doc in d.values()]
        scores = TypedList([np.array(x, dtype=int) for x in scores])

        qrels = Qrels()
        qrels.qrels = create_and_sort(q_ids, doc_ids, scores)
        qrels.sorted = True

        return qrels

    @staticmethod
    def from_file(path: str, kind: str = None):
        """Parse a qrels file into ranx.Qrels. Supported formats are JSON, TREC qrels, and gzipped TREC qrels. Correct import behavior is inferred from the file extension: ".json" -> "json", ".trec" -> "trec", ".txt" -> "trec", ".gz" -> "gzipped trec". Use the "kind" argument to override this behavior.

        Args:
            path (str): File path.
            kind (str, optional): Kind of file to load, must be either "json" or "trec".

        Returns:
            Qrels: ranx.Qrels
        """
        # Infer file extension -------------------------------------------------
        kind = get_file_kind(path, kind)

        # Load Qrels -----------------------------------------------------------
        if kind == "json":
            qrels = orjson.loads(open(path, "rb").read())
        else:
            qrels = defaultdict(dict)
            with gzip.open(path, "rt") if kind == "gz" else open(path) as f:
                for line in f:
                    q_id, _, doc_id, rel = line.split()
                    qrels[q_id][doc_id] = int(rel)

        return Qrels.from_dict(qrels)

    @staticmethod
    def from_df(
        df: pd.DataFrame,
        q_id_col: str = "q_id",
        doc_id_col: str = "doc_id",
        score_col: str = "score",
    ):
        """Convert a Pandas DataFrame to ranx.Qrels.

        Args:
            df (pandas.DataFrame): Qrels as Pandas DataFrame.
            q_id_col (str, optional): Query IDs column. Defaults to "q_id".
            doc_id_col (str, optional): Document IDs column. Defaults to "doc_id".
            score_col (str, optional): Relevance score judgments column. Defaults to "score".

        Returns:
            Qrels: ranx.Qrels
        """
        assert (
            df[q_id_col].dtype == "O"
        ), "DataFrame Query IDs column dtype must be `object` (string)"
        assert (
            df[doc_id_col].dtype == "O"
        ), "DataFrame Document IDs column dtype must be `object` (string)"
        assert (
            df[score_col].dtype == np.int64
        ), "DataFrame scores column dtype must be `int`"

        qrels_dict = (
            df.groupby(q_id_col)[[doc_id_col, score_col]]
            .apply(lambda g: {x[0]: x[1] for x in g.values.tolist()})
            .to_dict()
        )

        return Qrels.from_dict(qrels_dict)

    @staticmethod
    def from_parquet(
        path: str,
        q_id_col: str = "q_id",
        doc_id_col: str = "doc_id",
        score_col: str = "score",
        pd_kwargs: Dict[str, Any] = None,
    ):
        """Convert a Parquet file to ranx.Qrels.

        Args:
            path (str): File path.
            q_id_col (str, optional): Query IDs column. Defaults to "q_id".
            doc_id_col (str, optional): Document IDs column. Defaults to "doc_id".
            score_col (str, optional): Relevance score judgments column. Defaults to "score".
            pd_kwargs (Dict[str, Any], optional): Additional arguments to pass to `pandas.read_parquet` (see https://pandas.pydata.org/docs/reference/api/pandas.read_parquet.html). Defaults to None.

        Returns:
            Qrels: ranx.Qrels
        """
        pd_kwargs = {} if pd_kwargs is None else pd_kwargs

        return Qrels.from_df(
            df=pd.read_parquet(path, **pd_kwargs),
            q_id_col=q_id_col,
            doc_id_col=doc_id_col,
            score_col=score_col,
        )

    @staticmethod
    def from_ir_datasets(dataset_id: str):
        """Convert `ir-datasets` qrels into ranx.Qrels. It automatically downloads data if missing.
        Args:
            dataset_id (str): ID of the detaset in `ir-datasets`. `ir-datasets` catalog is available here: https://ir-datasets.com/index.html.
        Returns:
            Qrels: ranx.Qrels
        """
        qrels = Qrels.from_dict(ir_datasets.load(dataset_id).qrels_dict())
        qrels.name = dataset_id
        return qrels

    @property
    def size(self):
        return len(self.qrels)

    def __getitem__(self, q_id):
        return dict(self.qrels[q_id])

    def __len__(self) -> int:
        return len(self.qrels)

    def __repr__(self):
        return self.qrels.__repr__()

    def __str__(self):
        return self.qrels.__str__()

add(q_id, doc_ids, scores)

Add a query and its relevant documents with the associated relevance score judgment.

Parameters:

q_id (str): Query ID. Required.
doc_ids (List[str]): List of Document IDs. Required.
scores (List[int]): List of relevance score judgments. Required.
Source code in ranx/data_structures/qrels.py
def add(self, q_id: str, doc_ids: List[str], scores: List[int]):
    """Add a query and its relevant documents with the associated relevance score judgment.

    Args:
        q_id (str): Query ID
        doc_ids (List[str]): List of Document IDs
        scores (List[int]): List of relevance score judgments
    """
    self.add_multi([q_id], [doc_ids], [scores])

add_multi(q_ids, doc_ids, scores)

Add multiple queries at once.

Parameters:

q_ids (List[str]): List of Query IDs. Required.
doc_ids (List[List[str]]): List of list of Document IDs. Required.
scores (List[List[int]]): List of list of relevance score judgments. Required.
Source code in ranx/data_structures/qrels.py
def add_multi(
    self,
    q_ids: List[str],
    doc_ids: List[List[str]],
    scores: List[List[int]],
):
    """Add multiple queries at once.

    Args:
        q_ids (List[str]): List of Query IDs
        doc_ids (List[List[str]]): List of list of Document IDs
        scores (List[List[int]]): List of list of relevance score judgments
    """
    q_ids = TypedList(q_ids)
    doc_ids = TypedList([TypedList(x) for x in doc_ids])
    scores = TypedList([TypedList(map(int, x)) for x in scores])

    self.qrels = add_and_sort(self.qrels, q_ids, doc_ids, scores)
    self.sorted = True

add_score(q_id, doc_id, score)

Add a (doc_id, score) pair to a query (or, change its value if it already exists).

Parameters:

q_id (str): Query ID. Required.
doc_id (str): Document ID. Required.
score (int): Relevance score judgment. Required.
Source code in ranx/data_structures/qrels.py
def add_score(self, q_id: str, doc_id: str, score: int):
    """Add a (doc_id, score) pair to a query (or, change its value if it already exists).

    Args:
        q_id (str): Query ID
        doc_id (str): Document ID
        score (int): Relevance score judgment
    """
    if self.qrels.get(q_id) is None:
        self.qrels[q_id] = TypedDict.empty(
            key_type=types.unicode_type,
            value_type=types.int64,
        )
    self.qrels[q_id][doc_id] = int(score)
    self.sorted = False

from_df(df, q_id_col='q_id', doc_id_col='doc_id', score_col='score') staticmethod

Convert a Pandas DataFrame to ranx.Qrels.

Parameters:

df (pandas.DataFrame): Qrels as Pandas DataFrame. Required.
q_id_col (str, optional): Query IDs column. Defaults to "q_id".
doc_id_col (str, optional): Document IDs column. Defaults to "doc_id".
score_col (str, optional): Relevance score judgments column. Defaults to "score".

Returns:

Qrels: ranx.Qrels

Source code in ranx/data_structures/qrels.py
@staticmethod
def from_df(
    df: pd.DataFrame,
    q_id_col: str = "q_id",
    doc_id_col: str = "doc_id",
    score_col: str = "score",
):
    """Convert a Pandas DataFrame to ranx.Qrels.

    Args:
        df (pandas.DataFrame): Qrels as Pandas DataFrame.
        q_id_col (str, optional): Query IDs column. Defaults to "q_id".
        doc_id_col (str, optional): Document IDs column. Defaults to "doc_id".
        score_col (str, optional): Relevance score judgments column. Defaults to "score".

    Returns:
        Qrels: ranx.Qrels
    """
    assert (
        df[q_id_col].dtype == "O"
    ), "DataFrame Query IDs column dtype must be `object` (string)"
    assert (
        df[doc_id_col].dtype == "O"
    ), "DataFrame Document IDs column dtype must be `object` (string)"
    assert (
        df[score_col].dtype == np.int64
    ), "DataFrame scores column dtype must be `int`"

    qrels_dict = (
        df.groupby(q_id_col)[[doc_id_col, score_col]]
        .apply(lambda g: {x[0]: x[1] for x in g.values.tolist()})
        .to_dict()
    )

    return Qrels.from_dict(qrels_dict)
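
For example, a minimal sketch (the DataFrame content is illustrative; the ID columns must hold strings and the score column integers):

import pandas as pd

qrels_df = pd.DataFrame(
    {
        "q_id": ["q_1", "q_1", "q_2"],
        "doc_id": ["d_12", "d_25", "d_11"],
        "score": [5, 3, 6],
    }
)

qrels = Qrels.from_df(qrels_df)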

from_dict(d) staticmethod

Convert a Python dictionary in the form of {q_id: {doc_id: score}} to ranx.Qrels.

Parameters:

d (Dict[str, Dict[str, int]]): Qrels as Python dictionary. Required.

Returns:

Qrels: ranx.Qrels

Source code in ranx/data_structures/qrels.py
@staticmethod
def from_dict(d: Dict[str, Dict[str, int]]):
    """Convert a Python dictionary in form of {q_id: {doc_id: score}} to ranx.Qrels.

    Args:
        d (Dict[str, Dict[str, int]]): Qrels as Python dictionary

    Returns:
        Qrels: ranx.Qrels
    """
    # Query IDs
    q_ids = list(d.keys())
    q_ids = TypedList(q_ids)

    # Doc IDs
    doc_ids = [list(doc.keys()) for doc in d.values()]
    max_len = max(len(y) for x in doc_ids for y in x)
    dtype = f"<U{max_len}"
    doc_ids = TypedList([np.array(x, dtype=dtype) for x in doc_ids])

    # Scores
    scores = [list(doc.values()) for doc in d.values()]
    scores = TypedList([np.array(x, dtype=int) for x in scores])

    qrels = Qrels()
    qrels.qrels = create_and_sort(q_ids, doc_ids, scores)
    qrels.sorted = True

    return qrels

from_file(path, kind=None) staticmethod

Parse a qrels file into ranx.Qrels. Supported formats are JSON, TREC qrels, and gzipped TREC qrels. Correct import behavior is inferred from the file extension: ".json" -> "json", ".trec" -> "trec", ".txt" -> "trec", ".gz" -> "gzipped trec". Use the "kind" argument to override this behavior.

Parameters:

path (str): File path. Required.
kind (str, optional): Kind of file to load, must be either "json" or "trec". Defaults to None.

Returns:

Qrels: ranx.Qrels

Source code in ranx/data_structures/qrels.py
@staticmethod
def from_file(path: str, kind: str = None):
    """Parse a qrels file into ranx.Qrels. Supported formats are JSON, TREC qrels, and gzipped TREC qrels. Correct import behavior is inferred from the file extension: ".json" -> "json", ".trec" -> "trec", ".txt" -> "trec", ".gz" -> "gzipped trec". Use the "kind" argument to override this behavior.

    Args:
        path (str): File path.
        kind (str, optional): Kind of file to load, must be either "json" or "trec".

    Returns:
        Qrels: ranx.Qrels
    """
    # Infer file extension -------------------------------------------------
    kind = get_file_kind(path, kind)

    # Load Qrels -----------------------------------------------------------
    if kind == "json":
        qrels = orjson.loads(open(path, "rb").read())
    else:
        qrels = defaultdict(dict)
        with gzip.open(path, "rt") if kind == "gz" else open(path) as f:
            for line in f:
                q_id, _, doc_id, rel = line.split()
                qrels[q_id][doc_id] = int(rel)

    return Qrels.from_dict(qrels)
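
For example (file paths are illustrative):

qrels = Qrels.from_file("qrels.trec")                   # TREC qrels, inferred from the extension
qrels = Qrels.from_file("judgments.dat", kind="trec")   # non-standard extension, format forced via kind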

from_ir_datasets(dataset_id) staticmethod

Convert ir-datasets qrels into ranx.Qrels. It automatically downloads data if missing.

Parameters:

dataset_id (str): ID of the dataset in ir-datasets. The ir-datasets catalog is available here: https://ir-datasets.com/index.html. Required.

Returns:

Qrels: ranx.Qrels

Source code in ranx/data_structures/qrels.py
@staticmethod
def from_ir_datasets(dataset_id: str):
    """Convert `ir-datasets` qrels into ranx.Qrels. It automatically downloads data if missing.
    Args:
        dataset_id (str): ID of the detaset in `ir-datasets`. `ir-datasets` catalog is available here: https://ir-datasets.com/index.html.
    Returns:
        Qrels: ranx.Qrels
    """
    qrels = Qrels.from_dict(ir_datasets.load(dataset_id).qrels_dict())
    qrels.name = dataset_id
    return qrels
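
For example, assuming "msmarco-passage/dev/small" is a valid ID in the ir-datasets catalog:

qrels = Qrels.from_ir_datasets("msmarco-passage/dev/small")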

from_parquet(path, q_id_col='q_id', doc_id_col='doc_id', score_col='score', pd_kwargs=None) staticmethod

Convert a Parquet file to ranx.Qrels.

Parameters:

path (str): File path. Required.
q_id_col (str, optional): Query IDs column. Defaults to "q_id".
doc_id_col (str, optional): Document IDs column. Defaults to "doc_id".
score_col (str, optional): Relevance score judgments column. Defaults to "score".
pd_kwargs (Dict[str, Any], optional): Additional arguments to pass to pandas.read_parquet (see https://pandas.pydata.org/docs/reference/api/pandas.read_parquet.html). Defaults to None.

Returns:

Qrels: ranx.Qrels

Source code in ranx/data_structures/qrels.py
@staticmethod
def from_parquet(
    path: str,
    q_id_col: str = "q_id",
    doc_id_col: str = "doc_id",
    score_col: str = "score",
    pd_kwargs: Dict[str, Any] = None,
):
    """Convert a Parquet file to ranx.Qrels.

    Args:
        path (str): File path.
        q_id_col (str, optional): Query IDs column. Defaults to "q_id".
        doc_id_col (str, optional): Document IDs column. Defaults to "doc_id".
        score_col (str, optional): Relevance score judgments column. Defaults to "score".
        pd_kwargs (Dict[str, Any], optional): Additional arguments to pass to `pandas.read_parquet` (see https://pandas.pydata.org/docs/reference/api/pandas.read_parquet.html). Defaults to None.

    Returns:
        Qrels: ranx.Qrels
    """
    pd_kwargs = {} if pd_kwargs is None else pd_kwargs

    return Qrels.from_df(
        df=pd.read_parquet(path, **pd_kwargs),
        q_id_col=q_id_col,
        doc_id_col=doc_id_col,
        score_col=score_col,
    )
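
For example (the file path is illustrative; column names fall back to the defaults shown above):

qrels = Qrels.from_parquet("qrels.parquet")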

get_doc_ids_and_scores()

Returns doc ids and relevance judgments.

Source code in ranx/data_structures/qrels.py
def get_doc_ids_and_scores(self):
    """Returns doc ids and relevance judgments."""
    return list(self.qrels.values())

get_query_ids()

Returns query ids.

Source code in ranx/data_structures/qrels.py
def get_query_ids(self):
    """Returns query ids."""
    return list(self.qrels.keys())

keys()

Returns query ids. Used internally.

Source code in ranx/data_structures/qrels.py
def keys(self):
    """Returns query ids. Used internally."""
    return self.qrels.keys()

save(path='qrels.json', kind=None)

Write qrels to path as JSON file, TREC qrels format, or Parquet file. File type is automatically inferred from the filename extension: ".json" -> "json", ".trec" -> "trec", ".txt" -> "trec", ".parq" -> "parquet", ".parquet" -> "parquet". Use the "kind" argument to override this behavior.

Parameters:

path (str, optional): Saving path. Defaults to "qrels.json".
kind (str, optional): Kind of file to save, must be either "json" or "trec". If None, it will be automatically inferred from the filename extension.
Source code in ranx/data_structures/qrels.py
def save(self, path: str = "qrels.json", kind: str = None) -> None:
    """Write `qrels` to `path` as JSON file, TREC qrels format, or Parquet file. File type is automatically inferred form the filename extension: ".json" -> "json", ".trec" -> "trec", ".txt" -> "trec", ".parq" -> "parquet", ".parquet" -> "parquet". Use the "kind" argument to override this behavior.

    Args:
        path (str, optional): Saving path. Defaults to "qrels.json".
        kind (str, optional): Kind of file to save, must be either "json" or "trec". If None, it will be automatically inferred from the filename extension.
    """
    # Infer file extension -------------------------------------------------
    kind = get_file_kind(path, kind)

    # Save Qrels -----------------------------------------------------------
    if kind == "json":
        with open(path, "wb") as f:
            f.write(orjson.dumps(self.to_dict(), option=orjson.OPT_INDENT_2))
    elif kind == "parquet":
        self.to_dataframe().to_parquet(path, index=False)
    else:
        with open(path, "w") as f:
            for i, q_id in enumerate(self.qrels.keys()):
                for j, doc_id in enumerate(self.qrels[q_id].keys()):
                    score = self.qrels[q_id][doc_id]
                    f.write(f"{q_id} 0 {doc_id} {score}")

                    if (
                        i != len(self.qrels.keys()) - 1
                        or j != len(self.qrels[q_id].keys()) - 1
                    ):
                        f.write("\n")

set_relevance_level(rel_lvl=1)

Sets relevance level.

Source code in ranx/data_structures/qrels.py
def set_relevance_level(self, rel_lvl: int = 1):
    """Sets relevance level."""
    self.qrels = _set_relevance_level(self.qrels, rel_lvl)

sort()

Sort. Used internally.

Source code in ranx/data_structures/qrels.py
def sort(self):
    """Sort. Used internally."""
    self.qrels = sort_dict_by_key(self.qrels)
    self.qrels = sort_dict_of_dict_by_value(self.qrels)
    self.sorted = True

to_dataframe()

Convert Qrels to Pandas DataFrame with the following columns: q_id, doc_id, and score.

Returns:

pandas.DataFrame: Qrels as Pandas DataFrame.

Source code in ranx/data_structures/qrels.py
def to_dataframe(self) -> pd.DataFrame:
    """Convert Qrels to Pandas DataFrame with the following columns: `q_id`, `doc_id`, and `score`.

    Returns:
        pandas.DataFrame: Qrels as Pandas DataFrame.
    """
    data = {"q_id": [], "doc_id": [], "score": []}

    for q_id in self.qrels:
        for doc_id in self.qrels[q_id]:
            data["q_id"].append(q_id)
            data["doc_id"].append(doc_id)
            data["score"].append(self.qrels[q_id][doc_id])

    return pd.DataFrame.from_dict(data)
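
For example, a minimal sketch:

qrels_df = qrels.to_dataframe()
print(qrels_df.columns.tolist())  # ['q_id', 'doc_id', 'score']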

to_dict()

Convert Qrels to Python dictionary.

Returns:

Dict[str, Dict[str, int]]: Qrels as Python dictionary

Source code in ranx/data_structures/qrels.py
def to_dict(self) -> Dict[str, Dict[str, int]]:
    """Convert Qrels to Python dictionary.

    Returns:
        Dict[str, Dict[str, int]]: Qrels as Python dictionary
    """
    d = defaultdict(dict)
    for q_id in self.keys():
        d[q_id] = dict(self[q_id])
    return d

to_typed_list()

Convert Qrels to Numba Typed List. Used internally.

Source code in ranx/data_structures/qrels.py
def to_typed_list(self):
    """Convert Qrels to Numba Typed List. Used internally."""
    if not self.sorted:
        self.sort()
    return to_typed_list(self.qrels)

Report

Bases: object

A Report instance is automatically generated as the result of a comparison. A Report provides a convenient way of inspecting comparison results and exporting them in LaTeX for your scientific publications.

# Compare different runs and perform statistical tests
report = compare(
    qrels=qrels,
    runs=[run_1, run_2, run_3, run_4, run_5],
    metrics=["map@100", "mrr@100", "ndcg@10"],
    max_p=0.01  # P-value threshold
)

print(report)
Output:
#    Model    MAP@100     MRR@100     NDCG@10
---  -------  ----------  ----------  ----------
a    model_1  0.3202ᵇ     0.3207ᵇ     0.3684ᵇᶜ
b    model_2  0.2332      0.2339      0.239
c    model_3  0.3082ᵇ     0.3089ᵇ     0.3295ᵇ
d    model_4  0.3664ᵃᵇᶜ   0.3668ᵃᵇᶜ   0.4078ᵃᵇᶜ
e    model_5  0.4053ᵃᵇᶜᵈ  0.4061ᵃᵇᶜᵈ  0.4512ᵃᵇᶜᵈ
print(report.to_latex())  # To get the LaTeX code
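
The underlying data (metric scores, p-values, and win/tie/loss counts) can also be exported with the methods documented below. A minimal sketch, where the file name is illustrative:

report_dict = report.to_dict()  # Report data as a Python dictionary
report.save("report.json")      # same data written as a JSON file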

Source code in ranx/data_structures/report.py
class Report(object):
    """A `Report` instance is automatically generated as the results of a comparison.
    A `Report` provide a convenient way of inspecting a comparison results and exporting those il LaTeX for your scientific publications.

    ```python
    # Compare different runs and perform statistical tests
    report = compare(
        qrels=qrels,
        runs=[run_1, run_2, run_3, run_4, run_5],
        metrics=["map@100", "mrr@100", "ndcg@10"],
        max_p=0.01  # P-value threshold
    )

    print(report)
    ```
    Output:
    ```
    #    Model    MAP@100     MRR@100     NDCG@10
    ---  -------  ----------  ----------  ----------
    a    model_1  0.3202ᵇ     0.3207ᵇ     0.3684ᵇᶜ
    b    model_2  0.2332      0.2339      0.239
    c    model_3  0.3082ᵇ     0.3089ᵇ     0.3295ᵇ
    d    model_4  0.3664ᵃᵇᶜ   0.3668ᵃᵇᶜ   0.4078ᵃᵇᶜ
    e    model_5  0.4053ᵃᵇᶜᵈ  0.4061ᵃᵇᶜᵈ  0.4512ᵃᵇᶜᵈ
    ```
    ```python
    print(report.to_latex())  # To get the LaTeX code
    ```
    """

    def __init__(
        self,
        model_names: List[str],
        results: Dict,
        comparisons: FrozensetDict,
        metrics: List[str],
        max_p: float,
        win_tie_loss: Dict[Tuple[str], Dict[str, Dict[str, int]]],
        rounding_digits: int = 3,
        show_percentages: bool = False,
        stat_test: str = "student",
    ):
        self.model_names = model_names
        self.results = results
        self.comparisons = comparisons
        self.metrics = metrics
        self.max_p = max_p
        self.win_tie_loss = win_tie_loss
        self.rounding_digits = rounding_digits
        self.show_percentages = show_percentages
        self.stat_test = stat_test

    def format_score(self, score):
        if self.show_percentages:
            new_score = round(score * 100, max(0, self.rounding_digits - 2))
            return "%.{n}f".format(n=self.rounding_digits - 2) % new_score
        new_score = round(score, self.rounding_digits)
        return "%.{n}f".format(n=self.rounding_digits) % new_score

    def get_superscript_for_table(self, model, metric):
        superscript = [
            super_chars[j]
            for j, _model in enumerate(self.model_names)
            if model != _model
            and self.comparisons[model, _model][metric]["significant"]
            and (self.results[model][metric] > self.results[_model][metric])
        ]
        return ("").join(superscript)

    def get_metric_label(self, m):
        if "-l" in m:
            m, rel_lvl = m.split("-l")
            if "@" in m:
                m_split = m.split("@")
                label = metric_labels[m_split[0]]
                cutoff = m_split[1]
                return f"{label}@{cutoff}-l{rel_lvl}"
            return f"{metric_labels[m]}-l{rel_lvl}"

        else:
            if "@" in m:
                m_split = m.split("@")
                label = metric_labels[m_split[0]]
                cutoff = m_split[1]
                return f"{label}@{cutoff}"
            return f"{metric_labels[m]}"

    def get_stat_test_label(self, stat_test: str):
        return stat_test_labels[stat_test]

    def to_table(self):
        tabular_data = []

        for i, (run, v) in enumerate(self.results.items()):
            data = [chars[i], run]

            for metric, score in v.items():
                formatted_score = self.format_score(score)
                superscript = self.get_superscript_for_table(run, metric)
                data.append(f"{formatted_score}{superscript}")

            tabular_data.append(data)

        headers = ["#", "Model"]

        for x in self.metrics:
            label = self.get_metric_label(x)
            headers.append(label)

        return tabulate(tabular_data=tabular_data, headers=headers)

    def get_superscript_for_latex(self, model, metric):
        superscript = [
            chars[j]
            for j, _model in enumerate(self.model_names)
            if (
                model != _model
                and self.comparisons[model, _model][metric]["significant"]
                and self.results[model][metric] > self.results[_model][metric]
            )
        ]
        return ("").join(superscript)

    def get_phantoms_for_latex(self, model, metric):
        phantoms = [
            chars[j]
            for j, _model in enumerate(self.model_names)
            if (
                model != _model
                and (
                    not self.comparisons[model, _model][metric]["significant"]
                    or not self.results[model][metric] > self.results[_model][metric]
                )
            )
        ]

        if len(phantoms) > 0:
            return ("").join(phantoms)

        return ""

    def to_latex(self) -> str:
        """Returns Report as LaTeX table.

        Returns:
            str: LaTeX table
        """
        best_scores = {}

        for m in self.metrics:
            best_model = None
            best_score = 0.0
            for model in self.model_names:
                if best_score < round(self.results[model][m], self.rounding_digits):
                    best_score = round(self.results[model][m], self.rounding_digits)
                    best_model = model
            best_scores[m] = best_model

        preamble = "========================\n% Add in preamble\n\\usepackage{graphicx}\n\\usepackage{booktabs}\n========================\n\n"

        table_prefix = (
            "% To change the table size, act on the resizebox argument `0.8`.\n"
            + """\\begin{table*}[ht]\n\centering\n\caption{\nOverall effectiveness of the models.\nThe best results are highlighted in boldface.\nSuperscripts denote significant differences in """
            + self.get_stat_test_label(self.stat_test)
            + """ with $p \le """
            + str(self.max_p)
            + "$.\n}\n\\resizebox{0.8\\textwidth}{!}{"
            + "\n\\begin{tabular}{c|l"
            + "|c" * len(self.metrics)
            + "}"
            + "\n\\toprule"
            + "\n\\textbf{\#}"
            + "\n& \\textbf{Model}"
            + "".join(
                [f"\n& \\textbf{{{self.get_metric_label(m)}}}" for m in self.metrics]
            )
            + " \\\\ \n\midrule"
        )

        table_content = []

        for i, model in enumerate(self.model_names):
            table_raw = f"{chars[i]} &\n" + f"{model} &\n"
            scores = []

            for m in self.metrics:
                score = self.format_score(self.results[model][m])
                score = (
                    f"\\textbf{{{score}}}" if best_scores[m] == model else f"{score}"
                )
                superscript = self.get_superscript_for_latex(model, m)
                phantoms = self.get_phantoms_for_latex(model, m)
                scores.append(
                    f"{score}$^{{{superscript}}}$\\hphantom{{$^{{{phantoms}}}$}} &"
                )

            scores[-1] = scores[-1][:-1]  # Remove `&` at the end

            table_raw += "\n".join(scores) + "\\\\"
            table_content.append(table_raw)

        table_content = (
            "\n".join(table_content).replace("_", "\\_").replace("$^{}$", "")
        )

        table_suffix = (
            "\\bottomrule\n\end{tabular}\n}\n\label{tab:results}\n\end{table*}"
        )

        return (
            preamble + "\n" + table_prefix + "\n" + table_content + "\n" + table_suffix
        )

    def to_dict(self) -> Dict:
        """Returns the Report data as a Python dictionary.

        ```python
        {
            "stat_test": "fisher"
            # metrics and model_names allows to read the report without
            # inspecting the json to discover the used metrics and
            # the compared models
            "metrics": ["metric_1", "metric_2", ...],
            "model_names": ["model_1", "model_2", ...],
            #
            "model_1": {
                "scores": {
                    "metric_1": ...,
                    "metric_2": ...,
                    ...
                },
                "comparisons": {
                    "model_2": {
                        "metric_1": ...,  # p-value
                        "metric_2": ...,  # p-value
                        ...
                    },
                    ...
                },
                "win_tie_loss": {
                    "model_2": {
                        "W": ...,
                        "T": ...,
                        "L": ...,
                    },
                    ...
                },
            },
            ...
        }
        ```

        Returns:
            Dict: Report data as a Python dictionary
        """

        d = {
            "stat_test": self.stat_test,
            "metrics": self.metrics,
            "model_names": self.model_names,
        }

        for m1 in self.model_names:
            d[m1] = {}
            d[m1]["scores"] = self.results[m1]
            d[m1]["comparisons"] = {}
            d[m1]["win_tie_loss"] = {}

            for m2 in self.model_names:
                if m1 != m2:
                    d[m1]["comparisons"][m2] = {}
                    d[m1]["win_tie_loss"][m2] = {}

                    for metric in self.metrics:
                        d[m1]["comparisons"][m2][metric] = self.comparisons[{m1, m2}][
                            metric
                        ]["p_value"]
                        d[m1]["win_tie_loss"][m2][metric] = self.win_tie_loss[(m1, m2)][
                            metric
                        ]

        return d

    def save(self, path: str):
        """Save the Report data as JSON file.
        See [**Report.to_dict**][ranx.report.to_dict] for more details.

        Args:
            path (str): Saving path
        """
        with open(path, "w") as f:
            f.write(json.dumps(self.to_dict(), indent=4))

    def print_results(self):
        """Print report data."""
        print(json.dumps(self.results, indent=4))

    def __repr__(self):
        return self.to_table()

    def __str__(self):
        return self.to_table()

print_results()

Print report data.

Source code in ranx/data_structures/report.py
def print_results(self):
    """Print report data."""
    print(json.dumps(self.results, indent=4))

save(path)

Save the Report data as a JSON file. See Report.to_dict for more details.

Parameters:

path (str): Saving path. Required.
Source code in ranx/data_structures/report.py
def save(self, path: str):
    """Save the Report data as JSON file.
    See [**Report.to_dict**][ranx.report.to_dict] for more details.

    Args:
        path (str): Saving path
    """
    with open(path, "w") as f:
        f.write(json.dumps(self.to_dict(), indent=4))

to_dict()

Returns the Report data as a Python dictionary.

{
    "stat_test": "fisher"
    # metrics and model_names allows to read the report without
    # inspecting the json to discover the used metrics and
    # the compared models
    "metrics": ["metric_1", "metric_2", ...],
    "model_names": ["model_1", "model_2", ...],
    #
    "model_1": {
        "scores": {
            "metric_1": ...,
            "metric_2": ...,
            ...
        },
        "comparisons": {
            "model_2": {
                "metric_1": ...,  # p-value
                "metric_2": ...,  # p-value
                ...
            },
            ...
        },
        "win_tie_loss": {
            "model_2": {
                "W": ...,
                "T": ...,
                "L": ...,
            },
            ...
        },
    },
    ...
}

Returns:

Dict: Report data as a Python dictionary

Source code in ranx/data_structures/report.py
def to_dict(self) -> Dict:
    """Returns the Report data as a Python dictionary.

    ```python
    {
        "stat_test": "fisher"
        # metrics and model_names allows to read the report without
        # inspecting the json to discover the used metrics and
        # the compared models
        "metrics": ["metric_1", "metric_2", ...],
        "model_names": ["model_1", "model_2", ...],
        #
        "model_1": {
            "scores": {
                "metric_1": ...,
                "metric_2": ...,
                ...
            },
            "comparisons": {
                "model_2": {
                    "metric_1": ...,  # p-value
                    "metric_2": ...,  # p-value
                    ...
                },
                ...
            },
            "win_tie_loss": {
                "model_2": {
                    "W": ...,
                    "T": ...,
                    "L": ...,
                },
                ...
            },
        },
        ...
    }
    ```

    Returns:
        Dict: Report data as a Python dictionary
    """

    d = {
        "stat_test": self.stat_test,
        "metrics": self.metrics,
        "model_names": self.model_names,
    }

    for m1 in self.model_names:
        d[m1] = {}
        d[m1]["scores"] = self.results[m1]
        d[m1]["comparisons"] = {}
        d[m1]["win_tie_loss"] = {}

        for m2 in self.model_names:
            if m1 != m2:
                d[m1]["comparisons"][m2] = {}
                d[m1]["win_tie_loss"][m2] = {}

                for metric in self.metrics:
                    d[m1]["comparisons"][m2][metric] = self.comparisons[{m1, m2}][
                        metric
                    ]["p_value"]
                    d[m1]["win_tie_loss"][m2][metric] = self.win_tie_loss[(m1, m2)][
                        metric
                    ]

    return d

to_latex()

Returns Report as LaTeX table.

Returns:

str: LaTeX table

Source code in ranx/data_structures/report.py
def to_latex(self) -> str:
    """Returns Report as LaTeX table.

    Returns:
        str: LaTeX table
    """
    best_scores = {}

    for m in self.metrics:
        best_model = None
        best_score = 0.0
        for model in self.model_names:
            if best_score < round(self.results[model][m], self.rounding_digits):
                best_score = round(self.results[model][m], self.rounding_digits)
                best_model = model
        best_scores[m] = best_model

    preamble = "========================\n% Add in preamble\n\\usepackage{graphicx}\n\\usepackage{booktabs}\n========================\n\n"

    table_prefix = (
        "% To change the table size, act on the resizebox argument `0.8`.\n"
        + """\\begin{table*}[ht]\n\centering\n\caption{\nOverall effectiveness of the models.\nThe best results are highlighted in boldface.\nSuperscripts denote significant differences in """
        + self.get_stat_test_label(self.stat_test)
        + """ with $p \le """
        + str(self.max_p)
        + "$.\n}\n\\resizebox{0.8\\textwidth}{!}{"
        + "\n\\begin{tabular}{c|l"
        + "|c" * len(self.metrics)
        + "}"
        + "\n\\toprule"
        + "\n\\textbf{\#}"
        + "\n& \\textbf{Model}"
        + "".join(
            [f"\n& \\textbf{{{self.get_metric_label(m)}}}" for m in self.metrics]
        )
        + " \\\\ \n\midrule"
    )

    table_content = []

    for i, model in enumerate(self.model_names):
        table_raw = f"{chars[i]} &\n" + f"{model} &\n"
        scores = []

        for m in self.metrics:
            score = self.format_score(self.results[model][m])
            score = (
                f"\\textbf{{{score}}}" if best_scores[m] == model else f"{score}"
            )
            superscript = self.get_superscript_for_latex(model, m)
            phantoms = self.get_phantoms_for_latex(model, m)
            scores.append(
                f"{score}$^{{{superscript}}}$\\hphantom{{$^{{{phantoms}}}$}} &"
            )

        scores[-1] = scores[-1][:-1]  # Remove `&` at the end

        table_raw += "\n".join(scores) + "\\\\"
        table_content.append(table_raw)

    table_content = (
        "\n".join(table_content).replace("_", "\\_").replace("$^{}$", "")
    )

    table_suffix = (
        "\\bottomrule\n\end{tabular}\n}\n\label{tab:results}\n\end{table*}"
    )

    return (
        preamble + "\n" + table_prefix + "\n" + table_content + "\n" + table_suffix
    )
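
For example, a minimal sketch that writes the generated table to a file (the file name is illustrative):

with open("results_table.tex", "w") as f:
    f.write(report.to_latex())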

Run

Bases: object

Run stores the relevance scores estimated by the model under evaluation. The preferred way of creating a Run instance is by converting a Python dictionary, as follows:

run_dict = {
    "q_1": {
        "d_1": 1.5,
        "d_2": 2.6,
    },
    "q_2": {
        "d_3": 2.8,
        "d_2": 1.2,
        "d_5": 3.1,
    },
}

run = Run(run_dict, name="bm25")

run = Run()  # Creates an empty Run with no name
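
A Run can also be loaded from a file and aligned with a given Qrels using the methods documented below. A minimal sketch, where the file path is illustrative:

run = Run.from_file("bm25.trec")  # TREC run, format inferred from the extension

# Add empty results for queries missing from the run and drop queries not in the qrels
run = run.make_comparable(qrels)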
Source code in ranx/data_structures/run.py
class Run(object):
    """`Run` stores the relevance scores estimated by the model under evaluation.<\br>
    The preferred way for creating a `Run` instance is converting a Python dictionary as follows:

    ```python
    run_dict = {
        "q_1": {
            "d_1": 1.5,
            "d_2": 2.6,
        },
        "q_2": {
            "d_3": 2.8,
            "d_2": 1.2,
            "d_5": 3.1,
        },
    }

    run = Run(run_dict, name="bm25")

    run = Run()  # Creates an empty Run with no name
    ```
    """

    def __init__(self, run: Dict[str, Dict[str, float]] = None, name: str = None):
        if run is None:
            self.run = TypedDict.empty(
                key_type=types.unicode_type,
                value_type=types.DictType(types.unicode_type, types.float64),
            )
            self.sorted = False
        else:
            # Query IDs
            q_ids = list(run.keys())
            q_ids = TypedList(q_ids)

            # Doc IDs
            doc_ids = [list(doc.keys()) for doc in run.values()]
            max_len = max(len(y) for x in doc_ids for y in x)
            dtype = f"<U{max_len}"
            doc_ids = TypedList([np.array(x, dtype=dtype) for x in doc_ids])

            # Scores
            scores = [list(doc.values()) for doc in run.values()]
            scores = TypedList([np.array(x, dtype=float) for x in scores])
            self.run = create_and_sort(q_ids, doc_ids, scores)
            self.sorted = True

        self.name = name
        self.metadata = {}
        self.scores = defaultdict(dict)
        self.mean_scores = {}
        self.std_scores = {}

    def keys(self):
        """Returns query ids. Used internally."""
        return self.run.keys()

    def add_score(self, q_id: str, doc_id: str, score: int):
        """Add a (doc_id, score) pair to a query (or, change its value if it already exists).

        Args:
            q_id (str): Query ID
            doc_id (str): Document ID
            score (int): Relevance score
        """
        if self.run.get(q_id) is None:
            self.run[q_id] = TypedDict.empty(
                key_type=types.unicode_type,
                value_type=types.float64,
            )
        self.run[q_id][doc_id] = float(score)
        self.sorted = False

    def add(self, q_id: str, doc_ids: List[str], scores: List[float]):
        """Add a query and its relevant documents with the associated relevance score.

        Args:
            q_id (str): Query ID
            doc_ids (List[str]): List of Document IDs
            scores (List[float]): List of relevance scores
        """
        self.add_multi([q_id], [doc_ids], [scores])

    def add_multi(
        self,
        q_ids: List[str],
        doc_ids: List[List[str]],
        scores: List[List[float]],
    ):
        """Add multiple queries at once.

        Args:
            q_ids (List[str]): List of Query IDs
            doc_ids (List[List[str]]): List of list of Document IDs
            scores (List[List[float]]): List of list of relevance scores
        """
        q_ids = TypedList(q_ids)
        doc_ids = TypedList([TypedList(x) for x in doc_ids])
        scores = TypedList([TypedList(map(float, x)) for x in scores])

        self.run = add_and_sort(self.run, q_ids, doc_ids, scores)
        self.sorted = True

    def get_query_ids(self):
        """Returns query ids."""
        return list(self.run.keys())

    def get_doc_ids_and_scores(self):
        """Returns doc ids and relevance scores."""
        return list(self.run.values())

    # Sort in place
    def sort(self):
        """Sort. Used internally."""
        self.run = sort_dict_by_key(self.run)
        self.run = sort_dict_of_dict_by_value(self.run)
        self.sorted = True

    def make_comparable(self, qrels: Qrels):
        """Adds empty results for queries missing from the run and removes those not appearing in qrels."""
        # Adds empty results for missing queries
        for q_id in qrels.qrels:
            if q_id not in self.run:
                self.run[q_id] = create_empty_results_dict()

        # Remove results for additional queries
        for q_id in self.run:
            if q_id not in qrels.qrels:
                del self.run[q_id]

        self.sort()

        return self

    def to_typed_list(self):
        """Convert Run to Numba Typed List. Used internally."""
        if not self.sorted:
            self.sort()
        return to_typed_list(self.run)

    def to_dict(self):
        """Convert Run to Python dictionary.

        Returns:
            Dict[str, Dict[str, float]]: Run as Python dictionary
        """
        d = defaultdict(dict)
        for q_id in self.keys():
            d[q_id] = dict(self[q_id])
        return d

    def to_dataframe(self) -> pd.DataFrame:
        """Convert Run to Pandas DataFrame with the following columns: `q_id`, `doc_id`, and `score`.

        Returns:
            pandas.DataFrame: Run as Pandas DataFrame.
        """
        data = {"q_id": [], "doc_id": [], "score": []}

        for q_id in self.run:
            for doc_id in self.run[q_id]:
                data["q_id"].append(q_id)
                data["doc_id"].append(doc_id)
                data["score"].append(self.run[q_id][doc_id])

        return pd.DataFrame.from_dict(data)

    def save(self, path: str = "run.json", kind: str = None):
        """Write `run` to `path` as JSON file, TREC run, LZ4 file, or Parquet file. File type is automatically inferred form the filename extension: ".json" -> "json", ".trec" -> "trec", ".txt" -> "trec", and ".lz4" -> "lz4", ".parq" -> "parquet", ".parquet" -> "parquet". Use the "kind" argument to override this behavior.

        Args:
            path (str, optional): Saving path. Defaults to "run.json".
            kind (str, optional): Kind of file to save, must be either "json", "trec", or "ranxhub". If None, it will be automatically inferred from the filename extension.
        """
        # Infer file extension -------------------------------------------------
        kind = get_file_kind(path, kind)

        # Save Run -------------------------------------------------------------
        if not self.sorted:
            self.sort()

        if kind == "json":
            save_json(self.to_dict(), path)
        elif kind == "lz4":
            save_lz4(self.to_dict(), path)
        elif kind == "parquet":
            self.to_dataframe().to_parquet(path, index=False)
        else:
            with open(path, "w") as f:
                for i, q_id in enumerate(self.run.keys()):
                    for rank, doc_id in enumerate(self.run[q_id].keys()):
                        score = self.run[q_id][doc_id]
                        f.write(f"{q_id} Q0 {doc_id} {rank+1} {score} {self.name}")

                        if (
                            i != len(self.run.keys()) - 1
                            or rank != len(self.run[q_id].keys()) - 1
                        ):
                            f.write("\n")

    @staticmethod
    def from_dict(d: Dict[str, Dict[str, float]], name: str = None):
        """Convert a Python dictionary in form of {q_id: {doc_id: score}} to ranx.Run.

        Args:
            d (Dict[str, Dict[str, float]]): Run as Python dictionary
            name (str, optional): Run name. Defaults to None.

        Returns:
            Run: ranx.Run
        """

        # Query IDs
        q_ids = list(d.keys())
        q_ids = TypedList(q_ids)

        # Doc IDs
        doc_ids = [list(doc.keys()) for doc in d.values()]
        max_len = max(len(y) for x in doc_ids for y in x)
        dtype = f"<U{max_len}"
        doc_ids = TypedList([np.array(x, dtype=dtype) for x in doc_ids])

        # Scores
        scores = [list(doc.values()) for doc in d.values()]
        scores = TypedList([np.array(x, dtype=float) for x in scores])

        run = Run()
        run.run = create_and_sort(q_ids, doc_ids, scores)
        run.sorted = True
        run.name = name

        return run

    @staticmethod
    def from_file(path: str, kind: str = None, name: str = None):
        """Parse a run file into ranx.Run. Supported formats are JSON, TREC run, gzipped TREC run, and LZ4. Correct import behavior is inferred from the file extension: ".json" -> "json", ".trec" -> "trec", ".txt" -> "trec", ".gz" -> "gzipped trec", ".lz4" -> "lz4". Use the "kind" argument to override this behavior.

        Args:
            path (str): File path.
            kind (str, optional): Kind of file to load, must be one of "json", "trec", "gz", or "lz4". If None, it will be automatically inferred from the filename extension.
            name (str, optional): Run name. Defaults to None.

        Returns:
            Run: ranx.Run
        """
        # Infer file extension -------------------------------------------------
        kind = get_file_kind(path, kind)

        # Load Run -------------------------------------------------------------
        if kind == "json":
            run = load_json(path)
        elif kind == "lz4":
            run = load_lz4(path)
        else:
            run = defaultdict(dict)
            with gzip.open(path, "rt") if kind == "gz" else open(path) as f:
                for line in f:
                    q_id, _, doc_id, _, rel, run_name = line.split()
                    run[q_id][doc_id] = float(rel)
                    if name is None:
                        name = run_name

        run = Run.from_dict(run, name)

        return run

    @staticmethod
    def from_df(
        df: pd.DataFrame,
        q_id_col: str = "q_id",
        doc_id_col: str = "doc_id",
        score_col: str = "score",
        name: str = None,
    ):
        """Convert a Pandas DataFrame to ranx.Run.

        Args:
            df (pd.DataFrame): Run as Pandas DataFrame
            q_id_col (str, optional): Query IDs column. Defaults to "q_id".
            doc_id_col (str, optional): Document IDs column. Defaults to "doc_id".
            score_col (str, optional): Relevance scores column. Defaults to "score".
            name (str, optional): Run name. Defaults to None.

        Returns:
            Run: ranx.Run
        """
        assert (
            df[q_id_col].dtype == "O"
        ), "DataFrame Query IDs column dtype must be `object` (string)"
        assert (
            df[doc_id_col].dtype == "O"
        ), "DataFrame Document IDs column dtype must be `object` (string)"
        assert (
            df[score_col].dtype == np.float64
        ), "DataFrame scores column dtype must be `float`"

        run_py = (
            df.groupby(q_id_col)[[doc_id_col, score_col]]
            .apply(lambda g: {x[0]: x[1] for x in g.values.tolist()})
            .to_dict()
        )

        return Run.from_dict(run_py, name)

    @staticmethod
    def from_parquet(
        path: str,
        q_id_col: str = "q_id",
        doc_id_col: str = "doc_id",
        score_col: str = "score",
        pd_kwargs: Dict[str, Any] = None,
        name: str = None,
    ):
        """Convert a Parquet file to ranx.Run.

        Args:
            path (str): File path.
            q_id_col (str, optional): Query IDs column. Defaults to "q_id".
            doc_id_col (str, optional): Document IDs column. Defaults to "doc_id".
            score_col (str, optional): Relevance scores column. Defaults to "score".
            pd_kwargs (Dict[str, Any], optional): Additional arguments to pass to `pandas.read_parquet` (see https://pandas.pydata.org/docs/reference/api/pandas.read_parquet.html). Defaults to None.
            name (str, optional): Run name. Defaults to None.

        Returns:
            Run: ranx.Run
        """
        pd_kwargs = {} if pd_kwargs is None else pd_kwargs

        return Run.from_df(
            df=pd.read_parquet(path, **pd_kwargs),
            q_id_col=q_id_col,
            doc_id_col=doc_id_col,
            score_col=score_col,
            name=name,
        )

    @staticmethod
    def from_ranxhub(id: str):
        """Download and load a ranx.Run from ranxhub.

        Args:
            id (str): Run ID.

        Returns:
            Run: ranx.Run
        """
        content = download(id)

        run = Run.from_dict(content["run"])
        run.name = content["metadata"]["run"]["name"]
        run.metadata = content["metadata"]

        return run

    @property
    def size(self):
        return len(self.run)

    def __getitem__(self, q_id):
        return dict(self.run[q_id])

    def __len__(self) -> int:
        return len(self.run)

    def __repr__(self):
        return self.run.__repr__()

    def __str__(self):
        return self.run.__str__()

add(q_id, doc_ids, scores)

Add a query and its relevant documents with the associated relevance score.

Parameters:

Name Type Description Default
q_id str

Query ID

required
doc_ids List[str]

List of Document IDs

required
scores List[float]

List of relevance scores

required
Source code in ranx/data_structures/run.py
def add(self, q_id: str, doc_ids: List[str], scores: List[float]):
    """Add a query and its relevant documents with the associated relevance score.

    Args:
        q_id (str): Query ID
        doc_ids (List[str]): List of Document IDs
        scores (List[float]): List of relevance scores
    """
    self.add_multi([q_id], [doc_ids], [scores])
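A minimal usage sketch of add; the query ID, document IDs, and scores below are invented for illustration:

from ranx import Run

run = Run()        # start from an empty run
run.name = "bm25"  # optional
run.add(
    q_id="q_1",
    doc_ids=["d_12", "d_25", "d_3"],
    scores=[9.5, 7.2, 4.1],
)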

add_multi(q_ids, doc_ids, scores)

Add multiple queries at once.

Parameters:

Name Type Description Default
q_ids List[str]

List of Query IDs

required
doc_ids List[List[str]]

List of list of Document IDs

required
scores List[List[float]]

List of list of relevance scores

required
Source code in ranx/data_structures/run.py
def add_multi(
    self,
    q_ids: List[str],
    doc_ids: List[List[str]],
    scores: List[List[float]],
):
    """Add multiple queries at once.

    Args:
        q_ids (List[str]): List of Query IDs
        doc_ids (List[List[str]]): List of list of Document IDs
        scores (List[List[float]]): List of list of relevance scores
    """
    q_ids = TypedList(q_ids)
    doc_ids = TypedList([TypedList(x) for x in doc_ids])
    scores = TypedList([TypedList(map(float, x)) for x in scores])

    self.run = add_and_sort(self.run, q_ids, doc_ids, scores)
    self.sorted = True
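A sketch of adding several queries in one call; the IDs and scores are invented for illustration:

from ranx import Run

run = Run()
run.add_multi(
    q_ids=["q_1", "q_2"],
    doc_ids=[["d_12", "d_25"], ["d_11", "d_2"]],
    scores=[[9.5, 7.2], [8.1, 5.6]],
)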

add_score(q_id, doc_id, score)

Add a (doc_id, score) pair to a query (or, change its value if it already exists).

Parameters:

Name Type Description Default
q_id str

Query ID

required
doc_id str

Document ID

required
score int

Relevance score

required
Source code in ranx/data_structures/run.py
def add_score(self, q_id: str, doc_id: str, score: int):
    """Add a (doc_id, score) pair to a query (or, change its value if it already exists).

    Args:
        q_id (str): Query ID
        doc_id (str): Document ID
        score (int): Relevance score
    """
    if self.run.get(q_id) is None:
        self.run[q_id] = TypedDict.empty(
            key_type=types.unicode_type,
            value_type=types.float64,
        )
    self.run[q_id][doc_id] = float(score)
    self.sorted = False

from_df(df, q_id_col='q_id', doc_id_col='doc_id', score_col='score', name=None) staticmethod

Convert a Pandas DataFrame to ranx.Run.

Parameters:

Name Type Description Default
df DataFrame

Run as Pandas DataFrame

required
q_id_col str

Query IDs column. Defaults to "q_id".

'q_id'
doc_id_col str

Document IDs column. Defaults to "doc_id".

'doc_id'
score_col str

Relevance scores column. Defaults to "score".

'score'
name str

Run name. Defaults to None.

None

Returns:

Name Type Description
Run

ranx.Run

Source code in ranx/data_structures/run.py
@staticmethod
def from_df(
    df: pd.DataFrame,
    q_id_col: str = "q_id",
    doc_id_col: str = "doc_id",
    score_col: str = "score",
    name: str = None,
):
    """Convert a Pandas DataFrame to ranx.Run.

    Args:
        df (pd.DataFrame): Run as Pandas DataFrame
        q_id_col (str, optional): Query IDs column. Defaults to "q_id".
        doc_id_col (str, optional): Document IDs column. Defaults to "doc_id".
        score_col (str, optional): Relevance scores column. Defaults to "score".
        name (str, optional): Run name. Defaults to None.

    Returns:
        Run: ranx.Run
    """
    assert (
        df[q_id_col].dtype == "O"
    ), "DataFrame Query IDs column dtype must be `object` (string)"
    assert (
        df[doc_id_col].dtype == "O"
    ), "DataFrame Document IDs column dtype must be `object` (string)"
    assert (
        df[score_col].dtype == np.float64
    ), "DataFrame scores column dtype must be `float`"

    run_py = (
        df.groupby(q_id_col)[[doc_id_col, score_col]]
        .apply(lambda g: {x[0]: x[1] for x in g.values.tolist()})
        .to_dict()
    )

    return Run.from_dict(run_py, name)
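A sketch of building a Run from a DataFrame; per the assertions above, the ID columns must hold strings and the score column floats (the data is invented):

import pandas as pd
from ranx import Run

df = pd.DataFrame(
    {
        "q_id": ["q_1", "q_1", "q_2"],
        "doc_id": ["d_12", "d_25", "d_11"],
        "score": [0.5, 0.3, 0.7],
    }
)
run = Run.from_df(df, name="bm25")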

from_dict(d, name=None) staticmethod

Convert a Python dictionary in the form of {q_id: {doc_id: score}} to ranx.Run.

Parameters:

Name Type Description Default
d Dict[str, Dict[str, float]]

Run as Python dictionary

required
name str

Run name. Defaults to None.

None

Returns:

Name Type Description
Run

ranx.Run

Source code in ranx/data_structures/run.py
@staticmethod
def from_dict(d: Dict[str, Dict[str, float]], name: str = None):
    """Convert a Python dictionary in form of {q_id: {doc_id: score}} to ranx.Run.

    Args:
        d (Dict[str, Dict[str, float]]): Run as Python dictionary
        name (str, optional): Run name. Defaults to None.

    Returns:
        Run: ranx.Run
    """

    # Query IDs
    q_ids = list(d.keys())
    q_ids = TypedList(q_ids)

    # Doc IDs
    doc_ids = [list(doc.keys()) for doc in d.values()]
    max_len = max(len(y) for x in doc_ids for y in x)
    dtype = f"<U{max_len}"
    doc_ids = TypedList([np.array(x, dtype=dtype) for x in doc_ids])

    # Scores
    scores = [list(doc.values()) for doc in d.values()]
    scores = TypedList([np.array(x, dtype=float) for x in scores])

    run = Run()
    run.run = create_and_sort(q_ids, doc_ids, scores)
    run.sorted = True
    run.name = name

    return run
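A sketch mirroring the dictionary layout expected by from_dict; the IDs and scores are invented:

from ranx import Run

run_dict = {
    "q_1": {"d_12": 0.9, "d_25": 0.8},
    "q_2": {"d_11": 0.9, "d_2": 0.7},
}
run = Run.from_dict(run_dict, name="bm25")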

from_file(path, kind=None, name=None) staticmethod

Parse a run file into ranx.Run. Supported formats are JSON, TREC run, gzipped TREC run, and LZ4. Correct import behavior is inferred from the file extension: ".json" -> "json", ".trec" -> "trec", ".txt" -> "trec", ".gz" -> "gzipped trec", ".lz4" -> "lz4". Use the "kind" argument to override this behavior.

Parameters:

Name Type Description Default
path str

File path.

required
kind str

Kind of file to load, must be one of "json", "trec", "gz", or "lz4". If None, it will be automatically inferred from the filename extension.

None
name str

Run name. Defaults to None.

None

Returns:

Name Type Description
Run

ranx.Run

Source code in ranx/data_structures/run.py
@staticmethod
def from_file(path: str, kind: str = None, name: str = None):
    """Parse a run file into ranx.Run. Supported formats are JSON, TREC run, gzipped TREC run, and LZ4. Correct import behavior is inferred from the file extension: ".json" -> "json", ".trec" -> "trec", ".txt" -> "trec", ".gz" -> "gzipped trec", ".lz4" -> "lz4". Use the "kind" argument to override this behavior.

    Args:
        path (str): File path.
        kind (str, optional): Kind of file to load, must be one of "json", "trec", "gz", or "lz4". If None, it will be automatically inferred from the filename extension.
        name (str, optional): Run name. Defaults to None.

    Returns:
        Run: ranx.Run
    """
    # Infer file extension -------------------------------------------------
    kind = get_file_kind(path, kind)

    # Load Run -------------------------------------------------------------
    if kind == "json":
        run = load_json(path)
    elif kind == "lz4":
        run = load_lz4(path)
    else:
        run = defaultdict(dict)
        with gzip.open(path, "rt") if kind == "gz" else open(path) as f:
            for line in f:
                q_id, _, doc_id, _, rel, run_name = line.split()
                run[q_id][doc_id] = float(rel)
                if name is None:
                    name = run_name

    run = Run.from_dict(run, name)

    return run
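For illustration, loading runs from disk; the file paths below are placeholders:

from ranx import Run

run = Run.from_file("path/to/run.json")              # kind inferred from the extension
run = Run.from_file("path/to/run.txt", kind="trec")  # kind given explicitly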

from_parquet(path, q_id_col='q_id', doc_id_col='doc_id', score_col='score', pd_kwargs=None, name=None) staticmethod

Convert a Parquet file to ranx.Run.

Parameters:

Name Type Description Default
path str

File path.

required
q_id_col str

Query IDs column. Defaults to "q_id".

'q_id'
doc_id_col str

Document IDs column. Defaults to "doc_id".

'doc_id'
score_col str

Relevance scores column. Defaults to "score".

'score'
pd_kwargs Dict[str, Any]

Additional arguments to pass to pandas.read_parquet (see https://pandas.pydata.org/docs/reference/api/pandas.read_parquet.html). Defaults to None.

None
name str

Run name. Defaults to None.

None

Returns:

Name Type Description
Run

ranx.Run

Source code in ranx/data_structures/run.py
@staticmethod
def from_parquet(
    path: str,
    q_id_col: str = "q_id",
    doc_id_col: str = "doc_id",
    score_col: str = "score",
    pd_kwargs: Dict[str, Any] = None,
    name: str = None,
):
    """Convert a Parquet file to ranx.Run.

    Args:
        path (str): File path.
        q_id_col (str, optional): Query IDs column. Defaults to "q_id".
        doc_id_col (str, optional): Document IDs column. Defaults to "doc_id".
        score_col (str, optional): Relevance scores column. Defaults to "score".
        pd_kwargs (Dict[str, Any], optional): Additional arguments to pass to `pandas.read_parquet` (see https://pandas.pydata.org/docs/reference/api/pandas.read_parquet.html). Defaults to None.
        name (str, optional): Run name. Defaults to None.

    Returns:
        Run: ranx.Run
    """
    pd_kwargs = {} if pd_kwargs is None else pd_kwargs

    return Run.from_df(
        df=pd.read_parquet(path, **pd_kwargs),
        q_id_col=q_id_col,
        doc_id_col=doc_id_col,
        score_col=score_col,
        name=name,
    )
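A sketch with a placeholder path; pd_kwargs is forwarded to pandas.read_parquet:

from ranx import Run

run = Run.from_parquet(
    "path/to/run.parquet",
    pd_kwargs={"engine": "pyarrow"},  # optional, forwarded to pandas.read_parquet
    name="bm25",
)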

from_ranxhub(id) staticmethod

Download and load a ranx.Run from ranxhub.

Parameters:

Name Type Description Default
id str

Run ID.

required

Returns:

Name Type Description
Run

ranx.Run

Source code in ranx/data_structures/run.py
@staticmethod
def from_ranxhub(id: str):
    """Download and load a ranx.Run from ranxhub.

    Args:
        id (str): Run ID.

    Returns:
        Run: ranx.Run
    """
    content = download(id)

    run = Run.from_dict(content["run"])
    run.name = content["metadata"]["run"]["name"]
    run.metadata = content["metadata"]

    return run
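A sketch with a placeholder ID; replace it with the ID of an actual ranxhub run:

from ranx import Run

run = Run.from_ranxhub("my-collection/my-run")  # placeholder ID, not a real ranxhub entry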

get_doc_ids_and_scores()

Returns doc ids and relevance scores.

Source code in ranx/data_structures/run.py
def get_doc_ids_and_scores(self):
    """Returns doc ids and relevance scores."""
    return list(self.run.values())

get_query_ids()

Returns query ids.

Source code in ranx/data_structures/run.py
def get_query_ids(self):
    """Returns query ids."""
    return list(self.run.keys())

keys()

Returns query ids. Used internally.

Source code in ranx/data_structures/run.py
def keys(self):
    """Returns query ids. Used internally."""
    return self.run.keys()

make_comparable(qrels)

Adds empty results for queries missing from the run and removes those not appearing in qrels.

Source code in ranx/data_structures/run.py
def make_comparable(self, qrels: Qrels):
    """Adds empty results for queries missing from the run and removes those not appearing in qrels."""
    # Adds empty results for missing queries
    for q_id in qrels.qrels:
        if q_id not in self.run:
            self.run[q_id] = create_empty_results_dict()

    # Remove results for additional queries
    for q_id in list(self.run):  # iterate over a copy of the keys so entries can be deleted safely
        if q_id not in qrels.qrels:
            del self.run[q_id]

    self.sort()

    return self
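A sketch of aligning a run to a set of qrels before evaluation; the qrels and run contents are invented:

from ranx import Qrels, Run

qrels = Qrels({"q_1": {"d_1": 1}, "q_2": {"d_2": 1}})
run = Run.from_dict({"q_1": {"d_1": 0.9}, "q_3": {"d_7": 0.5}})

run.make_comparable(qrels)  # adds an empty result set for "q_2" and drops "q_3"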

save(path='run.json', kind=None)

Write run to path as JSON file, TREC run, LZ4 file, or Parquet file. The file type is automatically inferred from the filename extension: ".json" -> "json", ".trec" -> "trec", ".txt" -> "trec", ".lz4" -> "lz4", ".parq" -> "parquet", and ".parquet" -> "parquet". Use the "kind" argument to override this behavior.

Parameters:

Name Type Description Default
path str

Saving path. Defaults to "run.json".

'run.json'
kind str

Kind of file to save, must be one of "json", "trec", "lz4", or "parquet". If None, it will be automatically inferred from the filename extension.

None
Source code in ranx/data_structures/run.py
def save(self, path: str = "run.json", kind: str = None):
    """Write `run` to `path` as JSON file, TREC run, LZ4 file, or Parquet file. File type is automatically inferred form the filename extension: ".json" -> "json", ".trec" -> "trec", ".txt" -> "trec", and ".lz4" -> "lz4", ".parq" -> "parquet", ".parquet" -> "parquet". Use the "kind" argument to override this behavior.

    Args:
        path (str, optional): Saving path. Defaults to "run.json".
        kind (str, optional): Kind of file to save, must be one of "json", "trec", "lz4", or "parquet". If None, it will be automatically inferred from the filename extension.
    """
    # Infer file extension -------------------------------------------------
    kind = get_file_kind(path, kind)

    # Save Run -------------------------------------------------------------
    if not self.sorted:
        self.sort()

    if kind == "json":
        save_json(self.to_dict(), path)
    elif kind == "lz4":
        save_lz4(self.to_dict(), path)
    elif kind == "parquet":
        self.to_dataframe().to_parquet(path, index=False)
    else:
        with open(path, "w") as f:
            for i, q_id in enumerate(self.run.keys()):
                for rank, doc_id in enumerate(self.run[q_id].keys()):
                    score = self.run[q_id][doc_id]
                    f.write(f"{q_id} Q0 {doc_id} {rank+1} {score} {self.name}")

                    if (
                        i != len(self.run.keys()) - 1
                        or rank != len(self.run[q_id].keys()) - 1
                    ):
                        f.write("\n")

sort()

Sort. Used internally.

Source code in ranx/data_structures/run.py
def sort(self):
    """Sort. Used internally."""
    self.run = sort_dict_by_key(self.run)
    self.run = sort_dict_of_dict_by_value(self.run)
    self.sorted = True

to_dataframe()

Convert Run to Pandas DataFrame with the following columns: q_id, doc_id, and score.

Returns:

Type Description
DataFrame

pandas.DataFrame: Run as Pandas DataFrame.

Source code in ranx/data_structures/run.py
def to_dataframe(self) -> pd.DataFrame:
    """Convert Run to Pandas DataFrame with the following columns: `q_id`, `doc_id`, and `score`.

    Returns:
        pandas.DataFrame: Run as Pandas DataFrame.
    """
    data = {"q_id": [], "doc_id": [], "score": []}

    for q_id in self.run:
        for doc_id in self.run[q_id]:
            data["q_id"].append(q_id)
            data["doc_id"].append(doc_id)
            data["score"].append(self.run[q_id][doc_id])

    return pd.DataFrame.from_dict(data)
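A sketch of converting a run to a DataFrame; the contents are invented:

from ranx import Run

run = Run.from_dict({"q_1": {"d_12": 0.9, "d_25": 0.8}})
df = run.to_dataframe()  # columns: q_id, doc_id, score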

to_dict()

Convert Run to Python dictionary.

Returns:

Type Description

Dict[str, Dict[str, float]]: Run as Python dictionary

Source code in ranx/data_structures/run.py
def to_dict(self):
    """Convert Run to Python dictionary.

    Returns:
        Dict[str, Dict[str, float]]: Run as Python dictionary
    """
    d = defaultdict(dict)
    for q_id in self.keys():
        d[q_id] = dict(self[q_id])
    return d

to_typed_list()

Convert Run to Numba Typed List. Used internally.

Source code in ranx/data_structures/run.py
def to_typed_list(self):
    """Convert Run to Numba Typed List. Used internally."""
    if not self.sorted:
        self.sort()
    return to_typed_list(self.run)

compare(qrels, runs, metrics, stat_test='student', n_permutations=1000, max_p=0.01, random_seed=42, threads=0, rounding_digits=3, show_percentages=False, make_comparable=False)

Evaluate multiple runs and compute statistical tests.

Usage example:

from ranx import compare

# Compare different runs and perform statistical tests
report = compare(
    qrels=qrels,
    runs=[run_1, run_2, run_3, run_4, run_5],
    metrics=["map@100", "mrr@100", "ndcg@10"],
    max_p=0.01  # P-value threshold
)

print(report)
Output:
#    Model    MAP@100     MRR@100     NDCG@10
---  -------  ----------  ----------  ----------
a    model_1  0.3202ᵇ     0.3207ᵇ     0.3684ᵇᶜ
b    model_2  0.2332      0.2339      0.239
c    model_3  0.3082ᵇ     0.3089ᵇ     0.3295ᵇ
d    model_4  0.3664ᵃᵇᶜ   0.3668ᵃᵇᶜ   0.4078ᵃᵇᶜ
e    model_5  0.4053ᵃᵇᶜᵈ  0.4061ᵃᵇᶜᵈ  0.4512ᵃᵇᶜᵈ

Parameters:

Name Type Description Default
qrels Qrels

Qrels.

required
runs List[Run]

List of runs.

required
metrics Union[List[str], str]

Metric or list of metrics.

required
n_permutations int

Number of permutations to perform during statistical testing (used by Fisher's Randomization Test). Defaults to 1000.

1000
max_p float

Maximum p-value to consider an increment as statistically significant. Defaults to 0.01.

0.01
stat_test str

Statistical test to perform. Use "fisher" for Fisher's Randomization Test, "student" for Two-sided Paired Student's t-Test, or "tukey" for Tukey's HSD test. Defaults to "student".

'student'
random_seed int

Random seed to use for generating the permutations. Defaults to 42.

42
threads int

Number of threads to use, zero means all the available threads. Defaults to 0.

0
rounding_digits int

Number of digits to round to and to show in the Report. Defaults to 3.

3
show_percentages bool

Whether to show percentages instead of floats in the Report. Defaults to False.

False
make_comparable bool

Adds empty results for queries missing from the runs and removes those not appearing in qrels. Defaults to False.

False

Returns:

Name Type Description
Report Report

See report.

Source code in ranx/meta/compare.py
def compare(
    qrels: Qrels,
    runs: List[Run],
    metrics: Union[List[str], str],
    stat_test: str = "student",
    n_permutations: int = 1000,
    max_p: float = 0.01,
    random_seed: int = 42,
    threads: int = 0,
    rounding_digits: int = 3,
    show_percentages: bool = False,
    make_comparable: bool = False,
) -> Report:
    """Evaluate multiple `runs` and compute statistical tests.

    Usage example:
    ```python
    from ranx import compare

    # Compare different runs and perform statistical tests
    report = compare(
        qrels=qrels,
        runs=[run_1, run_2, run_3, run_4, run_5],
        metrics=["map@100", "mrr@100", "ndcg@10"],
        max_p=0.01  # P-value threshold
    )

    print(report)
    ```
    Output:
    ```
    #    Model    MAP@100     MRR@100     NDCG@10
    ---  -------  ----------  ----------  ----------
    a    model_1  0.3202ᵇ     0.3207ᵇ     0.3684ᵇᶜ
    b    model_2  0.2332      0.2339      0.239
    c    model_3  0.3082ᵇ     0.3089ᵇ     0.3295ᵇ
    d    model_4  0.3664ᵃᵇᶜ   0.3668ᵃᵇᶜ   0.4078ᵃᵇᶜ
    e    model_5  0.4053ᵃᵇᶜᵈ  0.4061ᵃᵇᶜᵈ  0.4512ᵃᵇᶜᵈ
    ```

    Args:
        qrels (Qrels): Qrels.
        runs (List[Run]): List of runs.
        metrics (Union[List[str], str]): Metric or list of metrics.
        n_permutations (int, optional): Number of permutations to perform during statistical testing (used by Fisher's Randomization Test). Defaults to 1000.
        max_p (float, optional): Maximum p-value to consider an increment as statistically significant. Defaults to 0.01.
        stat_test (str, optional): Statistical test to perform. Use "fisher" for _Fisher's Randomization Test_, "student" for _Two-sided Paired Student's t-Test_, or "tukey" for _Tukey's HSD test_. Defaults to "student".
        random_seed (int, optional): Random seed to use for generating the permutations. Defaults to 42.
        threads (int, optional): Number of threads to use, zero means all the available threads. Defaults to 0.
        rounding_digits (int, optional): Number of digits to round to and to show in the Report. Defaults to 3.
        show_percentages (bool, optional): Whether to show percentages instead of floats in the Report. Defaults to False.
        make_comparable (bool, optional): Adds empty results for queries missing from the runs and removes those not appearing in qrels. Defaults to False.

    Returns:
        Report: See report.
    """
    metrics = format_metrics(metrics)
    assert all(isinstance(m, str) for m in metrics), "Metrics error"

    model_names = []
    results = defaultdict(dict)

    metric_scores = {}

    # Compute scores for each run for each query -------------------------------
    for i, run in enumerate(runs):
        model_name = run.name if run.name is not None else f"run_{i+1}"
        model_names.append(model_name)

        metric_scores[model_name] = evaluate(
            qrels=qrels,
            run=run,
            metrics=metrics,
            return_mean=False,
            threads=threads,
            make_comparable=make_comparable,
        )

        if len(metrics) == 1:
            metric_scores[model_name] = {metrics[0]: metric_scores[model_name]}

        for m in metrics:
            results[model_name][m] = float(np.mean(metric_scores[model_name][m]))

    # Run statistical testing --------------------------------------------------
    comparisons = compute_statistical_significance(
        model_names=model_names,
        metric_scores=metric_scores,
        stat_test=stat_test,
        n_permutations=n_permutations,
        max_p=max_p,
        random_seed=random_seed,
    )

    # Compute win / tie / lose -------------------------------------------------
    win_tie_loss = defaultdict(dict)

    for control in model_names:
        for treatment in model_names:
            if control != treatment:
                for m in metrics:
                    control_scores = metric_scores[control][m]
                    treatment_scores = metric_scores[treatment][m]
                    win_tie_loss[(control, treatment)][m] = {
                        "W": int(sum(control_scores > treatment_scores)),
                        "T": int(sum(control_scores == treatment_scores)),
                        "L": int(sum(control_scores < treatment_scores)),
                    }

    return Report(
        model_names=model_names,
        results=dict(results),
        comparisons=comparisons,
        metrics=metrics,
        max_p=max_p,
        win_tie_loss=dict(win_tie_loss),
        rounding_digits=rounding_digits,
        show_percentages=show_percentages,
        stat_test=stat_test,
    )
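Building on the example above, a toy sketch that also aligns each run to the qrels via make_comparable; the qrels and runs are invented and far too small to yield meaningful statistics:

from ranx import Qrels, Run, compare

qrels = Qrels({"q_1": {"d_1": 1}, "q_2": {"d_2": 1}})
run_1 = Run.from_dict({"q_1": {"d_1": 0.9, "d_3": 0.4}, "q_2": {"d_2": 0.8}}, name="run_1")
run_2 = Run.from_dict({"q_1": {"d_3": 0.9, "d_1": 0.4}}, name="run_2")  # "q_2" is missing on purpose

report = compare(
    qrels=qrels,
    runs=[run_1, run_2],
    metrics=["ndcg@5", "mrr"],
    make_comparable=True,  # adds empty results for "q_2" to run_2 before evaluating
)
print(report)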

compute_statistical_significance(model_names, metric_scores, stat_test='fisher', n_permutations=1000, max_p=0.01, random_seed=42)

Used internally.

Source code in ranx/statistical_tests/__init__.py
def compute_statistical_significance(
    model_names: List[str],
    metric_scores: Dict[str, Dict[str, np.ndarray]],
    stat_test: str = "fisher",
    n_permutations: int = 1000,
    max_p: float = 0.01,
    random_seed: int = 42,
):
    """Used internally."""
    comparisons = FrozensetDict()

    if stat_test in {"fisher", "student"}:
        for control in model_names:
            control_metric_scores = metric_scores[control]
            for treatment in model_names:
                if control != treatment:
                    treatment_metric_scores = metric_scores[treatment]

                    # Compute statistical significance
                    comparisons[
                        frozenset([control, treatment])
                    ] = _compute_statistical_significance(
                        control_metric_scores,
                        treatment_metric_scores,
                        stat_test,
                        n_permutations,
                        max_p,
                        random_seed,
                    )

        return comparisons
    elif stat_test in {"tukey"}:
        metrics = list(metric_scores[model_names[0]])

        # Initialize comparisons
        for i, control in enumerate(model_names):
            for treatment in model_names[i:]:
                comparisons[frozenset([control, treatment])] = {
                    m: None for m in metrics
                }

        for m in metrics:
            scores = [metric_scores[name][m] for name in model_names]
            results = tukey_hsd_test(
                model_names=model_names,
                scores=scores,
                max_p=max_p,
            )

            for res in results:
                comparisons[res["control"], res["treatment"]][m] = {
                    "p_value": res["p-value"],
                    "significant": res["significant"],
                }

    else:
        raise NotImplementedError(f"Statistical test `{stat_test}` not supported.")

    return comparisons

evaluate(qrels, run, metrics, return_mean=True, return_std=False, threads=0, save_results_in_run=True, make_comparable=False)

Compute the performance scores for the provided qrels and run for all the specified metrics.

Usage examples:

from ranx import evaluate

# Compute score for a single metric
evaluate(qrels, run, "ndcg@5")
>>> 0.7861

# Compute scores for multiple metrics at once
evaluate(qrels, run, ["map@5", "mrr"])
>>> {"map@5": 0.6416, "mrr": 0.75}

# Computed metric scores are saved in the Run object
run.mean_scores
>>> {"ndcg@5": 0.7861, "map@5": 0.6416, "mrr": 0.75}

# Access scores for each query
dict(run.scores)
>>> {
...     "ndcg@5": {"q_1": 0.9430, "q_2": 0.6292},
...     "map@5": {"q_1": 0.8333, "q_2": 0.4500},
...     "mrr": {"q_1": 1.0000, "q_2": 0.5000},
... }

Parameters:

Name Type Description Default
qrels Union[Qrels, Dict[str, Dict[str, Number]], nb.typed.typedlist.List, np.ndarray]

Qrels.

required
run Union[Run, Dict[str, Dict[str, Number]], nb.typed.typedlist.List, np.ndarray]

Run.

required
metrics Union[List[str], str]

Metric or list of metrics to compute.

required
return_mean bool

Whether to return the metric scores averaged over the query set or the scores for individual queries. Defaults to True.

True
threads int

Number of threads to use, zero means all the available threads. Defaults to 0.

0
save_results_in_run bool

Save metric scores for each query in the input run. Defaults to True.

True
make_comparable bool

Adds empty results for queries missing from the run and removes those not appearing in qrels. Defaults to False.

False

Returns:

Type Description
Union[Dict[str, float], float]

Union[Dict[str, float], float]: Results.

Source code in ranx/meta/evaluate.py
def evaluate(
    qrels: Union[
        Qrels,
        Dict[str, Dict[str, Number]],
        nb.typed.typedlist.List,
        np.ndarray,
    ],
    run: Union[
        Run,
        Dict[str, Dict[str, Number]],
        nb.typed.typedlist.List,
        np.ndarray,
    ],
    metrics: Union[List[str], str],
    return_mean: bool = True,
    return_std: bool = False,
    threads: int = 0,
    save_results_in_run: bool = True,
    make_comparable: bool = False,
) -> Union[Dict[str, float], float]:
    """Compute the performance scores for the provided `qrels` and `run` for all the specified metrics.

    Usage examples:

    from ranx import evaluate

    # Compute score for a single metric
    evaluate(qrels, run, "ndcg@5")
    >>> 0.7861

    # Compute scores for multiple metrics at once
    evaluate(qrels, run, ["map@5", "mrr"])
    >>> {"map@5": 0.6416, "mrr": 0.75}

    # Computed metric scores are saved in the Run object
    run.mean_scores
    >>> {"ndcg@5": 0.7861, "map@5": 0.6416, "mrr": 0.75}

    # Access scores for each query
    dict(run.scores)
    >>> {
    ...     "ndcg@5": {"q_1": 0.9430, "q_2": 0.6292},
    ...     "map@5": {"q_1": 0.8333, "q_2": 0.4500},
    ...     "mrr": {"q_1": 1.0000, "q_2": 0.5000},
    ... }
    Args:
        qrels (Union[ Qrels, Dict[str, Dict[str, Number]], nb.typed.typedlist.List, np.ndarray, ]): Qrels.
        run (Union[ Run, Dict[str, Dict[str, Number]], nb.typed.typedlist.List, np.ndarray, ]): Run.
        metrics (Union[List[str], str]): Metric or list of metrics to compute.
        return_mean (bool, optional): Whether to return the metric scores averaged over the query set or the scores for individual queries. Defaults to True.
        threads (int, optional): Number of threads to use, zero means all the available threads. Defaults to 0.
        save_results_in_run (bool, optional): Save metric scores for each query in the input `run`. Defaults to True.
        make_comparable (bool, optional): Adds empty results for queries missing from the run and removes those not appearing in qrels. Defaults to False.

    Returns:
        Union[Dict[str, float], float]: Results.
    """

    if len(qrels) < 10:
        set_num_threads(1)
    elif threads != 0:
        set_num_threads(threads)

    if not return_mean:
        return_std = False

    if make_comparable and type(qrels) == Qrels and type(run) == Run:
        run = run.make_comparable(qrels)

    if type(qrels) in [Qrels, dict] and type(run) in [Run, dict]:
        check_keys(qrels, run)

    _qrels = convert_qrels(qrels)
    _run = convert_run(run)
    metrics = format_metrics(metrics)
    assert all(isinstance(m, str) for m in metrics), "Metrics error"

    # Compute metrics ----------------------------------------------------------
    metric_scores_dict = {}
    for metric in metrics:
        m, k, rel_lvl = extract_metric_and_params(metric)
        metric_scores_dict[metric] = metric_switch(m)(_qrels, _run, k, rel_lvl)

    # Save results in Run ------------------------------------------------------
    if type(run) == Run and save_results_in_run:
        for m, scores in metric_scores_dict.items():
            run.mean_scores[m] = np.mean(scores)
            if return_std:
                run.std_scores[m] = np.std(scores)
            for i, q_id in enumerate(run.get_query_ids()):
                run.scores[m][q_id] = scores[i]

    # Prepare output -----------------------------------------------------------
    if return_mean:
        for m, scores in metric_scores_dict.items():
            if return_std:
                metric_scores_dict[m] = {
                    "mean": np.mean(scores),
                    "std": np.std(scores),
                }

            else:
                metric_scores_dict[m] = np.mean(scores)

    return metric_scores_dict[m] if len(metrics) == 1 else metric_scores_dict
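A toy sketch of getting per-query scores instead of the mean; the qrels and run are invented:

from ranx import Qrels, Run, evaluate

qrels = Qrels({"q_1": {"d_1": 1}, "q_2": {"d_2": 1}})
run = Run.from_dict({"q_1": {"d_1": 0.9, "d_3": 0.4}, "q_2": {"d_5": 0.8, "d_2": 0.6}})

mean_ndcg = evaluate(qrels, run, "ndcg@5")                     # a single float
per_query = evaluate(qrels, run, "ndcg@5", return_mean=False)  # one score per query, ordered as run.get_query_ids()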