Commit f17c093

bio-mlhui authored and stephen-nju committed

[Dataset] Add CARDBiomedBench (open-compass#2071)

* CARDBiomedBench
* fix hash
* fix dataset-index
* use official llmjudge postprocess
* use official llmjudge_postprocess
* fix lint
* fix init

1 parent 8c02619 commit f17c093

File tree

4 files changed: +138, -0 lines changed

dataset-index.yml

Lines changed: 6 additions & 0 deletions

```diff
@@ -361,6 +361,12 @@
     paper: https://arxiv.org/pdf/2004.05986
     configpath: opencompass/configs/datasets/CLUE_C3/CLUE_C3_gen.py
     configpath_llmjudge: ''
+- CARDBiomedBench:
+    name: CARDBiomedBench
+    category: Knowledge / Medicine
+    paper: https://www.biorxiv.org/content/10.1101/2025.01.15.633272v1
+    configpath: opencompass/configs/datasets/CARDBiomedBench
+    configpath_llmjudge: 'opencompass/configs/datasets/CARDBiomedBench/CARDBiomedBench_llmjudge_gen_99a231.py'
 - cb:
     name: SuperGLUE / CB
     category: Reasoning
```
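For reference, the `configpath_llmjudge` entry above is the file an evaluation run would import. A minimal sketch of pulling the new dataset list into a run config, assuming OpenCompass's usual `read_base` pattern (the `models` list is a placeholder, not part of this commit):

```python
# Sketch of a run config consuming the dataset definition added here.
# Assumes the standard read_base() config mechanism; the models list is
# a placeholder and is not part of this diff.
from mmengine.config import read_base

with read_base():
    from opencompass.configs.datasets.CARDBiomedBench.CARDBiomedBench_llmjudge_gen_99a231 import \
        cardbiomedbench_datasets

datasets = cardbiomedbench_datasets
models = []  # placeholder: add the model configs to evaluate here
```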
opencompass/configs/datasets/CARDBiomedBench/CARDBiomedBench_llmjudge_gen_99a231.py

Lines changed: 101 additions & 0 deletions

```python
from opencompass.datasets import CARDBiomedBenchDataset
from opencompass.datasets import generic_llmjudge_postprocess
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.evaluator import GenericLLMEvaluator

ZERO_SHOT_PROMPT = 'You are an expert in {expert}.\n{question}\n'

GRADER_TEMPLATE = """
Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.

Here are some evaluation criteria:
1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.

Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
Just return the letters "A" or "B", with no text around it.

Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.

<Original Question Begin>: Q: You are an expert in {expert}.\n{question}\n<Original Question End>\n\n
<Gold Target Begin>: \n{answer}\n<Gold Target End>\n\n
<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
Judging the correctness of candidates' answers:
""".strip()


# Reader configuration
reader_cfg = dict(
    input_columns=[
        'question',
        'answer',
        'Bio_Category',
        'SQL_Category',
        'uuid',
        'template uuid',
        'expert',
    ],
    output_column='answer',
)

# Inference configuration
infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(
            round=[
                dict(
                    role='HUMAN',
                    prompt=ZERO_SHOT_PROMPT,  # prompt mode: zero-shot
                ),
            ],
        ),
    ),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer),
)

# Evaluation configuration
eval_cfg = dict(
    evaluator=dict(
        type=GenericLLMEvaluator,
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                begin=[
                    dict(
                        role='SYSTEM',
                        fallback_role='HUMAN',
                        prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.",
                    )
                ],
                round=[
                    dict(role='HUMAN', prompt=GRADER_TEMPLATE),
                ],
            ),
        ),
        dataset_cfg=dict(
            type=CARDBiomedBenchDataset,
            path='NIH-CARD/CARDBiomedBench',
            prompt_mode='zero-shot',
            reader_cfg=reader_cfg,
        ),
        judge_cfg=dict(),
        dict_postprocessor=dict(type=generic_llmjudge_postprocess),
    ),
)

cardbiomedbench_dataset = dict(
    type=CARDBiomedBenchDataset,
    abbr='cardbiomedbench',
    path='NIH-CARD/CARDBiomedBench',
    prompt_mode='zero-shot',
    reader_cfg=reader_cfg,
    infer_cfg=infer_cfg,
    eval_cfg=eval_cfg,
)

cardbiomedbench_datasets = [cardbiomedbench_dataset]
```
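Note that `judge_cfg` is left empty in this file, so a concrete judge model must be supplied at run time. A minimal sketch of what that wiring might look like, assuming an OpenAI-compatible judge via OpenCompass's `OpenAISDK` wrapper (the model name, abbreviation, and rate limits below are illustrative assumptions, not part of this diff):

```python
# Illustrative judge wiring; the model name, abbr, and limits are
# assumptions. key='ENV' reads the API key from the environment,
# following the usual OpenCompass convention.
from opencompass.models import OpenAISDK

judge_cfg = dict(
    type=OpenAISDK,
    abbr='judge-model',    # hypothetical judge abbreviation
    path='gpt-4o-mini',    # hypothetical judge model name
    key='ENV',
    query_per_second=2,
    max_out_len=1024,
    batch_size=8,
)

# One way to attach it to the dataset definition from this file:
# cardbiomedbench_dataset['eval_cfg']['evaluator']['judge_cfg'] = judge_cfg
```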
opencompass/datasets/CARDBiomedBench.py

Lines changed: 30 additions & 0 deletions

```python
from datasets import load_dataset

from opencompass.registry import LOAD_DATASET

from .base import BaseDataset


def _parse(item, prompt_mode):
    # The Bio_Category doubles as the {expert} persona in the zero-shot prompt.
    item['expert'] = item['Bio_Category']
    # First/last option letters for choice-style items; the fallback label
    # list is empty when the dataset has no 'choices' column.
    item['start'] = chr(65)
    item['end'] = chr(65 + len(item.get('choices', {'label': []})['label']) -
                      1)
    item['prompt_mode'] = prompt_mode
    return item


@LOAD_DATASET.register_module()
class CARDBiomedBenchDataset(BaseDataset):

    @staticmethod
    def load(path: str, prompt_mode: str, **kwargs):
        data_files = {'test': 'data/CARDBiomedBench.csv'}
        dataset = load_dataset(path, data_files=data_files, split='test')
        # dataset = dataset.select(range(200))
        if prompt_mode == 'zero-shot':
            dataset = dataset.map(lambda item: _parse(item, prompt_mode),
                                  load_from_cache_file=False)
        elif prompt_mode == 'few-shot':
            pass  # TODO: Implement few-shot prompt
        return dataset
```
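Since `load` is a staticmethod that returns the underlying Hugging Face dataset, it can also be called directly as a quick sanity check. A small sketch, assuming network access to the Hub (the printed fields follow the reader config above):

```python
# Standalone sanity-check sketch; assumes the Hugging Face Hub is reachable.
from opencompass.datasets import CARDBiomedBenchDataset

ds = CARDBiomedBenchDataset.load(
    path='NIH-CARD/CARDBiomedBench',
    prompt_mode='zero-shot',
)
print(len(ds))                 # number of test items
print(ds[0]['expert'])         # expert persona derived from Bio_Category
print(ds[0]['question'][:80])  # first 80 chars of the question
```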

opencompass/datasets/__init__.py

Lines changed: 1 addition & 0 deletions

```diff
@@ -16,6 +16,7 @@
 from .bustum import *  # noqa: F401, F403
 from .c3 import *  # noqa: F401, F403
 from .calm import *  # noqa: F401, F403
+from .CARDBiomedBench import CARDBiomedBenchDataset  # noqa: F401
 from .cb import *  # noqa: F401, F403
 from .ceval import *  # noqa: F401, F403
 from .charm import *  # noqa: F401, F403
```
