
Commit 59e7c34

Myhs-phz authored and stephen-nju committed

[Fix] OpenICL Math Evaluator Config (open-compass#2007)

* fix
* fix recommended
* fix
* fix
* fix
* fix
1 parent a494af8 commit 59e7c34

File tree

9 files changed: +135 -53 lines changed

dataset-index.yml

Lines changed: 2 additions & 2 deletions

@@ -615,8 +615,8 @@
     name: MATH
     category: Math
     paper: https://arxiv.org/pdf/2103.03874
-    configpath: opencompass/configs/datasets/math
-    configpath_llmjudge: ''
+    configpath: opencompass/configs/datasets/math/math_gen.py
+    configpath_llmjudge: opencompass/configs/datasets/math/math_llm_judge_gen.py
 - math500:
     name: MATH500
     category: Math
docs/en/statis.py

Lines changed: 1 addition & 1 deletion

@@ -35,7 +35,7 @@
 recommanded_dataset_list = [
     'ifeval', 'aime2024', 'bbh', 'bigcodebench', 'cmmlu', 'drop', 'gpqa',
     'hellaswag', 'humaneval', 'korbench', 'livecodebench', 'math', 'mmlu',
-    'mmlu_pro', 'musr'
+    'mmlu_pro', 'musr', 'math500'
 ]
docs/zh_cn/statis.py

Lines changed: 1 addition & 1 deletion

@@ -33,7 +33,7 @@
 recommanded_dataset_list = [
     'ifeval', 'aime2024', 'bbh', 'bigcodebench', 'cmmlu', 'drop', 'gpqa',
     'hellaswag', 'humaneval', 'korbench', 'livecodebench', 'math', 'mmlu',
-    'mmlu_pro', 'musr'
+    'mmlu_pro', 'musr', 'math500'
 ]
opencompass/configs/datasets/math/math_gen.py

Lines changed: 1 addition & 1 deletion

@@ -1,4 +1,4 @@
 from mmengine.config import read_base
 
 with read_base():
-    from .math_gen_265cce import math_datasets  # noqa: F401, F403
+    from .math_gen_a58d9d import math_datasets  # noqa: F401, F403
opencompass/configs/datasets/math/math_gen_a58d9d.py

Lines changed: 38 additions & 0 deletions

@@ -0,0 +1,38 @@
+from opencompass.openicl.icl_prompt_template import PromptTemplate
+from opencompass.openicl.icl_retriever import ZeroRetriever
+from opencompass.openicl.icl_inferencer import GenInferencer
+from opencompass.datasets import MATHDataset
+from opencompass.openicl.icl_evaluator import MATHEvaluator
+
+math_reader_cfg = dict(input_columns=['problem'], output_column='solution')
+
+math_infer_cfg = dict(
+    prompt_template=dict(
+        type=PromptTemplate,
+        template=dict(round=[
+            dict(role='HUMAN', prompt='Problem:\nFind the domain of the expression $\\frac{{\sqrt{{x-2}}}}{{\sqrt{{5-x}}}}$.}}\nSolution:'),
+            dict(role='BOT', prompt='The expressions inside each square root must be non-negative. Therefore, $x-2 \ge 0$, so $x\ge2$, and $5 - x \ge 0$, so $x \le 5$. Also, the denominator cannot be equal to zero, so $5-x>0$, which gives $x<5$. Therefore, the domain of the expression is $\\boxed{{[2,5)}}$.\nFinal Answer: The final answer is $[2,5)$. I hope it is correct.\n'),
+            dict(role='HUMAN', prompt='Problem:\nIf $\det \mathbf{{A}} = 2$ and $\det \mathbf{{B}} = 12,$ then find $\det (\mathbf{{A}} \mathbf{{B}}).$\nSolution:'),
+            dict(role='BOT', prompt='We have that $\det (\mathbf{{A}} \mathbf{{B}}) = (\det \mathbf{{A}})(\det \mathbf{{B}}) = (2)(12) = \\boxed{{24}}.$\nFinal Answer: The final answer is $24$. I hope it is correct.\n'),
+            dict(role='HUMAN', prompt='Problem:\nTerrell usually lifts two 20-pound weights 12 times. If he uses two 15-pound weights instead, how many times must Terrell lift them in order to lift the same total weight?\nSolution:'),
+            dict(role='BOT', prompt='If Terrell lifts two 20-pound weights 12 times, he lifts a total of $2\cdot 12\cdot20=480$ pounds of weight. If he lifts two 15-pound weights instead for $n$ times, he will lift a total of $2\cdot15\cdot n=30n$ pounds of weight. Equating this to 480 pounds, we can solve for $n$: \\begin{{align*}} 30n&=480\\\\ \Rightarrow\qquad n&=480/30=\\boxed{{16}} \end{{align*}}\nFinal Answer: The final answer is $16$. I hope it is correct.\n'),
+            dict(role='HUMAN', prompt='Problem:\nIf the system of equations: \\begin{{align*}} 6x-4y&=a,\\\\ 6y-9x &=b. \end{{align*}}has a solution $(x, y)$ where $x$ and $y$ are both nonzero, find $\\frac{{a}}{{b}},$ assuming $b$ is nonzero.\nSolution:'),
+            dict(role='BOT', prompt='If we multiply the first equation by $-\\frac{{3}}{{2}}$, we obtain $$6y-9x=-\\frac{{3}}{{2}}a.$$Since we also know that $6y-9x=b$, we have $$-\\frac{{3}}{{2}}a=b\Rightarrow\\frac{{a}}{{b}}=\\boxed{{-\\frac{{2}}{{3}}}}.$$\nFinal Answer: The final answer is $-\\frac{{2}}{{3}}$. I hope it is correct.\n'),
+            dict(role='HUMAN', prompt='Problem:\n{problem}\nSolution:\n'),
+        ])),
+    retriever=dict(type=ZeroRetriever),
+    inferencer=dict(type=GenInferencer))
+
+math_eval_cfg = dict(
+    evaluator=dict(type=MATHEvaluator)
+)
+
+math_datasets = [
+    dict(
+        type=MATHDataset,
+        abbr='math',
+        path='opencompass/math',
+        reader_cfg=math_reader_cfg,
+        infer_cfg=math_infer_cfg,
+        eval_cfg=math_eval_cfg)
+]
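
For context, dataset configs like this one are consumed through mmengine's read_base mechanism, as the updated math_gen.py above shows. Below is a minimal sketch of a hypothetical experiment config that selects this dataset; the top-level "datasets" variable is the convention OpenCompass runners read, and everything else here is illustrative rather than part of this commit:

from mmengine.config import read_base

with read_base():
    # Pull in the math_datasets list defined by the new config above.
    from opencompass.configs.datasets.math.math_gen import math_datasets

datasets = math_datasets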

opencompass/configs/datasets/math/math_llm_judge.py

Lines changed: 0 additions & 35 deletions
This file was deleted.
opencompass/configs/datasets/math/math_llm_judge_gen.py

Lines changed: 4 additions & 0 deletions

@@ -0,0 +1,4 @@
+from mmengine.config import read_base
+
+with read_base():
+    from .math_llm_judge_gen_56606f import math_datasets  # noqa: F401, F403
opencompass/configs/datasets/math/math_llm_judge_gen_56606f.py

Lines changed: 85 additions & 0 deletions

@@ -0,0 +1,85 @@
+from opencompass.openicl.icl_prompt_template import PromptTemplate
+from opencompass.openicl.icl_retriever import ZeroRetriever
+from opencompass.openicl.icl_inferencer import GenInferencer
+from opencompass.evaluator import GenericLLMEvaluator
+from opencompass.datasets import generic_llmjudge_postprocess
+from opencompass.datasets import MATHDataset
+
+
+math_reader_cfg = dict(input_columns=['problem'], output_column='solution')
+
+math_infer_cfg = dict(
+    prompt_template=dict(
+        type=PromptTemplate,
+        template=dict(round=[
+            dict(role='HUMAN', prompt="Question: {problem}\nLet's think step by step\nAnswer:")
+        ])),
+    retriever=dict(type=ZeroRetriever),
+    inferencer=dict(type=GenInferencer)
+)
+
+GRADER_TEMPLATE = """
+Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.
+
+Here are some evaluation criteria:
+1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.
+2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.
+3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.
+4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.
+5. If the prediction is given with \\boxed{}, please ignore the \\boxed{} and only judge whether the candidate's answer is consistent with the standard answer.
+
+Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:
+A: CORRECT
+B: INCORRECT
+Just return the letters "A" or "B", with no text around it.
+
+Here is your task. Simply reply with either CORRECT, INCORRECT. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
+
+
+<Original Question Begin>: \n{problem}\n<Original Question End>\n\n
+<Gold Target Begin>: \n{solution}\n<Gold Target End>\n\n
+<Predicted Answer Begin>: \n{prediction}\n<Predicted End>\n\n
+
+Judging the correctness of candidates' answers:
+""".strip()
+
+# Evaluation configuration
+math_eval_cfg = dict(
+    evaluator=dict(
+        type=GenericLLMEvaluator,
+        prompt_template=dict(
+            type=PromptTemplate,
+            template=dict(
+                begin=[
+                    dict(
+                        role='SYSTEM',
+                        fallback_role='HUMAN',
+                        prompt="You are a helpful assistant who evaluates the correctness and quality of models' outputs.")
+                ],
+                round=[
+                    dict(
+                        role='HUMAN',
+                        prompt=GRADER_TEMPLATE
+                    ),
+                ]),
+        ),
+        dataset_cfg=dict(
+            type=MATHDataset,
+            path='opencompass/math',
+            reader_cfg=math_reader_cfg,
+        ),
+        judge_cfg=dict(),
+        dict_postprocessor=dict(type=generic_llmjudge_postprocess),
+    ),
+    pred_role='BOT',
+)
+
+math_datasets = [
+    dict(
+        type=MATHDataset,
+        abbr='math',
+        path='opencompass/math',
+        reader_cfg=math_reader_cfg,
+        infer_cfg=math_infer_cfg,
+        eval_cfg=math_eval_cfg)
+]
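
The grader is told to reply with a bare "A" or "B", which generic_llmjudge_postprocess then turns into a correct/incorrect score. A simplified sketch of that verdict mapping is shown below; it is an illustrative assumption rather than the actual postprocessor, which also aggregates accuracy across the dataset:

def parse_verdict(judge_output: str) -> bool:
    # Per GRADER_TEMPLATE, 'A' means CORRECT and 'B' means INCORRECT.
    return judge_output.strip().upper().startswith('A')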

opencompass/openicl/icl_evaluator/math_evaluator.py

Lines changed: 3 additions & 13 deletions

@@ -22,26 +22,16 @@ def score(self, predictions, references):
         details = []
         for i, j in zip(predictions, references):
             count += 1
+            j_with_env = f'${j}$'
             gold_parsed = parse(
-                j,
+                j_with_env,
                 extraction_mode='first_match',
                 extraction_config=[
                     LatexExtractionConfig(),
                     ExprExtractionConfig(),
                 ],
             )
-            # If parsing result is empty, try adding LaTeX
-            # environment and parse again
-            if len(gold_parsed) == 0:
-                j_with_env = f'${j}$'
-                gold_parsed = parse(
-                    j_with_env,
-                    extraction_mode='first_match',
-                    extraction_config=[
-                        LatexExtractionConfig(),
-                        ExprExtractionConfig(),
-                    ],
-                )
+
             if len(gold_parsed) != 0:
                 # We require the answer to be provided in correct
                 # latex (no malformed operators)
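
The rewrite wraps every gold answer in $...$ before the first parse, since MATH references are stored without LaTeX delimiters and the LaTeX extractor keys on them; doing this unconditionally removes the need for the old parse-retry fallback. A minimal sketch of the behavior, assuming the math_verify package this evaluator builds on (the answer strings are illustrative):

from math_verify import ExprExtractionConfig, LatexExtractionConfig, parse, verify

gold = '\\frac{2}{3}'  # reference stored without $...$ delimiters
gold_parsed = parse(
    f'${gold}$',  # the $...$ wrapper lets LatexExtractionConfig match
    extraction_mode='first_match',
    extraction_config=[LatexExtractionConfig(), ExprExtractionConfig()],
)
pred_parsed = parse('The final answer is $\\boxed{\\frac{2}{3}}$.')
print(verify(gold_parsed, pred_parsed))  # True: the two forms are equivalent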
