-
Notifications
You must be signed in to change notification settings - Fork 39
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge branch 'main' into simplify_qa_example
- Loading branch information
Showing
60 changed files
with
899 additions
and
443 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
from unitxt import get_logger, produce  # Import necessary functions from unitxt
from unitxt.inference import GenericInferenceEngine  # Import the inference engine class

if __name__ == "__main__":
    # Create an instance of the GenericInferenceEngine with a default engine.
    # This means if no engine is specified during inference, it will default to this one.
    generic_engine_with_default = GenericInferenceEngine(
        default="engines.ibm_gen_ai.llama_3_70b_instruct"
    )

    # Define the recipe for data processing and model selection.
    # - card: Specifies the underlying data (from cards.almost_evil).
    # - template: Selects the specific template within the card (from templates.qa.open.simple).
    # - demos_pool_size and num_demos: Control the number of demonstration examples used (set to 0 here).
    recipe = "card=cards.almost_evil,template=templates.qa.open.simple,demos_pool_size=0,num_demos=0"

    # Create a list of instances (data points) for inference.
    # Each instance has a "question" and its corresponding "answers".
    instances = [
        {
            "question": "How many days there are in a week, answer only with numerals",
            "answers": ["7"],
        },
        {
            "question": "If a ate an apple in the morning, and one in the evening, what is the number of apples I have eaten?, answer only with numerals",
            "answers": ["2"],
        },
    ]

    # Process the instances using the defined recipe.
    # This formats the data according to the chosen card and template.
    dataset = produce(instances, recipe)

    # Perform inference on the processed dataset using the engine with the default model.
    predictions = generic_engine_with_default.infer(dataset)
    get_logger().info(predictions)  # Log the predictions

    # The following code block demonstrates how to use the GenericInferenceEngine without specifying a
    # default engine. It expects the engine to be defined in the UNITXT_INFERENCE_ENGINE environment variable.
    try:
        # Attempt to create an instance without a default engine.
        generic_engine_without_default = GenericInferenceEngine()

        # Perform inference (will use the engine specified in the environment variable).
        predictions = generic_engine_without_default.infer(dataset)
        get_logger().info(predictions)  # Log the predictions
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are not
        # swallowed. Handle the case where the environment variable is not set.
        get_logger().error(
            "GenericInferenceEngine could not be initialized without a default since "
            "UNITXT_INFERENCE_ENGINE environmental variable is not set."
        )
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Empty file.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,11 @@ | ||
from unitxt.catalog import add_to_catalog
from unitxt.inference import WMLInferenceEngine

# Models to register in the catalog as IBM WML inference engines.
model_list = ["meta-llama/llama-3-70b-instruct"]

for model in model_list:
    # Build the catalog label from the part of the model id after the org prefix:
    # dashes become underscores, dots become commas (presumably the unitxt catalog
    # naming convention, since "." is the catalog hierarchy separator — TODO confirm),
    # then lowercase everything.
    short_name = model.split("/")[1]
    model_label = short_name.replace("-", "_").replace(".", ",").lower()
    # Fixed seed and generous token budget for reproducible judge-style generation.
    engine = WMLInferenceEngine(
        model_name=model, max_new_tokens=2048, random_seed=42
    )
    add_to_catalog(engine, f"engines.ibm_wml.{model_label}", overwrite=True)
Empty file.
62 changes: 62 additions & 0 deletions
62
prepare/metrics/llm_as_judge/pairwise_rating/llama_3_arena_hard_template.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,62 @@ | ||
from unitxt import add_to_catalog
from unitxt.inference import (
    GenericInferenceEngine,
    IbmGenAiInferenceEngine,
    WMLInferenceEngine,
)
from unitxt.llm_as_judge import LLMAsJudge

# Judge models and the prompt format they expect.
model_list = ["meta-llama/llama-3-8b-instruct", "meta-llama/llama-3-70b-instruct"]
# Renamed from `format` to avoid shadowing the builtin; the LLMAsJudge kwarg
# below is still named `format`, so the catalog artifact is unchanged.
format_id = "formats.llama3_instruct"
templates = [
    "templates.response_assessment.pairwise_comparative_rating.arena_hard",
    "templates.response_assessment.pairwise_comparative_rating.arena_hard_with_shuffling",
]

# (catalog label, engine class) pairs to build a judge metric for.
inference_engines = [
    ("ibm_wml", WMLInferenceEngine),
    ("ibm_genai", IbmGenAiInferenceEngine),
    ("generic_engine", GenericInferenceEngine),
]

# Loop-invariant: every metric registered here targets the same task.
task = "pairwise_comparative_rating.single_turn"

for template in templates:
    for model_id in model_list:
        for inference_engine_name, inference_engine in inference_engines:
            if (
                inference_engine_name == "ibm_wml"
                and model_id == "meta-llama/llama-3-8b-instruct"
            ):
                continue  # currently not supported

            # If the inference engine is generic, its configuration is defined
            # when it is saved to the catalog, not here.
            if inference_engine_name != "generic_engine":
                inference_model = inference_engine(
                    model_name=model_id, max_new_tokens=2048, random_seed=42
                )
            else:
                inference_model = inference_engine(
                    default="engines.ibm_gen_ai.llama_3_70b_instruct"
                )

            # Catalog-safe label: drop the org prefix, dashes -> underscores,
            # dots -> commas (presumably because "." is the catalog separator
            # — TODO confirm), lowercase, then suffix with the engine name.
            model_label = (
                model_id.split("/")[1].replace("-", "_").replace(".", ",").lower()
            )
            model_label = f"{model_label}_{inference_engine_name}"
            template_label = template.split(".")[-1]
            metric_label = f"{model_label}_template_{template_label}"
            metric = LLMAsJudge(
                inference_model=inference_model,
                template=template,
                task=task,
                format=format_id,
                main_score=metric_label,
            )

            add_to_catalog(
                metric,
                f"metrics.llm_as_judge.pairwise_comparative_rating.{model_label}_template_{template_label}",
                overwrite=True,
            )
36 changes: 0 additions & 36 deletions
36
prepare/metrics/llm_as_judge/pairwise_rating/llama_3_ibm_genai_arena_hard_template.py
This file was deleted.
Oops, something went wrong.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Oops, something went wrong.