diff --git a/assemblyai/lemur.py b/assemblyai/lemur.py
index d028f67..d63568d 100644
--- a/assemblyai/lemur.py
+++ b/assemblyai/lemur.py
@@ -172,7 +172,7 @@ def question(
         Args:
             questions: One or a list of questions to ask.
             context: The context which is shared among all questions. This can be a string or a dictionary.
-            final_model: The model that is used for the final prompt after compression is performed (options: "basic", "default", and "assemblyai/mistral-7b").
+            final_model: The model that is used for the final prompt after compression is performed (options: "basic", "default", "assemblyai/mistral-7b", and "anthropic/claude-2-1").
             max_output_size: Max output size in tokens
             timeout: The timeout in seconds to wait for the answer(s).
             temperature: Change how deterministic the response is, with 0 being the most deterministic and 1 being the least deterministic.
@@ -214,7 +214,7 @@ def summarize(
         Args:
             context: An optional context on the transcript.
             answer_format: The format on how the summary shall be summarized.
-            final_model: The model that is used for the final prompt after compression is performed (options: "basic", "default", and "assemblyai/mistral-7b").
+            final_model: The model that is used for the final prompt after compression is performed (options: "basic", "default", "assemblyai/mistral-7b", and "anthropic/claude-2-1").
             max_output_size: Max output size in tokens
             timeout: The timeout in seconds to wait for the summary.
             temperature: Change how deterministic the response is, with 0 being the most deterministic and 1 being the least deterministic.
@@ -254,7 +254,7 @@ def action_items(
         Args:
             context: An optional context on the transcript.
             answer_format: The preferred format for the result action items.
-            final_model: The model that is used for the final prompt after compression is performed (options: "basic", "default", and "assemblyai/mistral-7b").
+            final_model: The model that is used for the final prompt after compression is performed (options: "basic", "default", "assemblyai/mistral-7b", and "anthropic/claude-2-1").
             max_output_size: Max output size in tokens
             timeout: The timeout in seconds to wait for the action items response.
             temperature: Change how deterministic the response is, with 0 being the most deterministic and 1 being the least deterministic.
@@ -289,7 +289,7 @@ def task(
         Args:
             prompt: The prompt to use for this task.
-            final_model: The model that is used for the final prompt after compression is performed (options: "basic", "default", and "assemblyai/mistral-7b").
+            final_model: The model that is used for the final prompt after compression is performed (options: "basic", "default", "assemblyai/mistral-7b", and "anthropic/claude-2-1").
             max_output_size: Max output size in tokens
             timeout: The timeout in seconds to wait for the task.
             temperature: Change how deterministic the response is, with 0 being the most deterministic and 1 being the least deterministic.
diff --git a/assemblyai/types.py b/assemblyai/types.py
index 9f5bb37..bb66e1e 100644
--- a/assemblyai/types.py
+++ b/assemblyai/types.py
@@ -1704,10 +1704,10 @@ def from_lemur_source(cls, source: LemurSource) -> Self:
 
 class LemurModel(str, Enum):
     """
-    LeMUR features three model modes, Basic, Default and Mistral 7B, that allow you to configure your request
-    to suit your needs. These options tell LeMUR whether to use the more advanced Default model or
-    the cheaper, faster, but simplified Basic model. The implicit setting is Default when no option
-    is explicitly passed in.
+    LeMUR features four model modes, Basic, Default, Mistral 7B, and Claude v2.1, that allow you to
+    configure your request to suit your needs. These options tell LeMUR whether to use the more
+    advanced Default model or the cheaper, faster, but simplified Basic model. The implicit setting
+    is Default when no option is explicitly passed in.
     """
 
     default = "default"
@@ -1734,6 +1734,8 @@ class LemurModel(str, Enum):
     Mistral 7B is an open source model that works well for summarization and answering questions.
     """
 
+    claude2_1 = "anthropic/claude-2-1"
+
 
 class LemurQuestionAnswer(BaseModel):
     """
diff --git a/tests/unit/test_lemur.py b/tests/unit/test_lemur.py
index 6a67b62..3a5a9de 100644
--- a/tests/unit/test_lemur.py
+++ b/tests/unit/test_lemur.py
@@ -514,7 +514,14 @@ def test_lemur_task_succeeds_input_text(httpx_mock: HTTPXMock):
     assert len(httpx_mock.get_requests()) == 1
 
 
-def test_lemur_task_succeeds_mistral(httpx_mock: HTTPXMock):
+@pytest.mark.parametrize(
+    "final_model",
+    (
+        aai.LemurModel.mistral7b,
+        aai.LemurModel.claude2_1,
+    ),
+)
+def test_lemur_task_succeeds(final_model, httpx_mock: HTTPXMock):
     """
-    Tests whether creating a task request succeeds with mistral.
+    Tests whether creating a task request succeeds with the given final model.
     """
@@ -534,7 +541,7 @@ def test_lemur_task_succeeds(final_model, httpx_mock: HTTPXMock):
     # test input_text input
     lemur = aai.Lemur()
     result = lemur.task(
-        final_model=aai.LemurModel.mistral7b,
+        final_model=final_model,
         prompt="Create action items of the meeting",
         input_text="Test test",
    )
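For reference, a minimal usage sketch of the new model option. The API key value is a placeholder, and the prompt/input strings mirror the unit test above; `aai.settings.api_key`, `aai.Lemur().task`, and the `.response` field on the result follow the SDK's existing interface.

```python
import assemblyai as aai

# Placeholder credential -- replace with a real API key.
aai.settings.api_key = "your-api-key"

lemur = aai.Lemur()

# Select the newly added Claude v2.1 final model; the enum member
# serializes to the string "anthropic/claude-2-1" on the wire.
result = lemur.task(
    final_model=aai.LemurModel.claude2_1,
    prompt="Create action items of the meeting",
    input_text="Test test",
)

print(result.response)
```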