Skip to content

Commit 05ee66a

Browse files
he-james, ploeber, Swimburger, jhazenaai, and RobMcH
authored
feat(lemur): add claude v2.1 support (#46)
Co-authored-by: Patrick Loeber <[email protected]> Co-authored-by: Niels Swimberghe <[email protected]> Co-authored-by: Justin Hazen <[email protected]> Co-authored-by: Robert McHardy <[email protected]> Co-authored-by: Robert McHardy <[email protected]> Co-authored-by: Martin Schweiger <[email protected]>
1 parent d0830b9 commit 05ee66a

File tree

3 files changed

+19
-10
lines changed

3 files changed

+19
-10
lines changed

assemblyai/lemur.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -172,7 +172,7 @@ def question(
172172
Args:
173173
questions: One or a list of questions to ask.
174174
context: The context which is shared among all questions. This can be a string or a dictionary.
175-
final_model: The model that is used for the final prompt after compression is performed (options: "basic", "default", and "assemblyai/mistral-7b").
175+
final_model: The model that is used for the final prompt after compression is performed (options: "basic", "default", "assemblyai/mistral-7b", and "anthropic/claude-2-1").
176176
max_output_size: Max output size in tokens
177177
timeout: The timeout in seconds to wait for the answer(s).
178178
temperature: Change how deterministic the response is, with 0 being the most deterministic and 1 being the least deterministic.
@@ -214,7 +214,7 @@ def summarize(
214214
Args:
215215
context: An optional context on the transcript.
216216
answer_format: The format on how the summary shall be summarized.
217-
final_model: The model that is used for the final prompt after compression is performed (options: "basic", "default", and "assemblyai/mistral-7b").
217+
final_model: The model that is used for the final prompt after compression is performed (options: "basic", "default", "assemblyai/mistral-7b", and "anthropic/claude-2-1").
218218
max_output_size: Max output size in tokens
219219
timeout: The timeout in seconds to wait for the summary.
220220
temperature: Change how deterministic the response is, with 0 being the most deterministic and 1 being the least deterministic.
@@ -254,7 +254,7 @@ def action_items(
254254
Args:
255255
context: An optional context on the transcript.
256256
answer_format: The preferred format for the result action items.
257-
final_model: The model that is used for the final prompt after compression is performed (options: "basic", "default", and "assemblyai/mistral-7b").
257+
final_model: The model that is used for the final prompt after compression is performed (options: "basic", "default", "assemblyai/mistral-7b", and "anthropic/claude-2-1").
258258
max_output_size: Max output size in tokens
259259
timeout: The timeout in seconds to wait for the action items response.
260260
temperature: Change how deterministic the response is, with 0 being the most deterministic and 1 being the least deterministic.
@@ -289,7 +289,7 @@ def task(
289289
290290
Args:
291291
prompt: The prompt to use for this task.
292-
final_model: The model that is used for the final prompt after compression is performed (options: "basic", "default", and "assemblyai/mistral-7b").
292+
final_model: The model that is used for the final prompt after compression is performed (options: "basic", "default", "assemblyai/mistral-7b", and "anthropic/claude-2-1").
293293
max_output_size: Max output size in tokens
294294
timeout: The timeout in seconds to wait for the task.
295295
temperature: Change how deterministic the response is, with 0 being the most deterministic and 1 being the least deterministic.

assemblyai/types.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1704,10 +1704,10 @@ def from_lemur_source(cls, source: LemurSource) -> Self:
17041704

17051705
class LemurModel(str, Enum):
17061706
"""
1707-
LeMUR features three model modes, Basic, Default and Mistral 7B, that allow you to configure your request
1708-
to suit your needs. These options tell LeMUR whether to use the more advanced Default model or
1709-
the cheaper, faster, but simplified Basic model. The implicit setting is Default when no option
1710-
is explicitly passed in.
1707+
LeMUR features four model modes, Basic, Default, Mistral 7B, and Claude v2.1, that allow you to
1708+
configure your request to suit your needs. These options tell LeMUR whether to use the more
1709+
advanced Default model or the cheaper, faster, but simplified Basic model. The implicit setting
1710+
is Default when no option is explicitly passed in.
17111711
"""
17121712

17131713
default = "default"
@@ -1734,6 +1734,8 @@ class LemurModel(str, Enum):
17341734
Mistral 7B is an open source model that works well for summarization and answering questions.
17351735
"""
17361736

1737+
claude2_1 = "anthropic/claude-2-1"
1738+
17371739

17381740
class LemurQuestionAnswer(BaseModel):
17391741
"""

tests/unit/test_lemur.py

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -514,7 +514,14 @@ def test_lemur_task_succeeds_input_text(httpx_mock: HTTPXMock):
514514
assert len(httpx_mock.get_requests()) == 1
515515

516516

517-
def test_lemur_task_succeeds_mistral(httpx_mock: HTTPXMock):
517+
@pytest.mark.parametrize(
518+
"final_model",
519+
(
520+
aai.LemurModel.mistral7b,
521+
aai.LemurModel.claude2_1,
522+
),
523+
)
524+
def test_lemur_task_succeeds(final_model, httpx_mock: HTTPXMock):
518525
"""
519526
Tests whether creating a task request succeeds with the given final model (mistral-7b or claude-2-1).
520527
"""
@@ -534,7 +541,7 @@ def test_lemur_task_succeeds_mistral(httpx_mock: HTTPXMock):
534541
# test input_text input
535542
lemur = aai.Lemur()
536543
result = lemur.task(
537-
final_model=aai.LemurModel.mistral7b,
544+
final_model=final_model,
538545
prompt="Create action items of the meeting",
539546
input_text="Test test",
540547
)

0 commit comments

Comments
 (0)