Commit fde15ee

feat(mistral): add mistral support to the sdk (#3157)
GitOrigin-RevId: 67e94fa0b2f00c8343d3c19988c3c8a682575cdd
1 parent 4316b00 commit fde15ee

File tree (3 files changed: +44 −5 lines changed)

- assemblyai/lemur.py
- assemblyai/types.py
- tests/unit/test_lemur.py

assemblyai/lemur.py

Lines changed: 4 additions & 4 deletions
@@ -172,7 +172,7 @@ def question(
         Args:
             questions: One or a list of questions to ask.
             context: The context which is shared among all questions. This can be a string or a dictionary.
-            final_model: The model that is used for the final prompt after compression is performed (options: "basic" and "default").
+            final_model: The model that is used for the final prompt after compression is performed (options: "basic", "default", and "assemblyai/mistral-7b").
             max_output_size: Max output size in tokens
             timeout: The timeout in seconds to wait for the answer(s).
             temperature: Change how deterministic the response is, with 0 being the most deterministic and 1 being the least deterministic.
@@ -214,7 +214,7 @@ def summarize(
         Args:
             context: An optional context on the transcript.
             answer_format: The format on how the summary shall be summarized.
-            final_model: The model that is used for the final prompt after compression is performed (options: "basic" and "default").
+            final_model: The model that is used for the final prompt after compression is performed (options: "basic", "default", and "assemblyai/mistral-7b").
             max_output_size: Max output size in tokens
             timeout: The timeout in seconds to wait for the summary.
             temperature: Change how deterministic the response is, with 0 being the most deterministic and 1 being the least deterministic.
@@ -254,7 +254,7 @@ def action_items(
         Args:
             context: An optional context on the transcript.
             answer_format: The preferred format for the result action items.
-            final_model: The model that is used for the final prompt after compression is performed (options: "basic" and "default").
+            final_model: The model that is used for the final prompt after compression is performed (options: "basic", "default", and "assemblyai/mistral-7b").
             max_output_size: Max output size in tokens
             timeout: The timeout in seconds to wait for the action items response.
             temperature: Change how deterministic the response is, with 0 being the most deterministic and 1 being the least deterministic.
@@ -289,7 +289,7 @@ def task(
 
         Args:
             prompt: The prompt to use for this task.
-            final_model: The model that is used for the final prompt after compression is performed (options: "basic" and "default").
+            final_model: The model that is used for the final prompt after compression is performed (options: "basic", "default", and "assemblyai/mistral-7b").
             max_output_size: Max output size in tokens
             timeout: The timeout in seconds to wait for the task.
             temperature: Change how deterministic the response is, with 0 being the most deterministic and 1 being the least deterministic.

assemblyai/types.py

Lines changed: 6 additions & 1 deletion
@@ -1704,7 +1704,7 @@ def from_lemur_source(cls, source: LemurSource) -> Self:
 
 class LemurModel(str, Enum):
     """
-    LeMUR features two model modes, Basic and Default, that allow you to configure your request
+    LeMUR features three model modes, Basic, Default, and Mistral 7B, that allow you to configure your request
     to suit your needs. These options tell LeMUR whether to use the more advanced Default model or
     the cheaper, faster, but simplified Basic model. The implicit setting is Default when no option
     is explicitly passed in.
@@ -1729,6 +1729,11 @@ class LemurModel(str, Enum):
     for complex/subjective tasks where answers require more nuance to be effective.
     """
 
+    mistral7b = "assemblyai/mistral-7b"
+    """
+    Mistral 7B is an open-source model that works well for summarization and answering questions.
+    """
+
 
 class LemurQuestionAnswer(BaseModel):
     """

tests/unit/test_lemur.py

Lines changed: 34 additions & 0 deletions
@@ -514,6 +514,40 @@ def test_lemur_task_succeeds_input_text(httpx_mock: HTTPXMock):
     assert len(httpx_mock.get_requests()) == 1
 
 
+def test_lemur_task_succeeds_mistral(httpx_mock: HTTPXMock):
+    """
+    Tests whether creating a task request succeeds with Mistral.
+    """
+
+    # create a mock response of a LemurTaskResponse
+    mock_lemur_task_response = factories.generate_dict_factory(
+        factories.LemurTaskResponse
+    )()
+
+    # mock the specific endpoint
+    httpx_mock.add_response(
+        url=f"{aai.settings.base_url}{ENDPOINT_LEMUR}/task",
+        status_code=httpx.codes.OK,
+        method="POST",
+        json=mock_lemur_task_response,
+    )
+
+    # create a task request with Mistral as the final model
+    lemur = aai.Lemur()
+    result = lemur.task(
+        final_model=aai.LemurModel.mistral7b,
+        prompt="Create action items of the meeting",
+        input_text="Test test",
+    )
+
+    # check the response
+    assert isinstance(result, aai.LemurTaskResponse)
+
+    assert result.response == mock_lemur_task_response["response"]
+
+    # check whether we mocked everything
+    assert len(httpx_mock.get_requests()) == 1
+
+
 def test_lemur_ask_coach_fails(httpx_mock: HTTPXMock):
     """
     Tests whether creating a task request fails.
