diff --git a/assemblyai/__version__.py b/assemblyai/__version__.py
index 9442842..da7ed90 100644
--- a/assemblyai/__version__.py
+++ b/assemblyai/__version__.py
@@ -1 +1 @@
-__version__ = "0.39.1"
+__version__ = "0.40.0"
diff --git a/assemblyai/types.py b/assemblyai/types.py
index 4751f1d..51177bf 100644
--- a/assemblyai/types.py
+++ b/assemblyai/types.py
@@ -19,13 +19,13 @@
 try:
     # pydantic v2 import
-    from pydantic import UUID4, BaseModel, ConfigDict, Field
+    from pydantic import UUID4, BaseModel, ConfigDict, Field, field_validator
     from pydantic_settings import BaseSettings, SettingsConfigDict
 
     pydantic_v2 = True
 except ImportError:
     # pydantic v1 import
-    from pydantic.v1 import UUID4, BaseModel, BaseSettings, ConfigDict, Field
+    from pydantic.v1 import UUID4, BaseModel, BaseSettings, ConfigDict, Field, validator
 
     pydantic_v2 = False
@@ -1468,6 +1468,19 @@ class Word(BaseModel):
     speaker: Optional[str] = None
     channel: Optional[str] = None
 
+    # This is a workaround to address an issue where sentiment_analysis_results
+    # may return sentiments where `start` is null.
+    if pydantic_v2:
+
+        @field_validator("start", mode="before")
+        def set_start_default(cls, v):
+            return 0 if v is None else v
+    else:
+
+        @validator("start", pre=True)
+        def set_start_default(cls, v):
+            return 0 if v is None else v
+
 
 class UtteranceWord(Word):
     channel: Optional[str] = None
@@ -2031,9 +2044,14 @@ class LemurModel(str, Enum):
     LeMUR features different model modes that allow you to configure your request to suit your needs.
     """
 
+    claude3_7_sonnet_20250219 = "anthropic/claude-3-7-sonnet"
+    """
+    Claude 3.7 Sonnet is the most intelligent model to date, providing the highest level of intelligence and capability with toggleable extended thinking.
+    """
+
     claude3_5_sonnet = "anthropic/claude-3-5-sonnet"
     """
-    Claude 3.5 Sonnet is the most intelligent model to date, outperforming Claude 3 Opus on a wide range of evaluations, with the speed and cost of Claude 3 Sonnet.
+    Claude 3.5 Sonnet is the previous most intelligent model, providing a high level of intelligence and capability.
     """
 
     claude3_opus = "anthropic/claude-3-opus"
@@ -2041,9 +2059,14 @@
     Claude 3 Opus is good at handling complex analysis, longer tasks with many steps, and higher-order math and coding tasks.
     """
 
+    claude3_5_haiku_20241022 = "anthropic/claude-3-5-haiku"
+    """
+    Claude 3.5 Haiku is the fastest model, providing intelligence at blazing speeds.
+    """
+
     claude3_haiku = "anthropic/claude-3-haiku"
     """
-    Claude 3 Haiku is the fastest model that can execute lightweight actions.
+    Claude 3 Haiku is the fastest and most compact model for near-instant responsiveness.
     """
 
     claude3_sonnet = "anthropic/claude-3-sonnet"
diff --git a/tests/unit/test_sentiment_analysis.py b/tests/unit/test_sentiment_analysis.py
index e8fdfd9..2d86c01 100644
--- a/tests/unit/test_sentiment_analysis.py
+++ b/tests/unit/test_sentiment_analysis.py
@@ -76,3 +76,33 @@ def test_sentiment_analysis_enabled(httpx_mock: HTTPXMock):
     assert (
         transcript_sentiment_result.speaker == response_sentiment_result["speaker"]
     )
+
+
+def test_sentiment_analysis_null_start(httpx_mock: HTTPXMock):
+    """
+    Tests that null `start` values are converted to 0.
+    """
+    mock_response = {
+        "audio_url": "https://example/audio.mp3",
+        "status": "completed",
+        "sentiment_analysis_results": [
+            {
+                "text": "hi",
+                "start": None,
+                "end": 100,
+                "confidence": 0.99,
+                "sentiment": "POSITIVE",
+            }
+        ],
+    }
+    request_body, transcript = unit_test_utils.submit_mock_transcription_request(
+        httpx_mock,
+        mock_response=mock_response,
+        config=aai.TranscriptionConfig(sentiment_analysis=True),
+    )
+
+    for response_sentiment_result, transcript_sentiment_result in zip(
+        mock_response["sentiment_analysis_results"],
+        transcript.sentiment_analysis,
+    ):
+        assert transcript_sentiment_result.start == 0