Skip to content

Gemini Client

A wrapper class for the Google GenAI SDK to interact with Gemini models.

Source code in src/gemini_core/gemini.py
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
class Gemini:
    """
    A wrapper class for the Google GenAI SDK to interact with Gemini models.
    """

    def __init__(
        self,
        api_key: Optional[str] = None,
        model_name: Optional[str] = None,
        system_instruction: Optional[str] = None,
        generation_config: Optional[Union[GeminiConfig, Dict[str, Any]]] = None,
    ):
        """
        Set up the client, resolving configuration from arguments and environment.

        Args:
            api_key (str, optional): Google API key; falls back to the environment.
            model_name (str, optional): Model to target; falls back to the config default.
            system_instruction (str, optional): System instruction applied to every call.
            generation_config (GeminiConfig | Dict, optional): Default generation settings.
        """
        # Prefer environment-derived configuration; fall back to an explicit key.
        try:
            self.config = Config.from_env()
        except ValueError:
            # Environment had no usable config -- an explicit api_key is then mandatory.
            if not api_key:
                raise
            self.config = Config(api_key=api_key)

        # Explicit arguments win over whatever the environment provided.
        if api_key:
            self.config.api_key = api_key
        if model_name:
            self.config.model_name = model_name

        self.client = genai.Client(api_key=self.config.api_key)
        self.system_instruction = system_instruction

        # Normalize the default generation config into a GeminiConfig instance.
        if isinstance(generation_config, GeminiConfig):
            self.generation_config = generation_config
        elif isinstance(generation_config, dict):
            self.generation_config = GeminiConfig(**generation_config)
        else:
            self.generation_config = GeminiConfig()

    def _prepare_config(
        self,
        generation_config: Optional[Union[GeminiConfig, Dict[str, Any]]] = None,
    ) -> types.GenerateContentConfig:
        """
        Merge per-call overrides into the defaults and build the SDK config object.

        Args:
            generation_config (GeminiConfig | Dict, optional): Override default generation config.

        Returns:
            types.GenerateContentConfig: The prepared SDK config.
        """
        merged = self.generation_config.model_copy()
        if generation_config:
            overrides = (
                generation_config
                if isinstance(generation_config, dict)
                else generation_config.model_dump(exclude_unset=True)
            )
            merged = merged.model_copy(update=overrides)

        # The SDK expects plain kwargs; drop any fields that are still None.
        payload = merged.model_dump(exclude_none=True)

        # A response_schema implies JSON output unless the caller said otherwise.
        if "response_schema" in payload and "response_mime_type" not in payload:
            payload["response_mime_type"] = "application/json"

        # Translate the flat thinking_level field into the SDK's thinking_config.
        if "thinking_level" in payload:
            thinking_level = payload.pop("thinking_level")
            if thinking_level:
                payload["thinking_config"] = {
                    "thinking_level": thinking_level,
                    "include_thoughts": True,
                }

        return types.GenerateContentConfig(
            system_instruction=self.system_instruction, **payload
        )

    def generate_content(
        self,
        prompt: Union[str, List[str]],
        generation_config: Optional[Union[GeminiConfig, Dict[str, Any]]] = None,
    ) -> types.GenerateContentResponse:
        """
        Generate content using the Gemini model.

        Args:
            prompt (str | List[str]): The prompt to generate content from.
            generation_config (GeminiConfig | Dict, optional): Override default generation config.

        Returns:
            types.GenerateContentResponse: The generated content response.
        """
        sdk_config = self._prepare_config(generation_config)

        logger.debug(f"Generating content with model {self.config.model_name}")

        try:
            return self.client.models.generate_content(
                model=self.config.model_name,
                contents=prompt,
                config=sdk_config,
            )
        except Exception as exc:
            logger.error(f"Error generating content: {exc}")
            raise

    def upload_file(
        self, path: Union[str, Any], mime_type: Optional[str] = None
    ) -> types.File:
        """
        Upload a file to the File API.

        Args:
            path (str | Path): Path to the file.
            mime_type (str, optional): Mime type of the file.

        Returns:
            types.File: The uploaded file object.
        """
        logger.debug(f"Uploading file: {path}")
        try:
            # An explicit mime type must travel inside an UploadFileConfig.
            upload_config = (
                types.UploadFileConfig(mime_type=mime_type) if mime_type else None
            )
            return self.client.files.upload(file=path, config=upload_config)
        except Exception as exc:
            logger.error(f"Error uploading file: {exc}")
            raise

    def start_chat(
        self,
        history: Optional[List[Union[str, types.Content, Dict[str, Any]]]] = None,
        generation_config: Optional[Union[GeminiConfig, Dict[str, Any]]] = None,
    ) -> chats.Chat:
        """
        Start a chat session.

        Args:
            history (List, optional): Initial chat history.
            generation_config (GeminiConfig | Dict, optional): Configuration for generation.

        Returns:
            chats.Chat: The chat session object.
        """
        sdk_config = self._prepare_config(generation_config)

        logger.debug(f"Starting chat with model {self.config.model_name}")
        return self.client.chats.create(
            model=self.config.model_name, config=sdk_config, history=history
        )

    def count_tokens(self, prompt: Union[str, List[str]]) -> types.CountTokensResponse:
        """
        Count the number of tokens in the prompt.

        Args:
            prompt (str | List[str]): The prompt to count tokens for.

        Returns:
            types.CountTokensResponse: The count tokens response.
        """
        logger.debug(f"Counting tokens for model {self.config.model_name}")
        try:
            return self.client.models.count_tokens(
                model=self.config.model_name,
                contents=prompt,
            )
        except Exception as exc:
            logger.error(f"Error counting tokens: {exc}")
            raise

    def generate_content_stream(
        self,
        prompt: Union[str, List[str]],
        generation_config: Optional[Union[GeminiConfig, Dict[str, Any]]] = None,
    ) -> Any:
        """
        Generate content stream using the Gemini model.

        Args:
            prompt (str | List[str]): The prompt to generate content from.
            generation_config (GeminiConfig | Dict, optional): Override default generation config.

        Yields:
            types.GenerateContentResponse: Chunks of the generated content.
        """
        sdk_config = self._prepare_config(generation_config)

        logger.debug(f"Generating content stream with model {self.config.model_name}")

        try:
            stream = self.client.models.generate_content_stream(
                model=self.config.model_name,
                contents=prompt,
                config=sdk_config,
            )
            for piece in stream:
                yield piece
        except Exception as exc:
            logger.error(f"Error generating content stream: {exc}")
            raise

    async def generate_content_async(
        self,
        prompt: Union[str, List[str]],
        generation_config: Optional[Union[GeminiConfig, Dict[str, Any]]] = None,
    ) -> types.GenerateContentResponse:
        """
        Generate content asynchronously using the Gemini model.

        Args:
            prompt (str | List[str]): The prompt to generate content from.
            generation_config (GeminiConfig | Dict, optional): Override default generation config.

        Returns:
            types.GenerateContentResponse: The generated content response.
        """
        sdk_config = self._prepare_config(generation_config)

        logger.debug(f"Generating content async with model {self.config.model_name}")

        try:
            return await self.client.aio.models.generate_content(
                model=self.config.model_name,
                contents=prompt,
                config=sdk_config,
            )
        except Exception as exc:
            logger.error(f"Error generating content async: {exc}")
            raise

    async def generate_content_stream_async(
        self,
        prompt: Union[str, List[str]],
        generation_config: Optional[Union[GeminiConfig, Dict[str, Any]]] = None,
    ) -> Any:
        """
        Generate content stream asynchronously using the Gemini model.

        Args:
            prompt (str | List[str]): The prompt to generate content from.
            generation_config (GeminiConfig | Dict, optional): Override default generation config.

        Yields:
            types.GenerateContentResponse: Chunks of the generated content.
        """
        sdk_config = self._prepare_config(generation_config)

        logger.debug(
            f"Generating content stream async with model {self.config.model_name}"
        )

        try:
            async for piece in self.client.aio.models.generate_content_stream(
                model=self.config.model_name,
                contents=prompt,
                config=sdk_config,
            ):
                yield piece
        except Exception as exc:
            logger.error(f"Error generating content stream async: {exc}")
            raise

    async def count_tokens_async(
        self, prompt: Union[str, List[str]]
    ) -> types.CountTokensResponse:
        """
        Count the number of tokens in the prompt asynchronously.

        Args:
            prompt (str | List[str]): The prompt to count tokens for.

        Returns:
            types.CountTokensResponse: The count tokens response.
        """
        logger.debug(f"Counting tokens async for model {self.config.model_name}")
        try:
            return await self.client.aio.models.count_tokens(
                model=self.config.model_name,
                contents=prompt,
            )
        except Exception as exc:
            logger.error(f"Error counting tokens async: {exc}")
            raise

    async def upload_file_async(
        self, path: Union[str, Any], mime_type: Optional[str] = None
    ) -> types.File:
        """
        Upload a file to the File API asynchronously.

        Args:
            path (str | Path): Path to the file.
            mime_type (str, optional): Mime type of the file.

        Returns:
            types.File: The uploaded file object.
        """
        logger.debug(f"Uploading file async: {path}")
        try:
            # An explicit mime type must travel inside an UploadFileConfig.
            upload_config = (
                types.UploadFileConfig(mime_type=mime_type) if mime_type else None
            )
            return await self.client.aio.files.upload(
                file=path, config=upload_config
            )
        except Exception as exc:
            logger.error(f"Error uploading file async: {exc}")
            raise

    async def start_chat_async(
        self,
        history: Optional[List[Union[str, types.Content, Dict[str, Any]]]] = None,
        generation_config: Optional[Union[GeminiConfig, Dict[str, Any]]] = None,
    ) -> chats.AsyncChat:
        """
        Start an async chat session.

        Args:
            history (List, optional): Initial chat history.
            generation_config (GeminiConfig | Dict, optional): Configuration for generation.

        Returns:
            chats.AsyncChat: The async chat session object.
        """
        sdk_config = self._prepare_config(generation_config)

        logger.debug(f"Starting async chat with model {self.config.model_name}")
        return self.client.aio.chats.create(
            model=self.config.model_name, config=sdk_config, history=history
        )

__init__(api_key=None, model_name=None, system_instruction=None, generation_config=None)

Initialize the Gemini client.

Parameters:

Name Type Description Default
api_key str

Google API key. If None, tries to load from environment.

None
model_name str

Name of the model to use. Defaults to config value.

None
system_instruction str

System instruction for the model.

None
generation_config GeminiConfig | Dict

Configuration for generation.

None
Source code in src/gemini_core/gemini.py
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
def __init__(
    self,
    api_key: Optional[str] = None,
    model_name: Optional[str] = None,
    system_instruction: Optional[str] = None,
    generation_config: Optional[Union[GeminiConfig, Dict[str, Any]]] = None,
):
    """
    Initialize the Gemini client.

    Args:
        api_key (str, optional): Google API key. If None, tries to load from environment.
        model_name (str, optional): Name of the model to use. Defaults to config value.
        system_instruction (str, optional): System instruction for the model.
        generation_config (GeminiConfig | Dict, optional): Configuration for generation.
    """
    # Load default config from env if not provided
    try:
        self.config = Config.from_env()
    except ValueError:
        # If loading from env fails (e.g. no API key), we expect api_key to be passed explicitly
        if not api_key:
            raise
        self.config = Config(api_key=api_key)

    # Override config with passed arguments
    if api_key:
        self.config.api_key = api_key
    if model_name:
        self.config.model_name = model_name

    self.client = genai.Client(api_key=self.config.api_key)
    self.system_instruction = system_instruction

    if isinstance(generation_config, GeminiConfig):
        self.generation_config = generation_config
    elif isinstance(generation_config, dict):
        self.generation_config = GeminiConfig(**generation_config)
    else:
        self.generation_config = GeminiConfig()

count_tokens(prompt)

Count the number of tokens in the prompt.

Parameters:

Name Type Description Default
prompt str | List[str]

The prompt to count tokens for.

required

Returns:

Type Description
CountTokensResponse

types.CountTokensResponse: The count tokens response.

Source code in src/gemini_core/gemini.py
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
def count_tokens(self, prompt: Union[str, List[str]]) -> types.CountTokensResponse:
    """
    Count the number of tokens in the prompt.

    Args:
        prompt (str | List[str]): The prompt to count tokens for.

    Returns:
        types.CountTokensResponse: The count tokens response.
    """
    logger.debug(f"Counting tokens for model {self.config.model_name}")
    try:
        response = self.client.models.count_tokens(
            model=self.config.model_name,
            contents=prompt,
        )
        return response
    except Exception as e:
        logger.error(f"Error counting tokens: {e}")
        raise

count_tokens_async(prompt) async

Count the number of tokens in the prompt asynchronously.

Parameters:

Name Type Description Default
prompt str | List[str]

The prompt to count tokens for.

required

Returns:

Type Description
CountTokensResponse

types.CountTokensResponse: The count tokens response.

Source code in src/gemini_core/gemini.py
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
async def count_tokens_async(
    self, prompt: Union[str, List[str]]
) -> types.CountTokensResponse:
    """
    Count the number of tokens in the prompt asynchronously.

    Args:
        prompt (str | List[str]): The prompt to count tokens for.

    Returns:
        types.CountTokensResponse: The count tokens response.
    """
    logger.debug(f"Counting tokens async for model {self.config.model_name}")
    try:
        response = await self.client.aio.models.count_tokens(
            model=self.config.model_name,
            contents=prompt,
        )
        return response
    except Exception as e:
        logger.error(f"Error counting tokens async: {e}")
        raise

generate_content(prompt, generation_config=None)

Generate content using the Gemini model.

Parameters:

Name Type Description Default
prompt str | List[str]

The prompt to generate content from.

required
generation_config GeminiConfig | Dict

Override default generation config.

None

Returns:

Type Description
GenerateContentResponse

types.GenerateContentResponse: The generated content response.

Source code in src/gemini_core/gemini.py
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
def generate_content(
    self,
    prompt: Union[str, List[str]],
    generation_config: Optional[Union[GeminiConfig, Dict[str, Any]]] = None,
) -> types.GenerateContentResponse:
    """
    Generate content using the Gemini model.

    Args:
        prompt (str | List[str]): The prompt to generate content from.
        generation_config (GeminiConfig | Dict, optional): Override default generation config.

    Returns:
        types.GenerateContentResponse: The generated content response.
    """
    gc_config = self._prepare_config(generation_config)

    logger.debug(f"Generating content with model {self.config.model_name}")

    try:
        response = self.client.models.generate_content(
            model=self.config.model_name,
            contents=prompt,
            config=gc_config,
        )
        return response
    except Exception as e:
        logger.error(f"Error generating content: {e}")
        raise

generate_content_async(prompt, generation_config=None) async

Generate content asynchronously using the Gemini model.

Parameters:

Name Type Description Default
prompt str | List[str]

The prompt to generate content from.

required
generation_config GeminiConfig | Dict

Override default generation config.

None

Returns:

Type Description
GenerateContentResponse

types.GenerateContentResponse: The generated content response.

Source code in src/gemini_core/gemini.py
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
async def generate_content_async(
    self,
    prompt: Union[str, List[str]],
    generation_config: Optional[Union[GeminiConfig, Dict[str, Any]]] = None,
) -> types.GenerateContentResponse:
    """
    Generate content asynchronously using the Gemini model.

    Args:
        prompt (str | List[str]): The prompt to generate content from.
        generation_config (GeminiConfig | Dict, optional): Override default generation config.

    Returns:
        types.GenerateContentResponse: The generated content response.
    """
    gc_config = self._prepare_config(generation_config)

    logger.debug(f"Generating content async with model {self.config.model_name}")

    try:
        response = await self.client.aio.models.generate_content(
            model=self.config.model_name,
            contents=prompt,
            config=gc_config,
        )
        return response
    except Exception as e:
        logger.error(f"Error generating content async: {e}")
        raise

generate_content_stream(prompt, generation_config=None)

Generate content stream using the Gemini model.

Parameters:

Name Type Description Default
prompt str | List[str]

The prompt to generate content from.

required
generation_config GeminiConfig | Dict

Override default generation config.

None

Yields:

Type Description
Any

types.GenerateContentResponse: Chunks of the generated content.

Source code in src/gemini_core/gemini.py
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
def generate_content_stream(
    self,
    prompt: Union[str, List[str]],
    generation_config: Optional[Union[GeminiConfig, Dict[str, Any]]] = None,
) -> Any:
    """
    Generate content stream using the Gemini model.

    Args:
        prompt (str | List[str]): The prompt to generate content from.
        generation_config (GeminiConfig | Dict, optional): Override default generation config.

    Yields:
        types.GenerateContentResponse: Chunks of the generated content.
    """
    gc_config = self._prepare_config(generation_config)

    logger.debug(f"Generating content stream with model {self.config.model_name}")

    try:
        response = self.client.models.generate_content_stream(
            model=self.config.model_name,
            contents=prompt,
            config=gc_config,
        )
        for chunk in response:
            yield chunk
    except Exception as e:
        logger.error(f"Error generating content stream: {e}")
        raise

generate_content_stream_async(prompt, generation_config=None) async

Generate content stream asynchronously using the Gemini model.

Parameters:

Name Type Description Default
prompt str | List[str]

The prompt to generate content from.

required
generation_config GeminiConfig | Dict

Override default generation config.

None

Yields:

Type Description
Any

types.GenerateContentResponse: Chunks of the generated content.

Source code in src/gemini_core/gemini.py
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
async def generate_content_stream_async(
    self,
    prompt: Union[str, List[str]],
    generation_config: Optional[Union[GeminiConfig, Dict[str, Any]]] = None,
) -> Any:
    """
    Generate content stream asynchronously using the Gemini model.

    Args:
        prompt (str | List[str]): The prompt to generate content from.
        generation_config (GeminiConfig | Dict, optional): Override default generation config.

    Yields:
        types.GenerateContentResponse: Chunks of the generated content.
    """
    gc_config = self._prepare_config(generation_config)

    logger.debug(
        f"Generating content stream async with model {self.config.model_name}"
    )

    try:
        async for chunk in self.client.aio.models.generate_content_stream(
            model=self.config.model_name,
            contents=prompt,
            config=gc_config,
        ):
            yield chunk
    except Exception as e:
        logger.error(f"Error generating content stream async: {e}")
        raise

start_chat(history=None, generation_config=None)

Start a chat session.

Parameters:

Name Type Description Default
history List

Initial chat history.

None
generation_config GeminiConfig | Dict

Configuration for generation.

None

Returns:

Type Description
Chat

chats.Chat: The chat session object.

Source code in src/gemini_core/gemini.py
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
def start_chat(
    self,
    history: Optional[List[Union[str, types.Content, Dict[str, Any]]]] = None,
    generation_config: Optional[Union[GeminiConfig, Dict[str, Any]]] = None,
) -> chats.Chat:
    """
    Start a chat session.

    Args:
        history (List, optional): Initial chat history.
        generation_config (GeminiConfig | Dict, optional): Configuration for generation.

    Returns:
        chats.Chat: The chat session object.
    """
    gc_config = self._prepare_config(generation_config)

    logger.debug(f"Starting chat with model {self.config.model_name}")
    return self.client.chats.create(
        model=self.config.model_name, config=gc_config, history=history
    )

start_chat_async(history=None, generation_config=None) async

Start an async chat session.

Parameters:

Name Type Description Default
history List

Initial chat history.

None
generation_config GeminiConfig | Dict

Configuration for generation.

None

Returns:

Type Description
AsyncChat

chats.AsyncChat: The async chat session object.

Source code in src/gemini_core/gemini.py
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
async def start_chat_async(
    self,
    history: Optional[List[Union[str, types.Content, Dict[str, Any]]]] = None,
    generation_config: Optional[Union[GeminiConfig, Dict[str, Any]]] = None,
) -> chats.AsyncChat:
    """
    Start an async chat session.

    Args:
        history (List, optional): Initial chat history.
        generation_config (GeminiConfig | Dict, optional): Configuration for generation.

    Returns:
        chats.AsyncChat: The async chat session object.
    """
    gc_config = self._prepare_config(generation_config)

    logger.debug(f"Starting async chat with model {self.config.model_name}")
    return self.client.aio.chats.create(
        model=self.config.model_name, config=gc_config, history=history
    )

upload_file(path, mime_type=None)

Upload a file to the File API.

Parameters:

Name Type Description Default
path str | Path

Path to the file.

required
mime_type str

Mime type of the file.

None

Returns:

Type Description
File

types.File: The uploaded file object.

Source code in src/gemini_core/gemini.py
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
def upload_file(
    self, path: Union[str, Any], mime_type: Optional[str] = None
) -> types.File:
    """
    Upload a file to the File API.

    Args:
        path (str | Path): Path to the file.
        mime_type (str, optional): Mime type of the file.

    Returns:
        types.File: The uploaded file object.
    """
    logger.debug(f"Uploading file: {path}")
    try:
        # If mime_type is provided, we might need to pass it in config
        upload_config = None
        if mime_type:
            upload_config = types.UploadFileConfig(mime_type=mime_type)

        file_obj = self.client.files.upload(file=path, config=upload_config)
        return file_obj
    except Exception as e:
        logger.error(f"Error uploading file: {e}")
        raise

upload_file_async(path, mime_type=None) async

Upload a file to the File API asynchronously.

Parameters:

Name Type Description Default
path str | Path

Path to the file.

required
mime_type str

Mime type of the file.

None

Returns:

Type Description
File

types.File: The uploaded file object.

Source code in src/gemini_core/gemini.py
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
async def upload_file_async(
    self, path: Union[str, Any], mime_type: Optional[str] = None
) -> types.File:
    """
    Upload a file to the File API asynchronously.

    Args:
        path (str | Path): Path to the file.
        mime_type (str, optional): Mime type of the file.

    Returns:
        types.File: The uploaded file object.
    """
    logger.debug(f"Uploading file async: {path}")
    try:
        # If mime_type is provided, we might need to pass it in config
        upload_config = None
        if mime_type:
            upload_config = types.UploadFileConfig(mime_type=mime_type)

        file_obj = await self.client.aio.files.upload(
            file=path, config=upload_config
        )
        return file_obj
    except Exception as e:
        logger.error(f"Error uploading file async: {e}")
        raise