about summary refs log tree commit diff
path: root/.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info
diff options
context:
space:
mode:
authorS. Solomon Darnell2025-03-28 21:52:21 -0500
committerS. Solomon Darnell2025-03-28 21:52:21 -0500
commit4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
treeee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info
parentcc961e04ba734dd72309fb548a2f97d67d578813 (diff)
downloadgn-ai-master.tar.gz
two version of R2R are here HEAD master
Diffstat (limited to '.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info')
-rw-r--r--.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info/INSTALLER1
-rw-r--r--.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info/LICENSE202
-rw-r--r--.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info/METADATA973
-rw-r--r--.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info/RECORD46
-rw-r--r--.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info/WHEEL5
-rw-r--r--.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info/top_level.txt1
6 files changed, 1228 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info/INSTALLER b/.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info/INSTALLER
new file mode 100644
index 00000000..a1b589e3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info/LICENSE b/.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info/METADATA b/.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info/METADATA
new file mode 100644
index 00000000..7f80af87
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info/METADATA
@@ -0,0 +1,973 @@
+Metadata-Version: 2.2
+Name: google-genai
+Version: 0.6.0
+Summary: GenAI Python SDK
+Author-email: Google LLC <googleapis-packages@google.com>
+License: Apache-2.0
+Project-URL: Homepage, https://github.com/googleapis/python-genai
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Topic :: Internet
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Python: >=3.9
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: google-auth<3.0.0dev,>=2.14.1
+Requires-Dist: pillow<12.0.0,>=10.0.0
+Requires-Dist: pydantic<3.0.0dev,>=2.0.0
+Requires-Dist: requests<3.0.0dev,>=2.28.1
+Requires-Dist: websockets<15.0dev,>=13.0
+
+# Google Gen AI SDK
+
+[![PyPI version](https://img.shields.io/pypi/v/google-genai.svg)](https://pypi.org/project/google-genai/)
+
+--------
+**Documentation:** https://googleapis.github.io/python-genai/
+
+-----
+
+Google Gen AI Python SDK provides an interface for developers to integrate Google's generative models into their Python applications. It supports the [Gemini Developer API](https://ai.google.dev/gemini-api/docs) and [Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/overview) APIs. This is an early release. API is subject to change. Please do not use this SDK in production environments at this stage.
+
+## Installation
+
+```cmd
+pip install google-genai
+```
+
+## Imports
+
+```python
+from google import genai
+from google.genai import types
+```
+
+## Create a client
+
+Please run one of the following code blocks to create a client for
+different services ([Gemini Developer API](https://ai.google.dev/gemini-api/docs) or [Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/overview)).
+
+```python
+# Only run this block for Gemini Developer API
+client = genai.Client(api_key="GEMINI_API_KEY")
+```
+
+```python
+# Only run this block for Vertex AI API
+client = genai.Client(
+    vertexai=True, project="your-project-id", location="us-central1"
+)
+```
+
+## Types
+
+Parameter types can be specified as either dictionaries (`TypedDict`) or
+[Pydantic Models](https://pydantic.readthedocs.io/en/stable/model.html).
+Pydantic model types are available in the `types` module.
+
+## Models
+
+The `client.models` module exposes model inferencing and model getters.
+
+### Generate Content
+
+#### with text content
+
+```python
+response = client.models.generate_content(
+    model="gemini-2.0-flash-exp", contents="What is your name?"
+)
+print(response.text)
+```
+
+#### with uploaded file (Google AI only)
+download the file in console.
+
+```cmd
+!wget -q https://storage.googleapis.com/generativeai-downloads/data/a11.txt
+```
+
+python code.
+
+```python
+file = client.files.upload(path="a11.txt")
+response = client.models.generate_content(
+    model="gemini-2.0-flash-exp", contents=["Summarize this file", file]
+)
+print(response.text)
+```
+
+### System Instructions and Other Configs
+
+```python
+response = client.models.generate_content(
+    model="gemini-2.0-flash-exp",
+    contents="high",
+    config=types.GenerateContentConfig(
+        system_instruction="I say high, you say low",
+        temperature=0.3,
+    ),
+)
+print(response.text)
+```
+
+### Typed Config
+
+All API methods support Pydantic types for parameters as well as
+dictionaries. You can get the type from `google.genai.types`.
+
+```python
+response = client.models.generate_content(
+    model="gemini-2.0-flash-exp",
+    contents=types.Part.from_text("Why is the sky blue?"),
+    config=types.GenerateContentConfig(
+        temperature=0,
+        top_p=0.95,
+        top_k=20,
+        candidate_count=1,
+        seed=5,
+        max_output_tokens=100,
+        stop_sequences=["STOP!"],
+        presence_penalty=0.0,
+        frequency_penalty=0.0,
+    ),
+)
+
+response
+```
+
+### List Base Models
+
+To retrieve tuned models, see [list tuned models](#list-tuned-models).
+
+```python
+for model in client.models.list(config={'query_base':True}):
+    print(model)
+```
+
+```python
+pager = client.models.list(config={"page_size": 10, 'query_base':True})
+print(pager.page_size)
+print(pager[0])
+pager.next_page()
+print(pager[0])
+```
+
+#### Async
+
+```python
+async for job in await client.aio.models.list(config={'query_base':True}):
+    print(job)
+```
+
+```python
+async_pager = await client.aio.models.list(config={"page_size": 10, 'query_base':True})
+print(async_pager.page_size)
+print(async_pager[0])
+await async_pager.next_page()
+print(async_pager[0])
+```
+
+### Safety Settings
+
+```python
+response = client.models.generate_content(
+    model="gemini-2.0-flash-exp",
+    contents="Say something bad.",
+    config=types.GenerateContentConfig(
+        safety_settings=[
+            types.SafetySetting(
+                category="HARM_CATEGORY_HATE_SPEECH",
+                threshold="BLOCK_ONLY_HIGH",
+            )
+        ]
+    ),
+)
+print(response.text)
+```
+
+### Function Calling
+
+#### Automatic Python function Support
+
+You can pass a Python function directly and it will be automatically
+called and its response returned to the model.
+
+```python
+def get_current_weather(location: str) -> str:
+    """Returns the current weather.
+
+    Args:
+      location: The city and state, e.g. San Francisco, CA
+    """
+    return "sunny"
+
+
+response = client.models.generate_content(
+    model="gemini-2.0-flash-exp",
+    contents="What is the weather like in Boston?",
+    config=types.GenerateContentConfig(tools=[get_current_weather]),
+)
+
+print(response.text)
+```
+
+#### Manually declare and invoke a function for function calling
+
+If you don't want to use the automatic function support, you can manually
+declare the function and invoke it.
+
+The following example shows how to declare a function and pass it as a tool.
+Then you will receive a function call part in the response.
+
+```python
+function = types.FunctionDeclaration(
+    name="get_current_weather",
+    description="Get the current weather in a given location",
+    parameters=types.FunctionParameters(
+        type="OBJECT",
+        properties={
+            "location": types.ParameterType(
+                type="STRING",
+                description="The city and state, e.g. San Francisco, CA",
+            ),
+        },
+        required=["location"],
+    ),
+)
+
+tool = types.Tool(function_declarations=[function])
+
+response = client.models.generate_content(
+    model="gemini-2.0-flash-exp",
+    contents="What is the weather like in Boston?",
+    config=types.GenerateContentConfig(tools=[tool]),
+)
+
+print(response.function_calls[0])
+```
+
+After you receive the function call part from the model, you can invoke the function
+and get the function response. And then you can pass the function response to
+the model.
+The following example shows how to do it for a simple function invocation.
+
+```python
+user_prompt_content = types.Content(
+    role="user",
+    parts=[types.Part.from_text("What is the weather like in Boston?")],
+)
+function_call_content = response.candidates[0].content
+function_call_part = function_call_content.parts[0]
+
+
+try:
+    function_result = get_current_weather(
+        **function_call_part.function_call.args
+    )
+    function_response = {"result": function_result}
+except (
+    Exception
+) as e:  # instead of raising the exception, you can let the model handle it
+    function_response = {"error": str(e)}
+
+
+function_response_part = types.Part.from_function_response(
+    name=function_call_part.function_call.name,
+    response=function_response,
+)
+function_response_content = types.Content(
+    role="tool", parts=[function_response_part]
+)
+
+response = client.models.generate_content(
+    model="gemini-2.0-flash-exp",
+    contents=[
+        user_prompt_content,
+        function_call_content,
+        function_response_content,
+    ],
+    config=types.GenerateContentConfig(
+        tools=[tool],
+    ),
+)
+
+print(response.text)
+```
+
+### JSON Response Schema
+
+#### Pydantic Model Schema support
+
+Schemas can be provided as Pydantic Models.
+
+```python
+from pydantic import BaseModel
+
+
+class CountryInfo(BaseModel):
+    name: str
+    population: int
+    capital: str
+    continent: str
+    gdp: int
+    official_language: str
+    total_area_sq_mi: int
+
+
+response = client.models.generate_content(
+    model="gemini-2.0-flash-exp",
+    contents="Give me information for the United States.",
+    config=types.GenerateContentConfig(
+        response_mime_type="application/json",
+        response_schema=CountryInfo,
+    ),
+)
+print(response.text)
+```
+
+```python
+response = client.models.generate_content(
+    model="gemini-2.0-flash-exp",
+    contents="Give me information for the United States.",
+    config=types.GenerateContentConfig(
+        response_mime_type="application/json",
+        response_schema={
+            "required": [
+                "name",
+                "population",
+                "capital",
+                "continent",
+                "gdp",
+                "official_language",
+                "total_area_sq_mi",
+            ],
+            "properties": {
+                "name": {"type": "STRING"},
+                "population": {"type": "INTEGER"},
+                "capital": {"type": "STRING"},
+                "continent": {"type": "STRING"},
+                "gdp": {"type": "INTEGER"},
+                "official_language": {"type": "STRING"},
+                "total_area_sq_mi": {"type": "INTEGER"},
+            },
+            "type": "OBJECT",
+        },
+    ),
+)
+print(response.text)
+```
+
+### Streaming
+
+#### Streaming for text content
+
+```python
+for chunk in client.models.generate_content_stream(
+    model="gemini-2.0-flash-exp", contents="Tell me a story in 300 words."
+):
+    print(chunk.text, end="")
+```
+
+#### Streaming for image content
+
+If your image is stored in [Google Cloud Storage](https://cloud.google.com/storage),
+you can use the `from_uri` class method to create a `Part` object.
+
+```python
+for chunk in client.models.generate_content_stream(
+    model="gemini-2.0-flash-exp",
+    contents=[
+        "What is this image about?",
+        types.Part.from_uri(
+            file_uri="gs://generativeai-downloads/images/scones.jpg",
+            mime_type="image/jpeg",
+        ),
+    ],
+):
+    print(chunk.text, end="")
+```
+
+If your image is stored in your local file system, you can read it in as bytes
+data and use the `from_bytes` class method to create a `Part` object.
+
+```python
+YOUR_IMAGE_PATH = "your_image_path"
+YOUR_IMAGE_MIME_TYPE = "your_image_mime_type"
+with open(YOUR_IMAGE_PATH, "rb") as f:
+    image_bytes = f.read()
+
+for chunk in client.models.generate_content_stream(
+    model="gemini-2.0-flash-exp",
+    contents=[
+        "What is this image about?",
+        types.Part.from_bytes(data=image_bytes, mime_type=YOUR_IMAGE_MIME_TYPE),
+    ],
+):
+    print(chunk.text, end="")
+```
+
+### Async
+
+`client.aio` exposes all the analogous [`async` methods](https://docs.python.org/3/library/asyncio.html)
+that are available on `client`
+
+For example, `client.aio.models.generate_content` is the `async` version
+of `client.models.generate_content`
+
+```python
+response = await client.aio.models.generate_content(
+    model="gemini-2.0-flash-exp", contents="Tell me a story in 300 words."
+)
+
+print(response.text)
+```
+
+### Streaming
+
+```python
+async for response in client.aio.models.generate_content_stream(
+    model="gemini-2.0-flash-exp", contents="Tell me a story in 300 words."
+):
+    print(response.text, end="")
+```
+
+### Count Tokens and Compute Tokens
+
+```python
+response = client.models.count_tokens(
+    model="gemini-2.0-flash-exp",
+    contents="What is your name?",
+)
+print(response)
+```
+
+#### Compute Tokens
+
+Compute tokens is only supported in Vertex AI.
+
+```python
+response = client.models.compute_tokens(
+    model="gemini-2.0-flash-exp",
+    contents="What is your name?",
+)
+print(response)
+```
+
+##### Async
+
+```python
+response = await client.aio.models.count_tokens(
+    model="gemini-2.0-flash-exp",
+    contents="What is your name?",
+)
+print(response)
+```
+
+### Embed Content
+
+```python
+response = client.models.embed_content(
+    model="text-embedding-004",
+    contents="What is your name?",
+)
+print(response)
+```
+
+```python
+# multiple contents with config
+response = client.models.embed_content(
+    model="text-embedding-004",
+    contents=["What is your name?", "What is your age?"],
+    config=types.EmbedContentConfig(output_dimensionality=10),
+)
+
+print(response)
+```
+
+### Imagen
+
+#### Generate Image
+
+Support for generate image in Gemini Developer API is behind an allowlist
+
+```python
+# Generate Image
+response1 = client.models.generate_image(
+    model="imagen-3.0-generate-001",
+    prompt="An umbrella in the foreground, and a rainy night sky in the background",
+    config=types.GenerateImageConfig(
+        negative_prompt="human",
+        number_of_images=1,
+        include_rai_reason=True,
+        output_mime_type="image/jpeg",
+    ),
+)
+response1.generated_images[0].image.show()
+```
+
+#### Upscale Image
+
+Upscale image is only supported in Vertex AI.
+
+```python
+# Upscale the generated image from above
+response2 = client.models.upscale_image(
+    model="imagen-3.0-generate-001",
+    image=response1.generated_images[0].image,
+    upscale_factor="x2",
+    config=types.UpscaleImageConfig(
+        include_rai_reason=True,
+        output_mime_type="image/jpeg",
+    ),
+)
+response2.generated_images[0].image.show()
+```
+
+#### Edit Image
+
+Edit image uses a separate model from generate and upscale.
+
+Edit image is only supported in Vertex AI.
+
+```python
+# Edit the generated image from above
+from google.genai.types import RawReferenceImage, MaskReferenceImage
+
+raw_ref_image = RawReferenceImage(
+    reference_id=1,
+    reference_image=response1.generated_images[0].image,
+)
+
+# Model computes a mask of the background
+mask_ref_image = MaskReferenceImage(
+    reference_id=2,
+    config=types.MaskReferenceConfig(
+        mask_mode="MASK_MODE_BACKGROUND",
+        mask_dilation=0,
+    ),
+)
+
+response3 = client.models.edit_image(
+    model="imagen-3.0-capability-001",
+    prompt="Sunlight and clear sky",
+    reference_images=[raw_ref_image, mask_ref_image],
+    config=types.EditImageConfig(
+        edit_mode="EDIT_MODE_INPAINT_INSERTION",
+        number_of_images=1,
+        negative_prompt="human",
+        include_rai_reason=True,
+        output_mime_type="image/jpeg",
+    ),
+)
+response3.generated_images[0].image.show()
+```
+
+## Chats
+
+Create a chat session to start a multi-turn conversation with the model.
+
+### Send Message
+
+```python
+chat = client.chats.create(model="gemini-2.0-flash-exp")
+response = chat.send_message("tell me a story")
+print(response.text)
+```
+
+### Streaming
+
+```python
+chat = client.chats.create(model="gemini-2.0-flash-exp")
+for chunk in chat.send_message_stream("tell me a story"):
+    print(chunk.text)
+```
+
+### Async
+
+```python
+chat = client.aio.chats.create(model="gemini-2.0-flash-exp")
+response = await chat.send_message("tell me a story")
+print(response.text)
+```
+
+### Async Streaming
+
+```python
+chat = client.aio.chats.create(model="gemini-2.0-flash-exp")
+async for chunk in chat.send_message_stream("tell me a story"):
+    print(chunk.text)
+```
+
+## Files
+
+Files are only supported in Gemini Developer API.
+
+```cmd
+!gsutil cp gs://cloud-samples-data/generative-ai/pdf/2312.11805v3.pdf .
+!gsutil cp gs://cloud-samples-data/generative-ai/pdf/2403.05530.pdf .
+```
+
+### Upload
+
+```python
+file1 = client.files.upload(path="2312.11805v3.pdf")
+file2 = client.files.upload(path="2403.05530.pdf")
+
+print(file1)
+print(file2)
+```
+
+### Delete
+
+```python
+file3 = client.files.upload(path="2312.11805v3.pdf")
+
+client.files.delete(name=file3.name)
+```
+
+## Caches
+
+`client.caches` contains the control plane APIs for cached content
+
+### Create
+
+```python
+if client.vertexai:
+    file_uris = [
+        "gs://cloud-samples-data/generative-ai/pdf/2312.11805v3.pdf",
+        "gs://cloud-samples-data/generative-ai/pdf/2403.05530.pdf",
+    ]
+else:
+    file_uris = [file1.uri, file2.uri]
+
+cached_content = client.caches.create(
+    model="gemini-1.5-pro-002",
+    config=types.CreateCachedContentConfig(
+        contents=[
+            types.Content(
+                role="user",
+                parts=[
+                    types.Part.from_uri(
+                        file_uri=file_uris[0], mime_type="application/pdf"
+                    ),
+                    types.Part.from_uri(
+                        file_uri=file_uris[1],
+                        mime_type="application/pdf",
+                    ),
+                ],
+            )
+        ],
+        system_instruction="What is the sum of the two pdfs?",
+        display_name="test cache",
+        ttl="3600s",
+    ),
+)
+```
+
+### Get
+
+```python
+cached_content = client.caches.get(name=cached_content.name)
+```
+
+### Generate Content
+
+```python
+response = client.models.generate_content(
+    model="gemini-1.5-pro-002",
+    contents="Summarize the pdfs",
+    config=types.GenerateContentConfig(
+        cached_content=cached_content.name,
+    ),
+)
+print(response.text)
+```
+
+## Tunings
+
+`client.tunings` contains tuning job APIs and supports supervised fine
+tuning through `tune` and distillation through `distill`
+
+### Tune
+
+-   Vertex AI supports tuning from GCS source
+-   Gemini Developer API supports tuning from inline examples
+
+```python
+if client.vertexai:
+    model = "gemini-1.5-pro-002"
+    training_dataset = types.TuningDataset(
+        gcs_uri="gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl",
+    )
+else:
+    model = "models/gemini-1.0-pro-001"
+    training_dataset = types.TuningDataset(
+        examples=[
+            types.TuningExample(
+                text_input=f"Input text {i}",
+                output=f"Output text {i}",
+            )
+            for i in range(5)
+        ],
+    )
+```
+
+```python
+tuning_job = client.tunings.tune(
+    base_model=model,
+    training_dataset=training_dataset,
+    config=types.CreateTuningJobConfig(
+        epoch_count=1, tuned_model_display_name="test_dataset_examples model"
+    ),
+)
+print(tuning_job)
+```
+
+### Get Tuning Job
+
+```python
+tuning_job = client.tunings.get(name=tuning_job.name)
+print(tuning_job)
+```
+
+```python
+import time
+
+running_states = set(
+    [
+        "JOB_STATE_PENDING",
+        "JOB_STATE_RUNNING",
+    ]
+)
+
+while tuning_job.state in running_states:
+    print(tuning_job.state)
+    tuning_job = client.tunings.get(name=tuning_job.name)
+    time.sleep(10)
+```
+
+#### Use Tuned Model
+
+```python
+response = client.models.generate_content(
+    model=tuning_job.tuned_model.endpoint,
+    contents="What is your name?",
+)
+
+print(response.text)
+```
+
+### Get Tuned Model
+
+```python
+tuned_model = client.models.get(model=tuning_job.tuned_model.model)
+print(tuned_model)
+```
+
+### List Tuned Models
+
+To retrieve base models, see [list base models](#list-base-models).
+
+```python
+for model in client.models.list(config={"page_size": 10}):
+    print(model)
+```
+
+```python
+pager = client.models.list(config={"page_size": 10})
+print(pager.page_size)
+print(pager[0])
+pager.next_page()
+print(pager[0])
+```
+
+#### Async
+
+```python
+async for job in await client.aio.models.list(config={"page_size": 10}):
+    print(job)
+```
+
+```python
+async_pager = await client.aio.models.list(config={"page_size": 10})
+print(async_pager.page_size)
+print(async_pager[0])
+await async_pager.next_page()
+print(async_pager[0])
+```
+
+### Update Tuned Model
+
+```python
+model = pager[0]
+
+model = client.models.update(
+    model=model.name,
+    config=types.UpdateModelConfig(
+        display_name="my tuned model", description="my tuned model description"
+    ),
+)
+
+print(model)
+```
+
+### Distillation
+
+Only supported in Vertex AI. Requires allowlist.
+
+```python
+distillation_job = client.tunings.distill(
+    student_model="gemma-2b-1.1-it",
+    teacher_model="gemini-1.5-pro-002",
+    training_dataset=genai.types.DistillationDataset(
+        gcs_uri="gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl",
+    ),
+    config=genai.types.CreateDistillationJobConfig(
+        epoch_count=1,
+        pipeline_root_directory=("gs://my-bucket"),
+    ),
+)
+print(distillation_job)
+```
+
+```python
+completed_states = set(
+    [
+        "JOB_STATE_SUCCEEDED",
+        "JOB_STATE_FAILED",
+        "JOB_STATE_CANCELLED",
+        "JOB_STATE_PAUSED",
+    ]
+)
+
+while distillation_job.state not in completed_states:
+    print(distillation_job.state)
+    distillation_job = client.tunings.get(name=distillation_job.name)
+    time.sleep(10)
+
+print(distillation_job)
+```
+
+
+### List Tuning Jobs
+
+```python
+for job in client.tunings.list(config={"page_size": 10}):
+    print(job)
+```
+
+```python
+pager = client.tunings.list(config={"page_size": 10})
+print(pager.page_size)
+print(pager[0])
+pager.next_page()
+print(pager[0])
+```
+
+#### Async
+
+```python
+async for job in await client.aio.tunings.list(config={"page_size": 10}):
+    print(job)
+```
+
+```python
+async_pager = await client.aio.tunings.list(config={"page_size": 10})
+print(async_pager.page_size)
+print(async_pager[0])
+await async_pager.next_page()
+print(async_pager[0])
+```
+
+## Batch Prediction
+
+Only supported in Vertex AI.
+
+### Create
+
+```python
+# Specify only the model and source file; the destination and job display name will be auto-populated
+job = client.batches.create(
+    model="gemini-1.5-flash-002",
+    src="bq://my-project.my-dataset.my-table",
+)
+
+job
+```
+
+```python
+# Get a job by name
+job = client.batches.get(name=job.name)
+
+job.state
+```
+
+```python
+completed_states = set(
+    [
+        "JOB_STATE_SUCCEEDED",
+        "JOB_STATE_FAILED",
+        "JOB_STATE_CANCELLED",
+        "JOB_STATE_PAUSED",
+    ]
+)
+
+while job.state not in completed_states:
+    print(job.state)
+    job = client.batches.get(name=job.name)
+    time.sleep(30)
+
+job
+```
+
+### List
+
+```python
+for job in client.batches.list(config=types.ListBatchJobConfig(page_size=10)):
+    print(job)
+```
+
+```python
+pager = client.batches.list(config=types.ListBatchJobConfig(page_size=10))
+print(pager.page_size)
+print(pager[0])
+pager.next_page()
+print(pager[0])
+```
+
+#### Async
+
+```python
+async for job in await client.aio.batches.list(
+    config=types.ListBatchJobConfig(page_size=10)
+):
+    print(job)
+```
+
+```python
+async_pager = await client.aio.batches.list(
+    config=types.ListBatchJobConfig(page_size=10)
+)
+print(async_pager.page_size)
+print(async_pager[0])
+await async_pager.next_page()
+print(async_pager[0])
+```
+
+### Delete
+
+```python
+# Delete the job resource
+delete_job = client.batches.delete(name=job.name)
+
+delete_job
+```
diff --git a/.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info/RECORD b/.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info/RECORD
new file mode 100644
index 00000000..8884e9c3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info/RECORD
@@ -0,0 +1,46 @@
+google/genai/__init__.py,sha256=IYw-PcsdgjSpS1mU_ZcYkTfPocsJ4aVmrDxP7vX7c6Y,709

+google/genai/__pycache__/__init__.cpython-312.pyc,,

+google/genai/__pycache__/_api_client.cpython-312.pyc,,

+google/genai/__pycache__/_automatic_function_calling_util.cpython-312.pyc,,

+google/genai/__pycache__/_common.cpython-312.pyc,,

+google/genai/__pycache__/_extra_utils.cpython-312.pyc,,

+google/genai/__pycache__/_replay_api_client.cpython-312.pyc,,

+google/genai/__pycache__/_test_api_client.cpython-312.pyc,,

+google/genai/__pycache__/_transformers.cpython-312.pyc,,

+google/genai/__pycache__/batches.cpython-312.pyc,,

+google/genai/__pycache__/caches.cpython-312.pyc,,

+google/genai/__pycache__/chats.cpython-312.pyc,,

+google/genai/__pycache__/client.cpython-312.pyc,,

+google/genai/__pycache__/errors.cpython-312.pyc,,

+google/genai/__pycache__/files.cpython-312.pyc,,

+google/genai/__pycache__/live.cpython-312.pyc,,

+google/genai/__pycache__/models.cpython-312.pyc,,

+google/genai/__pycache__/pagers.cpython-312.pyc,,

+google/genai/__pycache__/tunings.cpython-312.pyc,,

+google/genai/__pycache__/types.cpython-312.pyc,,

+google/genai/__pycache__/version.cpython-312.pyc,,

+google/genai/_api_client.py,sha256=_Ta8Sjkg5hyO9UspTboevtk19-h7x1DBD940reMVM5c,23341

+google/genai/_automatic_function_calling_util.py,sha256=qbMCO8x6THe1O7Bn-L97rlbDSYJoX_gUfztvKHh-u6E,10078

+google/genai/_common.py,sha256=xJadfXcUP2ry348wz6Pd7v6abTi_5KlCieJre2nZUi8,8724

+google/genai/_extra_utils.py,sha256=QhmZV-vfKfNQ2KMUVdAD-Le37qzLD4dSLl953zyOvCk,11101

+google/genai/_replay_api_client.py,sha256=xD6NAxy-8vJQS8FtcWf4dpVMjW90V5xaDO9-ndzY8VU,14722

+google/genai/_test_api_client.py,sha256=p771T27icmzENxKtyNDwPG1sTI7jaoJNFPwlwq9GK6o,4759

+google/genai/_transformers.py,sha256=pMMq6IJTLh_QKHi63XTYKNbWOPJ2mpXLB2Q6v78eQsk,19268

+google/genai/batches.py,sha256=1TYMFgpe6-jz1WG-PzP2aj0p_Pvb0y81HFfnrJ8Vj7g,37504

+google/genai/caches.py,sha256=yXk1apId5SWzfZ5yi6ejonfT7AWiYgzb3FsRxAXFN5s,53998

+google/genai/chats.py,sha256=GobIFlez3eTRWWDtUycnubrMz0hB3v3gvDVSdMFJTNc,7642

+google/genai/client.py,sha256=AM7yAthSW5Ajnig2BfwCNPExjTTtrZR0b0pgyThnDBo,9497

+google/genai/errors.py,sha256=DtpDZT5UDqumk2cTRUlg3k4ypmO_0tkMNzJgA3qzCmc,3666

+google/genai/files.py,sha256=ySacctcUekVTHMHKWepEfge-KgVMvJlek6UIxn7qHSw,42537

+google/genai/live.py,sha256=vWAzuyAbK63n1Vr-iRNMuDq3McjgmSXY5ToO2bzdp28,23106

+google/genai/models.py,sha256=2Aj98r1B5cxryAxBRm1iUhpZ3px0c26ZhyH_0h9NQp4,160762

+google/genai/pagers.py,sha256=hSHd-gLvEzYWwK85i8EcFNWUMKtszUs7Nw2r3L7d6_U,6686

+google/genai/tunings.py,sha256=IddtUlYqPZmaicPoO4yhCPefop_FhJiKl9l4dyPEeVE,49396

+google/genai/types.py,sha256=wFhtYoto-ZfcqqRyu9ZGOQss9wBwPYTdcLuCjnil_kY,274625

+google/genai/version.py,sha256=GkH2NQgVNItrcxGOfKBijKTmc_gqY3cPzldxCE11Jm4,626

+google_genai-0.6.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4

+google_genai-0.6.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358

+google_genai-0.6.0.dist-info/METADATA,sha256=AiUA_1JhtT5M6vqc6mzs0i6qI1RJFOccuUJH-ZlX_z8,22743

+google_genai-0.6.0.dist-info/RECORD,,

+google_genai-0.6.0.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91

+google_genai-0.6.0.dist-info/top_level.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7

diff --git a/.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info/WHEEL b/.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info/WHEEL
new file mode 100644
index 00000000..505164bc
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: setuptools (75.8.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info/top_level.txt b/.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info/top_level.txt
new file mode 100644
index 00000000..cb429113
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/google_genai-0.6.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+google