Feed aggregator

file transfer

Tom Kyte - 3 hours 8 min ago
Hi Tom, I am getting the following error when I use the copy_file procedure of the dbms_file_transfer package. Here I am trying to copy a log file from one folder to another. Thanks

SQL> BEGIN
  2    dbms_file_transfer.copy_file(source_directory_object =>
  3      'SOURCE_DIR', source_file_name => 'sqlnet.log',
  4      destination_directory_object => 'DEST_DIR',
  5      destination_file_name => 'sqlnet.log');
  6  END;
  7  /
BEGIN
*
ERROR at line 1:
ORA-19505: failed to identify file "c:\temp\source\sqlnet.log"
ORA-27046: file size is not a multiple of logical block size
OSD-04012: file size mismatch (OS 3223)
ORA-06512: at "SYS.DBMS_FILE_TRANSFER", line 84
ORA-06512: at "SYS.DBMS_FILE_TRANSFER", line 193
ORA-06512: at line 2
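For context, DBMS_FILE_TRANSFER.COPY_FILE only copies binary files whose size is a multiple of 512 bytes, which is exactly what ORA-27046 is complaining about; a text file such as sqlnet.log rarely meets that requirement. A minimal Python sketch of a pre-padding workaround (the helper name and the space-padding approach are illustrative, not from the original question):

import os

def pad_to_block_size(path, block=512):
    # Append spaces until the file size is a multiple of the block size,
    # so DBMS_FILE_TRANSFER.COPY_FILE will accept it.
    remainder = os.path.getsize(path) % block
    if remainder:
        with open(path, "ab") as f:
            f.write(b" " * (block - remainder))

pad_to_block_size(r"c:\temp\source\sqlnet.log")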
Categories: DBA Blogs

Use GPT-4o Mini Locally with Text and Images

Pakistan's First Oracle Blog - 3 hours 20 min ago

This video introduces GPT-4o mini by OpenAI, which is quite cost-efficient and performant, and shows how to use it.


Code:

from openai import OpenAI
import base64
import os

## Set the model name and create the client from the OPENAI_API_KEY
## environment variable
MODEL = "gpt-4o-mini"
client = OpenAI(api_key=os.environ.get('OPENAI_API_KEY'))

def encode_image(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("utf-8")

IMAGE_PATH="nm.png"
base64_image = encode_image(IMAGE_PATH)

response = client.chat.completions.create(
    model=MODEL,
    messages=[
        {"role": "system", "content": "You are a helpful assistant that responds in Markdown. Help me with this image!"},
        {"role": "user", "content": [
            {"type": "text", "text": "Describe the image? how many girls are there?"},
            {"type": "image_url", "image_url": {
                "url": f"data:image/png;base64,{base64_image}"}
            }
        ]}
    ],
    temperature=0.0,
)

print(response.choices[0].message.content)

# Second example: text-only chat completion

#pip install -U openai
#export OPENAI_API_KEY=""

from openai import OpenAI
import os

## Set the model name and create the client from the OPENAI_API_KEY
## environment variable
MODEL = "gpt-4o-mini"
client = OpenAI(api_key=os.environ.get('OPENAI_API_KEY'))

completion = client.chat.completions.create(
  model=MODEL,
  messages=[
    {"role": "system", "content": "You are a helpful assistant. Help me with my question!"},
    {"role": "user", "content": "A bat and a ball together cost $1.10. The bat costs $1.00 more than the ball. How much does the ball cost?"}  
  ]
)

print("Assistant: " + completion.choices[0].message.content)
Categories: DBA Blogs

Install Mistral NeMo Locally and Test Multi-Lingual and Function Calling

Pakistan's First Oracle Blog - 3 hours 23 min ago

This video installs Mistral NeMo locally and tests it on multi-lingual, math, coding, and function-calling tasks.


Code:

conda create -n nemo python=3.11 -y && conda activate nemo

pip install torch
pip install git+https://github.com/huggingface/transformers.git
pip install mistral_inference
pip install huggingface_hub   # pathlib is in the standard library; no separate install needed

from huggingface_hub import snapshot_download
from pathlib import Path

mistral_models_path = Path.home().joinpath('mistral_models', 'Nemo-Instruct')
mistral_models_path.mkdir(parents=True, exist_ok=True)

snapshot_download(repo_id="mistralai/Mistral-Nemo-Instruct-2407", allow_patterns=["params.json", "consolidated.safetensors", "tekken.json"], local_dir=mistral_models_path)

conda install jupyter -y
pip uninstall charset_normalizer -y
pip install charset_normalizer
jupyter notebook


from mistral_inference.transformer import Transformer
from mistral_inference.generate import generate

from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
from mistral_common.protocol.instruct.messages import UserMessage
from mistral_common.protocol.instruct.request import ChatCompletionRequest

tokenizer = MistralTokenizer.from_file(f"{mistral_models_path}/tekken.json")
model = Transformer.from_folder(mistral_models_path)

prompt = "Write 10 sentences ending with the word beauty."

completion_request = ChatCompletionRequest(messages=[UserMessage(content=prompt)])

tokens = tokenizer.encode_chat_completion(completion_request).tokens

out_tokens, _ = generate([tokens], model, max_tokens=64, temperature=0.35, eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id)
result = tokenizer.decode(out_tokens[0])

print(result)
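One caveat: max_tokens=64 is unlikely to fit ten full sentences, so the completion will probably be truncated. Re-running with a larger budget (512 here is an assumption, not a value from the original post) avoids that:

out_tokens, _ = generate([tokens], model, max_tokens=512, temperature=0.35, eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id)
print(tokenizer.decode(out_tokens[0]))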


#===============================
# Function Calling
#===============================

from mistral_common.protocol.instruct.tool_calls import Function, Tool
from mistral_inference.transformer import Transformer
from mistral_inference.generate import generate

from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
from mistral_common.protocol.instruct.messages import UserMessage
from mistral_common.protocol.instruct.request import ChatCompletionRequest

completion_request = ChatCompletionRequest(
    tools=[
        Tool(
            function=Function(
                name="get_current_weather",
                description="Get the current weather",
                parameters={
                    "type": "object",
                    "properties": {
                        "location": {
                            "type": "string",
                            "description": "The city and state, e.g. San Francisco, CA",
                        },
                        "format": {
                            "type": "string",
                            "enum": ["celsius", "fahrenheit"],
                            "description": "The temperature unit to use. Infer this from the users location.",
                        },
                    },
                    "required": ["location", "format"],
                },
            )
        )
    ],
    messages=[
        UserMessage(content="What's the weather like today in Paris?"),
        ],
)

tokens = tokenizer.encode_chat_completion(completion_request).tokens

out_tokens, _ = generate([tokens], model, max_tokens=256, temperature=0.35, eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id)
result = tokenizer.decode(out_tokens[0])

print(result)
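For a tool-enabled request like this, the instruct model typically replies with a [TOOL_CALLS] marker followed by a JSON list of calls rather than prose. A hedged sketch of inspecting that output (the marker handling is an assumption and may vary by tokenizer version; it is not from the original post):

import json

payload = result.split("[TOOL_CALLS]")[-1].strip()
for call in json.loads(payload):
    print(call["name"], call["arguments"])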
Categories: DBA Blogs

Extreme PL/SQL - An Interpreter for a Simple Language

Pete Finnigan - Wed, 2024-07-17 22:26
I talked at a high level a few weeks ago about Extreme PL/SQL and gave a brief look at an interpreter I have been creating for a simple language based on BASIC. I have been keeping notes in a Word....[Read More]

Posted by Pete On 17/07/24 At 12:00 PM

Categories: Security Blogs

Oracle VirtualBox 7.0.20

Tim Hall - Wed, 2024-07-17 01:48

VirtualBox 7.0.20 has been released. The downloads and changelog are in the usual places. I’ve installed it on my Windows 10 and 11 machines with no drama. Vagrant There was no new version of Vagrant since the last VirtualBox release. If you are new to Vagrant and want to learn, you might find this useful. Once you understand that, I … Continue reading "Oracle VirtualBox 7.0.20"

The post Oracle VirtualBox 7.0.20 first appeared on The ORACLE-BASE Blog.

Install Codestral Mamba Locally - Best Math AI Model

Pakistan's First Oracle Blog - Tue, 2024-07-16 17:07

This video installs Codestral Mamba locally, an open code model based on the Mamba2 architecture.



Code: 

conda create -n codestralmamba python=3.11 -y && conda activate codestralmamba

pip install torch huggingface_hub   # pathlib is in the standard library; pathlib2 is not needed

pip install "mistral_inference>=1" mamba-ssm causal-conv1d   # quotes stop the shell treating >= as a redirection

from huggingface_hub import snapshot_download
from pathlib import Path

mistral_models_path = Path.home().joinpath('mistral_models', 'mamba-codestral-7B-v0.1')
mistral_models_path.mkdir(parents=True, exist_ok=True)

snapshot_download(repo_id="mistralai/mamba-codestral-7B-v0.1", allow_patterns=["params.json", "consolidated.safetensors", "tokenizer.model.v3"], local_dir=mistral_models_path)

mistral-chat $HOME/mistral_models/mamba-codestral-7B-v0.1 --instruct  --max_tokens 256
Categories: DBA Blogs

What’s that Skippy ? SYS_CONTEXT USERENV Parameters in the database don’t match the docs ?

The Anti-Kyte - Mon, 2024-07-15 01:30

The USERENV namespace lurks in one of the darker corners of your Oracle Database.
In conjunction with the SYS_CONTEXT function, it’s incredibly useful if you want to know what’s happening in your session environment at any given time.
However, the parameters defined for this namespace are locked away beyond the reach of mere mortals, which means you have to rely on the documentation to know which parameters are valid on which version of Oracle.
You might think that’s not really a problem; after all, Oracle documentation is pretty reliable, right?
Yes…mostly…

Having grown lazy over the years and decided that I wanted to do as little typing as possible when logging from my PL/SQL code, I wrote a simple framework called Skippy, which is on GitHub, if you’re interested.

One element of Skippy is a simple table which holds a list of all the available parameters for the USERENV namespace and the version from which they are valid. There is also a view – SKIPPY_ENV – which overlays the table and returns values for the parameters that are valid for the Oracle version on which it’s currently running.

Originally, the parameters listed in the table were gleaned from the Oracle documentation, the most recent examples being the 19c and 23ai SYS_CONTEXT references.

Unfortunately, there are a couple of discrepancies between the documentation and the database.

According to the 19c documentation, CDB_DOMAIN is a valid parameter on 19c, whilst CLOUD_SERVICE is not mentioned.
Meanwhile, IS_APPLICATION_ROOT and IS_APPLICATION_PDB are absent from the 23ai docs, despite them having been around since 19c.

The reality on 19c is that CDB_DOMAIN is not valid, but CLOUD_SERVICE is (tested on an OCI Free Tier instance) :

select product, version_full
from product_component_version;

PRODUCT                                            VERSION_FULL        
-------------------------------------------------- --------------------
Oracle Database 19c Enterprise Edition             19.24.0.1.0         

select sys_context('userenv', 'cdb_domain') as cdb_domain from dual;

…results in…

ORA-02003: invalid USERENV parameter

By contrast…

select sys_context('userenv', 'cloud_service') as cloud_service from dual;

CLOUD_SERVICE       
--------------------
OLTP

select sys_context('userenv', 'is_application_root') as is_application_root from dual;

IS_APPLICATION_ROOT 
--------------------
NO

select sys_context('userenv', 'is_application_pdb') as is_application_pdb from dual;

IS_APPLICATION_PDB  
--------------------
NO

Meanwhile, it appears that IS_APPLICATION_ROOT and IS_APPLICATION_PDB are still valid on 23ai. This time, I’ve tested on a VirtualBox Dev Day Appliance :

select product, version_full
from product_component_version;

PRODUCT                                            VERSION_FULL        
-------------------------------------------------- --------------------
Oracle Database 23ai Free                          23.4.0.24.05        

select sys_context('userenv', 'is_application_root') as is_application_root from dual;

IS_APPLICATION_ROOT 
--------------------
NO

select sys_context('userenv', 'is_application_pdb') as is_application_pdb from dual;

IS_APPLICATION_PDB  
--------------------
NO

CDB_DOMAIN has also made an appearance in this version:

select sys_context('userenv', 'cdb_domain') as cdb_domain from dual; 

CDB_DOMAIN          
--------------------

CLOUD_SERVICE is still kicking around:

select sys_context('userenv', 'cloud_service') as cloud_service from dual;

CLOUD_SERVICE       
--------------------

I’ve submitted comments on the appropriate documentation pages but I can’t see any way to track the response or progress on these.

Fortunately for me, my friendly neighbourhood marsupial has now got this covered, but it’s something you may want to keep an eye out for if you maintain your own list of USERENV parameters.

A History of Tech Sprawl

Tim Hall - Mon, 2024-07-15 01:15

Here’s a little story about how things are all the same but different… Software Sprawl Let’s cast our minds back to the bad old days, where x86 machines were so underpowered, the thought of using them for a server was almost laughable. In those days the only option for something serious was to use UNIX … Continue reading "A History of Tech Sprawl"

The post A History of Tech Sprawl first appeared on The ORACLE-BASE Blog.

Easy Tutorial to Fine-Tune Vision Model on Image Data Locally

Pakistan's First Oracle Blog - Sun, 2024-07-14 19:56

This video is a step-by-step tutorial showing how to easily fine-tune a vision model on your own custom image dataset locally.



Code:

conda create -n ft python=3.11 -y && conda activate ft
pip install -U transformers datasets trl peft accelerate Pillow torch

from datasets import features, load_dataset
from transformers import AutoModelForVision2Seq, AutoProcessor
import torch
from trl import DPOConfig, DPOTrainer
from peft import LoraConfig
import os

ds_id = "openbmb/RLAIF-V-Dataset"
dataset = load_dataset(ds_id, split="train")
dataset = dataset.shuffle(seed=42).select(range(100))

model_id = "HuggingFaceM4/idefics2-8b"

model = AutoModelForVision2Seq.from_pretrained(model_id, torch_dtype=torch.bfloat16)
processor = AutoProcessor.from_pretrained(model_id, do_image_splitting=False)

def format_ds(example):
    # Prepare the input for the chat template
    prompt = [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": example["question"]}]}]
    chosen = [{"role": "assistant", "content": [{"type": "text", "text": example["chosen"]}]}]
    rejected = [{"role": "assistant", "content": [{"type": "text", "text": example["rejected"]}]}]
    # Apply the chat template
    prompt = processor.apply_chat_template(prompt, tokenize=False)
    chosen = processor.apply_chat_template(chosen, tokenize=False)
    rejected = processor.apply_chat_template(rejected, tokenize=False)
    # Resize the image to ensure it fits within the maximum allowable
    # size of the processor to prevent OOM errors.
    max_size = processor.image_processor.size["longest_edge"] // 2
    example["image"].thumbnail((max_size, max_size))
    return {"images": [example["image"]], "prompt": prompt, "chosen": chosen, "rejected": rejected}

dataset = dataset.map(format_ds, remove_columns=dataset.column_names, num_proc=os.cpu_count())

f = dataset.features
f["images"] = features.Sequence(features.Image(decode=True))
dataset = dataset.cast(f)

training_args = DPOConfig(
    output_dir="my-idefics2",
    bf16=True,
    gradient_checkpointing=True,
    per_device_train_batch_size=2,
    gradient_accumulation_steps=32,
    num_train_epochs=1,
    push_to_hub=False,
    dataset_num_proc=os.cpu_count(),
    dataloader_num_workers=os.cpu_count(),
    logging_steps=10,
    )

trainer = DPOTrainer(
    model,
    ref_model=None,  # not needed when using peft
    args=training_args,
    train_dataset=dataset,
    tokenizer=processor,
    peft_config=LoraConfig(target_modules="all-linear"),
)

trainer.train()
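After training finishes, the LoRA adapter is usually persisted with a single call; this line (output path illustrative) is an addition, not part of the original post:

trainer.save_model("my-idefics2-final")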
Categories: DBA Blogs

Install aTrain Locally - Offline Transcription of Speech Recordings Free with AI

Pakistan's First Oracle Blog - Sun, 2024-07-14 03:23

This video is a step-by-step tutorial to install aTrain, a tool for automatically transcribing speech recordings using state-of-the-art machine learning models without uploading any data.




Code:

conda create -n atrain python=3.11 -y && conda activate atrain
sudo apt install ffmpeg
pip install aTrain@git+https://github.com/BANDAS-Center/aTrain.git --extra-index-url https://download.pytorch.org/whl/cu118
sudo apt install nvidia-cudnn
aTrain init
aTrain start
Categories: DBA Blogs

Sparrow OCR Service with PaddleOCR

Andrejus Baranovski - Sun, 2024-07-14 01:29
In this video, I demonstrate the latest updates to the Sparrow OCR Service using PaddleOCR. I walk you through the OCR service workflow in Sparrow, showcasing its integration with FastAPI and highlighting the enhanced functionalities brought by the recent PaddleOCR update. Join me to see how you can leverage these powerful tools for efficient OCR processing! 


llama-cpp-agent Installation to Use AI Models Locally in Simple Way

Pakistan's First Oracle Blog - Sat, 2024-07-13 20:46

This video is a step-by-step easy tutorial to install llama-cpp-agent, a tool designed to simplify interactions with LLMs. It provides an interface for chatting with LLMs, executing function calls, and generating structured output.



Code:



conda create -n llamagent python=3.11 -y && conda activate llamagent

pip install --no-cache-dir llama-cpp-python==0.2.77 --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cu124

pip install llama-cpp-agent

conda install jupyter -y
pip uninstall charset_normalizer -y
pip install charset_normalizer
jupyter notebook

# Import the Llama class of llama-cpp-python and the LlamaCppPythonProvider of llama-cpp-agent
from llama_cpp import Llama
from llama_cpp_agent.providers import LlamaCppPythonProvider

# Create an instance of the Llama class and load the model
llama_model = Llama(r"/home/Ubuntu/mymodels/mistral-7b-instruct-v0.2.Q6_K.gguf", n_batch=1024, n_threads=10, n_gpu_layers=40)

# Create the provider by passing the Llama class instance to the LlamaCppPythonProvider class
provider = LlamaCppPythonProvider(llama_model)


from llama_cpp_agent import LlamaCppAgent
from llama_cpp_agent import MessagesFormatterType

agent = LlamaCppAgent(provider, system_prompt="You are a helpful assistant.", predefined_messages_formatter_type=MessagesFormatterType.MISTRAL)

agent_output = agent.get_chat_response("Hello, World!")

print(f"Agent: {agent_output.strip()}")


import math
from llama_cpp_agent.llm_output_settings import LlmStructuredOutputSettings
from typing import Union

# Callback for receiving messages for the user.
def send_message_to_user_callback(message: str):
    print(message)
   
def calculate_a_to_the_power_b(a: Union[int, float], b: Union[int, float]):
    """
    Calculates a to the power of b

    Args:
        a: number
        b: exponent

    """
    return f"Result: {math.pow(a, b)}"

output_settings = LlmStructuredOutputSettings.from_functions([calculate_a_to_the_power_b], allow_parallel_function_calling=True)

llama_cpp_agent = LlamaCppAgent(
    provider,
    debug_output=True,
    system_prompt=f"You are an advanced AI, tasked to assist the user by calling functions in JSON format.",
    predefined_messages_formatter_type=MessagesFormatterType.CHATML,
)

user_input = "Calculate a to the power of b: a = 2, b = 3"

print(
    llama_cpp_agent.get_chat_response(
        user_input, structured_output_settings=output_settings
    )
)

Categories: DBA Blogs

AuraFlow - Best and Free Text to Image Model - Install Locally

Pakistan's First Oracle Blog - Fri, 2024-07-12 23:32

This video installs AuraFlow v0.1 locally, the largest fully open-sourced flow-based text-to-image generation model. It generates hyper-realistic images from text prompts.



Code:

conda create -n auraflow python=3.11 -y && conda activate auraflow

pip install transformers accelerate protobuf sentencepiece
pip install torch torchvision

pip install git+https://github.com/huggingface/diffusers.git@bbd2f9d4e9ae70b04fedf65903fd1fb035437db4

conda install jupyter -y
pip uninstall charset_normalizer -y
pip install charset_normalizer
jupyter notebook


from diffusers import AuraFlowPipeline
import torch

pipeline = AuraFlowPipeline.from_pretrained(
    "fal/AuraFlow",
    torch_dtype=torch.float16
).to("cuda")

image = pipeline(
    prompt="close-up portrait of a majestic iguana with vibrant blue-green scales, piercing amber eyes, and orange spiky crest. Intricate textures and details visible on scaly skin. Wrapped in dark hood, giving regal appearance. Dramatic lighting against black background. Hyper-realistic, high-resolution image showcasing the reptile's expressive features and coloration.",
    height=1024,
    width=1024,
    num_inference_steps=50,
    generator=torch.Generator().manual_seed(666),
    guidance_scale=3.5,
).images[0]
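The pipeline call returns a PIL image that is otherwise discarded; saving it takes one more line (the filename is illustrative, not from the original post):

image.save("iguana_portrait.png")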
Categories: DBA Blogs

Plan problem using composite domain index on CLOB and XMLTYPE

Tom Kyte - Fri, 2024-07-12 15:06
Hi, I need to perform fulltext searches on an xmltype XML field. The table in which this field is located has other fields that can be used to narrow the search in the fulltext index (for example, a date field). However, I noticed that with a composite domain index on the xmltype field, the optimizer ignores the possible filter on the date field and applies a subsequent filter once the records have been identified via the context index. This only happens if the field on which the context index is created is of type xmltype. I did a test creating the same table, changing only the field to clob, and the plan looks correct to me, i.e. the records are located using only the context index, with no subsequent filter. The trace also shows that in the second case, with the same data, the cost of the query is significantly lower.

The two tables, one with an xmltype field, the other with a clob field:

<code>
create table test_xml (
  id number,
  dt date,
  xmltext xmltype
);

create table test_xml_clob (
  id number,
  dt date,
  xmltext clob
);
</code>

Script to populate some data:

<code>
declare
  cnt number := 0;
begin
  for x in 1..100 loop
    for m in 1..12 loop
      for d in 1..28 loop
        for y in 2022..2024 loop
          insert into test_xml values(cnt, to_date(to_char(m) || '/' || to_char(d) || '/' || to_char(y), 'mm/dd/yyyy'), '<?xml version="1.0" encoding="UTF-8"?><root><item>str1</item></root>');
          insert into test_xml_clob values(cnt, to_date(to_char(m) || '/' || to_char(d) || '/' || to_char(y), 'mm/dd/yyyy'), '<?xml version="1.0" encoding="UTF-8"?><root><item>str1</item></root>');
          cnt := cnt+1;
        end loop;
      end loop;
    end loop;
    commit;
  end loop;
end;
</code>

The two indexes, both with the <b>filter by</b> option:

<code>
CREATE INDEX ndx_cdi_xmlft_test ON test_xml (xmltext)
  INDEXTYPE IS CTXSYS.CONTEXT
  FILTER BY dt
  PARAMETERS ('FILTER CTXSYS.NULL_FILTER');

CREATE INDEX ndx_cdi_xmlclobft_test ON test_xml_clob (xmltext)
  INDEXTYPE IS CTXSYS.CONTEXT
  FILTER BY dt
  PARAMETERS ('FILTER CTXSYS.NULL_FILTER');
</code>

Statistics:

<code>
exec dbms_stats.gather_table_stats('', tabname => 'TEST_XML_CLOB', cascade => TRUE);
exec dbms_stats.gather_table_stats('', tabname => 'TEST_XML', cascade => TRUE);
</code>

The queries used for testing. The first one has a much higher cost:

<code>
select *
from test_xml
where contains(xmltext, 'str1') > 0
and dt between to_date('01/01/2022', 'dd/mm/yyyy') and to_date('02/01/2022', 'dd/mm/yyyy');

select *
from test_xml_clob
where contains(xmltext, 'str1') > 0
and dt between to_date('01/01/2022', 'dd/mm/yyyy') and to_date('02/01/2022', 'dd/mm/yyyy');
</code>

<code>select banner_full from v$version</code>

BANNER_FULL ...
Categories: DBA Blogs

Install LightRAG Locally - Modular RAG and Lightning Library for LLM Apps

Pakistan's First Oracle Blog - Fri, 2024-07-12 07:37

This video is a step-by-step easy tutorial to install LightRAG, which helps developers both build and optimize Retriever-Agent-Generator pipelines. It is light, modular, and robust, with a 100% readable codebase.


Code:

conda create -n lightrag python=3.11 -y && conda activate lightrag

git clone https://github.com/SylphAI-Inc/LightRAG.git && cd LightRAG

pip install lightrag
pip install openai==1.12.0
pip install faiss-cpu==1.8.0
pip install sqlalchemy==2.0.30
pip install pgvector==0.2.5
pip install groq==0.5.0

mv .env_example .env   #set your openai and groq api keys in .env file.


conda install jupyter -y
pip uninstall charset_normalizer -y
pip install charset_normalizer
jupyter notebook

from lightrag.utils import setup_env
setup_env()



from dataclasses import dataclass, field

from lightrag.core import Component, Generator, DataClass
from lightrag.components.model_client import GroqAPIClient
from lightrag.components.output_parsers import JsonOutputParser

@dataclass
class QAOutput(DataClass):
    explanation: str = field(
        metadata={"desc": "A brief explanation of the concept in one sentence."}
    )
    example: str = field(metadata={"desc": "An example of the concept in a sentence."})



qa_template = r"""<SYS>
You are a helpful assistant.
<OUTPUT_FORMAT>
{{output_format_str}}
</OUTPUT_FORMAT>
</SYS>
User: {{input_str}}
You:"""

class QA(Component):
    def __init__(self):
        super().__init__()

        parser = JsonOutputParser(data_class=QAOutput, return_data_class=True)
        self.generator = Generator(
            model_client=GroqAPIClient(),
            model_kwargs={"model": "llama3-8b-8192"},
            template=qa_template,
            prompt_kwargs={"output_format_str": parser.format_instructions()},
            output_processors=parser,
        )

    def call(self, query: str):
        return self.generator.call({"input_str": query})

    async def acall(self, query: str):
        return await self.generator.acall({"input_str": query})
       


qa = QA()
print(qa)

# call
output = qa("What is LLM?")
print(output)


qa.generator.print_prompt(
        output_format_str=qa.generator.output_processors.format_instructions(),
        input_str="What is LLM?",
)
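For completeness, the async path defined in QA.acall can be exercised like this (a minimal sketch assuming a plain-script context where no event loop is already running; not part of the original post):

import asyncio

output = asyncio.run(qa.acall("What is LLM?"))
print(output)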

Categories: DBA Blogs
