Future-House / paper-qa

LLM Chain for answering questions from documents with citations

Geek Repo:Geek Repo

Github PK Tool:Github PK Tool

'ChatOllama' object has no attribute 'model_name'

BenjaminRosell opened this issue · comments

I am trying to use Paper QA with some models locally.

When trying to create a Docs() object, I get an attribute error saying that the ChatOllama object has no attribute model_name.

This is my code :

# Reproduction script: paper-qa with the "langchain" LLM type wrapping a local
# Ollama chat model. This is the code that triggers the reported AttributeError.
from paperqa import Docs
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_community.embeddings import OllamaEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_community.chat_models import ChatOllama

# have tried a few models; the failure is not model-specific
model = "llama3"
llm = ChatOllama(model=model, base_url="http://localhost:11434")
embeddings = OllamaEmbeddings(base_url="http://localhost:11434", model=model)

# Demonstrate Ollama and langchain are working
print(llm.invoke("Who was the first US President?"))

# NOTE(review): this is where the reported error occurs — per the issue title,
# paper-qa looks for a `model_name` attribute on the client, which ChatOllama
# does not expose (it uses `model`); confirm against the installed paper-qa version.
docs = Docs(llm="langchain", client=llm, embedding_client=embeddings)
docs.add("I Pencil.pdf")
# NOTE(review): Docs exposes query(...) in the rest of this thread (see the later
# snippets) — presumably this line should be docs.query(...); verify against the
# paper-qa API before relying on it.
answer = docs.invoke("Are pencils made of wood?")

Is there any solution? I have the same question.

Hi, got it working with ollama with the following setup:

# Workaround: bypass the langchain ChatOllama wrapper entirely and talk to the
# local Ollama server through its OpenAI-compatible HTTP endpoint.
from paperqa import Docs, OpenAILLMModel
from openai import AsyncOpenAI

# Ollama serves an OpenAI-compatible API under /v1; the api_key is a placeholder
# (presumably ignored by the local server, but the client requires some value).
local_client = AsyncOpenAI(
    base_url='http://localhost:11434/v1',
    api_key='ollama',
)

docs = Docs(
    client=local_client,
    # Embedding model name, passed through to the local server.
    embedding="nomic-embed-text",
    # Model used to generate answers.
    llm_model=OpenAILLMModel(
        config=dict(
            model="llama3.1", temperature=0.1, frequency_penalty=1.5, max_tokens=512,
        )
    ),
    # Same model reused for the evidence-summarization step.
    summary_llm_model=OpenAILLMModel(
        config=dict(
            model="llama3.1", temperature=0.1, frequency_penalty=1.5, max_tokens=512,
        )
    ),
)
commented

> Hi, got it working with ollama with the following setup:

# Quoted restatement of the working Ollama setup from the earlier comment.
from paperqa import Docs, OpenAILLMModel
from openai import AsyncOpenAI

# Point the OpenAI client at Ollama's OpenAI-compatible endpoint; the api_key
# value is a placeholder required by the client library.
local_client = AsyncOpenAI(
    base_url='http://localhost:11434/v1',
    api_key='ollama',
)

docs = Docs(
    client=local_client,
    embedding="nomic-embed-text",
    # Answering model configuration.
    llm_model=OpenAILLMModel(
        config=dict(
            model="llama3.1", temperature=0.1, frequency_penalty=1.5, max_tokens=512,
        )
    ),
    # Summarization model configuration (same model reused).
    summary_llm_model=OpenAILLMModel(
        config=dict(
            model="llama3.1", temperature=0.1, frequency_penalty=1.5, max_tokens=512,
        )
    ),
)

Hi @Madnex
I'm using your code suggestion but I get this error:
This does not look like a text document: PersonInfoReport-14030127_143225.pdf. Path disable_check to ignore this error.

# Follow-up attempt combining the OpenAI-client workaround with several
# (apparently unused) langchain imports.
from paperqa import Docs
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import Ollama
from langchain.embeddings import OllamaEmbeddings

from paperqa import Docs, OpenAILLMModel
from openai import AsyncOpenAI

# Ollama's OpenAI-compatible endpoint; the api_key is a placeholder.
local_client = AsyncOpenAI(
base_url='http://localhost:11434/v1',
api_key='ollama',
)

# NOTE: the argument lines below lost their indentation in the paste; Python
# still accepts them because they sit inside the open parentheses.
docs = Docs(
client=local_client,
embedding="nomic-embed-text",
llm_model=OpenAILLMModel(
config=dict(
model="llama3.1", temperature=0.1, frequency_penalty=1.5, max_tokens=512,
)
),
summary_llm_model=OpenAILLMModel(
config=dict(
model="llama3.1", temperature=0.1, frequency_penalty=1.5, max_tokens=512,
)
),
)
# This add() call is what raised "This does not look like a text document: ..." —
# the PDF is presumably not parseable as text (e.g. a scanned document, as
# suggested at the end of the thread).
docs.add('PersonInfoReport-14030127_143225.pdf')
answer = docs.query("Where dose he live?")

> Hi, got it working with ollama with the following setup:

# Re-quoted copy of the OpenAI-compatible Ollama setup from the earlier comment
# (blank lines were introduced by the quoting; the code is unchanged).
from paperqa import Docs, OpenAILLMModel

from openai import AsyncOpenAI

# Client pointed at the local Ollama server's /v1 endpoint; placeholder api_key.
local_client = AsyncOpenAI(

base_url='http://localhost:11434/v1',
api_key='ollama',

)

docs = Docs(

client=local_client,
embedding="nomic-embed-text",
llm_model=OpenAILLMModel(
    config=dict(
        model="llama3.1", temperature=0.1, frequency_penalty=1.5, max_tokens=512,
    )
),
summary_llm_model=OpenAILLMModel(
    config=dict(
        model="llama3.1", temperature=0.1, frequency_penalty=1.5, max_tokens=512,
    )
),

)

Hi @Madnex

I'm using your code suggestion but I get this error:

This does not look like a text document: PersonInfoReport-14030127_143225.pdf. Path disable_check to ignore this error.

# Re-quoted copy of the follow-up script that raised the "does not look like a
# text document" error (blank lines introduced by the quoting; code unchanged).
from paperqa import Docs

from langchain.callbacks.manager import CallbackManager

from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

from langchain.llms import Ollama

from langchain.embeddings import OllamaEmbeddings

from paperqa import Docs, OpenAILLMModel

from openai import AsyncOpenAI

# Ollama's OpenAI-compatible endpoint; placeholder api_key.
local_client = AsyncOpenAI(

base_url='http://localhost:11434/v1',
api_key='ollama',

)

docs = Docs(

client=local_client,
embedding="nomic-embed-text",
llm_model=OpenAILLMModel(
    config=dict(
        model="llama3.1", temperature=0.1, frequency_penalty=1.5, max_tokens=512,
    )
),
summary_llm_model=OpenAILLMModel(
    config=dict(
        model="llama3.1", temperature=0.1, frequency_penalty=1.5, max_tokens=512,
    )
),

)

# The failing call: the PDF could not be read as text (possibly a scanned
# document — see the OCR suggestion in the final reply).
docs.add('PersonInfoReport-14030127_143225.pdf')

answer = docs.query("Where dose he live?")

Did you try with a different PDF? It sounds like it's not a valid PDF. Maybe it's a scanned document? In that case you'll need to run some OCR on it first, maybe 🤔