from crewai import LLM

llm = LLM(
    model="ollama/llama3.2:1b",
    base_url="http://localhost:11434"
)
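As a rough sketch of how this llm object might be wired into a crew (the agent role, goal, and task text below are illustrative placeholders, not part of the original setup):

from crewai import Agent, Task, Crew

# Illustrative agent and task; the role, goal, and prompts are placeholders.
researcher = Agent(
    role="Research Assistant",
    goal="Summarize a topic in a few sentences",
    backstory="A concise assistant that answers briefly.",
    llm=llm,  # the locally served Ollama model configured above
    verbose=True,
)

summary_task = Task(
    description="Summarize what Ollama is in two sentences.",
    expected_output="A two-sentence summary.",
    agent=researcher,
)

crew = Crew(agents=[researcher], tasks=[summary_task])
result = crew.kickoff()
print(result)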
Make sure the model is available locally before kicking off the crew; pull it with:

ollama pull llama3.2:1b
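If the crew fails to connect, one quick sanity check is to confirm the Ollama server is running and the model is installed. A minimal sketch using Ollama's /api/tags endpoint on the default port (assumes the requests package is installed):

import requests

# Ask the local Ollama server which models it has available.
resp = requests.get("http://localhost:11434/api/tags", timeout=5)
resp.raise_for_status()
models = [m["name"] for m in resp.json().get("models", [])]
print("llama3.2:1b installed:", any(name.startswith("llama3.2:1b") for name in models))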