Date:      Thu, 8 Aug 2024 20:01:12 GMT
From:      Yuri Victorovich <yuri@FreeBSD.org>
To:        ports-committers@FreeBSD.org, dev-commits-ports-all@FreeBSD.org, dev-commits-ports-main@FreeBSD.org
Subject:   git: b5bb445feab3 - main - misc/ollama: Fix inference; Add ONLY_FOR_ARCHSxx lines; Add pkg-message
Message-ID:  <202408082001.478K1CeY014336@gitrepo.freebsd.org>

The branch main has been updated by yuri:

URL: https://cgit.FreeBSD.org/ports/commit/?id=b5bb445feab3021d3e09b963be2afeb9082e5497

commit b5bb445feab3021d3e09b963be2afeb9082e5497
Author:     Yuri Victorovich <yuri@FreeBSD.org>
AuthorDate: 2024-08-08 19:59:51 +0000
Commit:     Yuri Victorovich <yuri@FreeBSD.org>
CommitDate: 2024-08-08 20:01:10 +0000

    misc/ollama: Fix inference; Add ONLY_FOR_ARCHSxx lines; Add pkg-message
---
 misc/ollama/Makefile    | 17 ++++++++++++++---
 misc/ollama/pkg-message | 24 ++++++++++++++++++++++++
 2 files changed, 38 insertions(+), 3 deletions(-)

diff --git a/misc/ollama/Makefile b/misc/ollama/Makefile
index 2989a5229b2e..d388e976d172 100644
--- a/misc/ollama/Makefile
+++ b/misc/ollama/Makefile
@@ -1,7 +1,7 @@
 PORTNAME=	ollama
 DISTVERSIONPREFIX=	v
 DISTVERSION=	0.3.4
-PORTREVISION=	1
+PORTREVISION=	2
 CATEGORIES=	misc # machine-learning
 
 MAINTAINER=	yuri@FreeBSD.org
@@ -11,11 +11,13 @@ WWW=		https://ollama.com/
 LICENSE=	MIT
 LICENSE_FILE=	${WRKSRC}/LICENSE
 
+ONLY_FOR_ARCHS=	amd64
+ONLY_FOR_ARCHS_REASON=	the bundled patched llama-cpp is placed into an arch-specific path
+
 BUILD_DEPENDS=	bash:shells/bash \
 		cmake:devel/cmake-core \
 		vulkan-headers>0:graphics/vulkan-headers
-LIB_DEPENDS=	libllama.so:misc/llama-cpp \
-		libvulkan.so:graphics/vulkan-loader
+LIB_DEPENDS=	libvulkan.so:graphics/vulkan-loader
 
 USES=		go:1.22,modules pkgconfig
 
@@ -27,6 +29,15 @@ GH_TUPLE=	ggerganov:llama.cpp:6eeaeba:llama_cpp/llm/llama.cpp
 
 PLIST_FILES=	bin/${PORTNAME}
 
+post-patch: # workaround for https://github.com/ollama/ollama/issues/6259 (use of external libllama.so)
+	@${REINPLACE_CMD} \
+		-e '\
+			s| llama | llama omp |; \
+			s| llama | ${WRKSRC}/llm/build/bsd/x86_64_static/src/libllama.a |; \
+			s| ggml | ${WRKSRC}/llm/build/bsd/x86_64_static/ggml/src/libggml.a |; \
+		' \
+		${WRKSRC}/llm/ext_server/CMakeLists.txt
+
 pre-build:
 	@${CP} ${WRKSRC}/app/store/store_linux.go ${WRKSRC}/app/store/store_bsd.go
 	@cd ${GO_WRKSRC} && \
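[Note on the post-patch workaround above: the three REINPLACE substitutions
rewrite the link line in llm/ext_server/CMakeLists.txt so that ext_server
links the llama.cpp libraries built statically inside the work tree, plus
OpenMP, instead of the shared libllama.so from misc/llama-cpp. The exact
original CMake line is an assumption; only the substitutions themselves are
taken from the commit. A sketch of the intended effect:

    # hypothetical line in llm/ext_server/CMakeLists.txt before patching:
    target_link_libraries(${TARGET} PRIVATE ggml llama ...)

    # after the substitutions, applied in order (" llama " first becomes
    # " llama omp ", whose " llama " is then replaced by the static archive):
    target_link_libraries(${TARGET} PRIVATE
        ${WRKSRC}/llm/build/bsd/x86_64_static/ggml/src/libggml.a
        ${WRKSRC}/llm/build/bsd/x86_64_static/src/libllama.a omp ...)
]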
diff --git a/misc/ollama/pkg-message b/misc/ollama/pkg-message
new file mode 100644
index 000000000000..90096ac82cef
--- /dev/null
+++ b/misc/ollama/pkg-message
@@ -0,0 +1,24 @@
+[
+{ type: install
+  message: <<EOM
+You installed ollama: the AI model runner.
+
+To run ollama, please open two terminals.
+1. In the first terminal, please run:
+   $ ollama start
+2. In the second terminal, please run:
+   $ ollama run mistral
+
+This will download and run the AI model "mistral".
+You will be able to interact with it in plain English.
+
+Please see https://ollama.com/library for the list
+of all supported models.
+
+ollama uses many gigabytes of disk space in your home directory,
+because advanced AI models are often very large.
+Please symlink ~/.ollama to a large disk if needed.
+
+EOM
+}
+]
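[The last paragraph of the pkg-message can be followed with something like
the commands below; /big-disk is only a placeholder for whatever large
filesystem is available:

    # move the model store to a roomy filesystem and symlink it back
    $ mv ~/.ollama /big-disk/ollama
    $ ln -s /big-disk/ollama ~/.ollama
]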
