-FROM python:3.10.18-slim
-
-### Install Tesseract
-ENV SHELL=/bin/bash
-ENV CC /usr/bin/clang
-ENV CXX /usr/bin/clang++
-ENV LANG=C.UTF-8
-ENV TESSDATA_PREFIX=/usr/local/share/tessdata
-
-# Install all build tools needed for our pip installs
-RUN apt update
-RUN apt install -y clang g++ make automake autoconf libtool cmake
-
-## Install the same packages with apt as with apk, but ensure they exist in apt
-RUN apt install -y jq git curl
-RUN apt install -y file openssl bash tini libpng-dev aspell-en
-RUN apt install -y git clang g++ make automake autoconf libtool cmake
-RUN apt install -y autoconf-archive wget
-
-# Install cuda toolkit
-#RUN wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-archive-keyring.gpg
-#RUN dpkg -i cuda-archive-keyring.gpg
-#RUN rm cuda-archive-keyring.gpg
-#RUN apt update
-#RUN apt install -y cuda
-#RUN echo 'export PATH=/usr/local/cuda/bin:$PATH' >> ~/.bashrc
-#RUN echo 'export LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH' >> ~/.bashrc
-#RUN source ~/.bashrc
-
-# Larger model
-RUN mkdir -p /models
-
-# Fails. 6 bit, 8B model.
-#RUN wget https://huggingface.co/RichardErkhov/meta-llama_-_Meta-Llama-3-8B-gguf/blob/main/Meta-Llama-3-8B.Q6_K.gguf?download=true -O /models/Meta-Llama-3-8B.Q6_K.gguf
-#ENV MODEL_PATH="/models/Meta-Llama-3-8B.Q6_K.gguf"
-
-# Simple small Llama wrapper
-RUN wget https://huggingface.co/unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF/resolve/main/DeepSeek-R1-Distill-Llama-8B-Q2_K.gguf?download=true -O /models/DeepSeek-R1-Distill-Llama.gguf
-# Larger one
-#RUN wget https://huggingface.co/unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF/resolve/main/DeepSeek-R1-Distill-Llama-8B-Q8_0.gguf?download=true -O /models/DeepSeek-R1-Distill-Llama.gguf
-ENV MODEL_PATH="/models/DeepSeek-R1-Distill-Llama.gguf"
-
-# Failing? Bad magic bytes.
-#RUN wget https://huggingface.co/QuantFactory/Llama-3.2-3B-GGUF/resolve/main/Llama-3.2-3B.Q2_K.gguf?download=true -O /models/Llama-3.2-3B.Q2_K.gguf
+FROM python:3.10-alpine

+# Install all alpine build tools needed for our pip installs
+#RUN apk --no-cache add --update alpine-sdk libffi libffi-dev musl-dev openssl-dev git poppler-utils

# Install all of our pip packages in a single directory that we can copy to our base image later
RUN mkdir /install
WORKDIR /install

# Switch back to our base image and copy in all of our built packages and source code
+#COPY --from=builder /install /usr/local
+COPY src /app
COPY requirements.txt /requirements.txt
RUN python3 -m pip install -r /requirements.txt
-RUN CMAKE_ARGS="-DLLAMA_CUBLAS=on" python3 -m pip install llama-cpp-python --upgrade --force-reinstall --no-cache-dir
-

# Install any binary dependencies needed in our final image
+# RUN apk --no-cache add --update my_binary_dependency
+RUN apk --no-cache add jq git curl

+ENV SHELL=/bin/bash
+
+### Install Tesseract
+ENV CC /usr/bin/clang
+ENV CXX /usr/bin/clang++
+ENV LANG=C.UTF-8
+ENV TESSDATA_PREFIX=/usr/local/share/tessdata

# Dev tools
WORKDIR /tmp
-#RUN apk update
-#RUN apk upgrade
-
-
+RUN apk update
+RUN apk upgrade
+RUN apk add file openssl openssl-dev bash tini leptonica-dev openjpeg-dev tiff-dev libpng-dev zlib-dev libgcc mupdf-dev jbig2dec-dev
+RUN apk add freetype-dev openblas-dev ffmpeg-dev linux-headers aspell-dev aspell-en # enchant-dev jasper-dev
+RUN apk add --virtual .dev-deps git clang clang-dev g++ make automake autoconf libtool pkgconfig cmake ninja
+RUN apk add --virtual .dev-testing-deps -X http://dl-3.alpinelinux.org/alpine/edge/testing autoconf-archive
RUN ln -s /usr/include/locale.h /usr/include/xlocale.h

-#RUN apk add tesseract-ocr
-RUN apt install -y tesseract-ocr
-#RUN apk add poppler-utils
-RUN apt install -y poppler-utils
-RUN apt clean && rm -rf /var/lib/apt/lists/*
+RUN apk add tesseract-ocr
+RUN apk add poppler-utils

# Install from main
RUN mkdir /usr/local/share/tessdata
-RUN wget https://github.com/tesseract-ocr/tessdata_fast/raw/main/eng.traineddata -P /usr/local/share/tessdata
-
RUN mkdir src
RUN cd src
-
+RUN wget https://github.com/tesseract-ocr/tessdata_fast/raw/main/eng.traineddata -P /usr/local/share/tessdata
RUN git clone --depth 1 https://github.com/tesseract-ocr/tesseract.git

#RUN curl -fsSL https://ollama.com/install.sh | sh
-# Install to /usr/local
-#RUN wget https://ollama.com/install.sh -O /usr/local/bin/ollama-install
-#RUN chmod +x /usr/local/bin/ollama-install
-#RUN sh /usr/local/bin/ollama-install
-#
-#RUN ls -alh /usr/bin
-#RUN ollama serve & sleep 2 && ollama pull nezahatkorkmaz/deepseek-v3
-#CMD ["sh", "-c", "ollama serve & sleep 2 && python app.py --log-level DEBUG"]
-
-#RUN wget https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/resolve/main/mistral-7b-instruct-v0.1.Q4_K_M.gguf
-RUN python3 -m pip install ctransformers --no-binary ctransformers
+#RUN ollama pull llama3.2
+#RUN cd tesseract && ./autogen.sh && ./configure --build=x86_64-alpine-linux-musl --host=x86_64-alpine-linux-musl && make && make install && cd /tmp/src

# Finally, let's run our app!
-ENV GIN_MODE=release
-ENV SHUFFLE_APP_SDK_TIMEOUT=300
-#ENV LD_LIBRARY_PATH=/usr/local/lib/python3.10/site-packages/ctransformers/lib/basic/libctransformers.so
-#RUN chmod 755 /usr/local/lib/python3.10/site-packages/ctransformers/lib/basic/libctransformers.so
-
-COPY src /app
WORKDIR /app
CMD ["python", "app.py", "--log-level", "DEBUG"]
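
For a quick local check of the image this diff produces, a minimal build-and-run sketch follows. It assumes the build context contains the requirements.txt and src/ directory that the COPY instructions expect; the image tag my-llm-app is a hypothetical placeholder, not something defined in this commit.

# Build from the directory that holds this Dockerfile, requirements.txt and src/
docker build -t my-llm-app .

# Start the container; the image's CMD runs "python app.py --log-level DEBUG"
docker run --rm -it my-llm-app

# Spot-check the OCR tooling installed by the apk steps
docker run --rm my-llm-app tesseract --version
docker run --rm my-llm-app pdftoppm -v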