# syntax=docker/dockerfile:1
# Dockerfile for Distributed Llama Worker (Raspberry Pi)
# This variant runs as a worker node and connects to a controller.
# The syntax directive above pins a BuildKit frontend that supports
# heredocs (COPY <<EOF), which this file relies on.

FROM arm64v8/debian:bookworm-slim

# Install build dependencies.
# --no-install-recommends keeps the layer minimal (hadolint DL3015);
# the apt list cleanup happens in the same layer so it never persists.
RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        g++ \
        make \
    && rm -rf /var/lib/apt/lists/*

# Set working directory (created automatically if missing)
WORKDIR /app

# Copy only what the build needs — specific paths, not `COPY . .`,
# so unrelated context changes don't bust the build cache.
COPY src/ ./src/
COPY Makefile ./

# Build only the worker application
RUN make dllama

# Create entrypoint script.
# NOTE(review): the heredoc opener and the head of this script (shebang,
# default assignments, start of the option loop) were garbled in the
# original file; they are reconstructed here from the surviving --help
# text ("default: 9999", "default: 4") and option names — confirm
# against the controller variant of this Dockerfile.
# The quoted 'EOF' delimiter disables build-time variable expansion, so
# no backslash-escaping of $ is needed inside the script.
COPY <<'EOF' /app/entrypoint.sh
#!/bin/sh
set -eu

# Defaults match the values documented in --help below.
PORT=9999
NTHREADS=4

# Parse command-line options passed via `docker run <image> [OPTIONS]`
while [ $# -gt 0 ]; do
    case "$1" in
        --port)
            PORT="$2"
            shift 2
            ;;
        --nthreads)
            NTHREADS="$2"
            shift 2
            ;;
        --help|-h)
            echo "Usage: docker run distributed-llama-worker [OPTIONS]"
            echo ""
            echo "Options:"
            echo "  --port      Worker port (default: 9999)"
            echo "  --nthreads  Number of threads (default: 4)"
            echo ""
            echo "Example:"
            echo "  docker run -p 9999:9999 distributed-llama-worker --port 9999 --nthreads 4"
            exit 0
            ;;
        *)
            echo "Unknown option: $1"
            exit 1
            ;;
    esac
done

echo "Starting worker with command:"
echo "./dllama worker --port $PORT --nthreads $NTHREADS"
echo ""

# exec (with quoted args, not a string-built command) replaces the shell
# so dllama runs as PID 1 and receives SIGTERM from `docker stop`.
exec ./dllama worker --port "$PORT" --nthreads "$NTHREADS"
EOF

RUN chmod +x /app/entrypoint.sh

# Run as a non-root system user; port 9999 is unprivileged so no extra
# capabilities are needed.
RUN groupadd --system dllama && useradd --system --gid dllama --home /app dllama \
    && chown -R dllama:dllama /app
USER dllama

# Expose the default worker port (documentation only; publish with -p)
EXPOSE 9999

# Use the entrypoint script (exec form so it is PID 1)
ENTRYPOINT ["/app/entrypoint.sh"]