diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 9d676c0..0000000 --- a/Dockerfile +++ /dev/null @@ -1,146 +0,0 @@ -## WORK IN PROGRESS -## TODO: Please complete the Dockerfile file -## Please refer to the Dockerfile.all file for the final image - -# ------------------- -# Stage 1: Build Go binaries -# ------------------- -FROM golang:1.24 AS builder - -WORKDIR /app -COPY . . - -RUN cd nexoan/crud-api && go mod download -RUN cd nexoan/crud-api && go build ./... -RUN cd nexoan/crud-api && go build -o crud-service cmd/server/service.go cmd/server/utils.go - -RUN mkdir -p /app/testbin -RUN cd nexoan/crud-api/cmd/server && go test -c -o /app/testbin/crud-test . -RUN cd nexoan/crud-api/db/repository/mongo && go test -c -o /app/testbin/mongo-test . -RUN cd nexoan/crud-api/db/repository/neo4j && go test -c -o /app/testbin/neo4j-test . - -# ------------------- -# Stage 2: Final Image -# ------------------- -FROM ubuntu:22.04 - -# Install system packages -RUN apt-get update && apt-get install -y \ - curl gnupg lsb-release wget net-tools nano \ - apt-transport-https software-properties-common unzip \ - openjdk-17-jdk openjdk-17-jre \ - && rm -rf /var/lib/apt/lists/* - -# Install MongoDB 6.0 -RUN curl -fsSL https://pgp.mongodb.com/server-6.0.asc | gpg --dearmor -o /usr/share/keyrings/mongodb-server-6.0.gpg \ - && echo "deb [ signed-by=/usr/share/keyrings/mongodb-server-6.0.gpg ] https://repo.mongodb.org/apt/ubuntu jammy/mongodb-org/6.0 multiverse" | tee /etc/apt/sources.list.d/mongodb-org-6.0.list \ - && apt-get update \ - && apt-get install -y mongodb-org \ - && rm -rf /var/lib/apt/lists/* - -# Install Eclipse Temurin JDK 21 -RUN wget -O - https://packages.adoptium.net/artifactory/api/gpg/key/public | apt-key add - \ - && echo "deb https://packages.adoptium.net/artifactory/deb $(awk -F= '/^VERSION_CODENAME/{print$2}' /etc/os-release) main" | tee /etc/apt/sources.list.d/adoptium.list \ - && apt-get update \ - && apt-get install -y temurin-21-jdk \ - && rm -rf /var/lib/apt/lists/* - -# Set Java environment -ENV JAVA_HOME=/usr/lib/jvm/java-17-openjdk-amd64 -ENV PATH=$JAVA_HOME/bin:$PATH - -# Verify Java installation -RUN java -version \ - && javac -version \ - && echo "JAVA_HOME: $JAVA_HOME" \ - && ls -la /usr/lib/jvm/ \ - && test -f $JAVA_HOME/bin/java \ - && test -f $JAVA_HOME/bin/javac - -# Install Ballerina 2201.8.0 (compatible with Java 17) -RUN wget https://dist.ballerina.io/downloads/2201.8.0/ballerina-2201.8.0-swan-lake.zip \ - && unzip ballerina-2201.8.0-swan-lake.zip \ - && mv ballerina-2201.8.0-swan-lake /usr/lib/ballerina \ - && ln -s /usr/lib/ballerina/bin/bal /usr/bin/bal \ - && rm ballerina-2201.8.0-swan-lake.zip - -# Install Neo4j 5.13 -RUN wget -O - https://debian.neo4j.com/neotechnology.gpg.key | gpg --dearmor -o /usr/share/keyrings/neo4j.gpg \ - && echo "deb [signed-by=/usr/share/keyrings/neo4j.gpg] https://debian.neo4j.com stable 5" | tee /etc/apt/sources.list.d/neo4j.list \ - && apt-get update \ - && apt-get install -y neo4j=1:5.13.0 cypher-shell \ - && mkdir -p /var/lib/neo4j/data /var/log/neo4j - -# Neo4j configuration -RUN sed -i 's/#server.default_listen_address=0.0.0.0/server.default_listen_address=0.0.0.0/' /etc/neo4j/neo4j.conf \ - && sed -i 's/#server.bolt.enabled=true/server.bolt.enabled=true/' /etc/neo4j/neo4j.conf \ - && sed -i 's/#server.bolt.address=0.0.0.0:7687/server.bolt.address=0.0.0.0:7687/' /etc/neo4j/neo4j.conf \ - && sed -i 's/#server.http.enabled=true/server.http.enabled=true/' /etc/neo4j/neo4j.conf \ - && sed -i 
's/#server.http.address=0.0.0.0:7474/server.http.address=0.0.0.0:7474/' /etc/neo4j/neo4j.conf \ - && sed -i 's/#dbms.security.auth_enabled=true/dbms.security.auth_enabled=true/' /etc/neo4j/neo4j.conf \ - && echo "dbms.security.procedures.unrestricted=apoc.*" >> /etc/neo4j/neo4j.conf - -# Copy compiled binaries and source code -COPY --from=builder /app/nexoan/crud-api/crud-service /usr/local/bin/ -COPY --from=builder /app/testbin/* /usr/local/bin/ -COPY --from=builder /app/nexoan/crud-api /app/nexoan/crud-api -COPY --from=builder /app/nexoan/update-api /app/nexoan/update-api - -WORKDIR /app - -# Environment variables -ENV NEO4J_URI=bolt://localhost:7687 -ENV NEO4J_USER=neo4j -ENV NEO4J_PASSWORD=neo4j123 -ENV MONGO_URI=mongodb://localhost:27017 -ENV MONGO_DB_NAME=testdb -ENV MONGO_COLLECTION=metadata - -# Expose ports -EXPOSE 7474 7687 27017 - -# Add entrypoint script -RUN echo '#!/bin/bash\n\ -set -e\n\ -\n\ -NEO4J_PASSWORD=neo4j123\n\ -\n\ -echo "Starting MongoDB..."\n\ -mongod --fork --logpath /var/log/mongodb/mongod.log\n\ -\n\ -echo "Starting Neo4j..."\n\ -neo4j start\n\ -\n\ -until curl -s http://localhost:7474 > /dev/null; do\n\ - echo "Waiting for Neo4j..."\n\ - sleep 2\n\ -done\n\ -\n\ -echo "Setting Neo4j password..."\n\ -echo "ALTER CURRENT USER SET PASSWORD FROM '\''neo4j'\'' TO '\''$NEO4J_PASSWORD'\'';" | cypher-shell -u neo4j -p '\''neo4j'\'' -d system\n\ -\n\ -until mongosh --eval "db.version()" > /dev/null 2>&1; do\n\ - echo "Waiting for MongoDB..."\n\ - sleep 2\n\ -done\n\ -\n\ -echo "Running CRUD service tests..."\n\ -cd /app/nexoan/crud-api\n\ -crud-test -test.v && mongo-test -test.v && neo4j-test -test.v\n\ -\n\ -echo "Starting CRUD server..."\n\ -./crud-service &\n\ -CRUD_PID=$!\n\ -sleep 5\n\ -\n\ -echo "Running update-api tests..."\n\ -cd /app/nexoan/update-api\n\ -bal test\n\ -\n\ -echo "Stopping CRUD server..."\n\ -kill $CRUD_PID\n\ -\n\ -tail -f /dev/null' > /start.sh && chmod +x /start.sh - -CMD ["/start.sh"] - \ No newline at end of file diff --git a/Dockerfile.all b/Dockerfile.all deleted file mode 100644 index 635af2d..0000000 --- a/Dockerfile.all +++ /dev/null @@ -1,95 +0,0 @@ -## WORK IN PROGRESS -## TODO: Please complete the Dockerfile.all - -# ------------------- -# Stage 1: Build Go binaries -# ------------------- -FROM golang:1.24 AS builder - -WORKDIR /app -COPY . . - -RUN cd nexoan/crud-api && go mod download -RUN cd nexoan/crud-api && go build ./... -RUN cd nexoan/crud-api && go build -o crud-service cmd/server/service.go cmd/server/utils.go - -RUN mkdir -p /app/testbin -RUN cd nexoan/crud-api/cmd/server && go test -c -o /app/testbin/crud-test . -RUN cd nexoan/crud-api/db/repository/mongo && go test -c -o /app/testbin/mongo-test . -RUN cd nexoan/crud-api/db/repository/neo4j && go test -c -o /app/testbin/neo4j-test . 
- -# ------------------- -# Stage 2: Final Image -# ------------------- -FROM ballerina/ballerina:2201.12.2 - -# Switch to root for package installation -USER root - -# Install system packages -RUN apk add --no-cache \ - curl wget net-tools nano \ - unzip bash tar gzip gnupg - -# Install MongoDB -RUN wget https://fastdl.mongodb.org/linux/mongodb-linux-aarch64-ubuntu2204-6.0.12.tgz \ - && tar -zxvf mongodb-linux-aarch64-ubuntu2204-6.0.12.tgz \ - && mv mongodb-linux-aarch64-ubuntu2204-6.0.12/bin/* /usr/local/bin/ \ - && rm -rf mongodb-linux-aarch64-ubuntu2204-6.0.12.tgz mongodb-linux-aarch64-ubuntu2204-6.0.12 \ - && mkdir -p /data/db - -# MongoDB configuration -RUN echo "storage:" > /etc/mongodb.conf \ - && echo " dbPath: /data/db" >> /etc/mongodb.conf \ - && echo " journal:" >> /etc/mongodb.conf \ - && echo " enabled: true" >> /etc/mongodb.conf \ - && echo "systemLog:" >> /etc/mongodb.conf \ - && echo " destination: file" >> /etc/mongodb.conf \ - && echo " logAppend: true" >> /etc/mongodb.conf \ - && echo " path: /var/log/mongodb/mongodb.log" >> /etc/mongodb.conf - -# Install Neo4j -RUN wget https://dist.neo4j.org/neo4j-community-5.13.0-unix.tar.gz \ - && tar -xf neo4j-community-5.13.0-unix.tar.gz \ - && mv neo4j-community-5.13.0 /usr/local/neo4j \ - && rm neo4j-community-5.13.0-unix.tar.gz \ - && mkdir -p /var/lib/neo4j/data /var/log/neo4j - -# Neo4j configuration -RUN sed -i 's/#server.default_listen_address=0.0.0.0/server.default_listen_address=0.0.0.0/' /usr/local/neo4j/conf/neo4j.conf \ - && sed -i 's/#server.bolt.enabled=true/server.bolt.enabled=true/' /usr/local/neo4j/conf/neo4j.conf \ - && sed -i 's/#server.bolt.address=0.0.0.0:7687/server.bolt.address=0.0.0.0:7687/' /usr/local/neo4j/conf/neo4j.conf \ - && sed -i 's/#server.http.enabled=true/server.http.enabled=true/' /usr/local/neo4j/conf/neo4j.conf \ - && sed -i 's/#server.http.address=0.0.0.0:7474/server.http.address=0.0.0.0:7474/' /usr/local/neo4j/conf/neo4j.conf \ - && sed -i 's/#dbms.security.auth_enabled=true/dbms.security.auth_enabled=true/' /usr/local/neo4j/conf/neo4j.conf \ - && echo "dbms.security.procedures.unrestricted=apoc.*" >> /usr/local/neo4j/conf/neo4j.conf - -# Copy compiled binaries and source code -COPY --from=builder /app/nexoan/crud-api/crud-service /usr/local/bin/ -COPY --from=builder /app/testbin/* /usr/local/bin/ -COPY --from=builder /app/nexoan/crud-api /app/nexoan/crud-api -COPY --from=builder /app/nexoan/update-api /app/nexoan/update-api - -WORKDIR /app - -# Environment variables -ENV NEO4J_URI=bolt://localhost:7687 -ENV NEO4J_USER=neo4j -ENV NEO4J_PASSWORD=neo4j123 -ENV MONGO_URI=mongodb://localhost:27017 -ENV MONGO_DB_NAME=testdb -ENV MONGO_COLLECTION=metadata -ENV PATH="/usr/local/neo4j/bin:$PATH" - -# Expose ports -EXPOSE 7474 7687 27017 - -# Create scripts directory and copy start script -RUN mkdir -p /scripts -COPY scripts/start.sh /scripts/start.sh -RUN chmod +x /scripts/start.sh - -# Switch back to the original user -# USER ballerina - -ENTRYPOINT ["/scripts/start.sh"] diff --git a/choreo/DEPLOYMENT.md b/choreo/DEPLOYMENT.md index f2cd152..1e55de3 100644 --- a/choreo/DEPLOYMENT.md +++ b/choreo/DEPLOYMENT.md @@ -34,4 +34,13 @@ and a few file mounts to make things work. When deploying the CRUD service on thing to note is that GRPC services are not exposed through the Gateway in Choreo. So we have to choose the `PROJECT_URL` from `Manage`->`Overview` tabs in Choreo console. Make sure to extract that URL and use it as the `crudServiceURL` config in both `Update` API and -`Query` API services. 
\ No newline at end of file +`Query` API services. + +### Choreo Configuration Groups + +Note that when you add variables to your deployment configuration through Choreo Configuration Groups, +you must remove the prefix that Choreo prepends at deployment time; otherwise the code will not be able to resolve the variable. + +For instance, if you set `DB_URI` as a config parameter and your component is `SERVICE`, linking the configuration group to your deployment creates an `Environment Variable` -> `Configuration Param` mapping like the following. + +`SERVICE_ENV_VAR_DB_URI`, from which the `SERVICE_ENV_VAR_` prefix must be removed so that the code reads it as `DB_URI`. \ No newline at end of file diff --git a/deployment/choreo/development/docker/mongodb/Dockerfile b/deployment/choreo/development/docker/mongodb/Dockerfile index 0c2551a..b39a449 100644 --- a/deployment/choreo/development/docker/mongodb/Dockerfile +++ b/deployment/choreo/development/docker/mongodb/Dockerfile @@ -1,219 +1,15 @@ -# MongoDB Server Dockerfile with GitHub backup restore -FROM ubuntu:20.04 - -# Install prerequisites -RUN apt-get update && apt-get install -y \ - wget gnupg2 curl apt-transport-https sudo \ - && rm -rf /var/lib/apt/lists/* - -# Add MongoDB GPG key and repository for 4.4 -RUN wget -qO - https://www.mongodb.org/static/pgp/server-4.4.asc | apt-key add - \ - && echo "deb [ arch=amd64,arm64 ] https://repo.mongodb.org/apt/ubuntu focal/mongodb-org/4.4 multiverse" > /etc/apt/sources.list.d/mongodb-org-4.4.list - -# Install MongoDB 4.4 and additional tools -RUN apt-get update && apt-get install -y \ - mongodb-org=4.4.28 \ - mongodb-org-server=4.4.28 \ - mongodb-org-shell=4.4.28 \ - mongodb-org-tools=4.4.28 \ - wget unzip \ - && rm -rf /var/lib/apt/lists/* - -# Create choreo user and group (required for Choreo platform) -RUN groupadd -g 10014 choreo && \ - useradd -u 10014 -g choreo -s /bin/bash -m choreouser && \ - mkdir -p /home/choreouser/.cache && \ - chown -R choreouser:choreo /home/choreouser && \ - chmod -R 755 /home/choreouser && \ - echo "choreouser ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers - -# Create directories with proper permissions for choreo user -RUN mkdir -p /var/lib/mongodb /var/log/mongodb /data/db /data/configdb /data/backup /var/run/mongodb \ - && chown -R 10014:10014 /var/lib/mongodb /var/log/mongodb /data/db /data/configdb /data/backup /var/run/mongodb \ - && chmod -R 755 /var/lib/mongodb /var/log/mongodb /data/db /data/configdb /data/backup /var/run/mongodb +# MongoDB Server Dockerfile +FROM mongo:4.4 # Set environment variables ENV MONGO_INITDB_ROOT_USERNAME=admin ENV MONGO_INITDB_ROOT_PASSWORD=admin123 ENV MONGO_INITDB_DATABASE=nexoan -# GitHub backup restore configuration -ENV GITHUB_BACKUP_REPO=${NEXOAN_GITHUB_BACKUP_REPO:-LDFLK/data-backups} \ - BACKUP_VERSION=${NEXOAN_DB_BACKUP_VERSION:-0.0.1} \ - BACKUP_ENVIRONMENT=${NEXOAN_CHOREO_ENVIRONMENT:-development} \ - RESTORE_FROM_GITHUB=true - -# Create MongoDB configuration (without fork for choreo user) -RUN echo "net:\n\ - port: 27017\n\ - bindIp: 0.0.0.0\n\ -storage:\n\ - dbPath: /data/db\n\ -systemLog:\n\ - destination: file\n\ - logAppend: true\n\ - path: /var/log/mongodb/mongod.log\n\ -processManagement:\n\ - fork: false" > /etc/mongod.conf - -# Additional directories are already created above with proper permissions - -# Create entrypoint script with GitHub backup restore -RUN echo '#!/bin/bash\n\ -set -e\n\ -\n\ -# Logging function\n\ -log() {\n\ - echo "[$(date +%Y-%m-%d\ %H:%M:%S)] $1: $2"\n\ -}\n\ -\n\ -# Ensure choreo user has
proper permissions (volumes may reset ownership)\n\ -log "INFO" "Setting up permissions for choreo user..."\n\ -sudo chown -R 10014:10014 /var/lib/mongodb /var/log/mongodb /data/db /data/configdb /data/backup /var/run/mongodb\n\ -sudo chmod -R 755 /var/lib/mongodb /var/log/mongodb /data/db /data/configdb /data/backup /var/run/mongodb\n\ -\n\ -# Function to restore from GitHub backup\n\ -restore_from_github() {\n\ - local github_repo="${GITHUB_BACKUP_REPO:-LDFLK/data-backups}"\n\ - local version="${BACKUP_VERSION:-0.0.1}"\n\ - local environment="${BACKUP_ENVIRONMENT:-development}"\n\ - \n\ - log "INFO" "Starting MongoDB GitHub backup restore..."\n\ - \n\ - # Create temporary directory for download\n\ - local temp_dir=$(mktemp -d)\n\ - local archive_url="https://github.com/$github_repo/archive/refs/tags/$version.zip"\n\ - local archive_file="$temp_dir/archive.zip"\n\ - \n\ - log "INFO" "Downloading backup from: $archive_url"\n\ - if ! wget -q "$archive_url" -O "$archive_file"; then\n\ - log "ERROR" "Failed to download backup archive"\n\ - rm -rf "$temp_dir"\n\ - return 1\n\ - fi\n\ - \n\ - # Extract the archive\n\ - if ! unzip -q "$archive_file" -d "$temp_dir"; then\n\ - log "ERROR" "Failed to extract backup archive"\n\ - rm -rf "$temp_dir"\n\ - return 1\n\ - fi\n\ - \n\ - # Find the MongoDB backup file\n\ - local archive_dir="$temp_dir/data-backups-$version"\n\ - local mongodb_backup="$archive_dir/nexoan/version/$version/$environment/mongodb/nexoan.tar.gz"\n\ - \n\ - if [ ! -f "$mongodb_backup" ]; then\n\ - log "ERROR" "MongoDB backup file not found: $mongodb_backup"\n\ - rm -rf "$temp_dir"\n\ - return 1\n\ - fi\n\ - \n\ - log "INFO" "Found MongoDB backup: $(basename "$mongodb_backup")"\n\ - \n\ - # Extract the backup file\n\ - local backup_extract_dir="$temp_dir/mongodb_restore"\n\ - mkdir -p "$backup_extract_dir"\n\ - \n\ - if ! tar -xzf "$mongodb_backup" -C "$backup_extract_dir"; then\n\ - log "ERROR" "Failed to extract MongoDB backup"\n\ - rm -rf "$temp_dir"\n\ - return 1\n\ - fi\n\ - \n\ - # Check if nexoan database exists in the backup\n\ - if [ ! -d "$backup_extract_dir/nexoan" ]; then\n\ - log "ERROR" "nexoan database not found in backup"\n\ - rm -rf "$temp_dir"\n\ - return 1\n\ - fi\n\ - \n\ - # Handle nested backup structure\n\ - local backup_source="$backup_extract_dir/nexoan"\n\ - if [ -d "$backup_extract_dir/nexoan/nexoan" ]; then\n\ - log "INFO" "Found nested backup structure, using inner directory"\n\ - backup_source="$backup_extract_dir/nexoan/nexoan"\n\ - fi\n\ - \n\ - # Copy backup to MongoDB backup directory\n\ - log "INFO" "Preparing backup for mongorestore..."\n\ - cp -r "$backup_source" /data/backup/nexoan\n\ - \n\ - # Clean up any macOS metadata files\n\ - find /data/backup/nexoan -name "._*" -delete 2>/dev/null || true\n\ - \n\ - # Wait for MongoDB to be ready\n\ - log "INFO" "Waiting for MongoDB to be ready..."\n\ - for i in {1..30}; do\n\ - if mongo --eval "db.adminCommand('\''ping'\'')" > /dev/null 2>&1; then\n\ - log "INFO" "MongoDB is ready!"\n\ - break\n\ - fi\n\ - log "INFO" "Waiting for MongoDB... 
attempt $i/30"\n\ - sleep 2\n\ - done\n\ - \n\ - # Restore using mongorestore (following the documentation approach)\n\ - log "INFO" "Restoring MongoDB database using mongorestore..."\n\ - if mongorestore --host=localhost:27017 \\\n\ - --db=nexoan \\\n\ - --drop \\\n\ - /data/backup/nexoan; then\n\ - \n\ - log "SUCCESS" "MongoDB database restored successfully using mongorestore"\n\ - # Clean up backup files\n\ - rm -rf /data/backup/nexoan\n\ - else\n\ - log "ERROR" "Failed to restore MongoDB database using mongorestore"\n\ - rm -rf /data/backup/nexoan\n\ - rm -rf "$temp_dir"\n\ - return 1\n\ - fi\n\ - \n\ - rm -rf "$temp_dir"\n\ - return 0\n\ -}\n\ -\n\ -# Start MongoDB in background first\n\ -log "INFO" "Starting MongoDB in background..."\n\ -mongod --dbpath /data/db --logpath /var/log/mongodb/mongod.log --bind_ip_all &\n\ -MONGODB_PID=$!\n\ -\n\ -# Wait for MongoDB to start\n\ -log "INFO" "Waiting for MongoDB to start..."\n\ -for i in {1..30}; do\n\ - if mongo --eval "db.adminCommand('\''ping'\'')" > /dev/null 2>&1; then\n\ - log "INFO" "MongoDB is ready!"\n\ - break\n\ - fi\n\ - log "INFO" "Waiting for MongoDB... attempt $i/30"\n\ - sleep 2\n\ -done\n\ -\n\ -# Restore from GitHub if enabled and database doesn'\''t exist\n\ -if [ "${RESTORE_FROM_GITHUB:-false}" = "true" ]; then\n\ - # Check if nexoan database exists\n\ - if ! mongo --eval "db.adminCommand('\''listDatabases'\'')" 2>/dev/null | grep -q "nexoan"; then\n\ - log "INFO" "nexoan database not found, starting GitHub restore..."\n\ - restore_from_github || log "WARNING" "GitHub restore failed, continuing with empty database"\n\ - else\n\ - log "INFO" "nexoan database already exists, skipping restore"\n\ - fi\n\ -fi\n\ -\n\ -# Stop the background MongoDB and start in foreground\n\ -log "INFO" "Stopping background MongoDB and starting in foreground..."\n\ -kill $MONGODB_PID 2>/dev/null || true\n\ -wait $MONGODB_PID 2>/dev/null || true\n\ -sleep 3\n\ -\n\ -# Start MongoDB in foreground\n\ -log "INFO" "Starting MongoDB in foreground mode..."\n\ -exec mongod --dbpath /data/db --logpath /var/log/mongodb/mongod.log --bind_ip_all' > /custom-entrypoint.sh \ - && chmod +x /custom-entrypoint.sh - -# Switch to choreo user (required for Choreo platform) -USER 10014 +# Create additional directories for backups +RUN mkdir -p /data/backup && \ + chown -R mongodb:mongodb /data/backup && \ + chmod -R 755 /data/backup # Define volumes for data persistence VOLUME ["/data/db", "/data/configdb", "/data/backup"] @@ -225,5 +21,4 @@ EXPOSE 27017 HEALTHCHECK --interval=10s --timeout=10s --start-period=60s --retries=10 \ CMD mongo --eval "db.adminCommand('ping')" || exit 1 -# Use custom entrypoint -CMD ["/custom-entrypoint.sh"] +# Default command (inherited from base image) diff --git a/Dockerfile.cleanup b/deployment/docker/cleanup/Dockerfile.cleanup similarity index 100% rename from Dockerfile.cleanup rename to deployment/docker/cleanup/Dockerfile.cleanup diff --git a/Dockerfile.cleanup.choreo b/deployment/docker/cleanup/Dockerfile.cleanup.choreo similarity index 100% rename from Dockerfile.cleanup.choreo rename to deployment/docker/cleanup/Dockerfile.cleanup.choreo diff --git a/docker-compose-choreo.yml b/docker-compose-choreo.yml index 6cf992b..f091007 100644 --- a/docker-compose-choreo.yml +++ b/docker-compose-choreo.yml @@ -13,6 +13,8 @@ services: - "27017:27017" volumes: - mongodb_choreo_data:/data/db + - mongodb_choreo_config:/data/configdb + - mongodb_choreo_backup:/data/backup networks: - choreo-network environment: @@ -28,8 +30,8 @@ services: 
test: ["CMD", "mongo", "--eval", "db.adminCommand('ping')"] interval: 10s timeout: 10s - retries: 5 - start_period: 30s + retries: 10 + start_period: 60s neo4j-choreo: platform: linux/amd64 @@ -125,7 +127,7 @@ services: cleanup-choreo: platform: linux/amd64 build: - context: . + context: ./deployment/docker/cleanup dockerfile: Dockerfile.cleanup.choreo container_name: cleanup-choreo networks: @@ -151,7 +153,7 @@ services: crud-choreo: platform: linux/amd64 build: - context: . + context: ./nexoan/crud-api dockerfile: Dockerfile.crud.choreo container_name: crud-choreo ports: @@ -195,7 +197,7 @@ services: update-choreo: platform: linux/amd64 build: - context: . + context: ./nexoan/update-api dockerfile: Dockerfile.update.choreo container_name: update-choreo ports: @@ -219,7 +221,7 @@ services: query-choreo: platform: linux/amd64 build: - context: . + context: ./nexoan/query-api dockerfile: Dockerfile.query.choreo container_name: query-choreo ports: @@ -272,6 +274,8 @@ networks: volumes: mongodb_choreo_data: + mongodb_choreo_config: + mongodb_choreo_backup: neo4j_choreo_data: neo4j_choreo_logs: postgres_choreo_data: diff --git a/docker-compose.override.yml b/docker-compose.override.yml deleted file mode 100644 index 5a293ae..0000000 --- a/docker-compose.override.yml +++ /dev/null @@ -1,42 +0,0 @@ -# Docker Compose Override for Testing/CI Environment -# This file uses tmpfs volumes to ensure fresh databases on every run -# Usage: docker compose -f docker-compose.yml -f docker-compose.override.yml up - -version: '3.8' - -services: - mongodb: - volumes: - - type: tmpfs - target: /data/db - tmpfs: - size: 1000000000 # 1GB - - neo4j: - volumes: - - type: tmpfs - target: /data - tmpfs: - size: 1000000000 # 1GB - - type: tmpfs - target: /logs - tmpfs: - size: 100000000 # 100MB - - postgres: - volumes: - - type: tmpfs - target: /var/lib/postgresql/data - tmpfs: - size: 1000000000 # 1GB - -# Remove named volumes since we're using tmpfs -volumes: - mongodb_data: - external: false - neo4j_data: - external: false - neo4j_logs: - external: false - postgres_data: - external: false \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml index ef0b9f9..02decb9 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -76,7 +76,7 @@ services: cleanup: platform: linux/amd64 build: - context: . + context: ./deployment/docker/cleanup dockerfile: Dockerfile.cleanup container_name: cleanup networks: @@ -102,7 +102,7 @@ services: crud: platform: linux/amd64 build: - context: . + context: ./nexoan/crud-api dockerfile: Dockerfile.crud args: - DOCKER_DEFAULT_PLATFORM=linux/amd64 @@ -145,7 +145,7 @@ services: update: platform: linux/amd64 build: - context: . + context: ./nexoan/update-api dockerfile: Dockerfile.update container_name: update ports: @@ -169,7 +169,7 @@ services: query: platform: linux/amd64 build: - context: . + context: ./nexoan/query-api dockerfile: Dockerfile.query container_name: query ports: diff --git a/docker/README.md b/docker/README.md deleted file mode 100644 index ac77ba2..0000000 --- a/docker/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# All Must be in Choreo - -## Let's Containerize It. - diff --git a/Dockerfile.crud b/nexoan/crud-api/Dockerfile.crud similarity index 94% rename from Dockerfile.crud rename to nexoan/crud-api/Dockerfile.crud index e2ab507..58894eb 100644 --- a/Dockerfile.crud +++ b/nexoan/crud-api/Dockerfile.crud @@ -47,12 +47,10 @@ WORKDIR /app COPY . . 
# Download dependencies -RUN cd nexoan/crud-api && \ - go mod download +RUN go mod download # Build the application -RUN cd nexoan/crud-api && \ - go build -o crud-service cmd/server/service.go cmd/server/utils.go +RUN go build -o crud-service cmd/server/service.go cmd/server/utils.go ## Create a new user with UID 10014 # RUN addgroup -g 10014 choreo && \ @@ -90,8 +88,8 @@ ENV JAVA_HOME=/usr/lib/jvm/default-java ENV PATH=$JAVA_HOME/bin:$PATH # Copy the built binary from builder stage -COPY --from=builder /app/nexoan/crud-api/crud-service /usr/local/bin/ -COPY --from=builder /app/nexoan/crud-api /app/nexoan/crud-api +COPY --from=builder /app/crud-service /usr/local/bin/ +COPY --from=builder /app /app/nexoan/crud-api COPY --from=builder /etc/passwd /etc/passwd COPY --from=builder /etc/group /etc/group diff --git a/Dockerfile.crud.choreo b/nexoan/crud-api/Dockerfile.crud.choreo similarity index 95% rename from Dockerfile.crud.choreo rename to nexoan/crud-api/Dockerfile.crud.choreo index 2506005..a69a266 100644 --- a/Dockerfile.crud.choreo +++ b/nexoan/crud-api/Dockerfile.crud.choreo @@ -72,12 +72,10 @@ RUN groupadd -g 10014 choreo && \ chmod -R 755 /home/choreouser # Download dependencies -RUN cd nexoan/crud-api && \ - go mod download +RUN go mod download # Build the application as a static binary -RUN cd nexoan/crud-api && \ - CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o crud-service cmd/server/service.go cmd/server/utils.go +RUN CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o crud-service cmd/server/service.go cmd/server/utils.go # Final stage FROM --platform=${TARGETPLATFORM:-linux/amd64} golang:1.24 @@ -111,8 +109,8 @@ ENV JAVA_HOME=/usr/lib/jvm/default-java ENV PATH=$JAVA_HOME/bin:$PATH # Copy the built binary to PATH and the entire crud-api directory -COPY --from=builder /app/nexoan/crud-api/crud-service /usr/local/bin/ -COPY --from=builder /app/nexoan/crud-api /app/nexoan/crud-api +COPY --from=builder /app/crud-service /usr/local/bin/ +COPY --from=builder /app /app/nexoan/crud-api COPY --from=builder /etc/passwd /etc/passwd COPY --from=builder /etc/group /etc/group diff --git a/crud_startup-choreo.sh b/nexoan/crud-api/crud_startup-choreo.sh similarity index 100% rename from crud_startup-choreo.sh rename to nexoan/crud-api/crud_startup-choreo.sh diff --git a/crud_startup.sh b/nexoan/crud-api/crud_startup.sh similarity index 100% rename from crud_startup.sh rename to nexoan/crud-api/crud_startup.sh diff --git a/nexoan/examples/custom_meta_search.py b/nexoan/examples/custom_meta_search.py new file mode 100644 index 0000000..e282679 --- /dev/null +++ b/nexoan/examples/custom_meta_search.py @@ -0,0 +1,534 @@ +import requests +import json +import os +import binascii +from datetime import datetime, timezone +from google.protobuf.wrappers_pb2 import StringValue + +def get_service_urls(): + """Get service URLs from environment variables.""" + query_service_url = os.getenv('QUERY_SERVICE_URL', 'http://0.0.0.0:8081') + update_service_url = os.getenv('UPDATE_SERVICE_URL', 'http://0.0.0.0:8080') + + return { + 'query': f"{query_service_url}/v1/entities", + 'update': f"{update_service_url}/entities" + } + +def decode_protobuf_any_value(any_value): + """Decode a protobuf Any value to get the actual value""" + if isinstance(any_value, str) and any_value.startswith('{"typeUrl"'): + # It's a JSON string, parse it first + try: + any_value = json.loads(any_value) + except json.JSONDecodeError: + return any_value + + if isinstance(any_value, dict) and 
'typeUrl' in any_value and 'value' in any_value: + type_url = any_value['typeUrl'] + value = any_value['value'] + + if 'Struct' in type_url: + try: + # For Struct type, the value is hex-encoded protobuf data + binary_data = bytes.fromhex(value) + + # Try to use protobuf library first + try: + from google.protobuf import struct_pb2 + from google.protobuf.json_format import MessageToDict + + struct_msg = struct_pb2.Struct() + struct_msg.ParseFromString(binary_data) + result = MessageToDict(struct_msg) + + # The result should contain the actual JSON data + if isinstance(result, dict): + # Look for the actual data field + if 'data' in result and isinstance(result['data'], str): + try: + # The data field contains the JSON string + data_json = json.loads(result['data']) + return data_json + except json.JSONDecodeError: + pass + # If no 'data' field, return the result as is + return result + + except ImportError: + print("protobuf library not available, trying manual extraction") + except Exception as e: + print(f"Failed to decode with protobuf library: {e}") + + # Manual extraction fallback - look for JSON patterns in the binary data + try: + # Convert binary data to string and look for JSON + text_data = binary_data.decode('utf-8', errors='ignore') + print(f"Debug: Extracted text from binary: {repr(text_data[:200])}...") + + # Find JSON-like content (look for { and }) + start_idx = text_data.find('{') + end_idx = text_data.rfind('}') + + if start_idx != -1 and end_idx != -1 and end_idx > start_idx: + json_str = text_data[start_idx:end_idx + 1] + print(f"Debug: Extracted JSON string: {repr(json_str[:100])}...") + try: + return json.loads(json_str) + except json.JSONDecodeError as e: + print(f"Debug: JSON decode failed: {e}") + + except Exception as e: + print(f"Failed to extract JSON from binary data: {e}") + + # Try a different approach - look for specific patterns + try: + # The hex data might contain the JSON in a different format + text_data = binary_data.decode('utf-8', errors='ignore') + + # Look for common JSON patterns + patterns = ['"columns"', '"rows"', '"data"'] + for pattern in patterns: + if pattern in text_data: + # Find the start of the JSON object containing this pattern + start_idx = text_data.find('{', text_data.find(pattern) - 100) + if start_idx != -1: + # Find the matching closing brace + brace_count = 0 + end_idx = start_idx + for i, char in enumerate(text_data[start_idx:], start_idx): + if char == '{': + brace_count += 1 + elif char == '}': + brace_count -= 1 + if brace_count == 0: + end_idx = i + break + + if end_idx > start_idx: + json_str = text_data[start_idx:end_idx + 1] + print(f"Debug: Found JSON with pattern {pattern}: {repr(json_str[:100])}...") + try: + return json.loads(json_str) + except json.JSONDecodeError as e: + print(f"Debug: JSON decode failed: {e}") + continue + + except Exception as e: + print(f"Failed to extract JSON with pattern matching: {e}") + + # Final fallback: return the hex value + return value + + except Exception as e: + print(f"Failed to decode Struct: {e}") + return value + + # Return the original value if decoding fails + return any_value + +def get_attribute_display_name(entity_metadata, attribute_key): + """ + Get the human-readable display name for an attribute from entity metadata. 
+ + Args: + entity_metadata: List of metadata key-value pairs from the entity + attribute_key: The technical attribute key (e.g., 'personal_information') + + Returns: + str: Human-readable display name or fallback to formatted key + """ + if entity_metadata: + for meta in entity_metadata: + if meta.get('key') == attribute_key: + return meta.get('value', attribute_key.replace('_', ' ').title()) + + # Fallback to formatted key name + return attribute_key.replace('_', ' ').title() + +def display_tabular_data(data, attribute_name, entity_metadata=None): + """ + Display tabular data in a nice format. + + Args: + data: The decoded tabular data (dict with 'columns' and 'rows') + attribute_name: Name of the attribute for display purposes + entity_metadata: Optional metadata to get human-readable names + """ + if not isinstance(data, dict) or 'columns' not in data or 'rows' not in data: + print(f" โš ๏ธ Unexpected data structure for {attribute_name}: {type(data)}") + print(f" Raw data: {data}") + return + + try: + # Get the human-readable display name from metadata if available + if entity_metadata: + title = get_attribute_display_name(entity_metadata, attribute_name) + else: + title = attribute_name.replace('_', ' ').title() + + # Display the title with nice formatting + print(f"\n ๐Ÿ“Š {title}") + print(" " + "=" * (len(title) + 4)) + + # Display basic info + print(f" ๐Ÿ“‹ Shape: {len(data['rows'])} rows ร— {len(data['columns'])} columns") + print(f" ๐Ÿ“‹ Columns: {data['columns']}") + + # Display the table + print(f"\n ๐Ÿ“Š Data Table:") + for i, row in enumerate(data['rows']): + print(f" Row {i+1}: {row}") + + # Show intelligent analysis based on column structure + if 'field' in data['columns'] and 'value' in data['columns']: + print(f"\n ๐Ÿ” Key Information:") + for row in data['rows']: + if len(row) >= 2: + field = row[1] if len(row) > 1 else 'Unknown' + value = row[2] if len(row) > 2 else 'N/A' + print(f" - {field}: {value}") + + elif 'metric' in data['columns'] and 'target' in data['columns'] and 'actual' in data['columns']: + print(f"\n ๐Ÿ“ˆ Performance Analysis:") + for row in data['rows']: + if len(row) >= 5: + metric = row[3] if len(row) > 3 else 'Unknown' + target = row[4] if len(row) > 4 else 'N/A' + actual = row[0] if len(row) > 0 else 'N/A' + status = row[2] if len(row) > 2 else 'N/A' + print(f" - {metric}: {actual}/{target} ({status})") + + elif 'category' in data['columns'] and 'allocated_amount' in data['columns']: + print(f"\n ๐Ÿ’ฐ Budget Analysis:") + total_allocated = 0 + total_spent = 0 + for row in data['rows']: + if len(row) >= 5: + category = row[4] if len(row) > 4 else 'Unknown' + allocated = row[1] if len(row) > 1 and isinstance(row[1], (int, float)) else 0 + spent = row[2] if len(row) > 2 and isinstance(row[2], (int, float)) else 0 + if isinstance(allocated, (int, float)): + total_allocated += allocated + if isinstance(spent, (int, float)): + total_spent += spent + print(f" - {category}: {allocated:,} allocated, {spent:,} spent") + print(f" - Total: {total_allocated:,} allocated, {total_spent:,} spent") + + except Exception as e: + print(f" โŒ Failed to display data: {e}") + print(f" Raw data: {data}") + +def create_minister_entity_example(): + """ + Example: Create a Minister entity with rich attributes and metadata. + This demonstrates how to create a government minister with tabular data. 
+ """ + print("๐Ÿ›๏ธ Creating Minister Entity Example") + print("=" * 50) + + # Get service URLs + urls = get_service_urls() + update_url = urls['update'] + + # Minister data + minister_data = { + "id": "minister-agriculture-001", + "name": "Minister of Agriculture and Food Security", + "short_name": "Agriculture Minister", + "portfolio": "Agriculture", + "appointment_date": "2024-01-15T00:00:00Z" + } + + # Personal information table + personal_info = { + "columns": ["field", "value", "last_updated"], + "rows": [ + ["full_name", minister_data["name"], "2024-01-15T00:00:00Z"], + ["short_name", minister_data["short_name"], "2024-01-15T00:00:00Z"], + ["portfolio", minister_data["portfolio"], "2024-01-15T00:00:00Z"], + ["appointment_date", minister_data["appointment_date"], "2024-01-15T00:00:00Z"], + ["office_location", "Ministry of Agriculture, Colombo", "2024-01-15T00:00:00Z"], + ["contact_email", f"{minister_data['id']}@gov.lk", "2024-01-15T00:00:00Z"], + ["security_clearance", "Confidential", "2024-01-15T00:00:00Z"], + ["education_background", "PhD in Agricultural Sciences", "2024-01-15T00:00:00Z"], + ["years_of_experience", "15", "2024-01-15T00:00:00Z"] + ] + } + + # Performance metrics table + performance_metrics = { + "columns": ["metric", "target", "actual", "period", "status"], + "rows": [ + ["crop_yield_improvement", "20%", "18%", "Q1-2024", "On Track"], + ["farmer_support_programs", "10", "8", "Q1-2024", "In Progress"], + ["food_security_index", "85%", "82%", "Q1-2024", "Good"], + ["rural_development_projects", "25", "22", "Q1-2024", "Good"], + ["sustainable_farming_adoption", "60%", "55%", "Q1-2024", "Good"] + ] + } + + # Budget allocation table + budget_allocation = { + "columns": ["category", "allocated_amount", "spent_amount", "remaining", "fiscal_year"], + "rows": [ + ["crop_subsidies", 150000000, 120000000, 30000000, "2024"], + ["research_and_development", 80000000, 50000000, 30000000, "2024"], + ["farmer_training_programs", 30000000, 25000000, 5000000, "2024"], + ["infrastructure_development", 200000000, 150000000, 50000000, "2024"], + ["emergency_food_reserve", 100000000, 20000000, 80000000, "2024"] + ] + } + + # Create the entity payload + payload = { + "id": minister_data["id"], + "kind": {"major": "Organization", "minor": "Minister"}, + "created": minister_data["appointment_date"], + "terminated": "", + "name": { + "startTime": minister_data["appointment_date"], + "endTime": "", + "value": minister_data["name"] + }, + "metadata": [ + {"key": "portfolio", "value": minister_data["portfolio"]}, + {"key": "appointment_date", "value": minister_data["appointment_date"]}, + {"key": "entity_type", "value": "government_minister"}, + {"key": "hierarchy_level", "value": "minister"}, + {"key": "ministry", "value": "Ministry of Agriculture"}, + {"key": "responsibility", "value": "Food Security and Rural Development"}, + {"key": "reporting_to", "value": "Prime Minister"}, + # Attribute name mappings for human-readable display + {"key": "personal_information", "value": "Personal Information of Minister"}, + {"key": "performance_metrics", "value": "Performance Metrics and KPIs"}, + {"key": "budget_allocation", "value": "Budget Allocation and Financial Planning"} + ], + "attributes": [ + { + "key": "personal_information", + "value": { + "values": [ + { + "startTime": minister_data["appointment_date"], + "endTime": "", + "value": personal_info + } + ] + } + }, + { + "key": "performance_metrics", + "value": { + "values": [ + { + "startTime": "2024-01-01T00:00:00Z", + "endTime": "", + 
"value": performance_metrics + } + ] + } + }, + { + "key": "budget_allocation", + "value": { + "values": [ + { + "startTime": "2024-01-01T00:00:00Z", + "endTime": "2024-12-31T23:59:59Z", + "value": budget_allocation + } + ] + } + } + ], + "relationships": [] + } + + print(f"๐Ÿ“‹ Creating Minister: {minister_data['name']}") + print(f"๐Ÿข Portfolio: {minister_data['portfolio']}") + print(f"๐Ÿ“… Appointment Date: {minister_data['appointment_date']}") + + # Send the request + try: + response = requests.post(update_url, json=payload) + + if response.status_code in [200, 201]: + print(f"โœ… Successfully created Minister: {minister_data['id']}") + print(f"๐Ÿ“Š Entity includes:") + print(f" - Personal Information: {len(personal_info['rows'])} records") + print(f" - Performance Metrics: {len(performance_metrics['rows'])} records") + print(f" - Budget Allocation: {len(budget_allocation['rows'])} records") + print(f" - Metadata: {len(payload['metadata'])} fields") + return True + else: + print(f"โŒ Failed to create Minister: {response.status_code}") + print(f"Error: {response.text}") + return False + + except Exception as e: + print(f"โŒ Error creating entity: {e}") + return False + +def query_minister_example(): + """ + Example: Query the created minister entity. + """ + print("\n๐Ÿ” Querying Minister Entity Example") + print("=" * 50) + + # Get service URLs + urls = get_service_urls() + query_url = urls['query'] + + # Query for the specific minister + search_url = f"{query_url}/search" + payload = { + "id": "minister-agriculture-001" + } + + try: + response = requests.post(search_url, json=payload) + + if response.status_code == 200: + data = response.json() + print("โœ… Successfully queried minister entity") + print(f"๐Ÿ“Š Response: {json.dumps(data, indent=2)}") + return True + else: + print(f"โŒ Failed to query minister: {response.status_code}") + print(f"Error: {response.text}") + return False + + except Exception as e: + print(f"โŒ Error querying entity: {e}") + return False + +def query_minister_attributes_example(): + """ + Example: Query specific attributes of the minister. 
+ """ + print("\n๐Ÿ“Š Querying Minister Attributes Example") + print("=" * 50) + + # Get service URLs + urls = get_service_urls() + query_url = urls['query'] + + entity_id = "minister-agriculture-001" + attributes_to_query = [ + "personal_information", + "performance_metrics", + "budget_allocation" + ] + + # First, get the entity metadata to retrieve display names + print("๐Ÿ” Retrieving entity metadata for display names...") + entity_metadata = None + try: + search_url = f"{query_url}/search" + search_payload = {"id": entity_id} + search_response = requests.post(search_url, json=search_payload) + + if search_response.status_code == 200: + entity_data = search_response.json() + if 'body' in entity_data and len(entity_data['body']) > 0: + entity_metadata = entity_data['body'][0].get('metadata', []) + print(f"โœ… Retrieved {len(entity_metadata)} metadata entries") + # Show the attribute name mappings + for meta in entity_metadata: + if meta.get('key') in attributes_to_query: + print(f" ๐Ÿ“‹ {meta.get('key')} โ†’ {meta.get('value')}") + else: + print("โš ๏ธ No entity data found") + else: + print(f"โš ๏ธ Failed to retrieve entity metadata: {search_response.status_code}") + except Exception as e: + print(f"โš ๏ธ Error retrieving entity metadata: {e}") + + for attr_name in attributes_to_query: + print(f"\n๐Ÿ” Querying attribute: {attr_name}") + attr_url = f"{query_url}/{entity_id}/attributes/{attr_name}" + + try: + response = requests.get(attr_url) + + if response.status_code == 200: + data = response.json() + print(f"โœ… Successfully retrieved {attr_name}") + print(f"๐Ÿ“… Time Range: {data.get('start', 'N/A')} to {data.get('end', 'N/A')}") + + # Decode the protobuf value + raw_value = data.get('value', {}) + decoded_value = decode_protobuf_any_value(raw_value) + + print(f"๐Ÿ“Š Decoded Value:") + if isinstance(decoded_value, dict) and 'columns' in decoded_value and 'rows' in decoded_value: + # Display as tabular data with metadata for display names + display_tabular_data(decoded_value, attr_name, entity_metadata) + else: + print(f" Raw decoded data: {json.dumps(decoded_value, indent=2)}") + else: + print(f"โŒ Failed to query {attr_name}: {response.status_code}") + + except Exception as e: + print(f"โŒ Error querying attribute {attr_name}: {e}") + +def test_protobuf_decoding(): + """ + Test function to debug protobuf decoding with a sample value. 
+ """ + print("\n๐Ÿงช Testing Protobuf Decoding") + print("=" * 50) + + # Sample protobuf value from your output + sample_value = '{"typeUrl":"type.googleapis.com/google.protobuf.Struct","value":"0AB5050A046461746112AC051AA9057B22636F6C756D6E73223A5B226964222C226669656C64222C2276616C7565222C226C6173745F75706461746564225D2C22726F7773223A5B5B312C2266756C6C5F6E616D65222C224D696E6973746572206F66204167726963756C7475726520616E6420466F6F64205365637572697479222C22323032342D30312D31355430303A30303A30305A225D2C5B322C2273686F72745F6E616D65222C224167726963756C74757265204D696E6973746572222C22323032342D30312D31355430303A30303A30305A225D2C5B332C22706F7274666F6C696F222C224167726963756C74757265222C22323032342D30312D31355430303A30303A30305A225D2C5B342C226170706F696E746D656E745F64617465222C22323032342D30312D31355430303A30303A30305A222C22323032342D30312D31355430303A30303A30305A225D2C5B352C226F66666963655F6C6F636174696F6E222C224D696E6973747279206F66204167726963756C747572652C20436F6C6F6D626F222C22323032342D30312D31355430303A30303A30305A225D2C5B362C22636F6E746163745F656D61696C222C226D696E69737465722D6167726963756C747572652D30303140676F762E6C6B222C22323032342D30312D31355430303A30303A30305A225D2C5B372C2273656375726974795F636C656172616E6365222C22436F6E666964656E7469616C222C22323032342D30312D31355430303A30303A30305A225D2C5B382C22656475636174696F6E5F6261636B67726F756E64222C2250684420696E204167726963756C747572616C20536369656E636573222C22323032342D30312D31355430303A30303A30305A225D2C5B392C2279656172735F6F665F657870657269656E6365222C223135222C22323032342D30312D31355430303A30303A30305A225D5D7D"}' + + print("๐Ÿ” Testing with sample protobuf value...") + decoded = decode_protobuf_any_value(sample_value) + print(f"๐Ÿ“Š Decoded result: {json.dumps(decoded, indent=2)}") + + if isinstance(decoded, dict) and 'columns' in decoded and 'rows' in decoded: + print("โœ… Successfully decoded tabular data!") + display_tabular_data(decoded, "test_attribute") + else: + print("โŒ Failed to decode as tabular data") + print(f"Type: {type(decoded)}") + print(f"Content: {decoded}") + +def main(): + """ + Main function to demonstrate entity creation and querying. 
+ """ + print("๐Ÿ›๏ธ Custom Meta Search Example") + print("Creating and querying a Minister entity with attributes and metadata") + print("=" * 70) + + # Test protobuf decoding first + test_protobuf_decoding() + + # Step 1: Create the minister entity + creation_success = create_minister_entity_example() + + if creation_success: + # Step 2: Query the entity + query_success = query_minister_example() + + if query_success: + # Step 3: Query specific attributes + query_minister_attributes_example() + + print("\n๐ŸŽ‰ Example completed successfully!") + print("=" * 50) + print("๐Ÿ“Š Summary:") + print(" โœ… Created Minister entity with 3 attributes") + print(" โœ… Added 7 metadata fields") + print(" โœ… Demonstrated entity querying") + print(" โœ… Demonstrated attribute querying") + else: + print("\nโŒ Entity querying failed!") + else: + print("\nโŒ Entity creation failed!") + +if __name__ == "__main__": + main() diff --git a/nexoan/examples/requirements.txt b/nexoan/examples/requirements.txt new file mode 100644 index 0000000..7325256 --- /dev/null +++ b/nexoan/examples/requirements.txt @@ -0,0 +1,3 @@ +requests +protobuf +pandas \ No newline at end of file diff --git a/Dockerfile.query b/nexoan/query-api/Dockerfile.query similarity index 93% rename from Dockerfile.query rename to nexoan/query-api/Dockerfile.query index 280f88c..f7d59f3 100644 --- a/Dockerfile.query +++ b/nexoan/query-api/Dockerfile.query @@ -31,8 +31,7 @@ ENV BAL_CONFIG_VAR_QUERYSERVICEHOST=${BAL_CONFIG_VAR_QUERYSERVICEHOST:-0.0.0.0} ENV BAL_CONFIG_VAR_QUERYSERVICEPORT=${BAL_CONFIG_VAR_QUERYSERVICEPORT:-8081} # Build the Ballerina service -RUN cd nexoan/query-api && \ - bal build +RUN bal build EXPOSE 8081 @@ -49,10 +48,10 @@ done\n\ echo "CRUD service is ready!"\n\ \n\ echo "Run ballerina test..."\n\ -bal test nexoan/query-api\n\ +bal test\n\ \n\ echo "Starting Query service..."\n\ -exec bal run nexoan/query-api\n' > /app/start.sh && \ +exec bal run\n' > /app/start.sh && \ chmod +x /app/start.sh && \ dos2unix /app/start.sh diff --git a/Dockerfile.query.choreo b/nexoan/query-api/Dockerfile.query.choreo similarity index 97% rename from Dockerfile.query.choreo rename to nexoan/query-api/Dockerfile.query.choreo index cd2b619..27fa7b7 100644 --- a/Dockerfile.query.choreo +++ b/nexoan/query-api/Dockerfile.query.choreo @@ -132,8 +132,7 @@ USER 10014 # Build the application with explicit platform settings # ENV JAVA_OPTS="-XX:+UseContainerSupport -XX:MaxRAMPercentage=75.0" # Build the Ballerina service -RUN cd nexoan/query-api && \ - bal build +RUN bal build # Final stage FROM --platform=linux/amd64 ballerina/ballerina:2201.11.0 @@ -152,7 +151,7 @@ RUN mkdir -p /home/choreouser/.ballerina && \ chmod -R 755 /home/choreouser # Copy the source code and build artifacts -COPY --from=builder /app/nexoan/query-api /app/nexoan/query-api +COPY --from=builder /app /app/nexoan/query-api COPY --from=builder /etc/passwd /etc/passwd COPY --from=builder /etc/group /etc/group @@ -168,7 +167,7 @@ RUN echo '#!/bin/sh' > /app/start.sh && \ echo 'echo "BAL_CONFIG_VAR_QUERYSERVICEHOST: $BAL_CONFIG_VAR_QUERYSERVICEHOST"' >> /app/start.sh && \ echo 'echo "BAL_CONFIG_VAR_QUERYSERVICEPORT: $BAL_CONFIG_VAR_QUERYSERVICEPORT"' >> /app/start.sh && \ echo '' >> /app/start.sh && \ - echo 'exec bal run /app/nexoan/query-api' >> /app/start.sh && \ + echo 'exec bal run /app' >> /app/start.sh && \ chmod +x /app/start.sh && \ dos2unix /app/start.sh diff --git a/Dockerfile.update b/nexoan/update-api/Dockerfile.update similarity index 93% rename from 
Dockerfile.update rename to nexoan/update-api/Dockerfile.update index 7a223fa..9c711cc 100644 --- a/Dockerfile.update +++ b/nexoan/update-api/Dockerfile.update @@ -31,8 +31,7 @@ ENV BAL_CONFIG_VAR_UPDATESERVICEHOST=${UPDATE_SERVICE_HOST:-0.0.0.0} ENV BAL_CONFIG_VAR_UPDATESERVICEPORT=${UPDATE_SERVICE_PORT:-8080} # Build the Ballerina service -RUN cd nexoan/update-api && \ - bal build +RUN bal build EXPOSE 8080 @@ -50,10 +49,10 @@ done\n\ echo "CRUD service is ready!"\n\ \n\ echo "Run ballerina test..."\n\ -bal test nexoan/update-api\n\ +bal test\n\ \n\ echo "Starting Update service..."\n\ -exec bal run nexoan/update-api\n' > /app/start.sh && \ +exec bal run\n' > /app/start.sh && \ chmod +x /app/start.sh && \ dos2unix /app/start.sh diff --git a/Dockerfile.update.choreo b/nexoan/update-api/Dockerfile.update.choreo similarity index 97% rename from Dockerfile.update.choreo rename to nexoan/update-api/Dockerfile.update.choreo index 3503717..0fa5efa 100644 --- a/Dockerfile.update.choreo +++ b/nexoan/update-api/Dockerfile.update.choreo @@ -132,8 +132,7 @@ USER 10014 # Build the application with explicit platform settings # ENV JAVA_OPTS="-XX:+UseContainerSupport -XX:MaxRAMPercentage=75.0" # Build the Ballerina service -RUN cd nexoan/update-api && \ - bal build +RUN bal build # Final stage FROM --platform=linux/amd64 ballerina/ballerina:2201.11.0 @@ -152,7 +151,7 @@ RUN mkdir -p /home/choreouser/.ballerina && \ chmod -R 755 /home/choreouser # Copy the source code and build artifacts -COPY --from=builder /app/nexoan/update-api /app/nexoan/update-api +COPY --from=builder /app /app/nexoan/update-api COPY --from=builder /etc/passwd /etc/passwd COPY --from=builder /etc/group /etc/group @@ -168,7 +167,7 @@ RUN echo '#!/bin/sh' > /app/start.sh && \ echo 'echo "BAL_CONFIG_VAR_UPDATESERVICEHOST: $BAL_CONFIG_VAR_UPDATESERVICEHOST"' >> /app/start.sh && \ echo 'echo "BAL_CONFIG_VAR_UPDATESERVICEPORT: $BAL_CONFIG_VAR_UPDATESERVICEPORT"' >> /app/start.sh && \ echo '' >> /app/start.sh && \ - echo 'exec bal run /app/nexoan/update-api' >> /app/start.sh && \ + echo 'exec bal run /app' >> /app/start.sh && \ chmod +x /app/start.sh && \ dos2unix /app/start.sh
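To illustrate the Choreo Configuration Groups note added to choreo/DEPLOYMENT.md in this diff, here is a minimal sketch of reading a prefixed variable from the environment. It assumes a component named SERVICE (so the injected variable is SERVICE_ENV_VAR_DB_URI); the resolve_config helper and the prefix constant are illustrative only and are not part of this repository.

import os
from typing import Optional

# Assumption: Choreo prepends "<COMPONENT>_ENV_VAR_" to variables coming from a
# linked Configuration Group; "SERVICE" is a placeholder component name.
CHOREO_PREFIX = "SERVICE_ENV_VAR_"

def resolve_config(name: str, default: Optional[str] = None) -> Optional[str]:
    """Look up a config value by its short name (e.g. DB_URI), falling back to
    the Choreo-prefixed environment variable (e.g. SERVICE_ENV_VAR_DB_URI)."""
    return os.getenv(name) or os.getenv(CHOREO_PREFIX + name, default)

if __name__ == "__main__":
    # With only SERVICE_ENV_VAR_DB_URI set by the configuration group, the
    # application still receives the value under the short key it expects.
    os.environ.setdefault("SERVICE_ENV_VAR_DB_URI", "mongodb://localhost:27017")
    print(resolve_config("DB_URI"))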