Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions helm-charts/docsum/Chart.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,11 @@ dependencies:
- name: tgi
version: 0-latest
repository: "file://../common/tgi"
condition: tgi.enabled
- name: vllm
version: 0-latest
repository: "file://../common/vllm"
condition: vllm.enabled
- name: llm-uservice
version: 0-latest
repository: "file://../common/llm-uservice"
Expand Down
6 changes: 4 additions & 2 deletions helm-charts/docsum/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,10 @@ export HFTOKEN="insert-your-huggingface-token-here"
export MODELDIR="/mnt/opea-models"
export MODELNAME="Intel/neural-chat-7b-v3-3"
helm install docsum docsum --set global.HUGGINGFACEHUB_API_TOKEN=${HFTOKEN} --set global.modelUseHostPath=${MODELDIR} --set tgi.LLM_MODEL_ID=${MODELNAME}
# To use Gaudi device
# helm install docsum docsum --set global.HUGGINGFACEHUB_API_TOKEN=${HFTOKEN} --values docsum/gaudi-values.yaml
# To use Gaudi device with TGI
# helm install docsum docsum --set global.HUGGINGFACEHUB_API_TOKEN=${HFTOKEN} --values docsum/gaudi-tgi-values.yaml ...
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

why have the command in comments? Remove the # for the actual commands.

Copy link
Collaborator Author

@eero-t eero-t Jan 15, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is consistent with how all the application READMEs are indicating Helm invocation alternatives. I guess it's to avoid users accidentally copy-pasting them.

Another reason why it's commented here, is because it's not a complete command (notice ... at the end).

# To use Gaudi device with vLLM
# helm install docsum docsum --set global.HUGGINGFACEHUB_API_TOKEN=${HFTOKEN} --values docsum/gaudi-vllm-values.yaml ...
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

ditto

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

For consistency, your change should be done for all READMEs in this repo, not just this particular one => IMHO it's out of scope for this PR.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Agree that READMEs need to be updated together.

```

## Verify
Expand Down
1 change: 1 addition & 0 deletions helm-charts/docsum/ci-gaudi-tgi-values.yaml
1 change: 0 additions & 1 deletion helm-charts/docsum/ci-gaudi-values.yaml

This file was deleted.

1 change: 1 addition & 0 deletions helm-charts/docsum/ci-gaudi-vllm-values.yaml
Original file line number Diff line number Diff line change
@@ -1,7 +1,14 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

vllm:
enabled: false

llm-uservice:
DOCSUM_BACKEND: "TGI"

tgi:
enabled: true
accelDevice: "gaudi"
image:
repository: ghcr.io/huggingface/tgi-gaudi
Expand Down
43 changes: 43 additions & 0 deletions helm-charts/docsum/gaudi-vllm-values.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
# Copyright (C) 2025 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

# Accelerate inferencing in heaviest components to improve performance
# by overriding their subchart values

# Disable the TGI subchart; the vLLM subchart below serves the model instead.
tgi:
  enabled: false

llm-uservice:
  # Point the DocSum LLM microservice at the vLLM backend.
  DOCSUM_BACKEND: "vLLM"

vllm:
  enabled: true
  image:
    repository: opea/vllm-gaudi
    # NOTE(review): "latest" is not reproducible — consider pinning a
    # released version tag.
    tag: "latest"
  resources:
    limits:
      # One Gaudi accelerator per vLLM pod.
      habana.ai/gaudi: 1
  # Allow up to failureThreshold * periodSeconds = 120 * 5s = 10 minutes
  # for startup — presumably to cover model download / HPU graph warm-up;
  # confirm against typical load times for the chosen model.
  startupProbe:
    initialDelaySeconds: 5
    periodSeconds: 5
    timeoutSeconds: 1
    failureThreshold: 120
  readinessProbe:
    initialDelaySeconds: 5
    periodSeconds: 5
    timeoutSeconds: 1
  livenessProbe:
    initialDelaySeconds: 5
    periodSeconds: 5
    timeoutSeconds: 1

  # Habana/OpenMPI runtime settings, quoted so they stay strings when
  # injected as container environment variables.
  PT_HPU_ENABLE_LAZY_COLLECTIVES: "true"
  OMPI_MCA_btl_vader_single_copy_mechanism: "none"

  # Extra CLI flags appended to the vLLM server command.
  # NOTE(review): "--max-seq_len-to-capture" mixes '_' and '-'; vLLM's
  # argument parser appears to accept both separators — verify, or
  # normalize to "--max-seq-len-to-capture" for consistency.
  extraCmdArgs: [
    "--tensor-parallel-size", "1",
    "--block-size", "128",
    "--max-num-seqs", "256",
    "--max-seq_len-to-capture", "2048"
  ]
9 changes: 7 additions & 2 deletions helm-charts/docsum/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -59,16 +59,21 @@ affinity: {}
# To override values in subchart llm-uservice
llm-uservice:
image:
repository: opea/llm-docsum-tgi
repository: opea/llm-docsum
DOCSUM_BACKEND: "TGI"
MAX_INPUT_TOKENS: "1024"
MAX_TOTAL_TOKENS: "2048"
LLM_MODEL_ID: Intel/neural-chat-7b-v3-3

# To override values in subchart tgi
# To override values in TGI/vLLM subcharts
tgi:
enabled: true
LLM_MODEL_ID: Intel/neural-chat-7b-v3-3
MAX_INPUT_LENGTH: "1024"
MAX_TOTAL_TOKENS: "2048"
vllm:
enabled: false
LLM_MODEL_ID: Intel/neural-chat-7b-v3-3

# Use docsum gradio UI
nginx:
Expand Down
Loading