12 changes: 6 additions & 6 deletions .github/workflows/llm_integration.yml
@@ -639,14 +639,14 @@ jobs:
             serve -m test=file:/opt/ml/model/test/
           python3 llm/client.py lmi_dist starcoder2-7b
           docker rm -f $(docker ps -aq)
-      - name: Test gemma-7b
+      - name: Test gemma-2b
         working-directory: tests/integration
         run: |
           rm -rf models
-          python3 llm/prepare.py lmi_dist gemma-7b
+          python3 llm/prepare.py lmi_dist gemma-2b
           ./launch_container.sh deepjavalibrary/djl-serving:$DJLSERVING_DOCKER_TAG $PWD/models lmi \
             serve -m test=file:/opt/ml/model/test/
-          python3 llm/client.py lmi_dist gemma-7b
+          python3 llm/client.py lmi_dist gemma-2b
           docker rm -f $(docker ps -aq)
       - name: Test llama2-13b-gptq
         working-directory: tests/integration
@@ -784,14 +784,14 @@ jobs:
             serve -m test=file:/opt/ml/model/test/
           python3 llm/client.py vllm starcoder2-7b
           docker rm -f $(docker ps -aq)
-      - name: Test gemma-7b
+      - name: Test gemma-2b
         working-directory: tests/integration
         run: |
           rm -rf models
-          python3 llm/prepare.py vllm gemma-7b
+          python3 llm/prepare.py vllm gemma-2b
           ./launch_container.sh deepjavalibrary/djl-serving:$DJLSERVING_DOCKER_TAG $PWD/models lmi \
             serve -m test=file:/opt/ml/model/test/
-          python3 llm/client.py vllm gemma-7b
+          python3 llm/client.py vllm gemma-2b
           docker rm -f $(docker ps -aq)
       - name: Test llama2-7b-chat
         working-directory: tests/integration
19 changes: 19 additions & 0 deletions tests/integration/launch_container.sh
@@ -34,6 +34,22 @@ is_p4d_or_p5() {
     echo "false"
   fi
 }
+
+support_nvme() {
+  local instance_type=$(get_instance_type)
+  if [[ "$instance_type" == *"p4d"* || "$instance_type" == *"p5"* || "$instance_type" == *"g5"* || "$instance_type" == *"g6"* ]]; then
+    echo "true"
+  else
+    echo "false"
+  fi
+}
+
+if [[ "$(support_nvme)" == *"true"* ]]; then
+  sudo rm -rf /opt/dlami/nvme/inf_tmp || true
+  sudo mkdir -p /opt/dlami/nvme/inf_tmp && sudo chmod 777 /opt/dlami/nvme/inf_tmp
+  nvme="/opt/dlami/nvme/inf_tmp:/tmp"
+fi
+
 is_llm=false
 if [[ "$platform" == *"-gpu"* ]]; then # if the platform has cuda capabilities
   runtime="nvidia"
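Note: the new support_nvme helper mirrors is_p4d_or_p5 above but also matches g5/g6 instance types, which expose local NVMe storage (mounted at /opt/dlami/nvme on the DLAMI). A minimal standalone sketch of the same glob matching, with hypothetical instance-type values, showing which types get the scratch mount:

    # Hypothetical instance types run through the same pattern support_nvme uses
    for instance_type in p4d.24xlarge p5.48xlarge g5.12xlarge g6.12xlarge c6i.4xlarge; do
      if [[ "$instance_type" == *"p4d"* || "$instance_type" == *"p5"* || "$instance_type" == *"g5"* || "$instance_type" == *"g6"* ]]; then
        echo "$instance_type -> NVMe scratch dir mounted over /tmp"
      else
        echo "$instance_type -> container keeps its default /tmp"
      fi
    done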
@@ -80,6 +96,7 @@ if $is_partition; then
     -v ${PWD}/logs:/opt/djl/logs \
     -v ~/.aws:/root/.aws \
     -v ~/sagemaker_infra/:/opt/ml/.sagemaker_infra/:ro \
+    ${nvme:+-v ${nvme}} \
     ${env_file} \
     -e TEST_TELEMETRY_COLLECTION='true' \
     ${runtime:+--runtime="${runtime}"} \
@@ -96,6 +113,7 @@ elif [[ "$docker_image" == *"text-generation-inference"* ]]; then
     -p 8080:80 \
     ${model_path:+-v ${model_path}:/opt/ml/model:ro} \
     -v ~/sagemaker_infra/:/opt/ml/.sagemaker_infra/:ro \
+    ${nvme:+-v ${nvme}} \
     ${env_file} \
     ${runtime:+--runtime="${runtime}"} \
     ${shm:+--shm-size="${shm}"} \
@@ -114,6 +132,7 @@ else
     -v ${PWD}/logs:/opt/djl/logs \
     -v ~/.aws:/home/djl/.aws \
     -v ~/sagemaker_infra/:/opt/ml/.sagemaker_infra/:ro \
+    ${nvme:+-v ${nvme}} \
     ${env_file} \
     -e TEST_TELEMETRY_COLLECTION='true' \
     -e SERVING_OPTS='-Dai.djl.logging.level=debug' \
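In each docker run invocation above, the added ${nvme:+-v ${nvme}} argument relies on bash's ${var:+word} expansion: the -v bind mount is passed only when the NVMe scratch directory was set up, and disappears entirely otherwise. A minimal sketch of the idiom (echo stands in for docker run):

    nvme="/opt/dlami/nvme/inf_tmp:/tmp"    # set when support_nvme reports true
    echo docker run ${nvme:+-v ${nvme}}    # -> docker run -v /opt/dlami/nvme/inf_tmp:/tmp

    unset nvme
    echo docker run ${nvme:+-v ${nvme}}    # -> docker run   (flag omitted when nvme is unset or empty)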
15 changes: 15 additions & 0 deletions tests/integration/llm/client.py
@@ -241,6 +241,11 @@ def get_model_name():
         "batch_size": [1, 4],
         "seq_length": [256]
     },
+    "gemma-2b": {
+        "max_memory_per_gpu": [25.0],
+        "batch_size": [1, 4],
+        "seq_length": [256],
+    },
     "llama2-13b-gptq": {
         "max_memory_per_gpu": [23.0],
         "batch_size": [1, 4],
@@ -407,6 +412,11 @@ def get_model_name():
         "batch_size": [1, 4],
         "seq_length": [256]
     },
+    "gemma-2b": {
+        "max_memory_per_gpu": [25.0],
+        "batch_size": [1, 4],
+        "seq_length": [256],
+    },
 }
 
 vllm_chat_model_spec = {
@@ -553,6 +563,11 @@ def get_model_name():
         "batch_size": [1, 4],
         "seq_length": [256],
     },
+    "gemma-2b": {
+        "max_memory_per_gpu": [25.0],
+        "batch_size": [1, 4],
+        "seq_length": [256],
+    },
     "mistral-7b": {
         "max_memory_per_gpu": [25.0],
         "batch_size": [1, 4],
14 changes: 14 additions & 0 deletions tests/integration/llm/prepare.py
@@ -382,6 +382,13 @@
         "option.max_rolling_batch_size": 4,
         "option.max_model_len": 2656,
     },
+    "gemma-2b": {
+        "option.model_id": "s3://djl-llm/gemma-2b",
+        "option.task": "text-generation",
+        "option.trust_remote_code": True,
+        "option.tensor_parallel_degree": 1,
+        "option.max_rolling_batch_size": 256,
+    },
     "llama2-13b-gptq": {
         "option.model_id": "s3://djl-llm/TheBloke-Llama-2-13b-Chat-GPTQ/",
         "option.task": "text-generation",
@@ -678,6 +685,13 @@
         "option.max_rolling_batch_size": 4,
         "option.max_model_len": 2656,
     },
+    "gemma-2b": {
+        "option.model_id": "s3://djl-llm/gemma-2b",
+        "option.task": "text-generation",
+        "option.trust_remote_code": True,
+        "option.tensor_parallel_degree": 1,
+        "option.max_rolling_batch_size": 256,
+    },
     "llama2-7b-chat": {
         "option.model_id": "s3://djl-llm/meta-llama-Llama-2-7b-chat-hf/",
         "option.task": "text-generation",
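Taken together, the new gemma-2b coverage can also be reproduced outside CI with the same steps the workflow runs; a hedged sketch, assuming DJLSERVING_DOCKER_TAG is exported and the s3://djl-llm/gemma-2b artifacts are accessible from the test host:

    # Hypothetical local run of the new lmi_dist gemma-2b test, mirroring the workflow steps above
    cd tests/integration
    rm -rf models
    python3 llm/prepare.py lmi_dist gemma-2b
    ./launch_container.sh deepjavalibrary/djl-serving:$DJLSERVING_DOCKER_TAG $PWD/models lmi \
      serve -m test=file:/opt/ml/model/test/
    python3 llm/client.py lmi_dist gemma-2b
    docker rm -f $(docker ps -aq)

The vllm variant is identical except that lmi_dist is replaced by vllm in the prepare.py and client.py invocations.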