
Commit 8162b67

[Misc][Platform] Improve log info
Signed-off-by: wangxiyuan <[email protected]>
1 parent cdc1fa1 commit 8162b67

File tree: 1 file changed (+16 −6 lines)


vllm/platforms/__init__.py

Lines changed: 16 additions & 6 deletions
@@ -40,6 +40,7 @@ def tpu_platform_plugin() -> Optional[str]:
         import libtpu  # noqa: F401
         is_tpu = True
     except Exception:
+        logger.debug("Failed to import libtpu. tpu platform is skipped.")
         pass

     return "vllm.platforms.tpu.TpuPlatform" if is_tpu else None
@@ -65,6 +66,8 @@ def cuda_platform_plugin() -> Optional[str]:
     except Exception as e:
         if "nvml" not in e.__class__.__name__.lower():
             # If the error is not related to NVML, re-raise it.
+            logger.debug("Unexpected error when importing pynvml, "
+                         "gpu platform is skipped.")
             raise e

         # CUDA is supported on Jetson, but NVML may not be.
@@ -76,6 +79,8 @@ def cuda_is_jetson() -> bool:

         if cuda_is_jetson():
             is_cuda = True
+        else:
+            logger.debug("Failed to import pynvml, cuda platform is skipped.")

     return "vllm.platforms.cuda.CudaPlatform" if is_cuda else None

@@ -92,7 +97,7 @@ def rocm_platform_plugin() -> Optional[str]:
         finally:
             amdsmi.amdsmi_shut_down()
     except Exception:
-        pass
+        logger.debug("Failed to import amdsmi. rocm platform is skipped.")

     return "vllm.platforms.rocm.RocmPlatform" if is_rocm else None

@@ -103,7 +108,8 @@ def hpu_platform_plugin() -> Optional[str]:
         from importlib import util
         is_hpu = util.find_spec('habana_frameworks') is not None
     except Exception:
-        pass
+        logger.debug(
+            "Failed to import habana_frameworks. hpu platform is skipped.")

     return "vllm.platforms.hpu.HpuPlatform" if is_hpu else None

@@ -119,7 +125,8 @@ def xpu_platform_plugin() -> Optional[str]:
         if hasattr(torch, 'xpu') and torch.xpu.is_available():
             is_xpu = True
     except Exception:
-        pass
+        logger.debug("Failed to import intel_extension_for_pytorch or "
+                     "oneccl_bindings_for_pytorch. xpu platform is skipped.")

     return "vllm.platforms.xpu.XPUPlatform" if is_xpu else None

@@ -144,7 +151,9 @@ def neuron_platform_plugin() -> Optional[str]:
         import transformers_neuronx  # noqa: F401
         is_neuron = True
     except ImportError:
-        pass
+        logger.debug(
+            "Failed to import transformers_neuronx. neuron platform is skipped."
+        )

     return "vllm.platforms.neuron.NeuronPlatform" if is_neuron else None

@@ -182,7 +191,7 @@ def resolve_current_platform_cls_qualname() -> str:
             if platform_cls_qualname is not None:
                 activated_plugins.append(name)
         except Exception:
-            pass
+            logger.debug("Failed to resolve platform %s.", name, exc_info=True)

     activated_builtin_plugins = list(
         set(activated_plugins) & set(builtin_platform_plugins.keys()))
@@ -209,7 +218,8 @@ def resolve_current_platform_cls_qualname() -> str:
     else:
         platform_cls_qualname = "vllm.platforms.interface.UnspecifiedPlatform"
         logger.info(
-            "No platform detected, vLLM is running on UnspecifiedPlatform")
+            "No platform detected, vLLM is running on UnspecifiedPlatform. "
+            "Enable debug logging to get more details.")
     return platform_cls_qualname


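Because these messages are emitted at DEBUG level, they stay hidden under the default INFO logging, which is why the UnspecifiedPlatform notice now points users at debug logging. As a rough standard-library illustration continuing the sketch above (inside vLLM itself the knob is its logging configuration, e.g. the VLLM_LOGGING_LEVEL environment variable, if your version exposes it):

import logging

# Raise the root logger to DEBUG so the probe's debug record is printed.
logging.basicConfig(level=logging.DEBUG)

demo_platform_plugin()
# -> DEBUG:__main__:Failed to import fake_accelerator_sdk. demo platform is skipped.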