@@ -40,6 +40,7 @@ def tpu_platform_plugin() -> Optional[str]:
         import libtpu  # noqa: F401
         is_tpu = True
     except Exception:
+        logger.debug("Failed to import libtpu. tpu platform is skipped.")
         pass

     return "vllm.platforms.tpu.TpuPlatform" if is_tpu else None
@@ -65,6 +66,8 @@ def cuda_platform_plugin() -> Optional[str]:
     except Exception as e:
         if "nvml" not in e.__class__.__name__.lower():
             # If the error is not related to NVML, re-raise it.
+            logger.debug("Unexpected error when importing pynvml, "
+                         "gpu platform is skipped.")
             raise e

         # CUDA is supported on Jetson, but NVML may not be.
@@ -76,6 +79,8 @@ def cuda_is_jetson() -> bool:

         if cuda_is_jetson():
             is_cuda = True
+        else:
+            logger.debug("Failed to import pynvml, cuda platform is skipped.")

     return "vllm.platforms.cuda.CudaPlatform" if is_cuda else None

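For the CUDA probe the new message is gated on the Jetson fallback: NVML may be missing on Jetson devices even though CUDA works, so the debug line fires only when the pynvml import failed and the machine is not a Jetson. A condensed, illustrative sketch of that control flow (the Jetson check is abbreviated here; the real probe also verifies the NVML device count and re-raises non-NVML errors):

```python
import logging
import os
from typing import Optional

logger = logging.getLogger(__name__)


def cuda_platform_sketch() -> Optional[str]:
    is_cuda = False
    try:
        import pynvml  # noqa: F401  (fails on machines without NVML)
        is_cuda = True  # the real probe also checks the device count via NVML
    except Exception:
        # CUDA is supported on Jetson, but NVML may not be.
        if os.path.isfile("/etc/nv_tegra_release"):  # abbreviated Jetson check
            is_cuda = True
        else:
            logger.debug("Failed to import pynvml, cuda platform is skipped.")
    return "vllm.platforms.cuda.CudaPlatform" if is_cuda else None
```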
@@ -92,7 +97,7 @@ def rocm_platform_plugin() -> Optional[str]:
         finally:
             amdsmi.amdsmi_shut_down()
     except Exception:
-        pass
+        logger.debug("Failed to import amdsmi. rocm platform is skipped.")

     return "vllm.platforms.rocm.RocmPlatform" if is_rocm else None

@@ -103,7 +108,8 @@ def hpu_platform_plugin() -> Optional[str]:
         from importlib import util
         is_hpu = util.find_spec('habana_frameworks') is not None
     except Exception:
-        pass
+        logger.debug(
+            "Failed to import habana_frameworks. hpu platform is skipped.")

     return "vllm.platforms.hpu.HpuPlatform" if is_hpu else None

@@ -119,7 +125,8 @@ def xpu_platform_plugin() -> Optional[str]:
         if hasattr(torch, 'xpu') and torch.xpu.is_available():
             is_xpu = True
     except Exception:
-        pass
+        logger.debug("Failed to import intel_extension_for_pytorch or "
+                     "oneccl_bindings_for_pytorch. xpu platform is skipped.")

     return "vllm.platforms.xpu.XPUPlatform" if is_xpu else None

@@ -144,7 +151,9 @@ def neuron_platform_plugin() -> Optional[str]:
         import transformers_neuronx  # noqa: F401
         is_neuron = True
     except ImportError:
-        pass
+        logger.debug(
+            "Failed to import transformers_neuronx. neuron platform is skipped."
+        )

     return "vllm.platforms.neuron.NeuronPlatform" if is_neuron else None

@@ -182,7 +191,7 @@ def resolve_current_platform_cls_qualname() -> str:
             if platform_cls_qualname is not None:
                 activated_plugins.append(name)
         except Exception:
-            pass
+            logger.debug("Failed to resolve platform %s.", name, exc_info=True)

     activated_builtin_plugins = list(
         set(activated_plugins) & set(builtin_platform_plugins.keys()))
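The plugin-resolution loop above differs slightly from the import probes: its debug record is logged with `exc_info=True`, so the traceback of whatever the plugin raised is attached to the message. A small illustration using only the standard `logging` module (nothing vLLM-specific):

```python
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("platform_demo")

try:
    raise RuntimeError("plugin blew up")
except Exception:
    # Message only: one debug line, no traceback attached.
    logger.debug("Failed to resolve platform %s.", "demo")
    # With exc_info=True, the same record also carries the full traceback.
    logger.debug("Failed to resolve platform %s.", "demo", exc_info=True)
```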
@@ -209,7 +218,8 @@ def resolve_current_platform_cls_qualname() -> str:
     else:
         platform_cls_qualname = "vllm.platforms.interface.UnspecifiedPlatform"
         logger.info(
-            "No platform detected, vLLM is running on UnspecifiedPlatform")
+            "No platform detected, vLLM is running on UnspecifiedPlatform. "
+            "Enable debug logging to get more details.")
     return platform_cls_qualname


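All of the new messages are emitted at DEBUG level, which is why the amended `logger.info` line points users at debug logging when no platform is detected. One way to surface them, sketched under the assumption that vLLM's `VLLM_LOGGING_LEVEL` environment variable controls the logger level when it is set before the first vLLM import:

```python
import os

# Assumption: vLLM configures its loggers from VLLM_LOGGING_LEVEL at import
# time, so set it before anything from vllm is imported.
os.environ["VLLM_LOGGING_LEVEL"] = "DEBUG"

from vllm.platforms import current_platform  # noqa: E402

# With debug logging on, a failed probe now prints a line such as
# "Failed to import libtpu. tpu platform is skipped." instead of staying silent.
print(current_platform)
```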