1 parent d407728 commit 633f536
paddlenlp/utils/memory_utils.py
@@ -15,15 +15,25 @@
 
 
 import paddle
 
+from .log import logger
+from .tools import get_env_device
+
 __all__ = [
     "empty_device_cache",
 ]
 
 
 def empty_device_cache():
-    if paddle.device.is_compiled_with_cuda():
+    device = get_env_device()
+    if device == "gpu":
         paddle.device.cuda.empty_cache()
-    elif paddle.device.is_compiled_with_xpu():
+    elif device == "xpu":
         paddle.device.xpu.empty_cache()
     else:
-        pass
+        if not getattr(empty_device_cache, "has_warned", False):
+            logger.warning(
+                "The current device ({}) does not support empty cache, calling empty_device_cache() will have no effect.".format(
+                    device
+                )
+            )
+            setattr(empty_device_cache, "has_warned", True)
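A minimal usage sketch of the patched helper, assuming it is imported from paddlenlp/utils/memory_utils.py as shown in the diff; the tensor allocation and deletion are illustrative only, and the behavior on non-GPU/XPU devices is the warn-once path added above:

```python
import paddle

from paddlenlp.utils.memory_utils import empty_device_cache

# Illustrative workload: allocate and release a large tensor so the
# framework's caching allocator holds freed blocks.
x = paddle.randn([4096, 4096])
del x

# On "gpu" this maps to paddle.device.cuda.empty_cache(); on "xpu" to
# paddle.device.xpu.empty_cache(). On any other device it logs a single
# warning and is a no-op.
empty_device_cache()
empty_device_cache()  # no repeated warning on unsupported devices
```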