@@ -1,6 +1,7 @@
 import argparse
 import glob
 import os
+import platform
 import site
 import subprocess
 import sys
@@ -34,6 +35,25 @@ def is_macos():
     return sys.platform.startswith("darwin")


+def is_x86_64():
+    return platform.machine() == "x86_64"
+
+
+def cpu_has_avx2():
+    import cpuinfo
+
+    info = cpuinfo.get_cpu_info()
+    if 'avx2' in info['flags']:
+        return True
+
+    return False
+
+
+def torch_version():
+    from torch import __version__ as torver
+    return torver
+
+
 def is_installed():
     for sitedir in site.getsitepackages():
         if "site-packages" in sitedir and conda_env_path in sitedir:
@@ -137,7 +157,7 @@ def install_webui():
         install_pytorch = "python -m pip install torch==2.0.1a0 torchvision==0.15.2a0 intel_extension_for_pytorch==2.0.110+xpu -f https://developer.intel.com/ipex-whl-stable-xpu"

     # Install Git and then Pytorch
-    run_cmd(f"{install_git} && {install_pytorch}", assert_success=True, environment=True)
+    run_cmd(f"{install_git} && {install_pytorch} && python -m pip install py-cpuinfo", assert_success=True, environment=True)

     # Install the webui requirements
     update_requirements(initial_installation=True)
@@ -162,7 +182,37 @@ def update_requirements(initial_installation=False):
         if os.path.exists(extension_req_path):
             run_cmd("python -m pip install -r " + extension_req_path + " --upgrade", assert_success=True, environment=True)

-    textgen_requirements = open("requirements.txt").read().splitlines()
+    # Detect the PyTorch version
+    torver = torch_version()
+    is_cuda = '+cu' in torver  # 2.0.1+cu117
+    is_rocm = '+rocm' in torver  # 2.0.1+rocm5.4.2
+    is_intel = '+cxx11' in torver  # 2.0.1a0+cxx11.abi
+    is_cpu = '+cpu' in torver  # 2.0.1+cpu
+
+    if is_rocm:
+        if cpu_has_avx2():
+            requirements_file = "requirements_amd.txt"
+        else:
+            requirements_file = "requirements_amd_noavx2.txt"
+    elif is_cpu:
+        if cpu_has_avx2():
+            requirements_file = "requirements_cpu_only.txt"
+        else:
+            requirements_file = "requirements_cpu_only_noavx2.txt"
+    elif is_macos():
+        if is_x86_64():
+            requirements_file = "requirements_apple_intel.txt"
+        else:
+            requirements_file = "requirements_apple_silicon.txt"
+    else:
+        if cpu_has_avx2():
+            requirements_file = "requirements.txt"
+        else:
+            requirements_file = "requirements_noavx2.txt"
+
+    print(f"Using the following requirements file: {requirements_file}")
+
+    textgen_requirements = open(requirements_file).read().splitlines()

     # Workaround for git+ packages not updating properly. Also store requirements.txt for later use
     git_requirements = [req for req in textgen_requirements if req.startswith("git+")]
@@ -178,14 +228,7 @@ def update_requirements(initial_installation=False):
         print(f"Uninstalled {package_name}")

     # Install/update the project requirements
-    run_cmd("python -m pip install -r requirements.txt --upgrade", assert_success=True, environment=True)
-
-    # The following requirements are for CUDA, not CPU
-    # Parse output of 'pip show torch' to determine torch version
-    torver_cmd = run_cmd("python -m pip show torch", assert_success=True, environment=True, capture_output=True)
-    torver = [v.split()[1] for v in torver_cmd.stdout.decode('utf-8').splitlines() if 'Version:' in v][0]
-    is_cuda = '+cu' in torver
-    is_rocm = '+rocm' in torver
+    run_cmd(f"python -m pip install -r {requirements_file} --upgrade", assert_success=True, environment=True)

     # Check for '+cu' or '+rocm' in version string to determine if torch uses CUDA or ROCm. Check for pytorch-cuda as well for backwards compatibility
     if not any((is_cuda, is_rocm)) and run_cmd("conda list -f pytorch-cuda | grep pytorch-cuda", environment=True, capture_output=True).returncode == 1:
@@ -216,29 +259,6 @@ def update_requirements(initial_installation=False):
     # Install the correct version of g++
     run_cmd("conda install -y -k conda-forge::gxx_linux-64=11.2.0", environment=True)

-    if is_rocm:
-        # Pre-installed ExLlama module does not support AMD GPU
-        run_cmd("python -m pip uninstall -y exllama", environment=True)
-        # Get download URL for latest ExLlama ROCm wheel
-        exllama_rocm = run_cmd('curl -s https://api.github.com/repos/jllllll/exllama/releases/latest | grep browser_download_url | grep rocm5.4.2-cp310-cp310-linux_x86_64.whl | cut -d : -f 2,3 | tr -d \'"\'', environment=True, capture_output=True).stdout.decode('utf-8')
-        if 'rocm5.4.2-cp310-cp310-linux_x86_64.whl' in exllama_rocm:
-            run_cmd("python -m pip install " + exllama_rocm, environment=True)
-
-        # Install/Update ROCm AutoGPTQ for AMD GPUs
-        auto_gptq_version = [req for req in textgen_requirements if req.startswith('https://github.com/PanQiWei/AutoGPTQ/releases/download/')][0].split('/')[7]
-        auto_gptq_wheel = run_cmd(f'curl -s https://api.github.com/repos/PanQiWei/AutoGPTQ/releases/tags/{auto_gptq_version} | grep browser_download_url | grep rocm5.4.2-cp310-cp310-linux_x86_64.whl | cut -d : -f 2,3 | tr -d \'"\'', environment=True, capture_output=True).stdout.decode('utf-8')
-        if not auto_gptq_wheel and run_cmd(f"python -m pip install {auto_gptq_wheel} --force-reinstall --no-deps", environment=True).returncode != 0:
-            print_big_message("ERROR: AutoGPTQ wheel installation failed!\nYou will not be able to use GPTQ-based models with AutoGPTQ.")
-
-        # Install GPTQ-for-LLaMa for ROCm
-        gptq_wheel = run_cmd('curl -s https://api.github.com/repos/jllllll/GPTQ-for-LLaMa-CUDA/releases/latest | grep browser_download_url | grep rocm5.4.2-cp310-cp310-linux_x86_64.whl | cut -d : -f 2,3 | tr -d \'"\'', environment=True, capture_output=True).stdout.decode('utf-8')
-        install_gptq = run_cmd("python -m pip install " + gptq_wheel, environment=True).returncode == 0
-        if install_gptq:
-            print("Wheel installation success!")
-        else:
-            print("ERROR: GPTQ wheel installation failed.")
-            print("You will not be able to use GPTQ-based models with GPTQ-for-LLaMa.")
-
     clear_cache()

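Note: the new cpu_has_avx2() helper depends on py-cpuinfo, which is why the Git/PyTorch install command above now also pulls in that package. The probe can be reproduced by hand; a minimal sketch, assuming py-cpuinfo is installed in the active environment (get_cpu_info() returns a dict whose 'flags' entry, where present, is a list of lowercase feature-flag strings):

    import cpuinfo  # provided by the py-cpuinfo package

    # Using .get() instead of direct indexing, since 'flags' can be absent
    # on some platforms (the diff above indexes info['flags'] directly).
    flags = cpuinfo.get_cpu_info().get('flags', [])
    print('avx2' in flags)  # True on CPUs that report AVX2 support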
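The requirements file is then selected from the local-version suffix that prebuilt torch wheels encode in torch.__version__ (see the example strings in the comments above). A minimal standalone illustration of that mapping, using those same sample strings:

    # Sample values of torch.__version__ for the four wheel flavors
    for torver in ("2.0.1+cu117", "2.0.1+rocm5.4.2", "2.0.1a0+cxx11.abi", "2.0.1+cpu"):
        backend = ("cuda" if '+cu' in torver
                   else "rocm" if '+rocm' in torver
                   else "intel" if '+cxx11' in torver
                   else "cpu")
        print(torver, "->", backend)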