William Mattingly committed
Commit 1cb798b · 1 Parent(s): faace96

removed flash attn

Files changed (2):
  1. app.py +0 -1
  2. requirements.txt +0 -1
app.py CHANGED
@@ -17,7 +17,6 @@ processor = AutoProcessor.from_pretrained(
 model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
     model_id,
     torch_dtype=torch.bfloat16,
-    attn_implementation="flash_attention_2",
     device_map="auto",
     trust_remote_code=True,
 )
 
requirements.txt CHANGED
@@ -6,5 +6,4 @@ accelerate
 pillow
 safetensors
 huggingface-hub
-flash-attn
 pydantic==2.10.6
 
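Note: after this change the model loads with the default attention implementation from transformers. A minimal sketch (not part of this commit; the model id shown is a placeholder, since the diff does not include it) of how the same load could opt into FlashAttention 2 only when the optional flash-attn package is actually installed:

import importlib.util

import torch
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration

model_id = "Qwen/Qwen2.5-VL-7B-Instruct"  # placeholder; the commit does not show the real model_id

# Use FlashAttention 2 only if the flash-attn package is importable;
# otherwise fall back to the library's default attention implementation.
kwargs = dict(
    torch_dtype=torch.bfloat16,
    device_map="auto",
    trust_remote_code=True,
)
if importlib.util.find_spec("flash_attn") is not None:
    kwargs["attn_implementation"] = "flash_attention_2"

processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(model_id, **kwargs)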