Spaces:
Running
on
Zero
Running
on
Zero
donbr
committed on
Commit
·
1b211a2
1
Parent(s):
07bd805
enable zerogpu
Browse files
- app.py +2 -0
- requirements.txt +2 -1
app.py
CHANGED
|
@@ -13,6 +13,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria,
|
|
| 13 |
from data import extract_leaves, split_document, handle_broken_output, clean_json_text, sync_empty_fields
|
| 14 |
from examples import examples as input_examples
|
| 15 |
from nuextract_logging import log_event
|
|
|
|
| 16 |
|
| 17 |
|
| 18 |
MAX_INPUT_SIZE = 10_000
|
|
@@ -140,6 +141,7 @@ model = AutoModelForCausalLM.from_pretrained(model_name,
|
|
| 140 |
tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=auth_token)
|
| 141 |
model.eval()
|
| 142 |
|
|
|
|
| 143 |
def gradio_interface_function(template, text, is_example):
|
| 144 |
if len(tokenizer.tokenize(text)) > MAX_INPUT_SIZE:
|
| 145 |
yield "", "Input text too long for space. Download model to use unrestricted.", ""
|
|
|
|
| 13 |
from data import extract_leaves, split_document, handle_broken_output, clean_json_text, sync_empty_fields
|
| 14 |
from examples import examples as input_examples
|
| 15 |
from nuextract_logging import log_event
|
| 16 |
+
import spaces
|
| 17 |
|
| 18 |
|
| 19 |
MAX_INPUT_SIZE = 10_000
|
|
|
|
| 141 |
tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=auth_token)
|
| 142 |
model.eval()
|
| 143 |
|
| 144 |
+
@spaces.GPU
|
| 145 |
def gradio_interface_function(template, text, is_example):
|
| 146 |
if len(tokenizer.tokenize(text)) > MAX_INPUT_SIZE:
|
| 147 |
yield "", "Input text too long for space. Download model to use unrestricted.", ""
|
requirements.txt
CHANGED
|
@@ -1,3 +1,4 @@
|
|
| 1 |
transformers
|
| 2 |
torch
|
| 3 |
-
accelerate
|
|
|
|
|
|
| 1 |
transformers
|
| 2 |
torch
|
| 3 |
+
accelerate
|
| 4 |
+
spaces
|