Debug Token Access
Browse files
app.py
CHANGED
|
@@ -4,7 +4,7 @@ import torch
|
|
| 4 |
import gradio as gr
|
| 5 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 6 |
|
| 7 |
-
HF_TOKEN = os.getenv("HF_TOKEN")
|
| 8 |
|
| 9 |
BASE_MODEL_ID = "meta-llama/Meta-Llama-3.1-8B-Instruct"
|
| 10 |
PEFT_MODEL_ID = "befm/Be.FM-8B"
|
|
@@ -17,6 +17,11 @@ except Exception:
|
|
| 17 |
print("[WARN] 'peft' not installed; running base model only.")
|
| 18 |
|
| 19 |
def load_model_and_tokenizer():
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 20 |
dtype = torch.float16 if torch.cuda.is_available() else torch.float32
|
| 21 |
tok = AutoTokenizer.from_pretrained(BASE_MODEL_ID, use_auth_token=HF_TOKEN)
|
| 22 |
if tok.pad_token is None:
|
|
|
|
| 4 |
import gradio as gr
|
| 5 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 6 |
|
| 7 |
+
HF_TOKEN = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACEHUB_API_TOKEN")
|
| 8 |
|
| 9 |
BASE_MODEL_ID = "meta-llama/Meta-Llama-3.1-8B-Instruct"
|
| 10 |
PEFT_MODEL_ID = "befm/Be.FM-8B"
|
|
|
|
| 17 |
print("[WARN] 'peft' not installed; running base model only.")
|
| 18 |
|
| 19 |
def load_model_and_tokenizer():
|
| 20 |
+
if HF_TOKEN is None:
|
| 21 |
+
raise RuntimeError(
|
| 22 |
+
"HF_TOKEN is not set. Add it in Space → Settings → Secrets. "
|
| 23 |
+
"Also ensure your account has access to the gated base model."
|
| 24 |
+
)
|
| 25 |
dtype = torch.float16 if torch.cuda.is_available() else torch.float32
|
| 26 |
tok = AutoTokenizer.from_pretrained(BASE_MODEL_ID, use_auth_token=HF_TOKEN)
|
| 27 |
if tok.pad_token is None:
|