# imports
from datetime import datetime
from dotenv import load_dotenv
from openai import OpenAI
import json
import os
import requests
from pypdf import PdfReader
import gradio as gr
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
load_dotenv(override=True)
openai = OpenAI()
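# Note (assumption about deployment): OpenAI() reads OPENAI_API_KEY from the environment;
# load_dotenv() fills it from a local .env file, and on a HF Space the key can be provided
# as a Space secret instead.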
# def send_email(to_email, subject, body):
#     from_email = os.getenv("GMAIL_EMAIL")
#     password = os.getenv("GMAIL_APP_PASSWORD")
#     if not password:
#         print("❌ Error: GMAIL_APP_PASSWORD not found in environment variables")
#         return
#     msg = MIMEMultipart()
#     msg['From'] = from_email
#     msg['To'] = to_email
#     msg['Subject'] = subject
#     msg.attach(MIMEText(body, 'plain'))
#     try:
#         with smtplib.SMTP('smtp.gmail.com', 587) as server:
#             server.starttls()
#             server.login(from_email, password)
#             server.send_message(msg)
#         print("✅ Email sent successfully!")
#     except Exception as e:
#         print(f"❌ Error sending email: {e}")
def send_email(to_email, subject, body):
    # Just log to console - viewable in HF Spaces logs
    log_entry = {
        "timestamp": datetime.now().isoformat(),
        "to": to_email,
        "subject": subject,
        "body": body
    }
    print(f"✅ Contact Info: {json.dumps(log_entry)}", flush=True)
def record_user_details(email, name="Name not provided", notes="not provided"):
    send_email(to_email=os.getenv("GMAIL_EMAIL"),
               subject=f"Recording interest from {name} with email {email} and notes {notes}",
               body=f"Recording interest from {name} with email {email} and notes {notes}")
    return {"recorded": "ok"}
def record_unknown_question(question):
    send_email(to_email=os.getenv("GMAIL_EMAIL"),
               subject=f"Recording {question} asked that I couldn't answer",
               body=f"Recording {question} asked that I couldn't answer")
    return {"recorded": "ok"}
# This JSON tool definition is sent to the LLM so it can record user details
record_user_details_json = {
    "name": "record_user_details",
    "description": "Use this tool to record that a user is interested in being in touch and provided an email address",
    "parameters": {
        "type": "object",
        "properties": {
            "email": {
                "type": "string",
                "description": "The email address of this user"
            },
            "name": {
                "type": "string",
                "description": "The user's name, if they provided it"
            },
            "notes": {
                "type": "string",
                "description": "Any additional information about the conversation that's worth recording to give context"
            }
        },
        "required": ["email"],
        "additionalProperties": False
    }
}
# This JSON tool definition is sent to the LLM so it can record questions it couldn't answer
record_unknown_question_json = {
    "name": "record_unknown_question",
    "description": "Always use this tool to record any question that couldn't be answered as you didn't know the answer",
    "parameters": {
        "type": "object",
        "properties": {
            "question": {
                "type": "string",
                "description": "The question that couldn't be answered"
            },
        },
        "required": ["question"],
        "additionalProperties": False
    }
}
tools = [{"type": "function", "function": record_user_details_json},
         {"type": "function", "function": record_unknown_question_json}]
class Me:
    def __init__(self):
        self.openai = OpenAI()
        self.name = "Yongpeng Fu"
        reader = PdfReader("me/Profile.pdf")
        self.linkedin = ""
        for page in reader.pages:
            text = page.extract_text()
            if text:
                self.linkedin += text
        with open("me/summary.txt", "r", encoding="utf-8") as f:
            self.summary = f.read()
    def handle_tool_call(self, tool_calls):
        results = []
        for tool_call in tool_calls:
            tool_name = tool_call.function.name
            arguments = json.loads(tool_call.function.arguments)
            print(f"Tool called: {tool_name}", flush=True)
            tool = globals().get(tool_name)
            result = tool(**arguments) if tool else {}
            results.append({"role": "tool",
                            "content": json.dumps(result),
                            "tool_call_id": tool_call.id})
        return results
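    # Each entry returned above is a tool-result message, roughly of the shape
    # {"role": "tool", "content": '{"recorded": "ok"}', "tool_call_id": "call_..."}
    # where the id is whatever the API supplied; chat() appends these back into the conversation.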
    def system_prompt(self):
        system_prompt = f"You are acting as {self.name}, a seasoned Staff Data/AI Engineer with deep expertise in artificial intelligence, \
data engineering, and enterprise-scale system architecture. You are answering questions on {self.name}'s website, \
particularly questions related to {self.name}'s career, background, technical skills, and engineering experience. \
\n\nYour responsibility is to represent {self.name} as a highly skilled technical leader who specializes in:\n\
- Designing and implementing robust, scalable data pipelines\n\
- Building enterprise-grade AI and machine learning systems\n\
- Architecting distributed systems and cloud infrastructure\n\
- Leading complex engineering projects from conception to production deployment\n\
- Working across multiple industries (banking, energy, retail) with proven technical leadership\n\
\n\
When answering questions, emphasize {self.name}'s strong technical foundation, problem-solving capabilities, and ability to deliver \
production-ready solutions at scale. Be professional, confident, and engaging, as if speaking to a potential employer, \
client, or technical collaborator who is evaluating {self.name}'s capabilities.\n\
\n\
You are given a detailed summary of {self.name}'s background and LinkedIn profile to reference when answering questions. \
Use specific examples and technical depth when discussing projects, skills, and experience.\n\
\n\
If you don't know the answer to any question, use your record_unknown_question tool to record the question that you couldn't answer, \
even if it's about something trivial or unrelated to career. \
If the user is engaging in discussion and shows interest, try to facilitate next steps by encouraging them to get in touch via email; \
ask for their email and record it using your record_user_details tool."
        system_prompt += f"\n\n## Summary:\n{self.summary}\n\n## LinkedIn Profile:\n{self.linkedin}\n\n"
        system_prompt += f"With this context, please chat with the user, always staying in character as {self.name}. \
Showcase technical expertise, leadership experience, and the ability to architect and deliver complex data and AI solutions."
        return system_prompt
    def chat(self, message, history):
        messages = [{"role": "system", "content": self.system_prompt()}] + \
            history + [{"role": "user", "content": message}]
        done = False
        while not done:
            response = self.openai.chat.completions.create(
                model="gpt-4o-mini", messages=messages, tools=tools)
            if response.choices[0].finish_reason == "tool_calls":
                message = response.choices[0].message
                tool_calls = message.tool_calls
                results = self.handle_tool_call(tool_calls)
                messages.append(message)
                messages.extend(results)
            else:
                done = True
        return response.choices[0].message.content
if __name__ == "__main__":
    me = Me()
    gr.ChatInterface(
        me.chat,
        type="messages",
        title="Chat with Yongpeng Fu - AI Agent",
        description="Hi! I'm an AI assistant representing Yongpeng Fu, a Staff Data/AI Engineer at RBC Royal Bank. Ask me about Yongpeng's experience, skills, or projects. Feel free to get in touch!",
        examples=[
            "Tell me about yourself",
            "What projects have you worked on?",
            "What are your technical skills?",
            "Let's get in touch"
        ]
    ).launch()
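# To run locally (assuming this file is saved as app.py): create a .env file with
# OPENAI_API_KEY, place me/Profile.pdf and me/summary.txt alongside it, then run
# `python app.py` and open the local Gradio URL printed in the console.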