# NOTE(review): the following stray UI text was captured during copy/paste
# and is not Python code; preserved here as a comment so the file parses:
#   "AT" / "Drop files here" / "or click to upload"
from flask import Flask, request, render_template, redirect, url_for, flash
import sqlite3
import PyPDF2
import re
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import CountVectorizer
import cohere
import ast
import requests
import time
import google.generativeai as genai
import datetime
# One-time NLTK data downloads required by word_tokenize ('punkt') and the
# stopword filtering in calculate_ats_score ('stopwords'); these are cached
# locally after the first run.
nltk.download('stopwords')
nltk.download('punkt')

app = Flask(__name__)
# SECURITY: placeholder secret key — flash()/session signing depends on it.
# Replace with a strong random value loaded from the environment.
app.secret_key = 'your_secret_key'
def init_db(db_path="ats.db"):
    """Create the SQLite schema (jobs, applications) if it does not exist.

    Args:
        db_path: Path of the SQLite database file. Defaults to "ats.db",
            so existing callers are unaffected.

    Schema notes (bug fix): ``jobs.created_at`` is added because the
    index view orders by it and ``add_job`` inserts it, and
    ``applications.missing_keywords`` is added because the apply view
    inserts it — both columns were missing from the original schema and
    caused sqlite3.OperationalError at runtime on a fresh database.
    """
    conn = sqlite3.connect(db_path)
    try:
        cursor = conn.cursor()
        cursor.execute('''CREATE TABLE IF NOT EXISTS jobs (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            title TEXT NOT NULL,
            description TEXT NOT NULL,
            created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP)''')
        cursor.execute('''CREATE TABLE IF NOT EXISTS applications (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            job_id INTEGER,
            name TEXT NOT NULL,
            email TEXT NOT NULL,
            resume_link TEXT,
            ats_score REAL,
            missing_keywords TEXT,
            FOREIGN KEY(job_id) REFERENCES jobs(id))''')
        conn.commit()
    finally:
        # Close even if table creation fails so the handle never leaks.
        conn.close()


init_db()
def extract_text_from_pdf(pdf_file):
    """Return the concatenated text of every page of *pdf_file*.

    Args:
        pdf_file: A path or binary file-like object readable by PyPDF2
            (the uploaded werkzeug FileStorage works here).

    Returns:
        str: All page texts joined together; pages with no extractable
        text layer contribute "".
    """
    # Bug fix: PdfFileReader / numPages / getPage belong to the removed
    # PyPDF2 1.x API and raise in PyPDF2 3.x; PdfReader with the iterable
    # .pages sequence is the supported interface.
    reader = PyPDF2.PdfReader(pdf_file)
    # extract_text() can return None for image-only pages — coerce to "".
    return "".join(page.extract_text() or "" for page in reader.pages)
def calculate_ats_score(resume_text, job_description):
    """Score *resume_text* against *job_description* as a percentage.

    Both texts are lowercased and tokenized; English stopwords and
    non-alphanumeric tokens are discarded. The score is
    100 * |unique shared tokens| / |job tokens (with duplicates)|,
    rounded to two decimals. Returns 0 when the job description yields
    no usable tokens.
    """
    stop_words = set(stopwords.words("english"))

    def _usable_tokens(text):
        # Keep alphanumeric, non-stopword tokens only.
        return [tok for tok in word_tokenize(text.lower())
                if tok not in stop_words and tok.isalnum()]

    resume_tokens = _usable_tokens(resume_text)
    job_tokens = _usable_tokens(job_description)
    if not job_tokens:
        return 0
    shared = set(resume_tokens) & set(job_tokens)
    return round(len(shared) / len(job_tokens) * 100, 2)
def clean_text(text):
    """Return *text* lowercased with everything but letters and whitespace removed."""
    letters_only = re.sub(r'[^a-zA-Z\s]', '', text)
    return letters_only.lower()
def get_keywords(text):
    """Return the vocabulary terms CountVectorizer extracts from *text* as a set.

    English stopwords are excluded. Note this is plain token vocabulary
    extraction — no part-of-speech filtering is performed.
    """
    vectorizer = CountVectorizer(stop_words='english')
    vectorizer.fit_transform([text])
    vocabulary = vectorizer.get_feature_names_out()
    return set(vocabulary)
def calculate_similarity_and_suggestions(resume, job_description):
    """Return the set of job-description keywords absent from *resume*.

    Both texts are cleaned (letters/whitespace only, lowercased) and
    reduced to keyword sets via get_keywords; the result is
    ``job_keywords - resume_keywords``.

    Bug fix: the original also computed a match percentage that was
    never returned or used, and that division raised ZeroDivisionError
    whenever the job description produced no keywords. The dead
    computation is removed.
    """
    resume_keywords = get_keywords(clean_text(resume))
    job_keywords = get_keywords(clean_text(job_description))
    # Only the missing keywords are consumed by the caller (apply()).
    return job_keywords - resume_keywords
# Cohere helper: turn a raw word list into resume-ready phrases.
def get_phrases_cohere(words):
    """Ask Cohere to build four resume bullet sentences from *words*.

    Returns the numbered sentences ("1."–"4.") parsed out of the model
    response, with the leading two-character "N." marker stripped.
    """
    # SECURITY: API key hardcoded in source — move it to an environment
    # variable / secret store and rotate this key.
    co = cohere.Client('6w7ji2k74YoaeG7beQwyrtGxkDqZ9eGcCRVIWd4n')
    response = co.generate(
        model='command-xlarge',
        prompt=f"Create four high quality resume job responsibility sentences using the following list of words to get high ATS score, ensuring every word is included , Ensure that every word is included and the sentences make sense professionally and write from my perspective dont use You instead use I:, {words}",
        max_tokens=400,
    )
    generated = response.generations[0].text
    print(generated)
    phrases = []
    for line in generated.split("\n"):
        # Keep only lines carrying one of the "1."–"4." markers; drop the
        # first two characters of the stripped line (the "N." prefix).
        if any(marker in line for marker in ("1.", "2.", "3.", "4.")):
            phrases.append(line.strip()[2:])
    return phrases
def get_phrases(words):
    """Ask Gemini to build resume bullet sentences from *words*.

    Returns:
        list[str]: The model's response text split on newlines.
    """
    # SECURITY: API key hardcoded in source — move it to an environment
    # variable / secret store and rotate this key.
    genai.configure(api_key='AIzaSyBPWkKzmMH9ROklj48abuNAd2DxoAe4kOM')
    model = genai.GenerativeModel('gemini-pro')
    chat = model.start_chat()
    # Bug fix: the original assignment ended with a trailing comma, which
    # made `prompt` a 1-element tuple instead of a string.
    prompt = f"Create four high quality resume job responsibility sentences using the following list of words to get high ATS score, ensuring every word is included , Ensure that every word is included and the sentences make sense professionally, the following words are from a job description so i want 100% ATS score so write accordingly:, {words}"
    response = chat.send_message(prompt)
    return str(response.text).split('\n')
# This will edit lines in a LaTeX file.
def edit_latex_file(input_file, output_file, edits):
    """Copy *input_file* to *output_file*, replacing whole lines.

    Args:
        input_file: Path of the LaTeX file to read.
        output_file: Path the edited copy is written to.
        edits: Mapping of 1-based line number -> replacement line content
            (a newline is appended automatically). Out-of-range line
            numbers are reported and skipped.

    Errors are reported on stdout rather than raised, preserving the
    original best-effort behavior relied on by callers.
    """
    try:
        # Explicit UTF-8 (bug fix) avoids platform-dependent decode
        # failures on LaTeX sources containing non-ASCII characters.
        with open(input_file, 'r', encoding='utf-8') as file:
            lines = file.readlines()
        # Apply edits by 1-based line number.
        for line_number, new_content in edits.items():
            if 1 <= line_number <= len(lines):
                lines[line_number - 1] = new_content + '\n'
            else:
                print(f"Warning: Line {line_number} is out of range and will be ignored.")
        # Write the updated content to a new file.
        with open(output_file, 'w', encoding='utf-8') as file:
            file.writelines(lines)
        print(f"Successfully updated {output_file}")
    except FileNotFoundError:
        print(f"Error: The file '{input_file}' does not exist.")
    except Exception as e:
        print(f"An error occurred: {e}")
# This will convert a TEX file to a PDF file via CloudConvert.
def convert_latex_to_pdf(input_file_path, output_file_path):
    """Convert a local .tex file to PDF with the CloudConvert jobs API.

    Creates an import/upload -> convert -> export/url job, uploads the
    file, polls until the job finishes, then downloads the produced PDF
    to *output_file_path*. Progress and errors are printed to stdout;
    the function always returns None.
    """
    api_url = "https://api.cloudconvert.com/v2/jobs"
    # SECURITY: long-lived API key hardcoded in source — move it to an
    # environment variable / secret store and rotate this key.
    api_key = "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJhdWQiOiIxIiwianRpIjoiODIzNjQ1MTMzYzQ3YzE2ZjJhOWVhMTY3ODg3MGJkNDVkZWYzMTQyNjk4ZWFmMzcyMWMzNzRlOWFlMTE4OWUyMDBlNTNkMTQ1NDM5NjMwYTciLCJpYXQiOjE3MzE3NTAyMjIuNDg1MjcxLCJuYmYiOjE3MzE3NTAyMjIuNDg1MjcyLCJleHAiOjQ4ODc0MjM4MjIuNDgwNTYzLCJzdWIiOiI3MDIxODM1NiIsInNjb3BlcyI6WyJ1c2VyLnJlYWQiLCJ1c2VyLndyaXRlIiwidGFzay5yZWFkIiwidGFzay53cml0ZSIsIndlYmhvb2sucmVhZCIsIndlYmhvb2sud3JpdGUiLCJwcmVzZXQucmVhZCIsInByZXNldC53cml0ZSJdfQ.LX1VnFFDrOYg6MzMfuMSEn2cwfdfdyW0X9dL20n77oMeLPnuG6u_goMGGgvQnnc9KjCfyJZ-zZO7N2_AXa_Wj6o836AwxIbMMNqQW3Yb3DoLetlyHkhJ_6dwdWAoQmkapW3wiXzdFFFqSC7FPQfzakYQD9DtPpNwdEdQzjA0qI742bScVuU5S8jfuvUclYBYinPIdw7s5gZiWpvgUAoxpNTQ6zIcsARKTVPxRK8NnCF_0loGMk6aoluBiZSm5Ei4fr5l2K3kLdHq9rGmdCPppOaaiNHMqYtvKbEgmD0FbRqZ6SK45j66PxV-R5QJ2BhDXOhWNY2iKfkK-qahn7QF4UprbHJMeCAeVtloYv_f1MkfdFmgDRLcqQe-MHsB5RBGrezyDlffdlt-NTTD-i7sj_0wITNyMV-lICwiPZHJ-N6zhPL8Ua1FgP6OxQeVG23-gysuJWI5wZE9KLBjEnamMvwzW-C6EZYDledqQHRtBcgAg_oTh5MS1FipMv40_AMxtQ02jaSqg4GtOBwupdEvm_UyBtUMihawhgd1RdGl3Ar_oDwNWEycyFqy9uvbK3URUvpnkA9_taJmYxSr4Q4pZG3ToB4xqjbLwLOnyJ2mFjtUIYvp379515LtZ92Q-V2xSuCjBq2B1wwU2VllIT6QhZPbeJwKJdfi0vla5bcIrkc"
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json"
    }
    job_payload = {
        "tasks": {
            "upload-file": {
                "operation": "import/upload"
            },
            "convert-file": {
                "operation": "convert",
                "input": "upload-file",
                "output_format": "pdf"
            },
            "export-file": {
                "operation": "export/url",
                "input": "convert-file"
            }
        }
    }
    # Bug fix: every requests call now carries a timeout so a stalled
    # connection cannot hang the request forever.
    response = requests.post(api_url, json=job_payload, headers=headers, timeout=30)
    if response.status_code != 201:
        print("Error creating job:", response.text)
        return
    job_data = response.json()
    upload_task = next(task for task in job_data["data"]["tasks"] if task["operation"] == "import/upload")
    upload_url = upload_task["result"]["form"]["url"]
    upload_parameters = upload_task["result"]["form"]["parameters"]
    with open(input_file_path, "rb") as file:
        files = {"file": file}
        upload_response = requests.post(upload_url, data=upload_parameters, files=files, timeout=120)
    # NOTE(review): the upload status is deliberately not checked (the
    # original had this check commented out) — confirm whether the upload
    # endpoint signals failure via status codes before re-enabling it.
    print("File uploaded successfully, waiting for the conversion job to start...")
    job_id = job_data["data"]["id"]
    # Poll until CloudConvert reports the job finished or errored.
    while True:
        job_status_response = requests.get(f"{api_url}/{job_id}", headers=headers, timeout=30)
        job_status = job_status_response.json()
        print("Job Status:", job_status["data"]["status"])
        if job_status["data"]["status"] == "finished":
            print("Job finished!")
            break
        elif job_status["data"]["status"] == "error":
            print("Conversion failed:", job_status)
            return
        time.sleep(5)
    export_task = next(task for task in job_status["data"]["tasks"] if task["operation"] == "export/url")
    file_url = export_task["result"]["files"][0]["url"]
    pdf_response = requests.get(file_url, timeout=120)
    if pdf_response.status_code == 200:
        with open(output_file_path, "wb") as output:
            output.write(pdf_response.content)
        print(f"Conversion successful! PDF saved as '{output_file_path}'")
    else:
        print("Error downloading the file:", pdf_response.text)
@app.route("/")
def index():
conn = sqlite3.connect("ats.db")
cursor = conn.cursor()
cursor.execute("SELECT * FROM jobs ORDER BY created_at DESC")
jobs = cursor.fetchall()
conn.close()
return render_template("index.html", jobs=jobs)
@app.route("/add_job", methods=["GET", "POST"])
def add_job():
if request.method == "POST":
title = request.form["title"]
description = request.form["description"]
conn = sqlite3.connect("ats.db")
cursor = conn.cursor()
cursor.execute("INSERT INTO jobs (title, description, created_at) VALUES (?, ?,CURRENT_TIMESTAMP)", (title, description))
conn.commit()
conn.close()
return redirect(url_for("index"))
return render_template("add_job.html")
@app.route("/apply/<int:job_id>", methods=["GET", "POST"])
def apply(job_id):
conn = sqlite3.connect("ats.db")
cursor = conn.cursor()
cursor.execute("SELECT * FROM jobs WHERE id = ?", (job_id,))
job = cursor.fetchone()
conn.close()
if request.method == "POST":
name = request.form["name"]
email = request.form["email"]
resume_file = request.files["resume_file"]
if resume_file and resume_file.filename.endswith('.pdf'):
resume_text = extract_text_from_pdf(resume_file)
print("####,", job[2],"#######",job)
ats_score = calculate_ats_score(resume_text, job[2])
missing_keywords = str(calculate_similarity_and_suggestions(resume_text, job[2]))
if ats_score<50:
phrases = get_phrases_cohere(list(ast.literal_eval(missing_keywords)))
print(222,phrases)
# These are to edit TEX file
input_tex_file = "/Users/alterego/Desktop/ATS/ATS-project/Extra/input.tex"
output_tex_file = "/Users/alterego/Desktop/ATS/ATS-project/Extra/Output.tex"
# Define edits: {line_number: "new content"}
# pr1 = r"\item[$\bullet$]"
role = "{"+job[1]+"}"
edits = {
204: repr(role)[1:-1],
209: r"\item[$\bullet$] " + repr(phrases[0])[1:-1],
210: r"\item[$\bullet$] " + repr(phrases[1])[1:-1],
211: r"\item[$\bullet$] " + repr(phrases[2])[1:-1],
212: r"\item[$\bullet$] " + repr(phrases[3])[1:-1],
}
edit_latex_file(input_tex_file, output_tex_file, edits)
input_file = "/Users/alterego/Desktop/ATS/ATS-project/Extra/Output.tex" # Local .tex file path
output_file = "/Users/alterego/Downloads/Resume.pdf" # Output PDF file path
convert_latex_to_pdf(input_file, output_file)
conn = sqlite3.connect("ats.db")
cursor = conn.cursor()
cursor.execute(
"INSERT INTO applications (job_id, name, email, resume_link, ats_score,missing_keywords) VALUES (?, ?, ?, ?, ?, ?)",
(job_id, name, email, "uploaded_resume.pdf", ats_score, missing_keywords)
)
conn.commit()
conn.close()
flash(f"Application submitted successfully! Your ATS Score: {ats_score}%")
return redirect(url_for("index"))
else:
flash("Only PDF files are accepted for resumes.")
return render_template("apply.html", job_id=job_id, job=job)
@app.route("/view_applications/<int:job_id>")
def view_applications(job_id):
conn = sqlite3.connect("ats.db")
cursor = conn.cursor()
cursor.execute("SELECT * FROM applications WHERE job_id = ?", (job_id,))
applications = cursor.fetchall()
conn.close()
return render_template("view_applications.html", applications=applications, job_id=job_id)
if __name__ == "__main__":
app.run(debug=True)
# NOTE(review): stray assistant/chatbot text captured during copy/paste —
# not Python code; preserved here as a comment so the file parses:
#   "Hi! I can help you with any questions about Streamlit and Python. ..."