mirror of https://github.com/UofCBaja/Interview-Backend.git
synced 2025-06-14 21:04:21 -06:00
feat(Actions): added auto linting to main and pull requests; fixed some typos
This commit is contained in:
parent b23d807d8d
commit 078bc71bf9
46
.github/workflows/Actions.yaml
vendored
Normal file
@@ -0,0 +1,46 @@
# name of the workflow.
# this is optional.
name: PyLint

# events that will trigger this workflow.
# here, we only have "pull_request", so the workflow will run
# whenever we create a pull request.
# other examples: [push] and [pull_request, push]
on:
  pull_request:

  push:
    branches:
      - main

# each workflow must have at least one job.
# jobs run in parallel by default (we can change that).
# each job groups together a series of steps to accomplish a purpose.
jobs:
  # name of the job
  ruffLint:
    # the platform or OS that the workflow will run on.
    runs-on: ubuntu-latest

    # series of steps to finish the job.
    steps:
      # name of the step.
      # steps run sequentially.
      # this is optional.
      - name: checkout
        # each step can either have "uses" or "run".
        # "uses" runs an action written somewhere other than this workflow,
        # usually from the community.
        # this action checks out the repo code to the runner (instance)
        # running the action.
        uses: actions/checkout@v3

      # another step.
      # this step runs a bash (Ubuntu's default shell) command.
      - name: install ruff
        run: pip install ruff

      - name: Lint
        run: ruff check ./*/*.py --ignore E402
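The last step invokes ruff directly, so the same check can be reproduced outside CI. A minimal local sketch, assuming ruff is already installed in the environment (pip install ruff) and that the workflow's ./*/*.py glob is the intended file set:

import glob
import subprocess
import sys

# Collect the same files the workflow's ./*/*.py pattern matches.
files = glob.glob("*/*.py")
# Run the identical ruff invocation; a non-zero exit mirrors a failing CI job.
result = subprocess.run(["ruff", "check", *files, "--ignore", "E402"])
sys.exit(result.returncode)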
@@ -3,7 +3,6 @@ import json
from filelock import FileLock, Timeout
import time

from NoSheet import NoSheet
import datetime

"""
@@ -47,7 +46,7 @@ def ReadDatabase(file_path):

    # Group the DataFrame by Date, Start Time, and Slot for organization
    for _, row in df.iterrows():
        date = str(row['Date'])
        date = str(row['Date']).split(" ")[0]
        start_time = str(row['Start Time Slot'])
        slot = int(row['Slot']) if not pd.isna(row['Slot']) else 0
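The changed line keeps only the calendar date when a pandas Timestamp is stringified. A small illustrative sketch with a hypothetical DataFrame using the column names from the hunk above:

import pandas as pd

# A Timestamp stringifies as "YYYY-MM-DD HH:MM:SS"; splitting on the space
# keeps just the date portion, which is what the new line above does.
df = pd.DataFrame({
    "Date": pd.to_datetime(["2024-09-16"]),
    "Start Time Slot": ["11:00:00"],
    "Slot": [1],
})

for _, row in df.iterrows():
    print(str(row["Date"]))                # 2024-09-16 00:00:00
    print(str(row["Date"]).split(" ")[0])  # 2024-09-16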
@@ -35,15 +35,7 @@ def SelectAppointment (file_name, appointmentJson):
    ``Contact``: darkicewolf50@gmail.com

    """
    """
    Example of an incoming http post body
    {
        "intervieweeName": "Brock",
        "date": "9/16/2024",
        "startTime": "11:00:00",
        "intervieweeEmail": "darkicewolf50@gmail.com"
    }
    """

    try:
        validEmail = validate_email(appointmentJson["intervieweeEmail"], check_deliverability=True)
        if validEmail:
@@ -58,5 +50,6 @@ def SelectAppointment (file_name, appointmentJson):
        return resBody

    except EmailNotValidError as e:
        print(e)
        return {"Success": False, "validEmail": "false"}
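The try/except above validates the interviewee's address before booking the slot. A standalone sketch of that pattern, assuming validate_email and EmailNotValidError come from the email-validator package (pip install email-validator), with the docstring's example payload:

from email_validator import validate_email, EmailNotValidError

appointmentJson = {
    "intervieweeName": "Brock",
    "date": "9/16/2024",
    "startTime": "11:00:00",
    "intervieweeEmail": "darkicewolf50@gmail.com",
}

try:
    # check_deliverability=True also does a DNS lookup on the domain.
    validEmail = validate_email(appointmentJson["intervieweeEmail"], check_deliverability=True)
    if validEmail:
        print("email accepted:", appointmentJson["intervieweeEmail"])
except EmailNotValidError as e:
    print(e)
    print({"Success": False, "validEmail": "false"})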
17
Other Items/testcontainer.py
Normal file
@@ -0,0 +1,17 @@
import requests
import json

if __name__ == "__main__":
    getres = requests.get("http://bajacloud.ddnsking.com:43443/getAppointments")
    print(getres)
    print(json.dumps(json.loads(getres.text), indent=4))
    # example of a request
    # postdata = {
    #     "intervieweeName": "Brock",
    #     "date": "2024-09-16",
    #     "startTime": "11:00:00",
    #     "intervieweeEmail": "darkicewolf50@gmail.com"
    # }
    # res = requests.post("http://bajacloud.ddnsking.com:43443/SelectInterview", json.dumps(postdata))
    # print(res)
    # print(res.text)
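If the commented-out POST is revived, passing the dict through requests' json= keyword is a slightly cleaner variant, because requests then serializes the body and sets the Content-Type header itself. A sketch using the endpoint and payload from the file above (whether the server requires that header is an assumption):

import requests

postdata = {
    "intervieweeName": "Brock",
    "date": "2024-09-16",
    "startTime": "11:00:00",
    "intervieweeEmail": "darkicewolf50@gmail.com",
}
# json= serializes postdata and sends Content-Type: application/json.
res = requests.post("http://bajacloud.ddnsking.com:43443/SelectInterview", json=postdata)
print(res.status_code)
print(res.text)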
@@ -1,4 +1,3 @@
import time
from http.server import BaseHTTPRequestHandler, HTTPServer

hit_count = 0  # In-memory counter
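For context, a minimal standard-library sketch of how such an in-memory hit counter is typically wired into BaseHTTPRequestHandler; the handler class, route, and port here are illustrative assumptions, not the repo's actual server code:

from http.server import BaseHTTPRequestHandler, HTTPServer

hit_count = 0  # In-memory counter; resets whenever the process restarts

class HitCountHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        global hit_count
        hit_count += 1
        body = f"hits: {hit_count}\n".encode()
        self.send_response(200)
        self.send_header("Content-Type", "text/plain")
        self.send_header("Content-Length", str(len(body)))
        self.end_headers()
        self.wfile.write(body)

if __name__ == "__main__":
    HTTPServer(("0.0.0.0", 8080), HitCountHandler).serve_forever()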