
Commit

Cleanup in progress. Don't step onto the working area!
svaningelgem committed Jul 25, 2024
1 parent 0a908b2 commit 8c178c9
Showing 6 changed files with 169 additions and 244 deletions.
4 changes: 2 additions & 2 deletions .env.example
@@ -7,9 +7,9 @@ GET_URL="https://<YOUR PANEL DOMAIN>/api/client"
 SERVERS_URL="https://<YOUR PANEL DOMAIN>/api/client/servers/"
 
 # If your request fails MAX_RETRIES is how many times it will try to request before "failing"
-# Default value is 5 & Default delay is 15 (in seconds)
+# Default value is 5 & default backoff factor is 1 (1s, 2s, 4s, ...)
 MAX_RETRIES="5"
-RETRY_DELAY_SECS="15"
+RETRY_BACKOFF_FACTOR="1"
 
 ## If you do not wish to recieve any emails or alerts on script failure please set SEND_EMAILS="False" ##
 SEND_EMAILS="True"
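
Note on this config change: the fixed RETRY_DELAY_SECS is replaced by an exponential backoff factor, which urllib3's Retry turns into a doubling sleep between attempts. A minimal sketch of the documented schedule (factor * 2 ** (attempt - 1), as in urllib3 2.x; 1.x sleeps 0s before the first retry):

# Sketch only: reproduces urllib3's documented backoff schedule for illustration.
def backoff_delays(factor: float, retries: int) -> list[float]:
    return [factor * 2 ** (attempt - 1) for attempt in range(1, retries + 1)]

print(backoff_delays(1.0, 5))  # [1.0, 2.0, 4.0, 8.0, 16.0] -- the "1s, 2s, 4s, ..." above
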
2 changes: 1 addition & 1 deletion alert.py
@@ -1,5 +1,5 @@
 import smtplib
-from setup import logger
+from common import logger
 from email.message import EmailMessage
 
 
93 changes: 29 additions & 64 deletions api_request.py
@@ -1,80 +1,45 @@
-import os
 import time
-import json
+from functools import lru_cache
 
 import requests
+from requests.adapters import HTTPAdapter
+from urllib3 import Retry
 
+from common import API_KEY, MAX_RETRIES, RETRY_BACKOFF_FACTOR, logger
 
-from setup import logger
-from http import HTTPStatus
 
-API_KEY = os.getenv("API_KEY")
-GET_URL = os.getenv("GET_URL")
-MAX_RETRIES = int(os.getenv("MAX_RETRIES", 5))
-RETRY_DELAY_SECS = int(os.getenv("RETRY_DELAY_SECS", 15))
+__all__ = ["request"]
 
-if not API_KEY:
-    raise ValueError("API key not found. Please set the API_KEY environment variable.")
-if not GET_URL:
-    raise ValueError("URL not found. Please set the GET_URL environment variable.")
 
+@lru_cache(1)
+def get_session():
+    retry_strategy = Retry(
+        total=MAX_RETRIES,
+        backoff_factor=RETRY_BACKOFF_FACTOR,
+    )
 
-def get_session(api_key):
     session = requests.Session()
-    session.headers.update({"Authorization": f"Bearer {api_key}"})
+    session.headers.update({
+        "Authorization": "Bearer " + API_KEY,
+        # https://dashflo.net/docs/api/pterodactyl/v1/
+        "Content-Type": "application/json",
+        "Accept": "application/json",
+    })
+
+    adapter = HTTPAdapter(max_retries=retry_strategy)
+
+    session.mount("https://", adapter)
+    session.mount("http://", adapter)
 
     return session
 
 
-def get_response(session):
+def request(url, method: str = "GET", data=None) -> dict:
-    for retry in range(MAX_RETRIES):
-        try:
-            response = session.get(GET_URL)
-            if response.status_code == HTTPStatus.OK:
-                if "application/json" in response.headers["Content-Type"]:
-                    return response
-                else:
-                    logger.error("Invalid API Key - received non-JSON response")
-                    raise ValueError("Invalid API Key")
-            else:
-                logger.error(
-                    f"Error: {response.status_code} - {HTTPStatus(response.status_code).phrase}"
-                )
-                raise requests.exceptions.HTTPError(response.status_code)
+    response = get_session().request(method=method, url=url, data=data)
+    response.raise_for_status()
+    return response.json()
-        except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as e:
-            logger.error(f"Network Error: {str(e)}")
-        except requests.exceptions.RequestException as e:
-            logger.error(f"Request Exception: {str(e)}")
-
-            if retry < MAX_RETRIES - 1:
-                logger.info(f"Retrying in {RETRY_DELAY_SECS} seconds...")
-                time.sleep(RETRY_DELAY_SECS)
-            else:
-                raise requests.exceptions.RetryError("Max retries exceeded.")
-
-
-def process_response(response):
-    try:
-        if "application/json" in response.headers["Content-Type"]:
-            server_info = response.json()["data"]
-            return server_info
-        else:
-            logger.error("Received non-JSON response")
-            raise ValueError("Received non-JSON response")
-    except json.JSONDecodeError:
-        logger.error("Error decoding JSON response")
-        raise
-    except (ValueError, KeyError):
-        logger.error("Error processing response")
-        raise
-
-
-def main():
-    with get_session(API_KEY) as session:
-        response = get_response(session)
-        return process_response(response)
-
-
-if __name__ == "__main__":
-    try:
-        result = main()
-        logger.info(f"Request executed successfully. Result: {result}")
-    except Exception as e:
-        logger.error(f"Script execution failed: {e}")
203 changes: 57 additions & 146 deletions backup.py
@@ -1,100 +1,52 @@
 import os
 import time
-import requests
-from setup import logger
-from api_request import main as make_request
-from alert import EmailAlert
 
-API_KEY = f"Bearer {os.getenv('API_KEY')}"
-SERVERS_URL = os.getenv("SERVERS_URL")
-SEND_EMAILS = os.getenv("SEND_EMAILS", "True")  # Default to "True" if not set
-ROTATE = os.getenv("ROTATE", "False")  # Default to "False" if not set
-POST_BACKUP_SCRIPT = os.getenv("POST_BACKUP_SCRIPT")  # optional
-
-# Instantiate EmailAlert object
-email_alert = EmailAlert(
-    os.getenv("FROM_EMAIL"),
-    os.getenv("FROM_PASSWORD"),
-    os.getenv("SMTP_SERVER"),
-    os.getenv("SMTP_PORT"),
-)
-
-
-def notify_error():
-    if SEND_EMAILS.lower() == "true":
-        if not (
-            os.getenv("EMAIL_SUBJECT")
-            and os.getenv("EMAIL_BODY")
-            and os.getenv("TO_EMAIL")
-        ):
-            logger.error(
-                "One or more email environment variables are not set. Can't send notification email."
-            )
-            return
-        email_alert.send(
-            os.getenv("EMAIL_SUBJECT"), os.getenv("EMAIL_BODY"), os.getenv("TO_EMAIL")
-        )
-
+import requests
 
-if not API_KEY:
-    logger.error("API_KEY environment variable not set. Can't proceed without it.")
-    notify_error()
-    exit(1)
-
-if not SERVERS_URL:
-    logger.error("SERVERS_URL environment variable not set. Can't proceed without it.")
-    notify_error()
-    exit(1)
+from api_request import request
+from common import GET_URL, POST_BACKUP_SCRIPT, ROTATE, SERVERS_URL, logger
 
 
 # remove backups when limits reached
 def remove_old_backup(server):
-    headers = {"Authorization": API_KEY}
     server_id = server["attributes"]["identifier"]
     backup_limit = server["attributes"]["feature_limits"]["backups"]
     logger.info(f"  backup limit is {backup_limit}")
     try:
         url = f"{SERVERS_URL}{server_id}/backups"
-        response = requests.get(url, data="", headers=headers)
+        response = request(url)
 
-        if response.status_code == 200:
-            backups = sorted(
-                response.json()["data"], key=lambda b: b["attributes"]["created_at"]
-            )
+        backups = sorted(
+            response["data"], key=lambda b: b["attributes"]["created_at"]
+        )
 
-            if len(backups) >= backup_limit:
-                # backup limit reached
-                # remove oldest N backups
-                if backup_limit == 0:
-                    N = len(backups)  # remove all backup
-                else:
-                    N = (
-                        len(backups) - backup_limit + 1
-                    )  # remove difference and leave space for one more
-
-                for i in range(0, N):
-                    # delete oldest backup
-                    if backups[i]["attributes"]["is_locked"]:
-                        logger.warning(
-                            f"  backup {backups[i]['attributes']['name']} is locked, skipping"
-                        )
-                    else:
-                        url = f"{SERVERS_URL}{server_id}/backups/{backups[i]['attributes']['uuid']}"
-                        logger.info(
-                            f"  removing backup: \"{backups[i]['attributes']['name']}\""
-                        )
-
-                        response = requests.delete(url, headers=headers)
-                        if response.status_code == 204:
-                            logger.info("   -> success")
-                        else:
-                            logger.info(
-                                f"   -> failed with status {response.status_code}"
-                            )
-
-                        time.sleep(2)
+        if len(backups) >= backup_limit:
+            # backup limit reached
+            # remove oldest N backups
+            if backup_limit == 0:
+                n = len(backups)  # remove all backup
-            else:
-                logger.info("  nothing to delete")
+            else:
+                n = (
+                    len(backups) - backup_limit + 1
+                )  # remove difference and leave space for one more
+
+            for i in range(n):
+                # delete oldest backup
+                if backups[i]["attributes"]["is_locked"]:
+                    logger.warning(
+                        f"  backup {backups[i]['attributes']['name']} is locked, skipping"
+                    )
+                else:
+                    url = f"{SERVERS_URL}{server_id}/backups/{backups[i]['attributes']['uuid']}"
+                    logger.info(
+                        f"  removing backup: \"{backups[i]['attributes']['name']}\""
+                    )
+
+                    request(url, method='DELETE')
+
+                    time.sleep(2)
+        else:
+            logger.info("  nothing to delete")
 
     except requests.exceptions.RequestException as e:
         logger.error(
@@ -105,8 +57,6 @@ def remove_old_backup(server):
 def backup_servers(server_list):
     failed_servers = []
 
-    headers = {"Authorization": API_KEY}
-
     for server in server_list:
         server_attr = server["attributes"]
         server_id = server_attr["identifier"]
@@ -116,50 +66,34 @@ def backup_servers(server_list):
         logger.info(f"processing server {server_id} '{server_name}'")
 
         try:
-            if ROTATE.lower() == "true":
+            if ROTATE:
                 remove_old_backup(server)
 
             if backup_limit == 0:
                 logger.info("  skipping backup")
                 continue
 
             url = f"{SERVERS_URL}{server_id}/backups"
-            response = requests.post(url, data="", headers=headers)
-            if response.status_code == 200:
-                backup = response.json()
-                backup_uuid = backup["attributes"]["uuid"]
-
-                logger.info("  backup started")
-
-                if POST_BACKUP_SCRIPT:
-                    # we should only run this when the backup has been finished....
-                    # this will prevent that the backups are made concurrently, thats OK, maybe it is
-                    # even better as it reduces overall load
-                    logger.info("  waiting for backup to finish...")
-                    while True:
-                        response = requests.get(
-                            f"{url}/{backup_uuid}", data="", headers=headers
-                        )
-
-                        if response.status_code == 200:
-                            if response.json()["attributes"]["completed_at"]:
-                                logger.info("  running post-backup script")
-                                run_script(server_id, backup_uuid)
-                                break
-                        else:
-                            logger.error(
-                                f"  failed to get backup info, giving up to run post-backup script. Error code: {response.status_code} {response.text}"
-                            )
-                            break
-
-                        time.sleep(10)
-                time.sleep(2)
-            else:
-                failed_servers.append(server_id)
-                logger.error(
-                    f"  backup failed. Error code: {response.status_code} {response.text}"
-                )
-                time.sleep(30)
+            backup = request(url, method='POST')
+            backup_uuid = backup["attributes"]["uuid"]
+
+            logger.info("  backup started")
+
+            if POST_BACKUP_SCRIPT:
+                # we should only run this when the backup has been finished....
+                # this will prevent that the backups are made concurrently, thats OK, maybe it is
+                # even better as it reduces overall load
+                logger.info("  waiting for backup to finish...")
+                while True:
+                    response = request(f"{url}/{backup_uuid}")
+
+                    if response["attributes"]["completed_at"]:
+                        logger.info("  running post-backup script")
+                        run_script(server_id, backup_uuid)
+                        break
+
+                    time.sleep(10)
+            time.sleep(2)
         except requests.exceptions.RequestException as e:
             logger.error(
                 f"  An error occurred while making the backup request: {str(e)}"
@@ -168,35 +102,12 @@ def backup_servers(server_list):
             )
             time.sleep(30)
 
 
-def retry_failed_servers(failed_servers, headers):
-    for server_id in failed_servers:
-        try:
-            url = f"{SERVERS_URL}{server_id}/backups"
-            retry = requests.post(url, data="", headers=headers)
-
-            if retry.status_code == 200:
-                logger.info(
-                    f"  Retry succeeded for server {server_id}. Status code: {retry.status_code}"
-                )
-                time.sleep(5)
-            else:
-                logger.error(
-                    f"  Retry failed for server {server_id}. Error code: {retry.status_code}"
-                )
-                notify_error()
-        except requests.exceptions.RequestException as e:
-            logger.error(
-                f"  An error occurred while retrying the backup request for server {server_id}: {str(e)}"
-            )
-            notify_error()
-
-
 def run_script(server_id, backup_uuid):
     exit_status = os.system(f"sh {POST_BACKUP_SCRIPT} {server_id} {backup_uuid}")
     if exit_status > 0:
         logger.error(f"  post backup script: failed with exit status {exit_status}")
 
 
-server_list = make_request()
-
-backup_servers(server_list)
+if __name__ == '__main__':
+    server_list = request(GET_URL)
+    backup_servers(server_list)
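
The rotation arithmetic in remove_old_backup is unchanged in substance: once the feature limit is reached, just enough of the oldest backups are deleted to leave one free slot (locked backups are skipped, so fewer may actually be removed). A worked example with hypothetical numbers:

# backup_limit == 0 means "remove everything"; otherwise free exactly one slot.
existing, limit = 8, 8
n = existing if limit == 0 else existing - limit + 1
print(n)  # 1 -> only the single oldest (unlocked) backup is removed
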