# PhotosAPI/extensions/videos.py

import re
import pickle
from secrets import token_urlsafe
from shutil import move
from typing import Union
from magic import Magic
from datetime import datetime, timezone
from os import makedirs, path, remove
from classes.models import Video, SearchResultsVideo
#from modules.unified_exif_reader import extract_location
from modules.security import User, get_current_active_user
from modules.app import app
from modules.database import col_videos, col_albums, col_tokens
from bson.objectid import ObjectId
from bson.errors import InvalidId
from pymongo import DESCENDING
from fastapi import HTTPException, UploadFile, Security
from fastapi.responses import UJSONResponse, Response
from starlette.status import HTTP_204_NO_CONTENT, HTTP_400_BAD_REQUEST, HTTP_401_UNAUTHORIZED, HTTP_404_NOT_FOUND, HTTP_422_UNPROCESSABLE_ENTITY
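
# This extension registers the video endpoints of PhotosAPI on the shared FastAPI
# instance imported from modules.app:
#
#   POST   /albums/{album}/videos        - upload a video into an album
#   GET    /videos/{id}                  - download a video by its id
#   PUT    /videos/{id}                  - move a video into another album
#   PATCH  /videos/{id}                  - change a video's caption
#   DELETE /videos/{id}                  - delete a video and its file
#   GET    /albums/{album}/videos        - search videos by filename and/or caption
#   GET    /albums/{album}/videos/token  - continue a paginated search with a token
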
@app.post("/albums/{album}/videos", response_class=UJSONResponse, response_model=Video, description="Upload a video to album")
async def video_upload(file: UploadFile, album: str, caption: Union[str, None] = None, current_user: User = Security(get_current_active_user, scopes=["videos.write"])):
    if col_albums.find_one( {"user": current_user.user, "name": album} ) is None:
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail=f"Provided album '{album}' does not exist.")

    # if not file.content_type.startswith("video"):
    #     raise HTTPException(status_code=HTTP_406_NOT_ACCEPTABLE, detail="Provided file is not a video, not accepting.")

    makedirs(path.join("data", "users", current_user.user, "albums", album), exist_ok=True)

    # Avoid overwriting an existing file by appending an upload timestamp to the base name
    filename = file.filename
    if path.exists(path.join("data", "users", current_user.user, "albums", album, file.filename)):
        base_name = file.filename.split(".")[:-1]
        extension = file.filename.split(".")[-1]
        filename = ".".join(base_name)+f"_{int(datetime.now().timestamp())}."+extension

    with open(path.join("data", "users", current_user.user, "albums", album, filename), "wb") as f:
        f.write(await file.read())

    # file_hash = await get_phash(path.join("data", "users", current_user.user, "albums", album, filename))
    # duplicates = await get_duplicates(file_hash, album)
    # if len(duplicates) > 0 and ignore_duplicates is False:
    #     return UJSONResponse(
    #         {
    #             "detail": "video duplicates found. Pass 'ignore_duplicates=true' to ignore.",
    #             "duplicates": duplicates
    #         },
    #         status_code=HTTP_409_CONFLICT
    #     )

    # coords = extract_location(path.join("data", "users", current_user.user, "albums", album, filename))

    uploaded = col_videos.insert_one(
        {
            "user": current_user.user,
            "album": album,
            "filename": filename,
            "dates": {
                "uploaded": datetime.now(tz=timezone.utc),
                "modified": datetime.now(tz=timezone.utc)
            },
            "caption": caption,
            # "location": [
            #     coords["lng"],
            #     coords["lat"],
            #     coords["alt"]
            # ]
        }
    )

    return UJSONResponse(
        {
            "id": uploaded.inserted_id.__str__(),
            "album": album,
            "filename": filename
        }
    )
@app.get("/videos/{id}", description="Get a video by id")
async def video_get(id: str, current_user: User = Security(get_current_active_user, scopes=["videos.read"])):
    try:
        video = col_videos.find_one( {"_id": ObjectId(id)} )
        if video is None:
            raise InvalidId(id)
    except InvalidId:
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail="Could not find a video with such id.")

    video_path = path.join("data", "users", current_user.user, "albums", video["album"], video["filename"])

    # Detect the MIME type from the file contents so the response is served with the right media type
    mime = Magic(mime=True).from_file(video_path)

    with open(video_path, "rb") as f:
        video_file = f.read()

    return Response(video_file, media_type=mime)
@app.put("/videos/{id}", description="Move a video into another album")
async def video_move(id: str, album: str, current_user: User = Security(get_current_active_user, scopes=["videos.write"])):
    try:
        video = col_videos.find_one( {"_id": ObjectId(id)} )
        if video is None:
            raise InvalidId(id)
    except InvalidId:
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail="Could not find a video with such id.")

    if col_albums.find_one( {"user": current_user.user, "name": album} ) is None:
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail=f"Provided album '{album}' does not exist.")

    # Rename the file if the destination album already contains one with the same name
    if path.exists(path.join("data", "users", current_user.user, "albums", album, video["filename"])):
        base_name = video["filename"].split(".")[:-1]
        extension = video["filename"].split(".")[-1]
        filename = ".".join(base_name)+f"_{int(datetime.now().timestamp())}."+extension
    else:
        filename = video["filename"]

    col_videos.find_one_and_update( {"_id": ObjectId(id)}, {"$set": {"album": album, "filename": filename, "dates.modified": datetime.now(tz=timezone.utc)}} )

    move(
        path.join("data", "users", current_user.user, "albums", video["album"], video["filename"]),
        path.join("data", "users", current_user.user, "albums", album, filename)
    )

    return UJSONResponse(
        {
            "id": video["_id"].__str__(),
            "filename": filename
        }
    )
@app.patch("/videos/{id}", description="Change properties of a video")
async def video_patch(id: str, caption: str, current_user: User = Security(get_current_active_user, scopes=["videos.write"])):
    try:
        video = col_videos.find_one( {"_id": ObjectId(id)} )
        if video is None:
            raise InvalidId(id)
    except InvalidId:
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail="Could not find a video with such id.")

    col_videos.find_one_and_update( {"_id": ObjectId(id)}, {"$set": {"caption": caption, "dates.modified": datetime.now(tz=timezone.utc)}} )

    return UJSONResponse(
        {
            "id": video["_id"].__str__(),
            "filename": video["filename"],
            "caption": caption
        }
    )
@app.delete("/videos/{id}", description="Delete a video by id", status_code=HTTP_204_NO_CONTENT)
async def video_delete(id: str, current_user: User = Security(get_current_active_user, scopes=["videos.write"])):
    try:
        video = col_videos.find_one_and_delete( {"_id": ObjectId(id)} )
        if video is None:
            raise InvalidId(id)
    except InvalidId:
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail="Could not find a video with such id.")

    album = col_albums.find_one( {"name": video["album"]} )

    # Remove the file from disk as well as the database record
    remove(path.join("data", "users", current_user.user, "albums", video["album"], video["filename"]))

    return Response(status_code=HTTP_204_NO_CONTENT)
@app.get("/albums/{album}/videos", description="Find a video by filename", response_class=UJSONResponse, response_model=SearchResultsVideo)
async def video_find(album: str, q: Union[str, None] = None, caption: Union[str, None] = None, page: int = 1, page_size: int = 100, current_user: User = Security(get_current_active_user, scopes=["videos.list"])):
    if col_albums.find_one( {"user": current_user.user, "name": album} ) is None:
        raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail=f"Provided album '{album}' does not exist.")

    if page <= 0 or page_size <= 0:
        raise HTTPException(status_code=HTTP_400_BAD_REQUEST, detail="Parameters 'page' and 'page_size' must be greater than or equal to 1.")

    output = {"results": []}
    skip = (page-1)*page_size

    # Build the filter from whichever of the filename query and the caption were provided
    if q is None and caption is None:
        raise HTTPException(status_code=HTTP_422_UNPROCESSABLE_ENTITY, detail="You must provide a query or a caption to look for videos.")
    elif q is None and caption is not None:
        db_query = {"user": current_user.user, "album": album, "caption": re.compile(caption)}
    elif q is not None and caption is None:
        db_query = {"user": current_user.user, "album": album, "filename": re.compile(q)}
    else:
        db_query = {"user": current_user.user, "album": album, "filename": re.compile(q), "caption": re.compile(caption)}

    videos = list(col_videos.find(db_query, limit=page_size, skip=skip).sort('dates.uploaded', DESCENDING))

    for video in videos:
        output["results"].append({"id": video["_id"].__str__(), "filename": video["filename"], "caption": video["caption"]})

    # If more matches remain beyond this page, issue a continuation token that captures
    # the search parameters and the requesting user for the next page
    if col_videos.count_documents(db_query) > page*page_size:
        token = str(token_urlsafe(32))
        col_tokens.insert_one( {"token": token, "query": q, "caption": caption, "album": album, "page": page+1, "page_size": page_size, "user": pickle.dumps(current_user)} )
        output["next_page"] = f"/albums/{album}/videos/token?token={token}" # type: ignore
    else:
        output["next_page"] = None # type: ignore

    return UJSONResponse(output)
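
# A successful video_find response is shaped like:
#   {
#       "results": [{"id": "...", "filename": "...", "caption": "..."}, ...],
#       "next_page": "/albums/<album>/videos/token?token=<token>"   # null on the last page
#   }
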
@app.get("/albums/{album}/videos/token", description="Find a video by token", response_class=UJSONResponse, response_model=SearchResultsVideo)
async def video_find_token(token: str):
    found_record = col_tokens.find_one( {"token": token} )

    if found_record is None:
        raise HTTPException(status_code=HTTP_401_UNAUTHORIZED, detail="Invalid search token.")

    # Replay the stored search with the parameters captured when the token was issued
    return await video_find(q=found_record["query"], caption=found_record.get("caption"), album=found_record["album"], page=found_record["page"], page_size=found_record["page_size"], current_user=pickle.loads(found_record["user"]))
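
# Rough usage sketch (illustrative only: the host, the album name "example", the file name
# and the access token are placeholders, and OAuth2 bearer authentication from
# modules.security is assumed):
#   curl -X POST -H "Authorization: Bearer <access_token>" -F "file=@clip.mp4" \
#        "https://<host>/albums/example/videos?caption=Holiday"
#   curl -H "Authorization: Bearer <access_token>" \
#        "https://<host>/albums/example/videos?q=clip&page=1&page_size=50"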