diff --git a/Dockerfile b/Dockerfile
index 9db5f69..457372c 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,19 +1,19 @@
-FROM python:3.6-alpine AS build
+FROM python:3.10-alpine AS build
 RUN apk add build-base python3-dev linux-headers pcre-dev jpeg-dev zlib-dev
 RUN pip install --upgrade pip
 RUN pip install yt-dlp pillow uwsgi
 
-FROM python:3.6-alpine AS deps
+FROM python:3.10-alpine AS deps
 WORKDIR /twitfix
 COPY requirements.txt requirements.txt
-COPY --from=build /usr/local/lib/python3.6/site-packages /usr/local/lib/python3.6/site-packages
+COPY --from=build /usr/local/lib/python3.10/site-packages /usr/local/lib/python3.10/site-packages
 RUN pip install -r requirements.txt
 
-FROM python:3.6-alpine AS runner
+FROM python:3.10-alpine AS runner
 EXPOSE 9000
 RUN apk add pcre-dev jpeg-dev zlib-dev
 WORKDIR /twitfix
 CMD ["uwsgi", "twitfix.ini"]
 COPY --from=build /usr/local/bin/uwsgi /usr/local/bin/uwsgi
-COPY --from=deps /usr/local/lib/python3.6/site-packages /usr/local/lib/python3.6/site-packages
+COPY --from=deps /usr/local/lib/python3.10/site-packages /usr/local/lib/python3.10/site-packages
 COPY . .
diff --git a/combineImg/__init__.py b/combineImg/__init__.py
index 2784b9b..82dfb2e 100644
--- a/combineImg/__init__.py
+++ b/combineImg/__init__.py
@@ -30,9 +30,10 @@ def scaleImageIterable(args):
     targetWidth = args[1]
     targetHeight = args[2]
     pad=args[3]
+    image = image.convert('RGBA')
+    image = ImageOps.expand(image,20)
     if pad:
-        image = image.convert('RGBA')
-        newImg = ImageOps.pad(image, (targetWidth, targetHeight),color=(0, 0, 0, 0))
+        newImg = ImageOps.contain(image, (targetWidth, targetHeight))
     else:
         newImg = ImageOps.fit(image, (targetWidth, targetHeight)) # scale + crop
     return newImg
@@ -68,7 +69,8 @@ def combineImages(imageArray, totalWidth, totalHeight,pad=True):
             x += image.size[0]
         y += imageArray[0].size[1]
         x = 0
-        newImage.paste(imageArray[2], (x, y))
+        # paste the final image so that it's centered
+        newImage.paste(imageArray[2], (int((totalWidth - imageArray[2].size[0]) / 2), y))
     elif (len(imageArray) == 4): # if there are four images, combine the first two horizontally, then combine the last two vertically
         for image in imageArray[0:2]:
             newImage.paste(image, (x, y))
@@ -91,11 +93,12 @@ def saveImage(image, name):
 def genImage(imageArray):
     totalSize=getTotalImgSize(imageArray)
     combined = combineImages(imageArray, *totalSize)
-    combinedBG = combineImages(imageArray, *totalSize,False)
-    combinedBG = blurImage(combinedBG,50)
-    finalImg = Image.alpha_composite(combinedBG,combined)
-    #finalImg = ImageOps.pad(finalImg, findImageWithMostPixels(imageArray).size,color=(0, 0, 0, 0))
-    finalImg = finalImg.convert('RGB')
+
+    finalImg = combined.convert('RGB')
+
+    bbox = finalImg.getbbox()
+    finalImg = finalImg.crop(bbox)
+
     return finalImg
 
 def downloadImage(url):
diff --git a/msgs.py b/msgs.py
index b9cffdc..770df48 100644
--- a/msgs.py
+++ b/msgs.py
@@ -1,6 +1,6 @@
 failedToScan="Failed to scan your link! This may be due to an incorrect link, private/suspended account, deleted tweet, or Twitter itself might be having issues (Check here: https://api.twitterstat.us/)"
 failedToScanExtra = "\n\nTwitter gave me this error: "
-tweetNotFound="Tweet not found."
+tweetNotFound="Tweet not found. Note that this may be a result of Twitter blocking some tweets from being viewed as of April 8 2023."
 tweetSuspended="This Tweet is from a suspended account."
 tweetDescLimit=340
 
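For reference on the `combineImg` change: `ImageOps.pad` letterboxes an image to the exact target size (which is why the old `genImage` composited a blurred background behind the grid), whereas `ImageOps.contain` only scales it to fit inside the box, so the new code can simply trim the result to its bounding box afterwards. A minimal standalone sketch of that difference, outside the patch and using only Pillow:

```python
from PIL import Image, ImageOps

# A tall 200x400 placeholder image, scaled toward a 400x400 cell
img = Image.new("RGBA", (200, 400), (255, 0, 0, 255))

# Old behaviour: letterboxed with transparent bars to exactly 400x400
padded = ImageOps.pad(img, (400, 400), color=(0, 0, 0, 0))

# New behaviour: scaled to fit inside 400x400, aspect ratio kept, no bars
contained = ImageOps.contain(img, (400, 400))

print(padded.size, contained.size)  # (400, 400) (200, 400)
```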
diff --git a/readme.md b/readme.md
index a566fdf..5eb7e6b 100644
--- a/readme.md
+++ b/readme.md
@@ -39,7 +39,7 @@ vxTwitter generates a config.json in its root directory the first time you run i
 
 **link_cache** - (Options: **db**, **json**)
 
-- **db**: Caches all links to a mongoDB database. This should be used it you are using uWSGI and are not just running the script on its own as one worker
+- **db**: Caches all links to a mongoDB database. This should be used if you are using uWSGI and are not just running the script on its own as one worker
 - **json**: This saves cached links to a local **links.json** file
 - **dynamodb**: Saves cached links to a DynamoDB database - set `table` to the table name to cache links to.
 - **none**: Does not cache requests. Not reccomended as you can easily use up your Twitter API credits with this. Intended for use with another cache system (i.e NGINX uwsgi_cache)
@@ -58,4 +58,4 @@ vxTwitter generates a config.json in its root directory the first time you run i
 
 We check for t.co links in non video tweets, and if one is found, we direct the discord useragent to embed that link directly, this means that twitter links containing youtube / vimeo links will automatically embed those as if you had just directly linked to that content
 
-This project is licensed under the **Do What The Fuck You Want Public License**
\ No newline at end of file
+This project is licensed under the **Do What The Fuck You Want Public License**
diff --git a/requirements.txt b/requirements.txt
index 97306a6..3f58c01 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,7 @@
-pymongo==4.3.3
-boto3==1.26.69
+pymongo==4.3.3
+boto3==1.26.104
 requests==2.28.2
 Pillow==9.4.0
-Flask==2.2.2
+Flask==2.2.3
 Flask-Cors==3.0.10
 yt-dlp==2022.7.18
\ No newline at end of file
diff --git a/test_vx.py b/test_vx.py
index 1a51140..b6a3e0e 100644
--- a/test_vx.py
+++ b/test_vx.py
@@ -7,6 +7,9 @@ import msgs
 from flask.testing import FlaskClient
 client = FlaskClient(twitfix.app)
 
+testUser="https://twitter.com/jack"
+testUserID = "https://twitter.com/i/user/12"
+testUserWeirdURLs=["https://twitter.com/jack?lang=en","https://twitter.com/jack/with_replies","https://twitter.com/jack/media","https://twitter.com/jack/likes","https://twitter.com/jack/with_replies?lang=en","https://twitter.com/jack/media?lang=en","https://twitter.com/jack/likes?lang=en","https://twitter.com/jack/"]
 testTextTweet="https://twitter.com/jack/status/20"
 testVideoTweet="https://twitter.com/Twitter/status/1263145271946551300"
 testMediaTweet="https://twitter.com/Twitter/status/1118295916874739714"
@@ -40,6 +43,25 @@ def test_textTweetExtract():
     assert 'extended_entities' not in tweet
     assert tweet["is_quote_status"]==False
 
+def test_UserExtract():
+    user = twExtract.extractUser(testUser)
+    assert user["screen_name"]=="jack"
+    assert user["id"]==12
+    assert user["created_at"] == "Tue Mar 21 20:50:14 +0000 2006"
+
+def test_UserExtractID():
+    user = twExtract.extractUser(testUserID)
+    assert user["screen_name"]=="jack"
+    assert user["id"]==12
+    assert user["created_at"] == "Tue Mar 21 20:50:14 +0000 2006"
+
+def test_UserExtractWeirdURLs():
+    for url in testUserWeirdURLs:
+        user = twExtract.extractUser(url)
+        assert user["screen_name"]=="jack"
+        assert user["id"]==12
+        assert user["created_at"] == "Tue Mar 21 20:50:14 +0000 2006"
+
 def test_videoTweetExtract():
     tweet = twExtract.extractStatus(testVideoTweet)
     assert tweet["full_text"]==videoVNF_compare['description']
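The new user-extraction tests above push an `/i/user/<id>` URL and several profile-page variants through `twExtract.extractUser`; those URLs are resolved by the `userIDregex` and `userregex` patterns introduced in the `twExtract` diff below. A quick standalone sketch (plain `re`, not part of the patch) of how the two patterns classify the test URLs:

```python
import re

# Patterns copied from twExtract/__init__.py as added in this diff
userregex = r"^https?:\/\/(?:www\.)?twitter\.com\/(?:#!\/)?@?([^/?#]*)(?:[?#/].*)?$"
userIDregex = r"\/i\/user\/(\d+)"

urls = [
    "https://twitter.com/jack?lang=en",
    "https://twitter.com/jack/with_replies",
    "https://twitter.com/i/user/12",
]

for url in urls:
    # ID URLs are tried first, mirroring the order used in extractUser
    m = re.search(userIDregex, url) or re.search(userregex, url)
    print(url, "->", m.group(1))

# https://twitter.com/jack?lang=en -> jack
# https://twitter.com/jack/with_replies -> jack
# https://twitter.com/i/user/12 -> 12
```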
diff --git a/twExtract/__init__.py b/twExtract/__init__.py
index bb3f940..82c6227 100644
--- a/twExtract/__init__.py
+++ b/twExtract/__init__.py
@@ -4,14 +4,15 @@ import json
 import requests
 import re
 from . import twExtractError
-
+bearer="Bearer AAAAAAAAAAAAAAAAAAAAAPYXBAAAAAAACLXUNDekMxqa8h%2F40K4moUkGsoc%3DTYfbDKbT3jJPCEVnMYqilB28NHfOPqkca3qaAxGfsyKCs0wRbw"
 guestToken=None
 pathregex = r"\w{1,15}\/(status|statuses)\/(\d{2,20})"
-
+userregex = r"^https?:\/\/(?:www\.)?twitter\.com\/(?:#!\/)?@?([^/?#]*)(?:[?#/].*)?$"
+userIDregex = r"\/i\/user\/(\d+)"
 def getGuestToken():
     global guestToken
     if guestToken is None:
-        r = requests.post("https://api.twitter.com/1.1/guest/activate.json", headers={"Authorization":"Bearer AAAAAAAAAAAAAAAAAAAAAPYXBAAAAAAACLXUNDekMxqa8h%2F40K4moUkGsoc%3DTYfbDKbT3jJPCEVnMYqilB28NHfOPqkca3qaAxGfsyKCs0wRbw"})
+        r = requests.post("https://api.twitter.com/1.1/guest/activate.json", headers={"Authorization":bearer})
         guestToken = json.loads(r.text)["guest_token"]
     return guestToken
 
@@ -40,7 +41,7 @@ def extractStatus(url):
         # get guest token
         guestToken = getGuestToken()
         # get tweet
-        tweet = requests.get("https://api.twitter.com/1.1/statuses/show/" + twid + ".json?tweet_mode=extended&cards_platform=Web-12&include_cards=1&include_reply_count=1&include_user_entities=0", headers={"Authorization":"Bearer AAAAAAAAAAAAAAAAAAAAAPYXBAAAAAAACLXUNDekMxqa8h%2F40K4moUkGsoc%3DTYfbDKbT3jJPCEVnMYqilB28NHfOPqkca3qaAxGfsyKCs0wRbw", "x-guest-token":guestToken})
+        tweet = requests.get("https://api.twitter.com/1.1/statuses/show/" + twid + ".json?tweet_mode=extended&cards_platform=Web-12&include_cards=1&include_reply_count=1&include_user_entities=0", headers={"Authorization":bearer, "x-guest-token":guestToken})
         output = tweet.json()
         if "errors" in output:
             # pick the first error and create a twExtractError
@@ -50,6 +51,32 @@ def extractStatus(url):
     except Exception as e:
         return extractStatus_fallback(url)
 
+def extractUser(url):
+    useId=True
+    m = re.search(userIDregex, url)
+    if m is None:
+        m = re.search(userregex, url)
+        if m is None:
+            raise twExtractError.TwExtractError(400, "Invalid URL")
+        else:
+            useId=False
+    screen_name = m.group(1)
+    # get guest token
+    guestToken = getGuestToken()
+    # get user
+    if not useId:
+        user = requests.get(f"https://api.twitter.com/1.1/users/show.json?screen_name={screen_name}",headers={"Authorization":bearer, "x-guest-token":guestToken})
+    else:
+        user = requests.get(f"https://api.twitter.com/1.1/users/show.json?user_id={screen_name}",headers={"Authorization":bearer, "x-guest-token":guestToken})
+    output = user.json()
+    if "errors" in output:
+        # pick the first error and create a twExtractError
+        error = output["errors"][0]
+        raise twExtractError.TwExtractError(error["code"], error["message"])
+    return output
+
+#def extractUserByID(id):
+
 def lambda_handler(event, context):
     if ("queryStringParameters" not in event):
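Taken together, `extractUser` accepts either a screen-name URL or an `/i/user/<id>` URL and returns the raw `users/show.json` payload, raising `TwExtractError` for an invalid URL or an error passed through from the API. A rough usage sketch, assuming the guest-token endpoint still behaves as it did when this patch was written:

```python
import twExtract
from twExtract import twExtractError

try:
    user = twExtract.extractUser("https://twitter.com/jack")
    print(user["screen_name"], user["id"], user["created_at"])
except twExtractError.TwExtractError as err:
    # Raised for an invalid URL (code 400) or any error returned by the API
    print("extract failed:", err)
```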