Merge pull request #75 from dylanpdx/main

Update for recent twitter changes
This commit is contained in:
Dylan 2023-04-08 16:31:47 +01:00 committed by GitHub
commit a987315add
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 75 additions and 23 deletions

View File

@@ -1,19 +1,19 @@
FROM python:3.6-alpine AS build FROM python:3.10-alpine AS build
RUN apk add build-base python3-dev linux-headers pcre-dev jpeg-dev zlib-dev RUN apk add build-base python3-dev linux-headers pcre-dev jpeg-dev zlib-dev
RUN pip install --upgrade pip RUN pip install --upgrade pip
RUN pip install yt-dlp pillow uwsgi RUN pip install yt-dlp pillow uwsgi
FROM python:3.6-alpine AS deps FROM python:3.10-alpine AS deps
WORKDIR /twitfix WORKDIR /twitfix
COPY requirements.txt requirements.txt COPY requirements.txt requirements.txt
COPY --from=build /usr/local/lib/python3.6/site-packages /usr/local/lib/python3.6/site-packages COPY --from=build /usr/local/lib/python3.10/site-packages /usr/local/lib/python3.10/site-packages
RUN pip install -r requirements.txt RUN pip install -r requirements.txt
FROM python:3.6-alpine AS runner FROM python:3.10-alpine AS runner
EXPOSE 9000 EXPOSE 9000
RUN apk add pcre-dev jpeg-dev zlib-dev RUN apk add pcre-dev jpeg-dev zlib-dev
WORKDIR /twitfix WORKDIR /twitfix
CMD ["uwsgi", "twitfix.ini"] CMD ["uwsgi", "twitfix.ini"]
COPY --from=build /usr/local/bin/uwsgi /usr/local/bin/uwsgi COPY --from=build /usr/local/bin/uwsgi /usr/local/bin/uwsgi
COPY --from=deps /usr/local/lib/python3.6/site-packages /usr/local/lib/python3.6/site-packages COPY --from=deps /usr/local/lib/python3.10/site-packages /usr/local/lib/python3.10/site-packages
COPY . . COPY . .

View File

@@ -30,9 +30,10 @@ def scaleImageIterable(args):
targetWidth = args[1] targetWidth = args[1]
targetHeight = args[2] targetHeight = args[2]
pad=args[3] pad=args[3]
image = image.convert('RGBA')
image = ImageOps.expand(image,20)
if pad: if pad:
image = image.convert('RGBA') newImg = ImageOps.contain(image, (targetWidth, targetHeight))
newImg = ImageOps.pad(image, (targetWidth, targetHeight),color=(0, 0, 0, 0))
else: else:
newImg = ImageOps.fit(image, (targetWidth, targetHeight)) # scale + crop newImg = ImageOps.fit(image, (targetWidth, targetHeight)) # scale + crop
return newImg return newImg
@@ -68,7 +69,8 @@ def combineImages(imageArray, totalWidth, totalHeight,pad=True):
x += image.size[0] x += image.size[0]
y += imageArray[0].size[1] y += imageArray[0].size[1]
x = 0 x = 0
newImage.paste(imageArray[2], (x, y)) # paste the final image so that it's centered
newImage.paste(imageArray[2], (int((totalWidth - imageArray[2].size[0]) / 2), y))
elif (len(imageArray) == 4): # if there are four images, combine the first two horizontally, then combine the last two vertically elif (len(imageArray) == 4): # if there are four images, combine the first two horizontally, then combine the last two vertically
for image in imageArray[0:2]: for image in imageArray[0:2]:
newImage.paste(image, (x, y)) newImage.paste(image, (x, y))
@@ -91,11 +93,12 @@ def saveImage(image, name):
def genImage(imageArray): def genImage(imageArray):
totalSize=getTotalImgSize(imageArray) totalSize=getTotalImgSize(imageArray)
combined = combineImages(imageArray, *totalSize) combined = combineImages(imageArray, *totalSize)
combinedBG = combineImages(imageArray, *totalSize,False)
combinedBG = blurImage(combinedBG,50) finalImg = combined.convert('RGB')
finalImg = Image.alpha_composite(combinedBG,combined)
#finalImg = ImageOps.pad(finalImg, findImageWithMostPixels(imageArray).size,color=(0, 0, 0, 0)) bbox = finalImg.getbbox()
finalImg = finalImg.convert('RGB') finalImg = finalImg.crop(bbox)
return finalImg return finalImg
def downloadImage(url): def downloadImage(url):

View File

@@ -1,6 +1,6 @@
failedToScan="Failed to scan your link! This may be due to an incorrect link, private/suspended account, deleted tweet, or Twitter itself might be having issues (Check here: https://api.twitterstat.us/)" failedToScan="Failed to scan your link! This may be due to an incorrect link, private/suspended account, deleted tweet, or Twitter itself might be having issues (Check here: https://api.twitterstat.us/)"
failedToScanExtra = "\n\nTwitter gave me this error: " failedToScanExtra = "\n\nTwitter gave me this error: "
tweetNotFound="Tweet not found." tweetNotFound="Tweet not found. Note that this may be a result of Twitter blocking some tweets from being viewed as of April 8 2023."
tweetSuspended="This Tweet is from a suspended account." tweetSuspended="This Tweet is from a suspended account."
tweetDescLimit=340 tweetDescLimit=340

View File

@@ -39,7 +39,7 @@ vxTwitter generates a config.json in its root directory the first time you run i
**link_cache** - (Options: **db**, **json**) **link_cache** - (Options: **db**, **json**)
- **db**: Caches all links to a mongoDB database. This should be used it you are using uWSGI and are not just running the script on its own as one worker - **db**: Caches all links to a mongoDB database. This should be used if you are using uWSGI and are not just running the script on its own as one worker
- **json**: This saves cached links to a local **links.json** file - **json**: This saves cached links to a local **links.json** file
- **dynamodb**: Saves cached links to a DynamoDB database - set `table` to the table name to cache links to. - **dynamodb**: Saves cached links to a DynamoDB database - set `table` to the table name to cache links to.
- **none**: Does not cache requests. Not recommended as you can easily use up your Twitter API credits with this. Intended for use with another cache system (i.e. NGINX uwsgi_cache) - **none**: Does not cache requests. Not recommended as you can easily use up your Twitter API credits with this. Intended for use with another cache system (i.e. NGINX uwsgi_cache)
@@ -58,4 +58,4 @@ vxTwitter generates a config.json in its root directory the first time you run i
We check for t.co links in non video tweets, and if one is found, we direct the discord useragent to embed that link directly, this means that twitter links containing youtube / vimeo links will automatically embed those as if you had just directly linked to that content We check for t.co links in non video tweets, and if one is found, we direct the discord useragent to embed that link directly, this means that twitter links containing youtube / vimeo links will automatically embed those as if you had just directly linked to that content
This project is licensed under the **Do What The Fuck You Want Public License** This project is licensed under the **Do What The Fuck You Want Public License**

View File

@@ -1,7 +1,7 @@
pymongo==4.3.3 pymongo==4.3.3
boto3==1.26.69 boto3==1.26.104
requests==2.28.2 requests==2.28.2
Pillow==9.4.0 Pillow==9.4.0
Flask==2.2.2 Flask==2.2.3
Flask-Cors==3.0.10 Flask-Cors==3.0.10
yt-dlp==2022.7.18 yt-dlp==2022.7.18

View File

@@ -7,6 +7,9 @@ import msgs
from flask.testing import FlaskClient from flask.testing import FlaskClient
client = FlaskClient(twitfix.app) client = FlaskClient(twitfix.app)
testUser="https://twitter.com/jack"
testUserID = "https://twitter.com/i/user/12"
testUserWeirdURLs=["https://twitter.com/jack?lang=en","https://twitter.com/jack/with_replies","https://twitter.com/jack/media","https://twitter.com/jack/likes","https://twitter.com/jack/with_replies?lang=en","https://twitter.com/jack/media?lang=en","https://twitter.com/jack/likes?lang=en","https://twitter.com/jack/"]
testTextTweet="https://twitter.com/jack/status/20" testTextTweet="https://twitter.com/jack/status/20"
testVideoTweet="https://twitter.com/Twitter/status/1263145271946551300" testVideoTweet="https://twitter.com/Twitter/status/1263145271946551300"
testMediaTweet="https://twitter.com/Twitter/status/1118295916874739714" testMediaTweet="https://twitter.com/Twitter/status/1118295916874739714"
@@ -40,6 +43,25 @@ def test_textTweetExtract():
assert 'extended_entities' not in tweet assert 'extended_entities' not in tweet
assert tweet["is_quote_status"]==False assert tweet["is_quote_status"]==False
def test_UserExtract():
user = twExtract.extractUser(testUser)
assert user["screen_name"]=="jack"
assert user["id"]==12
assert user["created_at"] == "Tue Mar 21 20:50:14 +0000 2006"
def test_UserExtractID():
user = twExtract.extractUser(testUserID)
assert user["screen_name"]=="jack"
assert user["id"]==12
assert user["created_at"] == "Tue Mar 21 20:50:14 +0000 2006"
def test_UserExtractWeirdURLs():
for url in testUserWeirdURLs:
user = twExtract.extractUser(url)
assert user["screen_name"]=="jack"
assert user["id"]==12
assert user["created_at"] == "Tue Mar 21 20:50:14 +0000 2006"
def test_videoTweetExtract(): def test_videoTweetExtract():
tweet = twExtract.extractStatus(testVideoTweet) tweet = twExtract.extractStatus(testVideoTweet)
assert tweet["full_text"]==videoVNF_compare['description'] assert tweet["full_text"]==videoVNF_compare['description']

View File

@@ -4,14 +4,15 @@ import json
import requests import requests
import re import re
from . import twExtractError from . import twExtractError
bearer="Bearer AAAAAAAAAAAAAAAAAAAAAPYXBAAAAAAACLXUNDekMxqa8h%2F40K4moUkGsoc%3DTYfbDKbT3jJPCEVnMYqilB28NHfOPqkca3qaAxGfsyKCs0wRbw"
guestToken=None guestToken=None
pathregex = r"\w{1,15}\/(status|statuses)\/(\d{2,20})" pathregex = r"\w{1,15}\/(status|statuses)\/(\d{2,20})"
userregex = r"^https?:\/\/(?:www\.)?twitter\.com\/(?:#!\/)?@?([^/?#]*)(?:[?#/].*)?$"
userIDregex = r"\/i\/user\/(\d+)"
def getGuestToken(): def getGuestToken():
global guestToken global guestToken
if guestToken is None: if guestToken is None:
r = requests.post("https://api.twitter.com/1.1/guest/activate.json", headers={"Authorization":"Bearer AAAAAAAAAAAAAAAAAAAAAPYXBAAAAAAACLXUNDekMxqa8h%2F40K4moUkGsoc%3DTYfbDKbT3jJPCEVnMYqilB28NHfOPqkca3qaAxGfsyKCs0wRbw"}) r = requests.post("https://api.twitter.com/1.1/guest/activate.json", headers={"Authorization":bearer})
guestToken = json.loads(r.text)["guest_token"] guestToken = json.loads(r.text)["guest_token"]
return guestToken return guestToken
@@ -40,7 +41,7 @@ def extractStatus(url):
# get guest token # get guest token
guestToken = getGuestToken() guestToken = getGuestToken()
# get tweet # get tweet
tweet = requests.get("https://api.twitter.com/1.1/statuses/show/" + twid + ".json?tweet_mode=extended&cards_platform=Web-12&include_cards=1&include_reply_count=1&include_user_entities=0", headers={"Authorization":"Bearer AAAAAAAAAAAAAAAAAAAAAPYXBAAAAAAACLXUNDekMxqa8h%2F40K4moUkGsoc%3DTYfbDKbT3jJPCEVnMYqilB28NHfOPqkca3qaAxGfsyKCs0wRbw", "x-guest-token":guestToken}) tweet = requests.get("https://api.twitter.com/1.1/statuses/show/" + twid + ".json?tweet_mode=extended&cards_platform=Web-12&include_cards=1&include_reply_count=1&include_user_entities=0", headers={"Authorization":bearer, "x-guest-token":guestToken})
output = tweet.json() output = tweet.json()
if "errors" in output: if "errors" in output:
# pick the first error and create a twExtractError # pick the first error and create a twExtractError
@@ -50,6 +51,32 @@ def extractStatus(url):
except Exception as e: except Exception as e:
return extractStatus_fallback(url) return extractStatus_fallback(url)
def extractUser(url):
useId=True
m = re.search(userIDregex, url)
if m is None:
m = re.search(userregex, url)
if m is None:
raise twExtractError.TwExtractError(400, "Invalid URL")
else:
useId=False
screen_name = m.group(1)
# get guest token
guestToken = getGuestToken()
# get user
if not useId:
user = requests.get(f"https://api.twitter.com/1.1/users/show.json?screen_name={screen_name}",headers={"Authorization":bearer, "x-guest-token":guestToken})
else:
user = requests.get(f"https://api.twitter.com/1.1/users/show.json?user_id={screen_name}",headers={"Authorization":bearer, "x-guest-token":guestToken})
output = user.json()
if "errors" in output:
# pick the first error and create a twExtractError
error = output["errors"][0]
raise twExtractError.TwExtractError(error["code"], error["message"])
return output
#def extractUserByID(id):
def lambda_handler(event, context): def lambda_handler(event, context):
if ("queryStringParameters" not in event): if ("queryStringParameters" not in event):