Merge pull request #66 from ToadKing/misskey

add support for misskey and calckey/firefish
Michael 2023-08-05 09:04:08 +01:00 committed by GitHub
commit ffa6617fff
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
3 changed files with 442 additions and 69 deletions


@@ -136,6 +136,7 @@ Option | Required? | Notes |
|`backfill-with-context` | No | Set to `0` to disable fetching remote replies while backfilling profiles. This is enabled by default, but you can disable it, if it's too slow for you.
|`backfill-mentioned-users` | No | Set to `0` to disable backfilling any mentioned users when fetching the home timeline. This is enabled by default, but you can disable it, if it's too slow for you.
| `remember-users-for-hours` | No | How long between back-filling attempts for non-followed accounts? Defaults to `168`, i.e. one week.
| `remember-hosts-for-days` | No | How long should FediFetcher cache host info for? Defaults to `30`.
| `http-timeout` | No | The timeout for any HTTP requests to the Mastodon API in seconds. Defaults to `5`.
| `lock-hours` | No | Determines after how many hours a lock file should be discarded. Not relevant when running the script as GitHub Action, as concurrency is prevented using a different mechanism. Recommended value: `24`.
| `lock-file` | No | Location for the lock file. If not specified, will use `lock.lock` under the state directory. Not relevant when running the script as GitHub Action.
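
The new `remember-hosts-for-days` option controls how long a host's probed info is reused before being re-fetched (see the `seen_hosts` handling later in this diff). A minimal sketch of the expiry rule, assuming a per-host `last_checked` timestamp as stored in the new state file; names here are illustrative:

    from datetime import datetime, timedelta

    def host_cache_expired(last_checked: datetime, remember_hosts_for_days: int = 30) -> bool:
        # a cached host record is discarded once it is older than the configured window
        age = datetime.now(last_checked.tzinfo) - last_checked
        return age > timedelta(days=remember_hosts_for_days)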


@@ -13,6 +13,7 @@ import time
import argparse
import uuid
import git
import defusedxml.ElementTree as ET

argparser=argparse.ArgumentParser()
@@ -29,6 +30,7 @@ argparser.add_argument('--max-bookmarks', required = False, type=int, default=0,
argparser.add_argument('--max-favourites', required = False, type=int, default=0, help="Fetch remote replies to the API key owners Favourites. We'll fetch replies to at most this many favourites")
argparser.add_argument('--from-notifications', required = False, type=int, default=0, help="Backfill accounts of anyone appearing in your notifications, during the last hours")
argparser.add_argument('--remember-users-for-hours', required=False, type=int, default=24*7, help="How long to remember users that you aren't following for, before trying to backfill them again.")
argparser.add_argument('--remember-hosts-for-days', required=False, type=int, default=30, help="How long to remember host info for, before checking again.")
argparser.add_argument('--http-timeout', required = False, type=int, default=5, help="The timeout for any HTTP requests to your own, or other instances.")
argparser.add_argument('--backfill-with-context', required = False, type=int, default=1, help="If enabled, we'll fetch remote replies when backfilling profiles. Set to `0` to disable.")
argparser.add_argument('--backfill-mentioned-users', required = False, type=int, default=1, help="If enabled, we'll backfill any mentioned users when fetching remote replies to timeline posts. Set to `0` to disable.")
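
The flag surfaces in the script as a plain integer argument; a quick check of how the new default behaves (standalone sketch, mirroring the declaration above):

    import argparse

    argparser = argparse.ArgumentParser()
    argparser.add_argument('--remember-hosts-for-days', required=False, type=int, default=30)
    # unset: falls back to 30 days; set: parsed as an int
    assert argparser.parse_args([]).remember_hosts_for_days == 30
    assert argparser.parse_args(['--remember-hosts-for-days', '7']).remember_hosts_for_days == 7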
@@ -66,17 +68,17 @@ def get_favourites(server, access_token, max):
        "Authorization": f"Bearer {access_token}",
    })

def add_user_posts(server, access_token, followings, known_followings, all_known_users, seen_urls, seen_hosts):
    for user in followings:
        if user['acct'] not in all_known_users and not user['url'].startswith(f"https://{server}/"):
            posts = get_user_posts(user, known_followings, server, seen_hosts)
            if(posts != None):
                count = 0
                failed = 0
                for post in posts:
                    if post.get('reblog') is None and post.get('renoteId') is None and post.get('url') is not None and post.get('url') not in seen_urls:
                        added = add_post_with_context(post, server, access_token, seen_urls, seen_hosts)
                        if added is True:
                            seen_urls.add(post['url'])
                            count += 1
@@ -84,10 +86,10 @@ def add_user_posts(server, access_token, followings, know_followings, all_known_users, seen_urls):
                            failed += 1
                log(f"Added {count} posts for user {user['acct']} with {failed} errors")
                if failed == 0:
                    known_followings.add(user['acct'])
                    all_known_users.add(user['acct'])

def add_post_with_context(post, server, access_token, seen_urls, seen_hosts):
    added = add_context_url(post['url'], server, access_token)
    if added is True:
        seen_urls.add(post['url'])
@@ -96,27 +98,72 @@ def add_post_with_context(post, server, access_token, seen_urls):
        parsed = parse_url(post['url'], parsed_urls)
        if parsed == None:
            return True
        known_context_urls = get_all_known_context_urls(server, [post],parsed_urls, seen_hosts)
        add_context_urls(server, access_token, known_context_urls, seen_urls)
        return True
    return False

def get_user_posts(user, known_followings, server, seen_hosts):
    parsed_url = parse_user_url(user['url'])
    if parsed_url == None:
        # We are adding it as 'known' anyway, because we won't be able to fix this.
        known_followings.add(user['acct'])
        return None
    if(parsed_url[0] == server):
        log(f"{user['acct']} is a local user. Skip")
        known_followings.add(user['acct'])
        return None

    post_server = get_server_info(parsed_url[0], seen_hosts)
    if post_server is None:
        log(f'server {parsed_url[0]} not found for post')
        return None
    if post_server['mastodonApiSupport']:
        return get_user_posts_mastodon(parsed_url[1], post_server['webserver'])
    if post_server['lemmyApiSupport']:
        return get_user_posts_lemmy(parsed_url[1], user['url'], post_server['webserver'])
    if post_server['misskeyApiSupport']:
        return get_user_posts_misskey(parsed_url[1], post_server['webserver'])
    log(f'server api unknown for {post_server["webserver"]}, cannot fetch user posts')
    return None
def get_user_posts_mastodon(userName, webserver):
    try:
        user_id = get_user_id(webserver, userName)
    except Exception as ex:
        log(f"Error getting user ID for user {userName}: {ex}")
        return None
    try:
        url = f"https://{webserver}/api/v1/accounts/{user_id}/statuses?limit=40"
        response = get(url)
        if(response.status_code == 200):
            return response.json()
        elif response.status_code == 404:
            raise Exception(
                f"User {userName} was not found on server {webserver}"
            )
        else:
            raise Exception(
                f"Error getting URL {url}. Status code: {response.status_code}"
            )
    except Exception as ex:
        log(f"Error getting posts for user {userName}: {ex}")
        return None
def get_user_posts_lemmy(userName, userUrl, webserver):
    # community
    if re.match(r"^https:\/\/[^\/]+\/c\/", userUrl):
        try:
            url = f"https://{webserver}/api/v3/post/list?community_name={userName}&sort=New&limit=50"
            response = get(url)
            if(response.status_code == 200):
@@ -126,12 +173,13 @@ def get_user_posts(user, know_followings, server):
                return posts
        except Exception as ex:
            log(f"Error getting community posts for community {userName}: {ex}")
            return None

    # user
    if re.match(r"^https:\/\/[^\/]+\/u\/", userUrl):
        try:
            url = f"https://{webserver}/api/v3/user?username={userName}&sort=New&limit=50"
            response = get(url)
            if(response.status_code == 200):
@@ -143,32 +191,53 @@ def get_user_posts(user, know_followings, server):
                return all_posts
        except Exception as ex:
            log(f"Error getting user posts for user {userName}: {ex}")
            return None

def get_user_posts_misskey(userName, webserver):
    # query user info via search api
    # we could filter by host but there's no way to limit that to just the main host on firefish currently
    # on misskey it works if you supply '.' as the host but firefish does not
    userId = None
    try:
        url = f'https://{webserver}/api/users/search-by-username-and-host'
        resp = post(url, { 'username': userName })
        if resp.status_code == 200:
            res = resp.json()
            for user in res:
                if user['host'] is None:
                    userId = user['id']
                    break
        else:
            log(f"Error finding user {userName} from {webserver}. Status Code: {resp.status_code}")
            return None
    except Exception as ex:
        log(f"Error finding user {userName} from {webserver}. Exception: {ex}")
        return None

    if userId is None:
        log(f'Error finding user {userName} from {webserver}: user not found on server in search')
        return None

    try:
        url = f'https://{webserver}/api/users/notes'
        resp = post(url, { 'userId': userId, 'limit': 40 })
        if resp.status_code == 200:
            notes = resp.json()
            for note in notes:
                if note.get('url') is None:
                    # add this to make it look like Mastodon status objects
                    note.update({ 'url': f"https://{webserver}/notes/{note['id']}" })
            return notes
        else:
            log(f"Error getting posts by user {userName} from {webserver}. Status Code: {resp.status_code}")
            return None
    except Exception as ex:
        log(f"Error getting posts by user {userName} from {webserver}. Exception: {ex}")
        return None
def get_new_follow_requests(server, access_token, max, known_followings):
    """Get any new follow requests for the specified user, up to the max number provided"""
@@ -388,7 +457,7 @@ def get_reply_toots(user_id, server, access_token, seen_urls, reply_since):
    )

def get_all_known_context_urls(server, reply_toots, parsed_urls, seen_hosts):
    """get the context toots of the given toots from their original server"""
    known_context_urls = set()
@@ -396,7 +465,7 @@ def get_all_known_context_urls(server, reply_toots, parsed_urls):
        if toot_has_parseable_url(toot, parsed_urls):
            url = toot["url"] if toot["reblog"] is None else toot["reblog"]["url"]
            parsed_url = parse_url(url, parsed_urls)
            context = get_toot_context(parsed_url[0], parsed_url[1], url, seen_hosts)
            if context is not None:
                for item in context:
                    known_context_urls.add(item)
@@ -504,6 +573,11 @@ def parse_url(url, parsed_urls):
        if match is not None:
            parsed_urls[url] = match

    if url not in parsed_urls:
        match = parse_misskey_url(url)
        if match is not None:
            parsed_urls[url] = match

    if url not in parsed_urls:
        log(f"Error parsing toot URL {url}")
        parsed_urls[url] = None
@@ -560,6 +634,15 @@ def parse_pixelfed_url(url):
        return (match.group("server"), match.group("toot_id"))
    return None

def parse_misskey_url(url):
    """parse a Misskey URL and return the server and ID"""
    match = re.match(
        r"https://(?P<server>[^/]+)/notes/(?P<toot_id>[^/]+)", url
    )
    if match is not None:
        return (match.group("server"), match.group("toot_id"))
    return None

def parse_pixelfed_profile_url(url):
    """parse a Pixelfed Profile URL and return the server and username"""
    match = re.match(r"https://(?P<server>[^/]+)/(?P<username>[^/]+)", url)
@@ -606,24 +689,37 @@ def get_redirect_url(url):
    return None

def get_all_context_urls(server, replied_toot_ids, seen_hosts):
    """get the URLs of the context toots of the given toots"""
    return filter(
        lambda url: not url.startswith(f"https://{server}/"),
        itertools.chain.from_iterable(
            get_toot_context(server, toot_id, url, seen_hosts)
            for (url, (server, toot_id)) in replied_toot_ids
        ),
    )

def get_toot_context(server, toot_id, toot_url, seen_hosts):
    """get the URLs of the context toots of the given toot"""
    post_server = get_server_info(server, seen_hosts)
    if post_server is None:
        log(f'server {server} not found for post')
        return []

    if post_server['mastodonApiSupport']:
        return get_mastodon_urls(post_server['webserver'], toot_id, toot_url)
    if post_server['lemmyApiSupport']:
        return get_lemmy_urls(post_server['webserver'], toot_id, toot_url)
    if post_server['misskeyApiSupport']:
        return get_misskey_urls(post_server['webserver'], toot_id, toot_url)

    log(f'unknown server api for {server}')
    return []

def get_mastodon_urls(webserver, toot_id, toot_url):
    url = f"https://{webserver}/api/v1/statuses/{toot_id}/context"
    try:
        resp = get(url)
    except Exception as ex:
@@ -642,16 +738,25 @@ def get_toot_context(server, toot_id, toot_url):
            reset = datetime.strptime(resp.headers['x-ratelimit-reset'], '%Y-%m-%dT%H:%M:%S.%fZ')
            log(f"Rate Limit hit when getting context for {toot_url}. Waiting to retry at {resp.headers['x-ratelimit-reset']}")
            time.sleep((reset - datetime.now()).total_seconds() + 1)
            return get_mastodon_urls(webserver, toot_id, toot_url)

    log(
        f"Error getting context for toot {toot_url}. Status code: {resp.status_code}"
    )
    return []

def get_lemmy_urls(webserver, toot_id, toot_url):
    if toot_url.find("/comment/") != -1:
        return get_lemmy_comment_context(webserver, toot_id, toot_url)
    if toot_url.find("/post/") != -1:
        return get_lemmy_comments_urls(webserver, toot_id, toot_url)
    else:
        log(f'unknown lemmy url type {toot_url}')
        return []

def get_lemmy_comment_context(webserver, toot_id, toot_url):
    """get the URLs of the context toots of the given toot"""
    comment = f"https://{webserver}/api/v3/comment?id={toot_id}"
    try:
        resp = get(comment)
    except Exception as ex:
@@ -662,7 +767,7 @@ def get_comment_context(server, toot_id, toot_url):
        try:
            res = resp.json()
            post_id = res['comment_view']['comment']['post_id']
            return get_lemmy_comments_urls(webserver, post_id, toot_url)
        except Exception as ex:
            log(f"Error parsing context for comment {toot_url}. Exception: {ex}")
            return []
@@ -670,12 +775,12 @@ def get_comment_context(server, toot_id, toot_url):
            reset = datetime.strptime(resp.headers['x-ratelimit-reset'], '%Y-%m-%dT%H:%M:%S.%fZ')
            log(f"Rate Limit hit when getting context for {toot_url}. Waiting to retry at {resp.headers['x-ratelimit-reset']}")
            time.sleep((reset - datetime.now()).total_seconds() + 1)
            return get_lemmy_comment_context(webserver, toot_id, toot_url)

def get_lemmy_comments_urls(webserver, post_id, toot_url):
    """get the URLs of the comments of the given post"""
    urls = []
    url = f"https://{webserver}/api/v3/post?id={post_id}"
    try:
        resp = get(url)
    except Exception as ex:
@@ -691,7 +796,7 @@ def get_comments_urls(server, post_id, toot_url):
        except Exception as ex:
            log(f"Error parsing post {post_id} from {toot_url}. Exception: {ex}")

    url = f"https://{webserver}/api/v3/comment/list?post_id={post_id}&sort=New&limit=50"
    try:
        resp = get(url)
    except Exception as ex:
@@ -711,11 +816,53 @@ def get_comments_urls(server, post_id, toot_url):
            reset = datetime.strptime(resp.headers['x-ratelimit-reset'], '%Y-%m-%dT%H:%M:%S.%fZ')
            log(f"Rate Limit hit when getting comments for {toot_url}. Waiting to retry at {resp.headers['x-ratelimit-reset']}")
            time.sleep((reset - datetime.now()).total_seconds() + 1)
            return get_lemmy_comments_urls(webserver, post_id, toot_url)

    log(f"Error getting comments for post {toot_url}. Status code: {resp.status_code}")
    return []

def get_misskey_urls(webserver, post_id, toot_url):
    """get the URLs of the comments of a given misskey post"""
    urls = []

    url = f"https://{webserver}/api/notes/children"
    try:
        resp = post(url, { 'noteId': post_id, 'limit': 100, 'depth': 12 })
    except Exception as ex:
        log(f"Error getting post {post_id} from {toot_url}. Exception: {ex}")
        return []

    if resp.status_code == 200:
        try:
            res = resp.json()
            log(f"Got children for misskey post {toot_url}")
            list_of_urls = [f'https://{webserver}/notes/{comment_info["id"]}' for comment_info in res]
            urls.extend(list_of_urls)
        except Exception as ex:
            log(f"Error parsing post {post_id} from {toot_url}. Exception: {ex}")
    else:
        log(f"Error getting post {post_id} from {toot_url}. Status Code: {resp.status_code}")

    url = f"https://{webserver}/api/notes/conversation"
    try:
        resp = post(url, { 'noteId': post_id, 'limit': 100 })
    except Exception as ex:
        log(f"Error getting post {post_id} from {toot_url}. Exception: {ex}")
        return []

    if resp.status_code == 200:
        try:
            res = resp.json()
            log(f"Got conversation for misskey post {toot_url}")
            list_of_urls = [f'https://{webserver}/notes/{comment_info["id"]}' for comment_info in res]
            urls.extend(list_of_urls)
        except Exception as ex:
            log(f"Error parsing post {post_id} from {toot_url}. Exception: {ex}")
    else:
        log(f"Error getting post {post_id} from {toot_url}. Status Code: {resp.status_code}")

    return urls

def add_context_urls(server, access_token, context_urls, seen_urls):
    """add the given toot URLs to the server"""
    count = 0
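
Unlike Mastodon, which returns a whole thread from a single `/api/v1/statuses/:id/context` call, Misskey splits it across `notes/children` (descendants) and `notes/conversation` (ancestors), so `get_misskey_urls` above merges two calls. A condensed standalone sketch (placeholder host and note id):

    import requests

    webserver, note_id = "misskey.example", "9i3wcfs8mf"  # placeholders
    thread_ids = []
    for endpoint, payload in [
        ("notes/children", {"noteId": note_id, "limit": 100, "depth": 12}),
        ("notes/conversation", {"noteId": note_id, "limit": 100}),
    ]:
        resp = requests.post(f"https://{webserver}/api/{endpoint}", json=payload, timeout=5)
        if resp.status_code == 200:
            thread_ids += [note["id"] for note in resp.json()]
    urls = [f"https://{webserver}/notes/{i}" for i in thread_ids]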
@@ -845,9 +992,61 @@ def get(url, headers = {}, timeout = 0, max_tries = 5):
            raise Exception(f"Maximum number of retries exceeded for rate limited request {url}")
    return response

def post(url, json, headers = {}, timeout = 0, max_tries = 5):
    """A simple wrapper to make a post request while providing our user agent, and respecting rate limits"""
    h = headers.copy()
    if 'User-Agent' not in h:
        h['User-Agent'] = 'FediFetcher (https://go.thms.uk/mgr)'

    if timeout == 0:
        timeout = arguments.http_timeout

    response = requests.post( url, json=json, headers= h, timeout=timeout)
    if response.status_code == 429:
        if max_tries > 0:
            reset = parser.parse(response.headers['x-ratelimit-reset'])
            now = datetime.now(datetime.now().astimezone().tzinfo)
            wait = (reset - now).total_seconds() + 1
            log(f"Rate Limit hit requesting {url}. Waiting {wait} sec to retry at {response.headers['x-ratelimit-reset']}")
            time.sleep(wait)
            return post(url, json, headers, timeout, max_tries - 1)

        raise Exception(f"Maximum number of retries exceeded for rate limited request {url}")
    return response

def log(text):
    print(f"{datetime.now()} {datetime.now().astimezone().tzinfo}: {text}")

class ServerList:
    def __init__(self, iterable):
        self._dict = {}
        for item in iterable:
            if('last_checked' in iterable[item]):
                iterable[item]['last_checked'] = parser.parse(iterable[item]['last_checked'])
            self.add(item, iterable[item])

    def add(self, key, item):
        self._dict[key] = item

    def get(self, key):
        return self._dict[key]

    def pop(self,key):
        return self._dict.pop(key)

    def __contains__(self, item):
        return item in self._dict

    def __iter__(self):
        return iter(self._dict)

    def __len__(self):
        return len(self._dict)

    def toJSON(self):
        return json.dumps(self._dict,default=str)

class OrderedSet:
    """An ordered set implementation over a dict"""
@@ -890,8 +1089,160 @@ class OrderedSet:
        return len(self._dict)

    def toJSON(self):
        return json.dumps(self._dict,default=str)

def get_server_from_host_meta(server):
    url = f'https://{server}/.well-known/host-meta'
    try:
        resp = get(url, timeout = 30)
    except Exception as ex:
        log(f"Error getting host meta for {server}. Exception: {ex}")
        return None

    if resp.status_code == 200:
        try:
            hostMeta = ET.fromstring(resp.text)
            lrdd = hostMeta.find('.//{http://docs.oasis-open.org/ns/xri/xrd-1.0}Link[@rel="lrdd"]')
            url = lrdd.get('template')
            match = re.match(
                r"https://(?P<server>[^/]+)/", url
            )
            if match is not None:
                return match.group("server")
            else:
                raise Exception(f'server not found in lrdd for {server}')
        except Exception as ex:
            log(f'Error parsing host meta for {server}. Exception: {ex}')
            return None
    else:
        log(f'Error getting host meta for {server}. Status Code: {resp.status_code}')
        return None

def get_nodeinfo(server, seen_hosts, host_meta_fallback = False):
    url = f'https://{server}/.well-known/nodeinfo'
    try:
        resp = get(url, timeout = 30)
    except Exception as ex:
        log(f"Error getting host node info for {server}. Exception: {ex}")
        return None

    # if well-known nodeinfo isn't found, try to check host-meta for a webfinger URL
    # needed on servers where the display domain is different than the web domain
    if resp.status_code != 200 and not host_meta_fallback:
        # not found, try to check host-meta as a fallback
        log(f'nodeinfo for {server} not found, checking host-meta')
        new_server = get_server_from_host_meta(server)
        if new_server is not None:
            if new_server == server:
                log(f'host-meta for {server} did not get a new server.')
                return None
            else:
                return get_nodeinfo(new_server, seen_hosts, True)
        else:
            return None

    nodeLoc = None  # stays None when no supported schema link is present
    if resp.status_code == 200:
        try:
            nodeInfo = resp.json()
            for link in nodeInfo['links']:
                if link['rel'] in [
                    'http://nodeinfo.diaspora.software/ns/schema/2.0',
                    'http://nodeinfo.diaspora.software/ns/schema/2.1',
                ]:
                    nodeLoc = link['href']
                    break
        except Exception as ex:
            log(f'error getting server {server} info from well-known node info. Exception: {ex}')
            return None
    else:
        log(f'Error getting well-known host node info for {server}. Status Code: {resp.status_code}')
        return None

    if nodeLoc is None:
        log(f'could not find link to node info in well-known nodeinfo of {server}')
        return None

    # regrab server from nodeLoc, again in the case of different display and web domains
    match = re.match(
        r"https://(?P<server>[^/]+)/", nodeLoc
    )
    if match is None:
        log(f"Error getting web server name from {server}.")
        return None

    server = match.group('server')

    # return early if the web domain has been seen previously (in cases with host-meta lookups)
    if server in seen_hosts:
        return seen_hosts.get(server)

    try:
        resp = get(nodeLoc, timeout = 30)
    except Exception as ex:
        log(f"Error getting host node info for {server}. Exception: {ex}")
        return None

    if resp.status_code == 200:
        try:
            nodeInfo = resp.json()
            if 'activitypub' not in nodeInfo['protocols']:
                log(f'server {server} does not support activitypub, skipping')
                return None
            return {
                'webserver': server,
                'software': nodeInfo['software']['name'],
                'version': nodeInfo['software']['version'],
                'rawnodeinfo': nodeInfo,
            }
        except Exception as ex:
            log(f'error getting server {server} info from nodeinfo. Exception: {ex}')
            return None
    else:
        log(f'Error getting host node info for {server}. Status Code: {resp.status_code}')
        return None

def get_server_info(server, seen_hosts):
    if server in seen_hosts:
        serverInfo = seen_hosts.get(server)
        if('info' in serverInfo and serverInfo['info'] == None):
            return None
        return serverInfo

    nodeinfo = get_nodeinfo(server, seen_hosts)
    if nodeinfo is None:
        seen_hosts.add(server, {
            'info': None,
            'last_checked': datetime.now()
        })
    else:
        set_server_apis(nodeinfo)
        seen_hosts.add(server, nodeinfo)
        if server != nodeinfo['webserver']:
            seen_hosts.add(nodeinfo['webserver'], nodeinfo)

    return nodeinfo

def set_server_apis(server):
    # support for new server software should be added here
    software_apis = {
        'mastodonApiSupport': ['mastodon', 'pleroma', 'akkoma', 'pixelfed', 'hometown'],
        'misskeyApiSupport': ['misskey', 'calckey', 'firefish', 'foundkey'],
        'lemmyApiSupport': ['lemmy']
    }
    # software that has specific API support but is not compatible with FediFetcher for various reasons:
    # * gotosocial - All Mastodon APIs require access token (https://github.com/superseriousbusiness/gotosocial/issues/2038)

    for api, softwareList in software_apis.items():
        server[api] = server['software'] in softwareList

    # search `features` list in metadata if available
    if 'metadata' in server['rawnodeinfo'] and 'features' in server['rawnodeinfo']['metadata'] and type(server['rawnodeinfo']['metadata']['features']) is list:
        features = server['rawnodeinfo']['metadata']['features']
        if 'mastodon_api' in features:
            server['mastodonApiSupport'] = True

    server['last_checked'] = datetime.now()

if __name__ == "__main__":
    start = datetime.now()
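
The nodeinfo helpers in the hunk above follow the standard discovery protocol: fetch `/.well-known/nodeinfo`, follow the schema 2.0/2.1 link, then read the software name and version. A compressed sketch without the host-meta fallback (placeholder host):

    import requests

    server = "mastodon.example"  # placeholder
    wellknown = requests.get(f"https://{server}/.well-known/nodeinfo", timeout=30).json()
    node_loc = next(link["href"] for link in wellknown["links"]
                    if link["rel"].endswith(("/schema/2.0", "/schema/2.1")))
    nodeinfo = requests.get(node_loc, timeout=30).json()
    print(nodeinfo["software"]["name"], nodeinfo["software"]["version"])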
@@ -978,6 +1329,7 @@ if __name__ == "__main__":
    REPLIED_TOOT_SERVER_IDS_FILE = os.path.join(arguments.state_dir, "replied_toot_server_ids")
    KNOWN_FOLLOWINGS_FILE = os.path.join(arguments.state_dir, "known_followings")
    RECENTLY_CHECKED_USERS_FILE = os.path.join(arguments.state_dir, "recently_checked_users")
    SEEN_HOSTS_FILE = os.path.join(arguments.state_dir, "seen_hosts")

    seen_urls = OrderedSet([])
@@ -1011,6 +1363,22 @@ if __name__ == "__main__":
    all_known_users = OrderedSet(list(known_followings) + list(recently_checked_users))

    if os.path.exists(SEEN_HOSTS_FILE):
        with open(SEEN_HOSTS_FILE, "r", encoding="utf-8") as f:
            seen_hosts = ServerList(json.load(f))
            for host in list(seen_hosts):
                serverInfo = seen_hosts.get(host)
                if 'last_checked' in serverInfo:
                    serverAge = datetime.now(serverInfo['last_checked'].tzinfo) - serverInfo['last_checked']
                    if(serverAge.total_seconds() > arguments.remember_hosts_for_days * 24 * 60 * 60 ):
                        seen_hosts.pop(host)
                    elif('info' in serverInfo and serverInfo['info'] == None and serverAge.total_seconds() > 60 * 60 ):
                        # Don't cache failures for more than an hour
                        seen_hosts.pop(host)
    else:
        seen_hosts = ServerList({})

    if(isinstance(arguments.access_token, str)):
        setattr(arguments, 'access_token', [arguments.access_token])
@@ -1023,19 +1391,19 @@ if __name__ == "__main__":
        reply_toots = get_all_reply_toots(
            arguments.server, user_ids, token, seen_urls, arguments.reply_interval_in_hours
        )
        known_context_urls = get_all_known_context_urls(arguments.server, reply_toots,parsed_urls, seen_hosts)
        seen_urls.update(known_context_urls)
        replied_toot_ids = get_all_replied_toot_server_ids(
            arguments.server, reply_toots, replied_toot_server_ids, parsed_urls
        )
        context_urls = get_all_context_urls(arguments.server, replied_toot_ids, seen_hosts)
        add_context_urls(arguments.server, token, context_urls, seen_urls)

        if arguments.home_timeline_length > 0:
            """Do the same with any toots on the key owner's home timeline """
            timeline_toots = get_timeline(arguments.server, token, arguments.home_timeline_length)
            known_context_urls = get_all_known_context_urls(arguments.server, timeline_toots,parsed_urls, seen_hosts)
            add_context_urls(arguments.server, token, known_context_urls, seen_urls)

            # Backfill any post authors, and any mentioned users
@@ -1057,40 +1425,40 @@ if __name__ == "__main__":
                    if user not in mentioned_users and user['acct'] not in all_known_users:
                        mentioned_users.append(user)

            add_user_posts(arguments.server, token, filter_known_users(mentioned_users, all_known_users), recently_checked_users, all_known_users, seen_urls, seen_hosts)

        if arguments.max_followings > 0:
            log(f"Getting posts from last {arguments.max_followings} followings")
            user_id = get_user_id(arguments.server, arguments.user, token)
            followings = get_new_followings(arguments.server, user_id, arguments.max_followings, all_known_users)
            add_user_posts(arguments.server, token, followings, known_followings, all_known_users, seen_urls, seen_hosts)

        if arguments.max_followers > 0:
            log(f"Getting posts from last {arguments.max_followers} followers")
            user_id = get_user_id(arguments.server, arguments.user, token)
            followers = get_new_followers(arguments.server, user_id, arguments.max_followers, all_known_users)
            add_user_posts(arguments.server, token, followers, recently_checked_users, all_known_users, seen_urls, seen_hosts)

        if arguments.max_follow_requests > 0:
            log(f"Getting posts from last {arguments.max_follow_requests} follow requests")
            follow_requests = get_new_follow_requests(arguments.server, token, arguments.max_follow_requests, all_known_users)
            add_user_posts(arguments.server, token, follow_requests, recently_checked_users, all_known_users, seen_urls, seen_hosts)

        if arguments.from_notifications > 0:
            log(f"Getting notifications for last {arguments.from_notifications} hours")
            notification_users = get_notification_users(arguments.server, token, all_known_users, arguments.from_notifications)
            add_user_posts(arguments.server, token, notification_users, recently_checked_users, all_known_users, seen_urls, seen_hosts)

        if arguments.max_bookmarks > 0:
            log(f"Pulling replies to the last {arguments.max_bookmarks} bookmarks")
            bookmarks = get_bookmarks(arguments.server, token, arguments.max_bookmarks)
            known_context_urls = get_all_known_context_urls(arguments.server, bookmarks,parsed_urls, seen_hosts)
            add_context_urls(arguments.server, token, known_context_urls, seen_urls)

        if arguments.max_favourites > 0:
            log(f"Pulling replies to the last {arguments.max_favourites} favourites")
            favourites = get_favourites(arguments.server, token, arguments.max_favourites)
            known_context_urls = get_all_known_context_urls(arguments.server, favourites,parsed_urls, seen_hosts)
            add_context_urls(arguments.server, token, known_context_urls, seen_urls)

    with open(KNOWN_FOLLOWINGS_FILE, "w", encoding="utf-8") as f:
@@ -1103,7 +1471,10 @@ if __name__ == "__main__":
        json.dump(dict(list(replied_toot_server_ids.items())[-10000:]), f)

    with open(RECENTLY_CHECKED_USERS_FILE, "w", encoding="utf-8") as f:
        f.write(recently_checked_users.toJSON())

    with open(SEEN_HOSTS_FILE, "w", encoding="utf-8") as f:
        f.write(seen_hosts.toJSON())

    os.remove(LOCK_FILE)


@@ -9,3 +9,4 @@ requests==2.28.2
six==1.16.0
smmap==5.0.0
urllib3==1.26.14
defusedxml==0.7.1