-
Notifications
You must be signed in to change notification settings - Fork 7
Expand file tree
/
Copy pathinsta.py
More file actions
162 lines (115 loc) · 5.56 KB
/
insta.py
File metadata and controls
162 lines (115 loc) · 5.56 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
##################################################################
# InstaOSINT Tool                                                #
# Fetches details from an Instagram Username                     #
# Can also download all images if account is public              #
# Coded By: Mystog3n & Inv0k3r                                   #
##################################################################
import argparse
from bs4 import BeautifulSoup
import os
import re
import requests
# Desktop Firefox User-Agent sent with every request — presumably so
# Instagram serves the normal HTML page rather than rejecting a
# scripted client (TODO confirm this is still required).
userAgent = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X x.y; rv:42.0) Gecko/20100101 Firefox/43.4"}
# Connects and fetches the Instagram user profile
def connect(username):
    """Fetch the Instagram profile page for *username*.

    Args:
        username: Instagram handle, appended directly to the profile URL.

    Returns:
        The page HTML encoded to bytes (suitable for BeautifulSoup).

    Exits the whole program (via exit()) when the request does not
    return HTTP 200 — Instagram answers non-200 for unknown usernames
    as well as for connection-level failures.
    """
    contents = requests.get(
        "https://www.instagram.com/" + username,
        headers=userAgent,
        timeout=30,  # fix: without a timeout, requests can hang forever
    )
    if contents.status_code != 200:
        print("Connection error or Username not found!")
        exit()
    return contents.text.encode()
# Fetches the arguments passed in the shell
def parseArguments(argv=None):
    """Parse command-line options for the tool.

    Args:
        argv: optional list of argument strings; defaults to None, in
              which case argparse reads sys.argv[1:] exactly as before.
              (Added so the parser can be driven programmatically.)

    Returns:
        argparse.Namespace with .username (1-element list), .download
        and .file (booleans).
    """
    parser = argparse.ArgumentParser(description="Instagram OSINT tool")
    parser.add_argument("-u", "--username",
                        help="profile username", required=True, nargs=1)
    parser.add_argument("-d", "--download",
                        help="Downloads the users photos if their account is public",
                        action="store_true", required=False)
    parser.add_argument("-f", "--file", help="Save the details in a file",
                        action="store_true", required=False)
    return parser.parse_args(argv)
# Fetches details from the profile and returns a dictionary with the details
def getDetails(html, user):
    """Scrape account details out of a parsed profile page.

    Args:
        html: BeautifulSoup document of the profile page.
        user: the username the page was fetched for.

    Returns:
        dict mapping detail names (Username, URL, Name, Followers,
        Following, Posts, Website, Is-Business [+ Business-Type],
        Is-Private, Recently-Joined, Is-Verified, Description, DP-URL)
        to string values, or booleans for the flags ('Website' becomes
        False when the profile has none).
    """
    # fix: local was named 'dict', shadowing the builtin.
    info = {'Username': user, 'URL': "https://www.instagram.com/" + user + '/'}
    # Page title looks like "Display Name (@user) ..." — keep the part
    # before '('.
    info['Name'] = html.find('title').string.split('(')[0].lstrip()
    # Accounts with no display name leave '@user' first; startswith also
    # survives an empty string (the old info['Name'][0] indexing raised
    # IndexError there).
    if info['Name'].startswith('@'):
        info['Name'] = ""
    # og:description is "<n> Followers, <n> Following, <n> Posts - ..."
    details = html.find('meta', property="og:description")['content'].split()
    info['Followers'] = details[0]
    info['Following'] = details[2]
    info['Posts'] = details[4]
    # The shared-data JSON sits in the 4th <script> tag; values are
    # pulled out by raw string splitting rather than a JSON parser, so
    # all markers below must match that blob verbatim.
    details = str(html('script', type="text/javascript")[3].contents)
    info['Website'] = details.split("""external_url":""")[1].split(",\"")[
        0].lstrip('\"').rstrip('\"')
    if info['Website'] == 'null':
        info['Website'] = False
    info['Is-Business'] = details.split("""is_business_account":""")[
        1].split("\",")[0] == 'true'
    if info['Is-Business']:
        info['Business-Type'] = details.split("""business_category_name":""")[
            1].split(",\"")[0].lstrip('\"').rstrip('\"')
    info['Is-Private'] = details.split("""is_private":""")[
        1].split("\",")[0] == 'true'
    info['Recently-Joined'] = details.split("""is_joined_recently":""")[
        1].split("\",")[0] == 'true'
    info['Is-Verified'] = details.split("""is_verified":""")[
        1].split("\",")[0] == 'true'
    # str() of the tag contents escapes backslashes, so JSON "\n"
    # appears as a literal backslash-backslash-n here.
    info['Description'] = details.split("""biography":\"""")[1].split("\",")[
        0].replace("\\\\n", '\n')
    info['DP-URL'] = details.split("""profile_pic_url_hd":""")[
        1].split("\",")[0].lstrip('\"').rstrip('\"')
    info['DP-URL'] = info['DP-URL'].replace('\\\\u0026', '&')
    return info
# To print details
def printDetails(details):
    """Write every detail as 'key:<tab> value' on its own stdout line."""
    for key, value in details.items():
        print(key + ':\t', value)
# To save details
def saveDetails(details, name):
    """Dump every detail as 'key:<tab>value' into download/<name>/info.txt."""
    lines = [key + ':\t' + str(value) + '\n' for key, value in details.items()]
    with open("./download/" + name + '/info.txt', 'w') as out:
        out.writelines(lines)
# To download an image from a URL
def downloadImage(url, name):
    """Fetch *url* and stream the response body into <name>.jpg.

    Silently does nothing when the server answers non-200.
    """
    response = requests.get(url, headers=userAgent)
    if response.status_code != 200:
        return
    with open(name + '.jpg', 'wb') as img:
        for chunk in response:
            img.write(chunk)
# To download images from public profile
def downloadData(code, name, site):
    """Extract every http(s) link from *code* and download the images.

    The first 3 matches (4 when the profile has a website link, i.e.
    *site* is not False) are skipped — presumably profile/chrome links
    rather than post images; verify against the page blob.
    Files land in ./download/<name>/ as <name>-1.jpg, <name>-2.jpg, ...
    """
    skip = 3 if site == False else 4
    matches = re.findall('"((http)s?://.*?)"', code)
    for position, match in enumerate(matches, start=1):
        if position > skip:
            downloadImage(match[0].replace('\\\\u0026', '&'),
                          "./download/" + name + '/' + name + '-' + str(position - skip))
def main():
    """Drive the tool: parse args, scrape the profile, then print, save and/or download."""
    args = parseArguments()
    username = args.username[0]
    # Fetch and parse the profile page once; it is reused for downloads.
    html = BeautifulSoup(connect(username), 'html.parser')
    details = getDetails(html, username)
    user_dir = "download/" + details['Username']
    if args.file:
        if not os.path.exists(user_dir):
            os.mkdir(user_dir)
        saveDetails(details, username)
    else:
        printDetails(details)
    if args.download:
        if not os.path.exists(user_dir):
            os.mkdir(user_dir)
        # Profile picture first, then every post image found in the page.
        downloadImage(details['DP-URL'],
                      "./download/" + details['Username'] + '/' + details['Username'])
        downloadData(str(html('script', type="text/javascript")[3].contents),
                     username, details['Website'])
if __name__ == "__main__":
    # Ensure the shared output directory exists before any work starts.
    os.makedirs("download", exist_ok=True)
    main()