Linux 45-56-67-123 5.14.0-503.16.1.el9_5.x86_64 #1 SMP PREEMPT_DYNAMIC Fri Dec 13 01:47:05 EST 2024 x86_64
Apache/2.4.62 (AlmaLinux) OpenSSL/3.2.2
: 45.56.67.123 | : 52.15.253.72
Can't Read [ /etc/named.conf ]
8.3.15
apache
www.github.com/MadExploits
Terminal
AUTO ROOT
Adminer
Backdoor Destroyer
Linux Exploit
Lock Shell
Lock File
Create User
CREATE RDP
PHP Mailer
BACKCONNECT
UNLOCK SHELL
HASH IDENTIFIER
CPANEL RESET
CREATE WP USER
README
+ Create Folder
+ Create File
/
var /
www /
py /
py123.testproject.work /
[ HOME SHELL ]
Name
Size
Permission
Action
__pycache__
[ DIR ]
drwxr-xr-x
venv
[ DIR ]
drwxr-xr-x
BankSineBot.py
14.07
KB
-rw-r--r--
botreadweb.log
2.24
MB
-rw-r--r--
botreadweb.py
2.44
KB
-rw-r--r--
favicon.ico
0
B
-rw-r--r--
hello.py
54
B
-rw-r--r--
image.jpg
9.9
KB
-rw-r--r--
main.py
437
B
-rw-r--r--
test_connections.py
2.75
KB
-rw-r--r--
udo crontab -e
1.51
KB
-rw-r--r--
udo systemctl enable py123.ser...
2
KB
-rw-r--r--
Delete
Unzip
Zip
${this.title}
Close
Code Editor : botreadweb.py
#/var/www/py/py123.testproject.work/botreadweb.py
# python3 botreadweb.py
"""Fetch a fixed list of URLs concurrently, strip each page's HTML down to
visible text, and record per-page character/word counts in botreadweb.log
(and on stdout)."""

import re
import threading
import time
from concurrent.futures import ThreadPoolExecutor

import requests
from bs4 import BeautifulSoup

# Serializes appends to the shared log file: read_website runs on up to six
# threads at once, and unsynchronized appends could interleave log entries.
_log_lock = threading.Lock()


def read_website(url):
    """Download *url*, extract its visible text, and log/print summary stats.

    Side effects: appends a four-line summary block to botreadweb.log and
    prints the same details to stdout. Errors are reported, never raised,
    so one bad URL cannot kill its worker thread.
    """
    try:
        headers = {
            'User-Agent': 'AFRABot'
        }
        # Send a GET request to the URL with headers.
        # timeout= prevents an unresponsive server from hanging a worker forever.
        response = requests.get(url, headers=headers, timeout=30)
        if response.status_code == 200:
            # Use BeautifulSoup to parse the HTML content
            soup = BeautifulSoup(response.text, 'html.parser')
            # Extract the text from the parsed HTML
            text = soup.get_text(separator=' ', strip=True)

            # Collect details about the reading operation
            current_time = time.strftime("%Y-%m-%d %H:%M:%S")
            total_characters = len(text)
            # Convert to lower case and find all whole words
            words = re.findall(r'\w+', text.lower())
            total_words = len(words)

            # Log details to a file; lock because several threads share it.
            with _log_lock:
                with open("botreadweb.log", "a", encoding="utf-8") as log_file:
                    log_file.write(f"Website content read at: {current_time}\n")
                    log_file.write(f"Website URL: {url}\n")
                    log_file.write(f"Total number of characters (including spaces and punctuation): {total_characters}\n")
                    log_file.write(f"Total number of words: {total_words}\n\n")

            # Print details to console
            print(f"Website content read at: {current_time}")
            print(f"Website URL: {url}")
            print(f"Total number of characters (including spaces and punctuation): {total_characters}")
            print(f"Total number of words: {total_words}")

            # Print the text content
            #print("\nText extracted:", text)
        else:
            print("Issue reading the website:", response.status_code)
    except Exception as e:
        # Boundary handler: report and continue; deliberate best-effort.
        print("An error occurred:", e)


# List of URLs to read
urls = [
    "https://dev.rampagesoft.com/",
    "https://seo.rampagesoft.com/",
    "https://rampagesoft.com/webdesign/",
    "https://rampagesoft.com/",
    "https://siamwebsite.me/",
    "https://ttt-website.com/website.php",
    "https://ttt-website.com/",
    "https://blog.ttt-website.com/"
]


def main():
    """Fan the URL list out over a small thread pool (work is I/O-bound)."""
    # Create a thread pool executor with maximum of 6 threads
    with ThreadPoolExecutor(max_workers=6) as executor:
        executor.map(read_website, urls)


if __name__ == "__main__":
    # Guard so that importing this module does not trigger network requests.
    main()
Close