SimpleChatTC: Cleanup: General T2

Pretty print the SimpleProxy gMe config.
Don't ignore the received HTTP response status text.
Update the readme wrt why autoSecs defaults to 0 and is specified in seconds.
This commit is contained in:
parent c5ff065ad2
commit 2394d38d58

@@ -48,9 +48,11 @@ def get_from_web(url: str, tag: str, inContentType: str, inHeaders: dict[str, st
        with urllib.request.urlopen(req, timeout=10) as response:
            contentData = response.read()
            statusCode = response.status or 200
            statusMsg = response.msg or ""
            contentType = response.getheader('Content-Type') or inContentType
            print(f"DBUG:FM:GFW:Resp:{response.status}:{response.msg}")
            debug.dump({ 'url': req.full_url, 'headers': req.headers, 'ctype': contentType }, { 'cdata': contentData })
            return Response(True, statusCode, "", contentType, contentData)
            return Response(True, statusCode, statusMsg, contentType, contentData)
    except Exception as exc:
        return Response(False, 502, f"WARN:{tag}:Failed:{exc}")

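For context, the hunk above captures the HTTP reason phrase (response.msg) and passes it into the returned Response instead of an empty status message, so callers no longer lose the status text that accompanied the status code. Below is a minimal, self-contained sketch of that pattern; the Response field layout and the fetch() helper are assumptions made for illustration, not the project's actual definitions.

import urllib.request
from dataclasses import dataclass

@dataclass
class Response:
    # Assumed field layout, mirroring the positional arguments seen in the hunk above.
    callOk: bool
    statusCode: int
    statusMsg: str = ""
    contentType: str = ""
    contentData: bytes = b""

def fetch(url: str, tag: str = "Fetch") -> Response:
    req = urllib.request.Request(url)
    try:
        with urllib.request.urlopen(req, timeout=10) as response:
            # For urllib responses, response.msg is the HTTP reason phrase
            # (e.g. "OK", "Not Found"); keeping it preserves the status text.
            return Response(True,
                            response.status or 200,
                            response.msg or "",
                            response.getheader('Content-Type') or "",
                            response.read())
    except Exception as exc:
        return Response(False, 502, f"WARN:{tag}:Failed:{exc}")

With something like fetch("https://example.com"), both statusCode and statusMsg are then available to log or to pass back to the client.
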
@@ -228,6 +228,7 @@ def process_args(args: list[str]):
    retained for literal_eval
    """
    import ast
    import json
    global gMe
    iArg = 1
    while iArg < len(args):

@@ -252,7 +253,7 @@ def process_args(args: list[str]):
        except KeyError:
            print(f"ERRR:ProcessArgs:{iArg}:{cArg}:UnknownCommand")
            exit(103)
    print(gMe)
    print(json.dumps(gMe, indent=4))
    for k in gConfigNeeded:
        if gMe.get(k) == None:
            print(f"ERRR:ProcessArgs:{k}:missing, did you forget to pass the config file...")

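The two hunks above pretty print the gMe config: import json, then swap the plain print(gMe) for json.dumps(gMe, indent=4). A tiny standalone illustration of the difference (the config keys below are made up, not SimpleProxy's actual gMe contents):

import json

# Hypothetical stand-in for the gMe global config dict.
gMe = {
    "server.port": 3128,
    "allowed.domains": ["example.com"],
    "debug": False,
}

print(gMe)                          # everything on one line
print(json.dumps(gMe, indent=4))    # one key per line, easier to review
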
@@ -2,7 +2,6 @@
# by Humans for All

import urllib.parse
import urllib.request
import urlvalidator as uv
from dataclasses import dataclass
import html.parser

@@ -279,6 +279,8 @@ It is attached to the document object. Some of these can also be updated using t

Setting this value to 0 (the default) disables the auto logic, so that the end user can review the tool calls requested by the ai, and if needed even modify them, before triggering/executing them, as well as review and modify the results generated by the tool call before submitting them back to the ai.

This is specified in seconds, so that users will normally not overload any website through the proxy server by default.

The builtin tools' metadata is sent to the ai model as part of the requests made to it.

In turn, if the ai model requests a tool call, it will be made and the response sent back to the ai model, under user control by default.

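To make the autoSecs behaviour described above concrete, here is a hedged sketch of the idea in Python; the names (autoSecs, run_tool_call, user_confirms) are illustrative assumptions, not the actual client code.

import time

def maybe_run_tool_call(autoSecs: float, tool_call, run_tool_call, user_confirms):
    """Sketch only: autoSecs == 0 keeps the human in the loop, while a non-zero
    value auto-runs the call after that many seconds, which also rate limits
    the traffic flowing out through the proxy server."""
    if autoSecs == 0:
        # Default: nothing runs until the user has reviewed (and possibly edited)
        # the requested tool call.
        if not user_confirms(tool_call):
            return None
    else:
        # Auto mode: wait autoSecs seconds before the call, so back-to-back
        # tool calls do not overload the target website.
        time.sleep(autoSecs)
    return run_tool_call(tool_call)
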