{ "repository_url": "https://github.com/ronelsolomon/filesummarize.git", "owner": "ronelsolomon", "name": "filesummarize.git", "extracted_at": "2026-03-02T22:50:02.686213", "files": { ".DS_Store": { "content": "\u0000\u0000\u0000\u0001Bud1\u0000\u0000\u0010\u0000\u0000\u0000\b\u0000\u0000\u0000\u0010\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\b\u0000\u0000\u0000\b\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0002\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0002\u0000\u0000\u0000\u0001\u0000\u0000\u0010\u0000\u0000-\u0000e\u0000x\u0000p\u0000l\u0000a\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0002\u0000\u0000\u0000\u0018\u0000c\u0000o\u0000d\u0000e\u0000-\u0000e\u0000x\u0000p\u0000l\u0000a\u0000i\u0000n\u0000e\u0000r\u0000-\u0000e\u0000x\u0000t\u0000e\u0000n\u0000s\u0000i\u0000o\u0000nfdscbool\u0001\u0000\u0000\u0000\u0012\u0000c\u0000o\u0000d\u0000e\u0000_\u0000a\u0000n\u0000a\u0000l\u0000y\u0000s\u0000i\u0000s\u0000_\u0000t\u0000o\u0000o\u0000lfdscbool\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000
\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0
000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000
\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0001\u0000\u0000\u0000\u0000\u0000\u0000\b\u000b\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\
u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00
00\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0001\u0000\u0000\u0000 \u0000\u0000\u0000\u0001\u0000\u0000\u0000@\u0000\u0000\u0000\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0001\u0000\u0000\u0001\u0000\u0000\u0000\u0000\u0001\u0000\u0000\u0002\u0000\u0000\u0000\u0000\u0001\u0000\u0000\u0004\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0001\u0000\u0000\u0010\u0000\u0000\u0000\u0000\u0001\u0000\u0000 \u0000\u0000\u0000\u0000\u0001\u0000\u0000@\u0000\u0000\u0000\u0000\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0001\u0000\u0001\u0000\u0000\u0000\u0000\u0000\u0001\u0000\u0002\u0000\u0000\u0000\u0000\u0000\u0001\u0000\u0004\u0000\u0000\u0000\u0000\u0000\u0001\u0000\b\u0000\u0000\u0000\u0000\u0000\u0001\u0000\u0010\u0000\u0000\u0000\u0000\u0000\u0001\u0000 \u0000\u0000\u0000\u0000\u0000\u0001\u0000@\u0000\u0000\u0000\u0000\u0000\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0001\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0001\u0002\u0000\u0000\u0000\u0000\u0000\u0000\u0001\u0004\u0000\u0000\u0000\u0000\u0000\u0000\u0001\b\u0000\u0000\u0000\u0000\u0000\u0000\u0001\u0010\u0000\u0000\u0000\u0000\u0000\u0000\u0001 
\u0000\u0000\u0000\u0000\u0000\u0000\u0001@\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u
0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0010\u000b\u0000\u0000\u0000E\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00
00\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\
u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0001\u0004DSDB\u0000\u0000\u0000\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0002\u0000\u0000\u0000 \u0000\u0000\u0000`\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0001\u0000\u0000\u0001\u0000\u0000\u0000\u0000\u0001\u0000\u0000\u0002\u0000\u0000\u0000\u0000\u0001\u0000\u0000\u0004\u0000\u0000\u0000\u0000\u0002\u0000\u0000\b\u0000\u0000\u0000\u0018\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0001\u0000\u0000 \u0000\u0000\u0000\u0000\u0001\u0000\u0000@\u0000\u0000\u0000\u0000\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0001\u0000\u0001\u0000\u0000\u0000\u0000\u0000\u0001\u0000\u0002\u0000\u0000\u0000\u0000\u0000\u0001\u0000\u0004\u0000\u0000\u0000\u0000\u0000\u0001\u0000\b\u0000\u0000\u0000\u0000\u0000\u0001\u0000\u0010\u0000\u0000\u0000\u0000\u0000\u0001\u0000 \u0000\u0000\u0000\u0000\u0000\u0001\u0000@\u0000\u0000\u0000\u0000\u0000\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0001\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0001\u0002\u0000\u0000\u0000\u0000\u0000\u0000\u0001\u0004\u0000\u0000\u0000\u0000\u0000\u0000\u0001\b\u0000\u0000\u0000\u0000\u0000\u0000\u0001\u0010\u0000\u0000\u0000\u0000\u0000\u0000\u0001 \u0000\u0000\u0000\u0000\u0000\u0000\u0001@\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u00
00\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000", "size": 6141, "language": "unknown" }, "requirements.txt": { "content": "# Core Dependencies\nstreamlit>=1.24.0\nollama>=0.1.5\npython-docx>=0.8.11\nPyYAML>=6.0\npython-multipart>=0.0.6 # For file uploads in FastAPI\nfastapi>=0.104.0 # For API endpoints\nuvicorn>=0.24.0 # ASGI server\npydantic>=2.4.0 # Data validation\npydantic-settings>=2.0.0 # Settings management\npython-dotenv>=1.0.0 # Environment variables\n\n# Development\npytest>=7.4.0\npytest-cov>=4.1.0\nblack>=23.7.0\nisort>=5.12.0\nmypy>=1.5.0\nflake8>=6.1.0\npre-commit>=3.3.3\n\n# Documentation\nmkdocs>=1.5.0\nmkdocs-material>=9.1.0\nmkdocstrings[python]>=0.22.0\n\n# Testing\npytest-mock>=3.11.1\nresponses>=0.24.0\n\n# Type Hints\ntyping-extensions>=4.7.0\ntypes-requests>=2.31.0\n\n# Code 
Quality\npre-commit-hooks>=4.4.0\n", "size": 690, "language": "text" }, "Licence.md": { "content": "# End-User License Agreement (EULA) for Code Explainer Extension\n\n**Last Updated:** July 20, 2025\n\n## 1. Agreement to Terms\n\nBy installing, copying, or otherwise using the Code Explainer Extension (\"Software\"), you agree to be bound by the terms of this EULA. If you do not agree to the terms of this EULA, do not install or use the Software.\n\n## 2. License Grant\n\nThe Author grants you a limited, non-exclusive, non-transferable license to use the Software for personal or commercial use, subject to the terms of this EULA.\n\n## 3. Restrictions\n\nYou may not:\n- Modify, adapt, or translate the Software\n- Reverse engineer, decompile, or disassemble the Software\n- Remove any proprietary notices or labels on the Software\n- Use the Software for any illegal purpose\n\n## 4. Intellectual Property\n\nThe Software is protected by copyright and other intellectual property laws. The Author retains all right, title, and interest in and to the Software, including all intellectual property rights.\n\n## 5. No Warranty\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT.\n\n## 6. Limitation of Liability\n\nIN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, INCIDENTAL, INDIRECT, OR CONSEQUENTIAL DAMAGES WHATSOEVER (INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR ANY OTHER PECUNIARY LOSS) ARISING OUT OF THE USE OF OR INABILITY TO USE THE SOFTWARE.\n\n## 7. Governing Law\n\nThis EULA shall be governed by and construed in accordance with the laws of [Your Country/State], without regard to its conflict of law provisions.\n\n## 8. Contact Information\n\nFor any questions about this EULA, please contact [Your Email Address].", "size": 1854, "language": "markdown" }, "README.md": { "content": "# Code Analysis Tool with Llama\n\nA powerful tool that analyzes code and text files, generating comprehensive documentation and explanations using the Llama language model. Available both as a command-line interface and as a Python library.\n\n![Code Analysis Tool](https://img.shields.io/badge/python-3.8%2B-blue)\n[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)\n\n## โœจ Features\n\n- ๐Ÿ” Supports multiple programming languages (Python, JavaScript, Java, C++, and more)\n- ๐Ÿ“Š Processes data files (JSON, YAML, XML, CSV) and documents (Markdown, HTML, plain text)\n- ๐Ÿค– AI-powered code analysis and explanations using Llama model\n- ๐Ÿ–ฅ๏ธ Command Line Interface with flexible options\n- ๐Ÿ“ Detailed analysis including structure, elements, and relationships\n- ๐Ÿš€ Fast and efficient processing of large codebases\n- ๐Ÿงฉ Extensible architecture for adding new file type support\n\n## ๐Ÿš€ Quick Start\n\n### Prerequisites\n\n- Python 3.8 or higher\n- Ollama server running locally (with Llama model installed)\n\n### Installation\n\n1. Install the package using pip:\n ```bash\n pip install code-analysis-tool\n ```\n\n2. 
Set up Ollama:\n ```bash\n # Install Ollama (if not already installed)\n # Visit https://ollama.ai for installation instructions\n \n # Start Ollama server (in a separate terminal)\n ollama serve\n \n # Pull the Llama model (if not already done)\n ollama pull llama3\n ```\n\n### Basic Usage\n\n```bash\n# Analyze all supported files in the current directory\ncode-analyze\n\n# Analyze specific file types only\ncode-analyze --extensions py js json\n\n# Analyze a specific directory\ncode-analyze path/to/your/code\n\n# List all supported file extensions\ncode-analyze --list-extensions\n\n# Save output to a file\ncode-analyze --output analysis.txt\n\n# Get help\ncode-analyze --help\n```\n\n## ๐Ÿ“– Usage\n\n### Web Interface\n\n1. Start the Streamlit application:\n ```bash\n streamlit run app.py\n ```\n Or:\n ```bash\n python -m src.code_explainer.cli web\n ```\n\n2. Open your web browser to `http://localhost:8501`\n\n3. Upload a Python file and explore the analysis\n\n### Command Line Interface\n\nAnalyze a Python file:\n```bash\npython -m src.code_explainer.cli analyze path/to/your/file.py\n```\n\nGenerate documentation:\n```bash\npython -m src.code_explainer.cli document path/to/your/file.py --output docs/\n```\n\n### As a Python Library\n\n```python\nfrom code_analysis_tool import analyze_code, CodeAnalyzer\n\n# Analyze a single file\nresults = analyze_code(\"example.py\")\nprint(results['analysis'])\n\n# Analyze a directory with specific file types\ndirectory_results = analyze_code(\n \"src/\", \n exclude_dirs=[\"tests\", \"venv\"],\n file_extensions=[\"py\", \"js\", \"json\"]\n)\n\n# Or use the CodeAnalyzer class directly for more control\nanalyzer = CodeAnalyzer(model=\"llama3\")\nresults = analyzer.analyze_directory(\n \"project/\",\n exclude_dirs=[\"node_modules\", \".git\"],\n file_extensions=[\"py\", \"js\", \"ts\", \"json\", \"yaml\"]\n)\n\nfor file_path, analysis in results.items():\n print(f\"\\nFile: {file_path} ({analysis.get('file_type')}/{analysis.get('sub_type')})\")\n print(analysis['analysis'])\n \n # Access individual elements if available\n for element in analysis.get('elements', [])[:3]: # Show first 3 elements\n print(f\"\\nElement: {element.get('type')} {element.get('name')}\")\n print(f\"Lines: {element.get('start_line')}-{element.get('end_line')}\")\n```\n\n## ๐Ÿ› ๏ธ How It Works\n\n1. **Code Analysis**: The tool parses Python files to extract:\n - Function and class definitions\n - Method signatures with arguments and return types\n - Docstrings and inline comments\n - Import statements and module-level documentation\n\n2. **AI Integration**: The extracted information is processed by the Llama model to generate:\n - Clear, non-technical explanations\n - Usage examples\n - Documentation in multiple formats\n\n3. 
**Output Generation**: Results can be:\n - Viewed in the web interface\n - Exported as Word documents\n - Generated automatically via GitHub Actions\n
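\nAs a rough illustration of steps 1 and 2 above, here are two minimal sketches. They are not this package's actual implementation, and the helper name `list_definitions` is hypothetical. The first pulls top-level definitions out of a source file with Python's built-in `ast` module:\n\n```python\nimport ast\n\ndef list_definitions(source: str):\n    \"\"\"Return (kind, name, docstring) for each top-level def/class.\"\"\"\n    found = []\n    for node in ast.parse(source).body:\n        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):\n            kind = type(node).__name__.replace(\"Def\", \"\")  # 'FunctionDef' -> 'Function'\n            found.append((kind, node.name, ast.get_docstring(node) or \"\"))\n    return found\n```\n\nThe second mirrors the `ollama` client call used in `main.py`; the model tag and prompt are placeholders:\n\n```python\nfrom ollama import Client\n\nclient = Client(host=\"http://localhost:11434\")  # default local Ollama endpoint\nreply = client.chat(\n    model=\"llama3\",  # any locally pulled model tag\n    messages=[{\"role\": \"user\", \"content\": \"Explain in plain English: def add(a, b): return a + b\"}],\n)\nprint(reply[\"message\"][\"content\"])\n```\n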
\n## 📦 Project Structure\n\n```\n.\n├── src/\n│ ├── code_explainer/\n│ │ ├── __init__.py\n│ │ ├── cli.py # Command line interface\n│ │ ├── code_analyzer.py # Core code analysis logic\n│ │ ├── document_generator.py # Document generation\n│ │ └── llm_integration.py # Llama model integration\n│ ├── action.py # GitHub Action entry point\n│ └── analyzer.py # Core analysis functionality\n├── .github/workflows/ # GitHub Actions workflows\n├── app.py # Streamlit web app\n├── main.py # Legacy entry point\n├── requirements.txt # Python dependencies\n└── setup.py # Package configuration\n```\n\n## GitHub Actions\n\nThis project includes a GitHub Actions workflow (`.github/workflows/code_analysis.yml`) that:\n\n1. Runs tests on push and pull requests\n2. Performs code analysis using the Python Code Explainer\n3. Generates documentation in Word format\n\nTrigger behavior:\n- **Pull requests**: Runs tests and performs analysis (does not update documentation)\n- **Manual trigger**: Can be manually triggered from the Actions tab\n\n## Supported File Types\n\n### Code Files\n- **Python** (.py)\n- **JavaScript/TypeScript** (.js, .jsx, .ts, .tsx)\n- **Java** (.java)\n- **C/C++** (.c, .cpp, .h, .hpp)\n- **C#** (.cs)\n- **Go** (.go)\n- **Rust** (.rs)\n- **Ruby** (.rb)\n- **PHP** (.php)\n- **And more** (Shell, Perl, R, MATLAB, Julia, etc.)\n\n### Data Files\n- **JSON** (.json)\n- **YAML** (.yaml, .yml)\n- **XML** (.xml)\n- **CSV** (.csv)\n- **TOML** (.toml)\n- **INI/Config** (.ini, .cfg)\n\n### Document Files\n- **Markdown** (.md)\n- **HTML** (.html, .htm)\n- **CSS** (.css)\n- **Plain Text** (.txt)\n\n### Generated Documentation\n\nWhen the workflow runs on the main branch, it will:\n\n1. Generate a Word document (`docs/code_analysis_YYYYMMDD_HHMMSS.docx`)\n2. Include a summary of all analyzed code elements\n3. Provide detailed explanations of the code structure\n4. Automatically commit and push the generated documentation\n\n### Required Permissions\n\nThe workflow requires the following permissions:\n- `contents: write` - To commit and push generated documentation\n- `pull-requests: write` - To update pull request statuses\n- `statuses: write` - To update commit statuses\n\nThese permissions are automatically provided by the default `GITHUB_TOKEN`. No additional configuration is needed for public repositories. For private repositories, ensure the workflow has the necessary permissions in your repository settings.\n\n## 📄 License\n\nMIT License - see [LICENSE](LICENSE) for details.\n\n## 🤝 Contributing\n\nContributions are welcome! Please feel free to submit a Pull Request.\n\n### Development Setup\n\n1. Fork and clone the repository\n2. Set up a virtual environment:\n ```bash\n python -m venv venv\n source venv/bin/activate # On Windows: venv\\Scripts\\activate\n pip install -e .[dev]\n ```\n3. Make your changes and run tests\n4. Submit a pull request\n\n\n# Code Explainer Extension\n\nA powerful VS Code extension that helps you understand and document your code using AI. Generate detailed documentation, get plain English explanations, and improve code readability with just a few clicks.\n\n## Features\n\n- **AI-Powered Code Explanation**: Get detailed explanations of what your code does\n- **Automatic Documentation**: Generate comprehensive documentation for your code\n- **Multiple Formats**: Get documentation inline or in a separate Markdown file\n- **Plain English Explanations**: Understand complex code in simple terms\n- **Multi-language Support**: Works with various programming languages\n- **Context-Aware**: Analyzes code context for more accurate explanations\n\n## Installation\n\n1. Install the extension from the [VS Code Marketplace](https://marketplace.visualstudio.com/)\n2. Alternatively, install from VSIX:\n ```bash\n code --install-extension code-explainer-0.1.0.vsix\n ```\n\n## 📧 Contact\n\nFor any questions or feedback, please open an issue on the GitHub repository.\n\n## License\n\nThis project is licensed under the [MIT License](LICENSE).\n\n## End-User License Agreement (EULA)\n\nBy installing or using this extension, you agree to the terms of the [End-User License Agreement](Licence.md).\n", "size": 8141, "language": "markdown" }, "setup.py": { "content": "from setuptools import setup, find_packages\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetup(\n name=\"code-analyzer-ai\",\n version=\"0.1.0\",\n author=\"Your Name\",\n author_email=\"your.email@example.com\",\n description=\"An AI-powered tool for analyzing and explaining code in multiple programming languages\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/yourusername/code-analyzer-ai\",\n package_dir={\"\": \"src\"},\n packages=find_packages(where=\"src\"),\n python_requires=\">=3.8\",\n install_requires=[\n \"streamlit>=1.24.0\",\n \"ollama>=0.1.5\",\n \"python-docx>=0.8.11\",\n ],\n extras_require={\n 'dev': [\n 'pytest>=7.0.0',\n 'pytest-cov>=4.0.0',\n 'black>=23.0.0',\n 'isort>=5.12.0',\n 'flake8>=6.0.0',\n 'mypy>=1.0.0',\n ],\n },\n entry_points={\n 'console_scripts': [\n 'code-analyze=code_analyzer_ai.cli:main',\n 'code-analyzer-ai=code_analyzer_ai.cli:main',\n ],\n },\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n)\n", "size": 1306, "language": "python" }, ".gitignore": { "content": "# Python\n__pycache__/\n*.py[cod]\n*$py.class\n*.so\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\n*.egg-info/\n.installed.cfg\n*.egg\n\n# Virtual Environment\nvenv/\nenv/\nENV/\n\n# IDE\n.vscode/\n.idea/\n*.swp\n*.swo\n\n# Docs\ndocs/_build/\n\n# Local development\n.env\n.env.local\n\n# Generated documentation\ndocs/code_documentation.docx\n", "size": 367, "language": "unknown" }, ".gitattributes": { "content": "# Auto detect text files and perform LF normalization\n* text=auto\n", "size": 66, "language": "unknown" }, "app.py": { "content": "\"\"\"\nStreamlit application for Python Code Explainer.\n\"\"\"\nimport streamlit as st\nfrom code_explainer import extract_elements, generate_explanation, create_document\n\ndef main():\n \"\"\"Main application function.\"\"\"\n st.set_page_config(\n page_title=\"Python Code Explainer\",\n page_icon=\"🤖\",\n layout=\"wide\"\n )\n \n st.title(\"Python Code Explainer\")\n st.caption(\"Upload a Python file to analyze its structure and get explanations\")\n \n # Sidebar 
for settings\n with st.sidebar:\n st.header(\"Settings\")\n \n # Model selection\n model_name = st.selectbox(\n \"LLM Model\",\n [\"llama2\", \"llama3\", \"mistral\"],\n index=0,\n help=\"Select the language model to use for generating explanations\"\n )\n \n # Ollama host configuration\n with st.expander(\"Advanced Settings\"):\n ollama_host = st.text_input(\n \"Ollama Server URL\",\n value=st.session_state.get('ollama_host', 'http://localhost:11434'),\n help=\"URL of the Ollama server (default: http://localhost:11434)\"\n )\n st.session_state.ollama_host = ollama_host\n \n # File uploader\n uploaded_file = st.file_uploader(\"Upload Python (.py) file\", type=\"py\")\n \n if uploaded_file:\n try:\n # Read and analyze the code\n code = uploaded_file.read().decode()\n code_elements = extract_elements(code)\n \n # Display code analysis\n st.header(\"Code Analysis\")\n for el in code_elements:\n with st.expander(f\"{el['type']}: {el['name']} (Lines {el['start_line']}-{el['end_line']})\"):\n st.caption(f\"Location: Lines {el['start_line']}-{el['end_line']}\")\n \n if el['args']:\n st.write(f\"**Arguments:** `{', '.join(el['args'])}`\")\n if el['type'] != 'Class' and el['has_return']:\n st.write(\"**Returns:** Yes\")\n \n if el['docstring']:\n st.subheader(\"Documentation\")\n st.text(el['docstring'])\n \n st.subheader(\"Source Code\")\n st.code(el['source'], language='python')\n \n # Generate explanation\n if st.button(\"Generate Explanation\", type=\"primary\"):\n with st.spinner(\"Analyzing code with AI...\"):\n try:\n explanation = generate_explanation(code_elements, model=model_name)\n st.success(\"AI Explanation:\")\n st.write(explanation)\n \n # Generate and offer download of DOCX report\n doc_buffer = create_document(\n code_elements, \n explanation,\n model=model_name,\n host=st.session_state.get('ollama_host', 'http://localhost:11434')\n )\n if doc_buffer:\n st.download_button(\n label=\"๐Ÿ“ฅ Download Analysis Report\",\n data=doc_buffer,\n file_name=\"code_analysis_report.docx\",\n mime=\"application/vnd.openxmlformats-officedocument.wordprocessingml.document\"\n )\n \n except Exception as e:\n st.error(f\"Error generating explanation: {str(e)}\")\n \n except Exception as e:\n st.error(f\"Error processing file: {str(e)}\")\n\nif __name__ == \"__main__\":\n main()\n", "size": 3847, "language": "python" }, "main.py": { "content": "import ast\nimport streamlit as st\nfrom ollama import Client\nimport io\n\ntry:\n from docx import Document\n DOCX_AVAILABLE = True\nexcept ImportError:\n DOCX_AVAILABLE = False\n st.warning(\"The 'python-docx' package is required for .docx export. 
Please install it with 'pip install python-docx'\")\n \n# Add these imports at the top\nimport os\nimport mimetypes\nfrom pathlib import Path\n\ndef detect_file_type(filename):\n \"\"\"Detect file type based on extension and content.\"\"\"\n if not filename:\n return \"unknown\"\n \n ext = Path(filename).suffix.lower()[1:] # Get extension without dot\n mime_type, _ = mimetypes.guess_type(filename)\n \n # Code files\n code_exts = ['py', 'js', 'jsx', 'ts', 'tsx', 'java', 'c', 'cpp', 'h', 'hpp', \n 'cs', 'go', 'rs', 'rb', 'php', 'sh', 'pl', 'r', 'm', 'jl']\n if ext in code_exts:\n return 'code'\n \n # Data files\n data_exts = ['json', 'yaml', 'yml', 'xml', 'csv', 'toml', 'ini', 'cfg']\n if ext in data_exts:\n return 'data'\n \n # Document files\n doc_exts = ['md', 'txt', 'html', 'htm', 'css', 'pdf', 'doc', 'docx']\n if ext in doc_exts or (mime_type and any(t in mime_type for t in ['text', 'document'])): \n return 'document'\n \n return 'unknown'\n\ndef read_file_content(file):\n \"\"\"Read file content based on its type.\"\"\"\n try:\n if file.type and ('text/' in file.type or file.type in ['application/json', 'application/xml']):\n return file.getvalue().decode('utf-8')\n else:\n # For binary files, we'll just note the file type\n return f\"[Binary file: {file.name}, Type: {file.type or 'unknown'}]\"\n except Exception as e:\n return f\"Error reading file: {str(e)}\"\n\n# Helper\ndef extract_elements(code):\n \"\"\"Extracts all top-level classes and functions with docstrings and source code.\"\"\"\n tree = ast.parse(code)\n elements = []\n for node in tree.body:\n if isinstance(node, (ast.FunctionDef, ast.ClassDef, ast.AsyncFunctionDef)):\n # Get basic info\n element_type = node.__class__.__name__\n name = node.name\n doc = ast.get_docstring(node) or \"\"\n \n # Get full source code\n src = ast.get_source_segment(code, node) or \"\"\n \n # Get line numbers\n start_line = getattr(node, 'lineno', 0)\n end_line = getattr(node, 'end_lineno', start_line)\n \n # Get function arguments if it's a function\n args = []\n if hasattr(node, 'args'):\n args = [arg.arg for arg in node.args.args]\n \n elements.append({\n 'type': element_type.replace('Def', ''), # 'FunctionDef' -> 'Function', 'ClassDef' -> 'Class'\n 'name': name,\n 'docstring': doc,\n 'source': src,\n 'start_line': start_line,\n 'end_line': end_line,\n 'args': args if args else None,\n 'has_return': any(isinstance(n, ast.Return) for n in ast.walk(node))\n })\n return elements\n\ndef llama_explain(code_elements):\n \"\"\"\n Send code info to Llama via Ollama and get detailed non-technical summary.\n \n The prompt is structured as follows:\n \n [STATIC] - These parts never change:\n - The initial instructions to the AI about its role and how to respond\n - The section headers (e.g., 'Documentation:', 'Code:')\n - The formatting of the response\n \n [DYNAMIC] - These parts are filled with actual code analysis:\n - Function/Class names and types\n - Line numbers where code appears\n - Documentation strings from the code\n - Function arguments (if any)\n - Return value indicators\n - The actual source code\n \n Example of how the prompt will look:\n \n [SYSTEM PROMPT - Static]\n You are a helpful assistant. Read the information below and summarize what this Python code does,\n explaining it in detail, in plain, non-technical English, to a non-programmer. 
Avoid jargon.\n Where possible, use analogies and concrete examples.\n \n Here is the code to summarize:\n \n [DYNAMIC CONTENT - Example for one function]\n Function 'calculate_total':\n Location: Lines 5-10\n Documentation: Calculates the total price including tax\n Arguments: price, tax_rate\n Returns: Yes\n Code:\n def calculate_total(price, tax_rate):\n \"\"\"\n # Compose prompt with enhanced information\n combined = \"\\n\\n\".join(\n f\"{el['type']} '{el['name']}':\\n\"\n f\"Location: Lines {el['start_line']}-{el['end_line']}\\n\"\n f\"Documentation: {el['docstring']}\\n\"\n + (f\"Arguments: {', '.join(el['args'])}\\n\" if el['args'] else \"\") \n + (\"Returns: Yes\\n\" if el['has_return'] and el['type'] != 'Class' else \"\")\n + f\"Code:\\n{el['source'] or ''}\"\n for el in code_elements\n )\n \n system_prompt = (\n \"You are a helpful assistant. Your job is to explain the Python code and workflow described below \"\n \"in plain, non-technical English to someone without a programming background. Avoid technical jargon. \"\n \"Use relatable analogies and simple examples where appropriate.\\n\\n\"\n\n \"📌 Task Overview:\\n\"\n \"- Break down the Python code into understandable parts.\\n\"\n \"- Present the explanation in a two-column table:\\n\"\n \" 1. Section of the prompt\\n\"\n \" 2. Whether it is dynamic or static\\n\\n\"\n\n \"🧠 User Context:\\n\"\n \"- The user is building an automated assessment tool using LLMs.\\n\"\n \"- The tool generates subtopics and test questions from a topic + grade + learning objective.\\n\"\n \"- They want help modifying Prompt 1 to include a new variable: the learning objective.\\n\\n\"\n\n \"💡 Input Example:\\n\"\n \" topic = 'Ratios and Proportional Relationships'\\n\"\n \" student_class = '6th standard'\\n\"\n \" learning_objective = 'Understand ratio concepts and use ratio reasoning to solve problems.'\\n\\n\"\n\n \"📝 Full Prompt 1 (Subtopic Generator):\\n\"\n \"I want a list of sub-topics for the topic \\\"{topic}\\\" which is taught to a \\\"{student_class}\\\" student \"\n \"with a learning objective \\\"{learning_objective}\\\".\\n\"\n \"First, output the learning objective exactly as given.\\n\"\n \"Then, output the sub-topics ONLY as a Python list. Do not include any commentary or explanation.\\n\\n\"\n\n \"🔧 System Context:\\n\"\n \"We are building an automated assessment web app where questions are usually uploaded manually into a MySQL database. \"\n \"This tool uses large language models to generate those questions automatically, saving time and effort.\\n\\n\"\n\n \"📋 Additional User Requests:\\n\"\n \"- Modify the original code to include the new learning objective variable.\\n\"\n \"- Ensure that generate_questions_for_subtopic also uses the learning objective.\\n\"\n \"- Recreate the dynamic/static breakdown table of Prompt 1 and Prompt 2.\\n\"\n \"- Show an example of what Prompt 1 and Prompt 2 look like after the code runs.\\n\"\n \"- Give a step-by-step guide to feeding these prompts into a ChatGPT conversation.\\n\"\n \"- Convert the instructions into clean documentation.\\n\"\n \"- Provide a downloadable .docx version of the documentation.\\n\\n\"\n\n f\"{combined}\\n\\n\"\n \"🧾 Now, please provide a detailed, beginner-friendly explanation:\"\n)\n\n\n try:\n client = Client(host='http://localhost:11434')\n response = client.chat(model='llama2', messages=[{\"role\": \"user\", \"content\": system_prompt}])\n \n except Exception as e:\n return f\"Error generating explanation: {str(e)}\"\n return response['message']['content']\n\n# ---- Streamlit UI ----\nst.title(\"File Analyzer & Summarizer\")\n\nuploaded_file = st.file_uploader(\n \"Upload any file for analysis\", \n type=None, # Accept all file types\n accept_multiple_files=False,\n help=\"Upload any text-based file (code, documents, data files) for analysis\"\n)\n\nif uploaded_file:\n try:\n file_type = detect_file_type(uploaded_file.name)\n file_content = read_file_content(uploaded_file)\n \n st.header(f\"File Analysis: {uploaded_file.name}\")\n st.write(f\"**Type:** {file_type.capitalize()} file\")\n \n if file_type == 'code':\n # Existing code analysis for Python files\n if uploaded_file.name.endswith('.py'):\n code_elements = extract_elements(file_content)\n # ... 
rest of the code analysis logic ...\n else:\n st.subheader(\"File Content\")\n st.code(file_content, language='text')\n \n elif file_type == 'data':\n st.subheader(\"Data Content\")\n if uploaded_file.name.endswith('.json'):\n st.json(file_content)\n else:\n st.code(file_content)\n \n elif file_type == 'document':\n st.subheader(\"Document Content\")\n st.text_area(\"Content\", file_content, height=300)\n \n else:\n st.warning(\"This file type is not fully supported for analysis.\")\n st.download_button(\n label=\"Download File\",\n data=uploaded_file,\n file_name=uploaded_file.name,\n mime=uploaded_file.type\n )\n \n # Add a button to generate DOCX report\n if st.button(\"Generate DOCX Report\"):\n doc = Document()\n doc.add_heading(f\"File Analysis Report: {uploaded_file.name}\", 0)\n doc.add_paragraph(f\"File Type: {file_type.capitalize()}\")\n \n if file_type == 'code' and uploaded_file.name.endswith('.py'):\n doc.add_heading(\"Code Structure:\", level=1)\n for el in code_elements:\n doc.add_heading(f\"{el['type']}: {el['name']} (Lines {el['start_line']}-{el['end_line']})\", level=2)\n if el['args']:\n doc.add_paragraph(f\"Arguments: {', '.join(el['args'])}\")\n if el['type'] != 'Class' and el['has_return']:\n doc.add_paragraph(\"Returns: Yes\")\n if el['docstring']:\n doc.add_paragraph(f\"Documentation:\\n{el['docstring']}\")\n doc.add_paragraph(\"Source Code:\")\n doc.add_paragraph(el['source'], style='Intense Quote')\n else:\n doc.add_heading(\"File Content:\", level=1)\n doc.add_paragraph(file_content)\n \n # Save to a BytesIO buffer\n buffer = io.BytesIO()\n doc.save(buffer)\n buffer.seek(0)\n \n # Create download button\n st.download_button(\n label=\"Download DOCX Report\",\n data=buffer,\n file_name=f\"analysis_{Path(uploaded_file.name).stem}.docx\",\n mime=\"application/vnd.openxmlformats-officedocument.wordprocessingml.document\"\n )\n \n except Exception as e:\n st.error(f\"Error processing file: {str(e)}\")\nelse:\n st.info(\"Please upload a file to analyze\")", "size": 11124, "language": "python" }, "test_non_tech_explanation.py": { "content": "\"\"\"\nTest script for the non-technical explanation feature.\n\"\"\"\nimport sys\nfrom pathlib import Path\n\n# Add the project root directory to the path\nproject_root = Path(__file__).parent\nsys.path.insert(0, str(project_root))\n\nfrom src.code_explainer.document_generator import generate_non_tech_explanation\n\ndef test_non_tech_explanation():\n \"\"\"Test the non-technical explanation generation.\"\"\"\n # Sample code elements that would come from the analyzer\n sample_elements = [\n {\n 'name': 'calculate_total',\n 'type': 'Function',\n 'docstring': 'Calculates the total price of items in a shopping cart.',\n 'args': ['items', 'tax_rate'],\n 'has_return': True,\n 'source': 'def calculate_total(items, tax_rate=0.08):\\n \"\"\"Calculates the total price of items in a shopping cart.\"\"\"\\n subtotal = sum(item[\"price\"] for item in items)\\n tax = subtotal * tax_rate\\n return subtotal + tax',\n 'start_line': 10,\n 'end_line': 15\n },\n {\n 'name': 'ShoppingCart',\n 'type': 'Class',\n 'docstring': 'Represents a shopping cart with items and checkout functionality.',\n 'source': 'class ShoppingCart:\\n \"\"\"Represents a shopping cart with items and checkout functionality.\"\"\"\\n def __init__(self):\\n self.items = []\\n \\n def add_item(self, item):\\n self.items.append(item)',\n 'start_line': 18,\n 'end_line': 25\n }\n ]\n \n # Generate the explanation\n explanation, elements = 
generate_non_tech_explanation(sample_elements)\n \n # Print the results\n print(\"=\" * 80)\n print(\"NON-TECHNICAL EXPLANATION\")\n print(\"=\" * 80)\n print(explanation)\n \n print(\"\\n\" + \"=\" * 80)\n print(\"CODE ELEMENT DESCRIPTIONS\")\n print(\"=\" * 80)\n for elem in elements:\n print(f\"\\n{elem.type}: {elem.name}\")\n print(f\"Description: {elem.description}\")\n if elem.example:\n print(f\"Example: {elem.example}\")\n\nif __name__ == \"__main__\":\n test_non_tech_explanation()\n", "size": 2110, "language": "python" }, "action.yml": { "content": "name: 'Python Code Analyzer'\n\ndescription: 'Analyze Python code and generate documentation'\nauthor: 'Ronel'\nbranding:\n icon: 'code'\n color: 'blue'\n\ninputs:\n python-version:\n description: 'Python version to use'\n required: false\n default: '3.10'\n ollama-model:\n description: 'Ollama model to use for analysis'\n required: false\n default: 'llama2'\n ollama-host:\n description: 'Ollama server host'\n required: false\n default: 'http://localhost:11434'\n\nruns:\n using: 'composite'\n steps:\n - name: Checkout code\n uses: actions/checkout@v4\n\n - name: Set up Python\n uses: actions/setup-python@v4\n with:\n python-version: ${{ inputs.python-version }}\n\n - name: Install dependencies\n shell: bash\n run: |\n python -m pip install --upgrade pip\n pip install -r requirements.txt\n\n - name: Run analysis\n shell: bash\n run: python -m src.action\n env:\n OLLAMA_MODEL: ${{ inputs.ollama-model }}\n OLLAMA_HOST: ${{ inputs.ollama-host }}\n GITHUB_TOKEN: ${{ github.token }}", "size": 1228, "language": "yaml" }, "tests/test_analyzer.py": { "content": "import pytest\nfrom src.analyzer import extract_elements\n\ndef test_extract_elements_with_functions():\n \"\"\"Test that extract_elements can parse a simple function.\"\"\"\n code = \"\"\"\ndef hello(name: str) -> str:\n \\\"\\\"\\\"Return a greeting message.\\\"\\\"\\\"\n return f\"Hello, {name}!\"\n\"\"\"\n elements = extract_elements(code)\n assert len(elements) == 1\n assert elements[0]['name'] == \"hello\"\n assert elements[0]['type'] == \"Function\"\n assert \"Return a greeting message\" in elements[0]['docstring']\n assert elements[0]['args'] == ['name']\n assert elements[0]['has_return'] is True\n\ndef test_extract_elements_with_class():\n \"\"\"Test that extract_elements can parse a class definition.\"\"\"\n code = \"\"\"\nclass Greeter:\n \\\"\\\"\\\"A class that greets people.\\\"\\\"\\\"\n \n def __init__(self, name: str):\n self.name = name\n \n def greet(self) -> str:\n \\\"\\\"\\\"Return a greeting.\\\"\\\"\\\"\n return f\"Hello, {self.name}!\"\n\"\"\"\n elements = extract_elements(code)\n assert len(elements) == 1 # Only the class itself is extracted\n assert elements[0]['type'] == 'Class' and elements[0]['name'] == 'Greeter'\n\n@pytest.fixture\ndef sample_code():\n return \"\"\"\ndef add(a: int, b: int) -> int:\n \\\"\\\"\\\"Add two numbers.\\\"\\\"\\\"\n return a + b\n \nclass Calculator:\n def multiply(self, x: float, y: float) -> float:\n \\\"\\\"\\\"Multiply two numbers.\\\"\\\"\\\"\n return x * y\n\"\"\"\n\ndef test_extract_elements_with_fixture(sample_code):\n \"\"\"Test extract_elements using a fixture.\"\"\"\n elements = extract_elements(sample_code)\n assert len(elements) == 2 # add function and Calculator class\n assert any(e['name'] == 'add' and e['type'] == 'Function' for e in elements)\n assert 
any(e['name'] == 'Calculator' and e['type'] == 'Class' for e in elements)\n", "size": 1815, "language": "python" }, "tests/conftest.py": { "content": "\"\"\"Configuration file for pytest.\"\"\"\nimport sys\nfrom pathlib import Path\n\n# Add the src directory to the Python path\nsys.path.insert(0, str(Path(__file__).parent.parent / 'src'))\n", "size": 179, "language": "python" }, "tests/__init__.py": { "content": "# Initialize tests package\n", "size": 27, "language": "python" }, "code_analysis_tool/LICENSE": { "content": "MIT License\n\nCopyright (c) 2023 Your Name\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n", "size": 1066, "language": "unknown" }, "code_analysis_tool/requirements.txt": { "content": "# Core dependencies\nollama>=0.1.5\npython-dotenv>=1.0.0\nPyYAML>=6.0\n", "size": 55, "language": "text" }, "code_analysis_tool/analyzer.py": { "content": "\"\"\"Core analysis functionality for the code analysis tool.\"\"\"\nimport ast\nimport json\nimport yaml\nimport re\nfrom pathlib import Path\nfrom typing import Dict, List, Optional, Tuple, Union, Any\n\nimport ollama\n\ndef detect_file_type(file_path: Union[str, Path]) -> Tuple[str, str]:\n \"\"\"Detect the type of file based on its extension.\n \n Returns a (category, language) tuple, e.g. ('code', 'python').\n \"\"\"\n path = Path(file_path)\n ext = path.suffix.lower()\n \n # Common code file extensions\n code_extensions = {\n '.py': 'python',\n '.js': 'javascript',\n '.jsx': 'javascript',\n '.ts': 'typescript',\n '.tsx': 'typescript',\n '.java': 'java',\n '.c': 'c',\n '.cpp': 'cpp',\n '.h': 'cpp',\n '.hpp': 'cpp',\n '.cs': 'csharp',\n '.go': 'go',\n '.rs': 'rust',\n '.rb': 'ruby',\n '.php': 'php',\n '.swift': 'swift',\n '.kt': 'kotlin',\n '.scala': 'scala',\n '.sh': 'shell',\n '.pl': 'perl',\n '.r': 'r',\n '.m': 'matlab',\n '.jl': 'julia',\n }\n \n # Data file extensions\n data_extensions = {\n '.json': 'json',\n '.yaml': 'yaml',\n '.yml': 'yaml',\n '.xml': 'xml',\n '.csv': 'csv',\n '.toml': 'toml',\n '.ini': 'ini',\n '.cfg': 'ini',\n }\n \n # Document extensions\n doc_extensions = {\n '.md': 'markdown',\n '.txt': 'text',\n '.html': 'html',\n '.htm': 'html',\n '.css': 'css',\n }\n \n if ext in code_extensions:\n return 'code', code_extensions[ext]\n elif ext in data_extensions:\n return 'data', data_extensions[ext]\n elif ext in doc_extensions:\n return 'document', doc_extensions[ext]\n else:\n return 'unknown', ext[1:] if ext else 'text'\n\nclass CodeAnalyzer:\n \"\"\"Analyzes Python code and provides explanations using AI.\"\"\"\n \n def __init__(self, model: str = \"llama3\"):\n \"\"\"Initialize the code analyzer.\n \n Args:\n model: The Ollama 
model to use for analysis.\n \"\"\"\n self.model = model\n self.client = ollama.Client()\n \n def analyze_file(self, file_path: Union[str, Path]) -> Dict:\n \"\"\"Analyze a single file.\n \n Args:\n file_path: Path to the file to analyze.\n \n Returns:\n Dict containing analysis results.\n \"\"\"\n file_path = Path(file_path)\n with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:\n content = f.read()\n \n file_type, sub_type = detect_file_type(file_path)\n \n if file_type == 'code':\n if sub_type == 'python':\n elements = self._extract_python_elements(content)\n else:\n elements = self._extract_generic_code_elements(content, sub_type)\n elif file_type == 'data':\n elements = self._extract_data_elements(content, sub_type)\n else:\n elements = self._extract_text_elements(content, file_type)\n \n analysis = self._analyze_elements(elements, str(file_path), file_type, sub_type)\n return analysis\n \n def analyze_directory(self, directory: Union[str, Path], \n exclude_dirs: Optional[List[str]] = None,\n file_extensions: Optional[List[str]] = None) -> Dict:\n \"\"\"Analyze all files in a directory.\n \n Args:\n directory: Path to the directory to analyze.\n exclude_dirs: List of directory names to exclude.\n file_extensions: List of file extensions to include (without leading .).\n If None, includes all supported file types.\n \n Returns:\n Dict containing analysis results for all files.\n \"\"\"\n directory = Path(directory)\n if not directory.is_dir():\n raise ValueError(f\"{directory} is not a valid directory\")\n \n exclude_dirs = exclude_dirs or ['__pycache__', '.git', '.github', 'venv', 'env', 'node_modules']\n results = {}\n \n # Default supported extensions if none provided\n if file_extensions is None:\n file_extensions = [\n # Code files\n 'py', 'js', 'jsx', 'ts', 'tsx', 'java', 'c', 'cpp', 'h', 'hpp',\n 'cs', 'go', 'rs', 'rb', 'php', 'swift', 'kt', 'scala', 'sh', 'pl',\n 'r', 'm', 'jl',\n # Data files\n 'json', 'yaml', 'yml', 'xml', 'csv', 'toml', 'ini', 'cfg',\n # Document files\n 'md', 'txt', 'html', 'htm', 'css'\n ]\n \n # Convert to set for faster lookups\n extensions = {f'.{ext.lstrip(\".\").lower()}' for ext in file_extensions}\n \n for file_path in directory.rglob('*'):\n # Skip directories and files in excluded directories\n if not file_path.is_file():\n continue\n \n if any(part in exclude_dirs for part in file_path.parts):\n continue\n \n # Check file extension\n if file_path.suffix.lower() not in extensions:\n continue\n \n try:\n results[str(file_path)] = self.analyze_file(file_path)\n except Exception as e:\n print(f\"Error analyzing {file_path}: {str(e)}\")\n continue\n \n return results\n \n def _extract_python_elements(self, code: str) -> List[Dict]:\n \"\"\"Extract Python code elements (functions, classes) from source code.\"\"\"\n try:\n tree = ast.parse(code)\n elements = []\n \n for node in tree.body:\n if isinstance(node, (ast.FunctionDef, ast.ClassDef, ast.AsyncFunctionDef)):\n element_type = node.__class__.__name__.replace('Def', '')\n doc = ast.get_docstring(node) or \"\"\n src = ast.get_source_segment(code, node) or \"\"\n \n elements.append({\n 'type': element_type,\n 'name': node.name,\n 'docstring': doc,\n 'source': src,\n 'start_line': getattr(node, 'lineno', 0),\n 'end_line': getattr(node, 'end_lineno', 0),\n 'language': 'python'\n })\n \n return elements\n except Exception as e:\n print(f\"Error parsing Python code: {str(e)}\")\n return [{\n 'type': 'File',\n 'name': 'content',\n 'docstring': 'Error parsing Python file',\n 'source': 
code[:1000] + ('...' if len(code) > 1000 else ''),\n 'language': 'python',\n 'error': str(e)\n }]\n \n def _extract_generic_code_elements(self, code: str, language: str) -> List[Dict]:\n \"\"\"Extract elements from generic code files using simple heuristics.\"\"\"\n elements = []\n lines = code.split('\\n')\n current_element = None\n \n # Common patterns for different languages\n patterns = {\n 'javascript': r'(?:function|class|const|let|var)\\s+([a-zA-Z0-9_$]+)',\n 'typescript': r'(?:function|class|interface|type|enum|const|let|var)\\s+([a-zA-Z0-9_$]+)',\n 'java': r'(?:public|private|protected|static|final|native|synchronized|abstract|transient|class|interface|enum)\\s+([a-zA-Z0-9_$<>, ]+?)[\\s<{]',\n 'c': r'(?:#define|typedef|struct|union|enum|void|int|char|float|double)\\s+([a-zA-Z0-9_]+)',\n 'cpp': r'(?:class|struct|union|enum|namespace|template|using)\\s+([a-zA-Z0-9_:]+)',\n 'csharp': r'(?:class|interface|struct|enum|delegate|namespace|using)\\s+([a-zA-Z0-9_.]+)',\n 'go': r'func\\s+(\\([^)]+\\)\\s+)?([a-zA-Z0-9_]+)',\n 'rust': r'(?:fn|struct|enum|trait|impl|mod)\\s+([a-zA-Z0-9_]+)',\n 'ruby': r'(?:def|class|module)\\s+([a-zA-Z0-9_]+[?!]?)',\n 'php': r'(?:function|class|interface|trait|namespace)\\s+([a-zA-Z0-9_]+)',\n 'swift': r'(?:func|class|struct|enum|protocol|extension|typealias)\\s+([a-zA-Z0-9_]+)',\n 'kotlin': r'(?:fun|class|interface|object|typealias|val|var)\\s+([a-zA-Z0-9_]+)',\n 'scala': r'(?:def|class|trait|object|type|val|var)\\s+([a-zA-Z0-9_]+)',\n 'shell': r'(?:function\\s+)?([a-zA-Z0-9_]+)\\s*\\(\\s*\\)',\n 'perl': r'sub\\s+([a-zA-Z0-9_]+)',\n 'r': r'([a-zA-Z0-9_.]+)\\s*<\\-\\s*function',\n 'matlab': r'function\\s+(?:\\[.*\\]\\s*=\\s*)?([a-zA-Z0-9_]+)',\n 'julia': r'(?:function|struct|mutable\\s+struct|abstract\\s+type|primitive\\s+type)\\s+([a-zA-Z0-9_!]+)'\n }\n \n pattern = patterns.get(language, r'\\b(function|class|def|fn|fun|sub|proc)\\s+([a-zA-Z0-9_]+)')\n \n for i, line in enumerate(lines, 1):\n line = line.strip()\n if not line or line.startswith(('//', '/*', '*', '--', '#', '--[', '--[[')):\n continue\n \n match = re.search(pattern, line)\n if match:\n if current_element:\n elements.append(current_element)\n \n name = match.group(1) if len(match.groups()) == 1 else match.group(2)\n current_element = {\n 'type': 'Function' if 'function' in line.lower() or 'def ' in line.lower() or 'fn ' in line.lower() else 'Class',\n 'name': name.strip(),\n 'docstring': '',\n 'source': line,\n 'start_line': i,\n 'end_line': i,\n 'language': language\n }\n elif current_element:\n current_element['source'] += '\\n' + line\n current_element['end_line'] = i\n \n if current_element:\n elements.append(current_element)\n \n if not elements:\n elements.append({\n 'type': 'File',\n 'name': 'content',\n 'docstring': 'No structured elements found',\n 'source': code[:1000] + ('...' 
if len(code) > 1000 else ''),\n 'language': language\n })\n \n return elements\n \n def _extract_data_elements(self, content: str, data_type: str) -> List[Dict]:\n \"\"\"Extract elements from data files (JSON, YAML, etc.).\"\"\"\n try:\n if data_type == 'json':\n data = json.loads(content)\n return [{\n 'type': 'Data',\n 'name': 'root',\n 'docstring': 'JSON data',\n 'source': json.dumps(data, indent=2)[:1000],\n 'language': 'json'\n }]\n elif data_type in ('yaml', 'yml'):\n data = yaml.safe_load(content)\n return [{\n 'type': 'Data',\n 'name': 'root',\n 'docstring': 'YAML data',\n 'source': yaml.dump(data, default_flow_style=False)[:1000],\n 'language': 'yaml'\n }]\n elif data_type == 'xml':\n # Simple XML parsing - could be enhanced with proper XML parsing\n return [{\n 'type': 'Data',\n 'name': 'xml_content',\n 'docstring': 'XML data',\n 'source': content[:1000] + ('...' if len(content) > 1000 else ''),\n 'language': 'xml'\n }]\n elif data_type == 'csv':\n # Simple CSV parsing - could be enhanced with proper CSV parsing\n return [{\n 'type': 'Data',\n 'name': 'csv_content',\n 'docstring': 'CSV data',\n 'source': content[:1000] + ('...' if len(content) > 1000 else ''),\n 'language': 'csv'\n }]\n else:\n return self._extract_text_elements(content, 'data')\n except Exception as e:\n return [{\n 'type': 'Data',\n 'name': 'content',\n 'docstring': f'Error parsing {data_type} data: {str(e)}',\n 'source': content[:1000] + ('...' if len(content) > 1000 else ''),\n 'language': data_type,\n 'error': str(e)\n }]\n \n def _extract_text_elements(self, content: str, content_type: str) -> List[Dict]:\n \"\"\"Extract elements from plain text or document files.\"\"\"\n if content_type == 'markdown':\n # Simple markdown section extraction\n sections = re.split(r'\\n(#+\\s+.*?)\\n', content, flags=re.MULTILINE)\n elements = []\n \n for i in range(1, len(sections), 2):\n if i < len(sections):\n elements.append({\n 'type': 'Section',\n 'name': sections[i].strip('# ').strip(),\n 'docstring': '',\n 'source': sections[i] + ('\\n' + sections[i+1] if i+1 < len(sections) else ''),\n 'language': 'markdown'\n })\n \n if not elements:\n elements = [{\n 'type': 'Document',\n 'name': 'content',\n 'docstring': 'Markdown content',\n 'source': content[:1000] + ('...' if len(content) > 1000 else ''),\n 'language': 'markdown'\n }]\n \n return elements\n else:\n # For plain text, just return the content\n return [{\n 'type': 'Content',\n 'name': 'content',\n 'docstring': f'{content_type.capitalize()} content',\n 'source': content[:1000] + ('...' if len(content) > 1000 else ''),\n 'language': content_type\n }]\n \n def _analyze_elements(self, elements: List[Dict], file_path: str, \n file_type: str, sub_type: str) -> Dict:\n \"\"\"Analyze elements using the AI model.\"\"\"\n if not elements:\n return {}\n \n # Customize prompt based on file type\n if file_type == 'code':\n prompt = f\"\"\"Analyze the following {sub_type} code from {file_path}. \"\"\"\n prompt += \"For each element, provide a brief explanation of its purpose and functionality.\\n\\n\"\n elif file_type == 'data':\n prompt = f\"\"\"Analyze the following {sub_type.upper()} data from {file_path}. \"\"\"\n prompt += \"Provide a summary of the data structure and its contents.\\n\\n\"\n else: # document or other text\n prompt = f\"\"\"Analyze the following {sub_type} content from {file_path}. 
\"\"\"\n prompt += \"Provide a summary of the content.\\n\\n\"\n \n # Add elements to the prompt\n for element in elements:\n element_type = element.get('type', 'Element')\n name = element.get('name', 'unnamed')\n \n prompt += f\"{element_type} {name}:\\n\"\n \n if element.get('docstring'):\n prompt += f\"Description: {element['docstring']}\\n\"\n \n if element.get('source'):\n prompt += f\"Content:\\n{element['source']}\\n\\n\"\n \n # Get analysis from the model\n try:\n response = self.client.generate(\n model=self.model,\n prompt=prompt,\n stream=False\n )\n return {\n 'analysis': response['response'],\n 'elements': elements,\n 'file_type': file_type,\n 'sub_type': sub_type\n }\n except Exception as e:\n print(f\"Error getting analysis from model: {str(e)}\")\n return {\n 'error': str(e),\n 'elements': elements,\n 'file_type': file_type,\n 'sub_type': sub_type\n }\n\ndef analyze_code(path: Union[str, Path], \n model: str = \"llama3\",\n exclude_dirs: Optional[List[str]] = None,\n file_extensions: Optional[List[str]] = None) -> Dict:\n \"\"\"Convenience function to analyze code at the given path.\n \n Args:\n path: Path to a file or directory to analyze.\n model: The Ollama model to use for analysis.\n exclude_dirs: List of directory names to exclude when analyzing directories.\n file_extensions: List of file extensions to include (without leading .).\n If None, includes all supported file types.\n \n Returns:\n Dict containing analysis results.\n \"\"\"\n analyzer = CodeAnalyzer(model=model)\n path = Path(path)\n \n if path.is_file():\n return analyzer.analyze_file(path)\n elif path.is_dir():\n return analyzer.analyze_directory(\n path, \n exclude_dirs=exclude_dirs,\n file_extensions=file_extensions\n )\n else:\n raise ValueError(f\"Path {path} is not a valid file or directory\")\n", "size": 17327, "language": "python" }, "code_analysis_tool/__init__.py": { "content": "\"\"\"\nCode Analysis Tool - A tool for analyzing and explaining Python code using AI.\n\"\"\"\n\n__version__ = \"0.1.0\"\n\nfrom .analyzer import analyze_code\nfrom .cli import main\n\n__all__ = ['analyze_code', 'main']\n", "size": 204, "language": "python" }, "code_analysis_tool/README.md": { "content": "# Code Analysis Tool\n\nA powerful command-line tool for analyzing Python codebases using AI to generate explanations and documentation.\n\n## Features\n\n- **AI-Powered Analysis**: Uses Ollama's language models to analyze and explain Python code\n- **Flexible Input**: Works with single files or entire directories\n- **Customizable**: Configure which directories to exclude from analysis\n- **Multiple Output Formats**: Supports both human-readable and JSON output\n- **Easy Integration**: Can be used as a Python library or command-line tool\n\n## Installation\n\n1. **Install the package**\n\n ```bash\n # Install from source\n git clone https://github.com/yourusername/code-analysis-tool.git\n cd code-analysis-tool\n pip install .\n \n # Or install directly from GitHub\n pip install git+https://github.com/yourusername/code-analysis-tool.git\n ```\n\n2. 
**Set up Ollama**\n\n Make sure you have Ollama installed and running on your system:\n ```bash\n # Install Ollama (if not already installed)\n curl -fsSL https://ollama.com/install.sh | sh\n \n # Start the Ollama server\n ollama serve\n \n # Pull the desired model (e.g., llama3)\n ollama pull llama3\n ```\n\n## Usage\n\n### Command Line Interface\n\n```bash\n# Analyze the current directory\ncode-analyze\n\n# Analyze a specific file or directory\ncode-analyze path/to/your/code\n\n# Use a different Ollama model\ncode-analyze --model codellama\n\n# Save output to a file\ncode-analyze --output analysis.txt\ncode-analyze --output analysis.json --format json\n\n# Show help\ncode-analyze --help\n```\n\n### As a Python Library\n\n```python\nfrom code_analysis_tool import analyze_code\n\n# Analyze a single file\nresults = analyze_code(\"example.py\")\nprint(results['analysis'])\n\n# Analyze a directory\ndirectory_results = analyze_code(\"src/\", exclude_dirs=[\"tests\", \"venv\"])\nfor file_path, analysis in directory_results.items():\n print(f\"\\nFile: {file_path}\")\n print(analysis['analysis'])\n```\n\n## Configuration\n\n### Environment Variables\n\n- `OLLAMA_HOST`: URL of the Ollama server (default: `http://localhost:11434`)\n- `OLLAMA_MODEL`: Default model to use (default: `llama3`)\n\n### Excluding Directories\n\nBy default, the following directories are excluded from analysis:\n- `__pycache__`\n- `.git`\n- `.github`\n- `venv`\n- `env`\n\nYou can customize this list using the `--exclude` option:\n\n```bash\ncode-analyze --exclude tests build dist\n```\n\n## Development\n\n1. Clone the repository\n2. Install development dependencies:\n ```bash\n pip install -e \".[dev]\"\n pre-commit install\n ```\n\n### Running Tests\n\n```bash\npytest\n```\n\n### Building the Package\n\n```bash\npython -m build\n```\n\n## License\n\nMIT\n\n## Contributing\n\nContributions are welcome! 
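Building on the library example above, a small driver script can persist the per-file analyses; this is a sketch under the README's own API, with the output file name being our choice:

```python
import json
from code_analysis_tool import analyze_code

# Analyze a source tree and keep just the prose explanations
results = analyze_code("src/", exclude_dirs=["tests", "venv"])
with open("analysis.json", "w", encoding="utf-8") as f:
    json.dump({path: r.get("analysis", "") for path, r in results.items()},
              f, indent=2, ensure_ascii=False)
print(f"Wrote analysis for {len(results)} files")
```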
Please open an issue or submit a pull request.\n\n## Support\n\nIf you find this tool useful, please consider giving it a ⭐ on GitHub!\n", "size": 2808, "language": "markdown" }, "code_analysis_tool/setup.py": { "content": "from setuptools import setup, find_packages\nimport os\n\n# Read the contents of README.md\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\n# Get package version without exec'ing the whole __init__.py (it contains\n# relative imports that fail when executed outside the package)\nabout = {}\nwith open(os.path.join(this_directory, 'code_analysis_tool', '__init__.py'), 'r', encoding='utf-8') as f:\n for line in f:\n if line.startswith('__version__'):\n exec(line, about)\n break\n\nsetup(\n name=\"code-analysis-tool\",\n version=about['__version__'],\n author=\"Your Name\",\n author_email=\"your.email@example.com\",\n description=\"A tool for analyzing and explaining Python code using AI\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/yourusername/code-analysis-tool\",\n packages=find_packages(),\n python_requires=\">=3.8\",\n install_requires=[\n \"ollama>=0.1.5\",\n \"python-dotenv>=1.0.0\",\n \"PyYAML>=6.0\",\n ],\n extras_require={\n 'dev': [\n 'pytest>=7.0.0',\n 'pytest-cov>=4.0.0',\n 'black>=23.0.0',\n 'isort>=5.12.0',\n 'flake8>=6.0.0',\n 'mypy>=1.0.0',\n 'twine>=4.0.0',\n 'build>=0.10.0',\n ],\n },\n entry_points={\n \"console_scripts\": [\n \"code-analyze=code_analysis_tool.cli:main\",\n ],\n },\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: 3.12\",\n \"Topic :: Software Development :: Documentation\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n keywords=\"code analysis documentation ai ollama\",\n project_urls={\n 'Bug Reports': 'https://github.com/yourusername/code-analysis-tool/issues',\n 'Source': 'https://github.com/yourusername/code-analysis-tool',\n },\n)\n", "size": 2172, "language": "python" }, "code_analysis_tool/cli.py": { "content": "\"\"\"Command-line interface for the code analysis tool.\"\"\"\nimport argparse\nimport json\nimport sys\nfrom pathlib import Path\nfrom typing import List, Optional\n\nfrom .analyzer import analyze_code\n\ndef parse_args(args: List[str]) -> argparse.Namespace:\n \"\"\"Parse command line arguments.\"\"\"\n parser = argparse.ArgumentParser(\n description=\"Analyze Python code using AI to generate explanations.\"\n )\n \n parser.add_argument(\n \"path\",\n nargs=\"?\",\n default=\".\",\n help=\"Path to a Python file or directory to analyze (default: current directory)\",\n )\n \n parser.add_argument(\n \"--model\",\n default=\"llama3\",\n help=\"Ollama model to use for analysis (default: llama3)\",\n )\n \n parser.add_argument(\n \"--output\",\n \"-o\",\n help=\"Output file to save results (default: print to stdout)\",\n )\n \n parser.add_argument(\n \"--format\",\n choices=[\"json\", \"text\"],\n default=\"text\",\n help=\"Output format (default: text)\",\n )\n \n parser.add_argument(\n \"--exclude\",\n nargs=\"+\",\n default=[\"__pycache__\", \".git\", \".github\", \"venv\", \"env\", \"node_modules\"],\n help=\"Directories to exclude from analysis\",\n )\n \n parser.add_argument(\n \"--extensions\",\n nargs=\"+\",\n help=\"File extensions to 
include (without leading .). If not specified, all supported file types are included.\",\n )\n \n parser.add_argument(\n \"--list-extensions\",\n action=\"store_true\",\n help=\"List all supported file extensions and exit\",\n )\n \n parser.add_argument(\n \"--version\",\n action=\"store_true\",\n help=\"Show version and exit\",\n )\n \n return parser.parse_args(args)\n\ndef print_analysis(results: dict, output_format: str = \"text\", output_file: Optional[str] = None):\n \"\"\"Print or save analysis results.\"\"\"\n if output_format == \"json\":\n output = json.dumps(results, indent=2, ensure_ascii=False)\n else:\n output = []\n for file_path, analysis in results.items():\n file_type = analysis.get('file_type', 'unknown')\n sub_type = analysis.get('sub_type', 'unknown')\n \n output.append(f\"\\n{'='*100}\")\n output.append(f\"File: {file_path} ({file_type}/{sub_type})\")\n output.append(f\"{'='*100}\\n\")\n \n if 'error' in analysis:\n output.append(f\"Error: {analysis['error']}\\n\")\n continue\n \n analysis_text = analysis.get('analysis', 'No analysis available')\n output.append(analysis_text)\n \n # Add a summary of elements if available\n elements = analysis.get('elements', [])\n if elements and len(elements) > 1: # Only show if there are multiple elements\n output.append(\"\\nElements found:\")\n for element in elements:\n element_type = element.get('type', 'element')\n name = element.get('name', 'unnamed')\n output.append(f\" - {element_type}: {name}\")\n \n output = \"\\n\".join(output)\n \n if output_file:\n with open(output_file, 'w', encoding='utf-8') as f:\n f.write(output)\n print(f\"Analysis saved to {output_file}\")\n else:\n print(output)\n\ndef list_supported_extensions() -> None:\n \"\"\"List all supported file extensions and their types.\"\"\"\n print(\"Supported file extensions:\")\n print(\"\\nCode files:\")\n code_exts = {\n '.py': 'Python',\n '.js': 'JavaScript',\n '.jsx': 'JavaScript (React)',\n '.ts': 'TypeScript',\n '.tsx': 'TypeScript (React)',\n '.java': 'Java',\n '.c': 'C',\n '.cpp': 'C++',\n '.h': 'C/C++ Header',\n '.hpp': 'C++ Header',\n '.cs': 'C#',\n '.go': 'Go',\n '.rs': 'Rust',\n '.rb': 'Ruby',\n '.php': 'PHP',\n '.swift': 'Swift',\n '.kt': 'Kotlin',\n '.scala': 'Scala',\n '.sh': 'Shell Script',\n '.pl': 'Perl',\n '.r': 'R',\n '.m': 'MATLAB',\n '.jl': 'Julia'\n }\n \n for ext, lang in sorted(code_exts.items()):\n print(f\" {ext:8} - {lang}\")\n \n print(\"\\nData files:\")\n data_exts = {\n '.json': 'JSON',\n '.yaml': 'YAML',\n '.yml': 'YAML',\n '.xml': 'XML',\n '.csv': 'CSV',\n '.toml': 'TOML',\n '.ini': 'INI',\n '.cfg': 'Config'\n }\n \n for ext, fmt in sorted(data_exts.items()):\n print(f\" {ext:8} - {fmt}\")\n \n print(\"\\nDocument files:\")\n doc_exts = {\n '.md': 'Markdown',\n '.txt': 'Plain Text',\n '.html': 'HTML',\n '.htm': 'HTML',\n '.css': 'CSS'\n }\n \n for ext, doc_type in sorted(doc_exts.items()):\n print(f\" {ext:8} - {doc_type}\")\n\ndef main(args: Optional[List[str]] = None) -> int:\n \"\"\"Main entry point for the CLI.\"\"\"\n if args is None:\n args = sys.argv[1:]\n \n # Handle version flag separately\n if \"--version\" in args or \"-v\" in args:\n from . import __version__\n print(f\"Code Analysis Tool v{__version__}\")\n return 0\n \n try:\n parsed_args = parse_args(args)\n \n if parsed_args.version:\n from . 
import __version__\n print(f\"Code Analysis Tool v{__version__}\")\n return 0\n \n if parsed_args.list_extensions:\n list_supported_extensions()\n return 0\n \n path = Path(parsed_args.path).resolve()\n \n if not path.exists():\n print(f\"Error: Path '{path}' does not exist\", file=sys.stderr)\n return 1\n \n print(f\"Analyzing files in {path}...\")\n \n if path.is_file():\n results = {str(path): analyze_code(path, model=parsed_args.model)}\n else:\n results = analyze_code(\n path, \n model=parsed_args.model,\n exclude_dirs=parsed_args.exclude,\n file_extensions=parsed_args.extensions\n )\n \n if not results:\n print(\"No supported files found to analyze.\")\n if parsed_args.extensions:\n print(f\"No files with extensions: {', '.join(parsed_args.extensions)}\")\n else:\n print(\"No supported file types found in the directory.\")\n return 0\n \n print_analysis(\n results,\n output_format=parsed_args.format,\n output_file=parsed_args.output\n )\n \n return 0\n \n except KeyboardInterrupt:\n print(\"\\nAnalysis cancelled by user.\")\n return 1\n except Exception as e:\n print(f\"Error: {str(e)}\", file=sys.stderr)\n return 1\n\nif __name__ == \"__main__\":\n sys.exit(main())\n", "size": 6908, "language": "python" }, "code_analysis_tool/requirements-dev.txt": { "content": "# Development dependencies\n-r requirements.txt\npytest>=7.0.0\npytest-cov>=4.0.0\nblack>=23.0.0\nisort>=5.12.0\nflake8>=6.0.0\nmypy>=1.0.0\ntwine>=4.0.0\nbuild>=0.10.0\npre-commit>=3.0.0\n", "size": 178, "language": "text" }, "code_analysis_tool/.gitignore": { "content": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\n*.egg-info/\n.installed.cfg\n*.egg\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.nox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n*.py,cover\n.hypothesis/\n.pytest_cache/\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# IDE specific files\n.idea/\n.vscode/\n*.swp\n*.swo\n\n# Local development\n*.log\n\n# Build artifacts\n*.pyc\n*.pyo\n*.pyd\n\n# OS generated files\n.DS_Store\n.DS_Store?\n._*\n.Spotlight-V100\n.Trashes\nehthumbs.db\nThumbs.db\n", "size": 659, "language": "unknown" }, "code_analysis_tool/tests/test_analyzer.py": { "content": "\"\"\"Tests for the code analysis tool.\"\"\"\nimport os\nimport tempfile\nimport unittest\nfrom pathlib import Path\n\nfrom code_analysis_tool.analyzer import CodeAnalyzer, analyze_code\n\nclass TestCodeAnalyzer(unittest.TestCase):\n \"\"\"Test cases for the CodeAnalyzer class.\"\"\"\n \n def setUp(self):\n \"\"\"Set up test fixtures.\"\"\"\n self.analyzer = CodeAnalyzer()\n self.test_dir = tempfile.mkdtemp()\n \n # Create a test Python file\n self.test_file = os.path.join(self.test_dir, \"test.py\")\n with open(self.test_file, 'w', encoding='utf-8') as f:\n f.write('''\"\"\"Test module docstring.\"\"\"\n\ndef hello(name: str) -> str:\n \"\"\"Return a greeting message.\"\"\"\n return f\"Hello, {name}!\"\n\nclass TestClass:\n \"\"\"A test class.\"\"\"\n \n def __init__(self, value: int):\n self.value = value\n \n def get_value(self) -> int:\n \"\"\"Get the current value.\"\"\"\n return self.value\n''')\n \n def test_analyze_file(self):\n \"\"\"Test analyzing a single file.\"\"\"\n result = self.analyzer.analyze_file(self.test_file)\n self.assertIn('analysis', result)\n self.assertIn('elements', result)\n 
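As a usage sketch, the `main()` entry point above also accepts an argument list directly, which is handy in scripts and tests (paths and model name below are placeholders):

```python
from code_analysis_tool.cli import main

# Equivalent to: code-analyze src/ --model llama3 --format json -o analysis.json
exit_code = main(["src/", "--model", "llama3",
                  "--format", "json", "--output", "analysis.json"])
print("ok" if exit_code == 0 else f"failed with exit code {exit_code}")
```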
self.assertGreater(len(result['elements']), 0)\n \n def test_analyze_directory(self):\n \"\"\"Test analyzing a directory.\"\"\"\n results = self.analyzer.analyze_directory(self.test_dir)\n self.assertIn(self.test_file, results)\n self.assertIn('analysis', results[self.test_file])\n \n def test_extract_elements(self):\n \"\"\"Test element extraction from source code.\"\"\"\n with open(self.test_file, 'r', encoding='utf-8') as f:\n code = f.read()\n \n elements = self.analyzer._extract_python_elements(code)\n self.assertEqual(len(elements), 2) # 1 function + 1 class (methods are not top-level)\n \n # Check function element\n func = next(e for e in elements if e['name'] == 'hello')\n self.assertEqual(func['type'], 'Function')\n self.assertIn('greeting', func['docstring'].lower())\n \n # Check class element\n cls = next(e for e in elements if e['name'] == 'TestClass')\n self.assertEqual(cls['type'], 'Class')\n \n def test_analyze_elements(self):\n \"\"\"Test analysis of extracted elements.\"\"\"\n with open(self.test_file, 'r', encoding='utf-8') as f:\n code = f.read()\n \n elements = self.analyzer._extract_python_elements(code)\n analysis = self.analyzer._analyze_elements(elements, self.test_file, 'code', 'python')\n \n self.assertIn('analysis', analysis)\n self.assertIn('elements', analysis)\n self.assertEqual(len(analysis['elements']), 2)\n\nclass TestAnalyzeCodeFunction(unittest.TestCase):\n \"\"\"Test the analyze_code convenience function.\"\"\"\n \n def test_analyze_code_with_file(self):\n \"\"\"Test analyzing a single file.\"\"\"\n with tempfile.NamedTemporaryFile(suffix='.py', mode='w', delete=False) as f:\n f.write('def test():\\n pass')\n temp_file = f.name\n \n try:\n result = analyze_code(temp_file)\n self.assertIn('analysis', result)\n self.assertIn('elements', result)\n finally:\n os.unlink(temp_file)\n \n def test_analyze_code_with_nonexistent_path(self):\n \"\"\"Test with a non-existent path.\"\"\"\n with self.assertRaises(ValueError):\n analyze_code(\"/nonexistent/path\")\n\nif __name__ == '__main__':\n unittest.main()\n", "size": 3479, "language": "python" }, "src/config.py": { "content": "\"\"\"Configuration settings for the Python Code Explainer application.\"\"\"\nimport os\nfrom pathlib import Path\nfrom typing import Optional\nfrom pydantic_settings import BaseSettings\nfrom pydantic import HttpUrl, field_validator\n\n\nclass Settings(BaseSettings):\n \"\"\"Application settings.\"\"\"\n \n # Application\n APP_NAME: str = \"Python Code Explainer\"\n DEBUG: bool = os.getenv(\"DEBUG\", \"False\").lower() in (\"true\", \"1\", \"t\")\n LOG_LEVEL: str = os.getenv(\"LOG_LEVEL\", \"INFO\")\n \n # File handling\n UPLOAD_FOLDER: Path = Path(\"uploads\")\n MAX_CONTENT_LENGTH: int = 16 * 1024 * 1024 # 16MB max file size\n ALLOWED_EXTENSIONS: set[str] = {\"py\"}\n \n # LLM Settings\n OLLAMA_HOST: HttpUrl = os.getenv(\"OLLAMA_HOST\", \"http://localhost:11434\")\n OLLAMA_MODEL: str = os.getenv(\"OLLAMA_MODEL\", \"llama2\")\n \n # Document generation\n OUTPUT_FOLDER: Path = Path(\"docs\")\n \n @field_validator(\"UPLOAD_FOLDER\", \"OUTPUT_FOLDER\", mode=\"before\")\n @classmethod\n def create_folders(cls, v: Path) -> Path:\n \"\"\"Ensure upload and output folders exist.\"\"\"\n v = Path(v)\n v.mkdir(parents=True, exist_ok=True)\n return v\n \n class Config:\n env_file = \".env\"\n env_file_encoding = \"utf-8\"\n case_sensitive = True\n\n\n# Global settings instance\nsettings = Settings()\n", "size": 1305, "language": "python" }, "src/analyzer.py": { "content": "import ast\nfrom typing import List, Dict, Any\n\ndef extract_elements(code: str) -> List[Dict[str, 
Any]]:\n \"\"\"Extracts all top-level classes and functions with docstrings and source code.\"\"\"\n tree = ast.parse(code)\n elements = []\n for node in tree.body:\n if isinstance(node, (ast.FunctionDef, ast.ClassDef, ast.AsyncFunctionDef)):\n element_type = node.__class__.__name__\n name = node.name\n doc = ast.get_docstring(node) or \"\"\n src = ast.get_source_segment(code, node) or \"\"\n start_line = getattr(node, 'lineno', 0)\n end_line = getattr(node, 'end_lineno', start_line)\n \n args = []\n if hasattr(node, 'args'):\n args = [arg.arg for arg in node.args.args]\n \n elements.append({\n 'type': element_type.replace('Def', ''),\n 'name': name,\n 'docstring': doc,\n 'source': src,\n 'start_line': start_line,\n 'end_line': end_line,\n 'args': args if args else None,\n 'has_return': any(isinstance(n, ast.Return) for n in ast.walk(node))\n })\n return elements", "size": 1224, "language": "python" }, "src/__init__.py": { "content": "\"\"\"Python Code Explainer - A tool to analyze and explain Python code using LLMs.\"\"\"\n\n__version__ = \"0.1.0\"\n__author__ = \"Your Name\"\n__all__ = [] # List public API here\n", "size": 194, "language": "python" }, "src/action.py": { "content": "import os\nimport json\nimport logging\nfrom pathlib import Path\nfrom typing import List, Dict, Any, Optional\nfrom datetime import datetime\n\nfrom .config import settings\nfrom .utils import (\n find_files_by_extension,\n read_file_safely,\n ensure_directory_exists,\n get_llm_client\n)\nfrom .analyzer import extract_elements\nfrom .code_explainer.document_generator import create_document\n\n# Configure logging\nlogging.basicConfig(\n level=settings.LOG_LEVEL,\n format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n)\nlogger = logging.getLogger(__name__)\n\ndef collect_python_files(root: Path = Path(\".\")) -> List[Path]:\n return find_files_by_extension(root, \".py\")\n\ndef parse_python_file(py_file: Path) -> List[Dict[str, Any]]:\n try:\n code = read_file_safely(py_file, encoding='utf-8')\n elements = extract_elements(code)\n for el in elements:\n el['file'] = str(py_file)\n return elements\n except Exception as e:\n logger.warning(f\"Error processing {py_file}: {e}\")\n return []\n\ndef build_prompt(elements: List[Dict[str, Any]]) -> str:\n \"\"\"Build a prompt for the LLM based on the code elements.\n \n Args:\n elements: List of code element dictionaries\n \n Returns:\n Formatted prompt string\n \"\"\"\n def format_element(el: Dict[str, Any]) -> str:\n \"\"\"Format a single code element for the prompt.\"\"\"\n parts = [\n f\"File: {el['file']}\",\n f\"{el['type']} '{el['name']}':\",\n f\"Location: Lines {el['start_line']}-{el['end_line']}\",\n f\"Documentation: {el.get('docstring', 'No documentation')}\"\n ]\n \n if el.get('args'):\n parts.append(f\"Arguments: {', '.join(el['args'])}\")\n \n if el.get('has_return', False) and el['type'] != 'Class':\n parts.append(\"Returns: Yes\")\n \n if el.get('source'):\n parts.append(f\"Code:\\n{el['source']}\")\n \n return \"\\n\".join(parts)\n \n # Combine all elements into a single string\n combined = \"\\n\\n\".join(format_element(el) for el in elements)\n \n # System prompt with clear instructions\n system_prompt = (\n \"You are a helpful code analysis assistant. 
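To see the collect-and-tag step in isolation, here is a hedged sketch of the flow that `collect_python_files` and `parse_python_file` implement, using the repo's own `extract_elements` (directory choice and error handling simplified):

```python
from pathlib import Path
from src.analyzer import extract_elements

all_elements = []
for py_file in Path(".").rglob("*.py"):
    try:
        code = py_file.read_text(encoding="utf-8")
        elements = extract_elements(code)  # ast.parse may raise SyntaxError
    except (OSError, UnicodeDecodeError, SyntaxError):
        continue
    for el in elements:
        el["file"] = str(py_file)  # same tagging parse_python_file applies
        all_elements.append(el)

print(f"Collected {len(all_elements)} top-level elements")
```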
Your task is to analyze the provided Python code \"\n \"and explain it in clear, non-technical language that would be understandable to someone \"\n \"without a programming background. Focus on what the code does and why it's important, \"\n \"rather than how it works technically. Use analogies and examples to make complex concepts \"\n \"more accessible.\\n\\n\"\n \"📌 Task Overview:\\n\"\n \"- Break down the Python code into understandable parts.\\n\"\n \"- Present the explanation in a two-column table:\\n\"\n \" 1. Section of the prompt\\n\"\n \" 2. Whether it is dynamic or static\\n\\n\"\n \"🧠 User Context:\\n\"\n \"- The user is building an automated assessment tool using LLMs.\\n\"\n \"- The tool generates subtopics and test questions from a topic + grade + learning objective.\\n\"\n \"- They want help modifying Prompt 1 to include a new variable: the learning objective.\\n\\n\"\n \"💡 Input Example:\\n\"\n \" topic = 'Ratios and Proportional Relationships'\\n\"\n \" student_class = '6th standard'\\n\"\n \" learning_objective = 'Understand ratio concepts and use ratio reasoning to solve problems.'\\n\\n\"\n \"📝 Full Prompt 1 (Subtopic Generator):\\n\"\n \"I want a list of sub-topics for the topic \\\"{topic}\\\" which is taught to a \\\"{student_class}\\\" student \"\n \"with a learning objective \\\"{learning_objective}\\\".\\n\"\n \"First, output the learning objective exactly as given.\\n\"\n \"Then, output the sub-topics ONLY as a Python list. Do not include any commentary or explanation.\\n\\n\"\n \"🔧 System Context:\\n\"\n \"We are building an automated assessment web app where questions are usually uploaded manually into a MySQL database. \"\n \"This tool uses large language models to generate those questions automatically, saving time and effort.\\n\\n\"\n \"📋 Additional User Requests:\\n\"\n \"- Modify the original code to include the new learning objective variable.\\n\"\n \"- Ensure that generate_questions_for_subtopic also uses the learning objective.\\n\"\n \"- Recreate the dynamic/static breakdown table of Prompt 1 and Prompt 2.\\n\"\n \"- Show an example of what Prompt 1 and Prompt 2 look like after the code runs.\\n\"\n \"- Give a step-by-step guide to feeding these prompts into a ChatGPT conversation.\\n\"\n \"- Convert the instructions into clean documentation.\\n\"\n \"- Provide a downloadable .docx version of the documentation.\\n\\n\"\n f\"{combined}\\n\\n\"\n \"🧾 Now, please provide a detailed, beginner-friendly explanation:\"\n )\n \n return system_prompt\n\ndef run_ollama_analysis(prompt: str, model: str, host: str) -> str:\n \"\"\"Run analysis using the Ollama LLM.\n \n Args:\n prompt: The prompt to send to the model\n model: The model to use for generation\n host: The Ollama server host\n \n Returns:\n The generated text from the model\n \n Raises:\n RuntimeError: If there's an error communicating with the LLM\n \"\"\"\n try:\n client = get_llm_client()\n response = client.generate(\n prompt=prompt,\n model=model,\n system_prompt=(\n \"You are a helpful code analysis assistant. 
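For intuition about what `build_prompt` feeds the model, the block below re-creates `format_element`'s output shape for one made-up element (sample values are ours, not taken from a real run):

```python
element = {
    "file": "src/analyzer.py",
    "type": "Function",
    "name": "extract_elements",
    "start_line": 4,
    "end_line": 30,
    "docstring": "Extracts all top-level classes and functions.",
    "args": ["code"],
    "has_return": True,
}

parts = [
    f"File: {element['file']}",
    f"{element['type']} '{element['name']}':",
    f"Location: Lines {element['start_line']}-{element['end_line']}",
    f"Documentation: {element['docstring']}",
]
if element["args"]:
    parts.append(f"Arguments: {', '.join(element['args'])}")
if element["has_return"] and element["type"] != "Class":
    parts.append("Returns: Yes")
print("\n".join(parts))
```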
Your task is to analyze \"\n \"the provided Python code and explain it in clear, non-technical language.\"\n )\n )\n return response.content\n except Exception as e:\n logger.error(f\"Error in LLM analysis: {e}\")\n raise RuntimeError(f\"Failed to generate analysis: {e}\")\n\ndef save_document(\n elements: List[Dict[str, Any]],\n explanation: str,\n model: str,\n host: str,\n output_dir: Path\n) -> Optional[Path]:\n \"\"\"Save the analysis document to a file.\n \n Args:\n elements: List of code element dictionaries\n explanation: The generated explanation text\n model: The model used for generation\n host: The Ollama server host\n output_dir: Directory to save the document in\n \n Returns:\n Path to the saved document if successful, None otherwise\n \"\"\"\n try:\n ensure_directory_exists(output_dir)\n timestamp = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n doc_path = output_dir / f\"code_analysis_{timestamp}.docx\"\n \n logger.info(f\"Generating document: {doc_path}\")\n doc_buffer = create_document(\n elements=elements,\n explanation=explanation,\n model=model,\n host=host,\n include_summary=True\n )\n \n if not doc_buffer:\n logger.error(\"Failed to generate document buffer\")\n return None\n \n with open(doc_path, 'wb') as f:\n f.write(doc_buffer.getvalue())\n \n logger.info(f\"Document successfully saved to: {doc_path}\")\n return doc_path\n \n except Exception as e:\n logger.error(f\"Error saving document: {e}\", exc_info=True)\n return None\n\ndef analyze_repository() -> None:\n ollama_model = os.getenv('OLLAMA_MODEL', 'llama2')\n ollama_host = os.getenv('OLLAMA_HOST', 'http://localhost:11434')\n\n logger.info(\"Collecting .py files ...\")\n python_files = collect_python_files()\n if not python_files:\n logger.error(\"No Python files found!\")\n return\n\n logger.info(f\"Found {len(python_files)} Python files.\")\n all_elements = []\n for py_file in python_files:\n elements = parse_python_file(py_file)\n if elements:\n all_elements.extend(elements)\n else:\n logger.info(f\"No elements extracted from {py_file}\")\n\n if not all_elements:\n logger.error(\"No code elements found to analyze.\")\n return\n\n prompt = build_prompt(all_elements)\n\n logger.info(\"Contacting Ollama LLM ...\")\n try:\n explanation = run_ollama_analysis(prompt, ollama_model, ollama_host)\n print(\"\\n\" + \"=\"*80)\n print(\"CODE ANALYSIS REPORT\")\n print(\"=\"*80)\n print(explanation)\n except Exception as e:\n logger.error(f\"Error during LLM analysis: {e}\")\n return\n\n docs_dir = Path('docs')\n docs_dir.mkdir(exist_ok=True)\n try:\n save_document(all_elements, explanation, ollama_model, ollama_host, docs_dir)\n except Exception as e:\n logger.error(f\"Document creation failed: {e}\")\n\nif __name__ == \"__main__\":\n analyze_repository()\n", "size": 8932, "language": "python" }, "src/code_explainer/__init__.py": { "content": "# This file makes the code_explainer directory a Python package\n", "size": 64, "language": "python" }, "src/code_analyzer_ai/__init__.py": { "content": "\"\"\"\nCode Analyzer AI - A tool for analyzing and explaining code in multiple programming languages using AI.\n\nThis package provides functionality to parse code, extract its structure,\nand generate explanations using language models.\n\"\"\"\n\nfrom .code_analyzer import CodeAnalyzer, extract_elements\nfrom .llm_integration import generate_explanation\nfrom .document_generator import create_document\nfrom .cli import main\n\n__version__ = \"0.1.0\"\n\n__all__ = [\n \"CodeAnalyzer\",\n \"extract_elements\",\n 
\"generate_explanation\",\n \"create_document\",\n \"main\"\n]\n", "size": 559, "language": "python" }, "src/code_analyzer_ai/llm_integration.py": { "content": "\"\"\"\nLLM integration module for generating code explanations.\n\"\"\"\nfrom typing import List, Dict, Any\nfrom ollama import Client\n\ndef generate_explanation(code_elements: List[Dict[str, Any]], model: str = \"llama2\") -> str:\n \"\"\"\n Generate a non-technical explanation of the code using the specified LLM.\n \n Args:\n code_elements: List of code element dictionaries from extract_elements()\n Each element may contain a 'file' key indicating its source file.\n model: Name of the LLM model to use (default: \"llama2\")\n \n Returns:\n Generated explanation as a string\n \"\"\"\n if not code_elements:\n return \"No code elements to analyze.\"\n \n try:\n # Group elements by file\n elements_by_file = {}\n for el in code_elements:\n file_name = el.get('file', 'main.py')\n if file_name not in elements_by_file:\n elements_by_file[file_name] = []\n elements_by_file[file_name].append(el)\n \n # Compile code elements into a formatted string, grouped by file\n combined_parts = []\n for file_name, elements in elements_by_file.items():\n file_section = f\"# File: {file_name}\\n\\n\"\n \n for el in elements:\n element_section = (\n f\"## {el['type']} '{el['name']}'\\n\"\n f\"Location: Lines {el['start_line']}-{el['end_line']}\\n\"\n )\n \n if el['docstring']:\n element_section += f\"Documentation: {el['docstring']}\\n\"\n \n if el['args']:\n element_section += f\"Arguments: {', '.join(el['args'])}\\n\"\n \n if el['type'] != 'Class' and el['has_return']:\n element_section += \"Returns: Yes\\n\"\n \n element_section += f\"Code:\\n```python\\n{el['source'] or ''}\\n```\\n\\n\"\n file_section += element_section\n \n combined_parts.append(file_section)\n \n combined = \"\\n\".join(combined_parts)\n \n system_prompt = (\n \"You are a helpful assistant. Your job is to explain the Python code and workflow described below \"\n \"in plain, non-technical English to someone without a programming background. Avoid technical jargon. \"\n \"Use relatable analogies and simple examples where appropriate.\\n\\n\"\n\n \"๐Ÿ“Œ Task Overview:\\n\"\n \"- Break down the Python code into understandable parts.\\n\"\n \"- Present the explanation in a two-column table:\\n\"\n \" 1. Section of the prompt\\n\"\n \" 2. Whether it is dynamic or static\\n\\n\"\n\n \"๐Ÿง  User Context:\\n\"\n \"- The user is building an automated assessment tool using LLMs.\\n\"\n \"- The tool generates subtopics and test questions from a topic + grade + learning objective.\\n\"\n \"- They want help modifying Prompt 1 to include a new variable: the learning objective.\\n\\n\"\n\n \"๐Ÿ’ก Input Example:\\n\"\n \" topic = 'Ratios and Proportional Relationships'\\n\"\n \" student_class = '6th standard'\\n\"\n \" learning_objective = 'Understand ratio concepts and use ratio reasoning to solve problems.'\\n\\n\"\n\n \"๐Ÿ“ Full Prompt 1 (Subtopic Generator):\\n\"\n \"I want a list of sub-topics for the topic \\\"{topic}\\\" which is taught to a \\\"{student_class}\\\" student \"\n \"with a learning objective \\\"{learning_objective}\\\".\\n\"\n \"First, output the learning objective exactly as given.\\n\"\n \"Then, output the sub-topics ONLY as a Python list. Do not include any commentary or explanation.\\n\\n\"\n\n \"๐Ÿ”ง System Context:\\n\"\n \"We are building an automated assessment web app where questions are usually uploaded manually into a MySQL database. 
\"\n \"This tool uses large language models to generate those questions automatically, saving time and effort.\\n\\n\"\n\n \"๐Ÿ“‹ Additional User Requests:\\n\"\n \"- Modify the original code to include the new learning objective variable.\\n\"\n \"- Ensure that generate_questions_for_subtopic also uses the learning objective.\\n\"\n \"- Recreate the dynamic/static breakdown table of Prompt 1 and Prompt 2.\\n\"\n \"- Show an example of what Prompt 1 and Prompt 2 look like after the code runs.\\n\"\n \"- Give a step-by-step guide to feeding these prompts into a ChatGPT conversation.\\n\"\n \"- Convert the instructions into clean documentation.\\n\"\n \"- Provide a downloadable .docx version of the documentation.\\n\\n\"\n\n f\"{combined}\\n\\n\"\n \"๐Ÿงพ Now, please provide a detailed, beginner-friendly explanation:\"\n)\n \n client = Client(host='http://localhost:11434')\n response = client.chat(\n model=model,\n messages=[{\"role\": \"user\", \"content\": system_prompt}],\n stream=False\n )\n \n return response['message']['content']\n \n except Exception as e:\n raise RuntimeError(f\"Error generating explanation: {str(e)}\")\n", "size": 4949, "language": "python" }, "src/code_analyzer_ai/cli.py": { "content": "\"\"\"\nCommand-line interface for Code Analyzer AI.\n\"\"\"\nimport argparse\nimport json\nimport sys\nfrom pathlib import Path\nfrom typing import Optional, List, Dict, Any\n\nfrom .analyzer import CodeAnalyzer\n\ndef main(args: Optional[list] = None) -> int:\n \"\"\"\n Main entry point for the CLI.\n \n Args:\n args: Command-line arguments (defaults to sys.argv[1:])\n \n Returns:\n int: Exit code (0 for success, non-zero for error)\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Python Code Explainer\")\n parser.add_argument(\n \"path\",\n type=str,\n help=\"Python file or directory to analyze\"\n )\n parser.add_argument(\n \"--recursive\",\n \"-r\",\n action=\"store_true\",\n help=\"Recursively process Python files in subdirectories\"\n )\n parser.add_argument(\n \"--model\",\n \"-m\",\n type=str,\n default=\"llama2\",\n help=\"LLM model to use (default: llama2)\"\n )\n parser.add_argument(\n \"--output\",\n \"-o\",\n type=str,\n help=\"Output file for the report (default: print to console)\"\n )\n \n parsed_args = parser.parse_args(args)\n \n try:\n # Import here to avoid loading everything when the CLI is not used\n from .code_analyzer import extract_elements\n from .llm_integration import generate_explanation\n from .document_generator import create_document\n \n # Process the input path (file or directory)\n path = Path(parsed_args.path)\n if not path.exists():\n print(f\"Error: Path '{path}' not found\", file=sys.stderr)\n return 1\n\n # Collect all Python files to process\n python_files = []\n if path.is_file() and path.suffix == '.py':\n python_files = [path]\n elif path.is_dir():\n pattern = '**/*.py' if parsed_args.recursive else '*.py'\n python_files = list(path.glob(pattern))\n if not python_files:\n print(f\"No Python files found in {path}\")\n return 0\n else:\n print(f\"Error: Path must be a Python file or directory\")\n return 1\n\n all_code_elements = []\n for py_file in python_files:\n try:\n print(f\"Processing {py_file}...\", file=sys.stderr)\n code = py_file.read_text(encoding=\"utf-8\")\n elements = extract_elements(code)\n for el in elements:\n el['file'] = str(py_file.relative_to(path.parent))\n el['file_path'] = str(py_file)\n all_code_elements.extend(elements)\n except Exception as e:\n print(f\"Error processing {py_file}: {e}\", 
file=sys.stderr)\n\n if not all_code_elements:\n print(\"No code elements found in any files.\")\n return 0\n \n # Generate explanation\n print(\"\\nAnalyzing code...\", file=sys.stderr)\n explanation = generate_explanation(all_code_elements, model=parsed_args.model)\n \n # Output the result\n if parsed_args.output:\n output_path = Path(parsed_args.output)\n if output_path.suffix.lower() == '.docx':\n buffer = create_document(all_code_elements, explanation)\n if buffer:\n output_path.write_bytes(buffer.getvalue())\n print(f\"Report saved to {output_path}\")\n else:\n print(\"Error: Could not generate Word document. Is python-docx installed?\", file=sys.stderr)\n return 1\n else:\n with output_path.open('w', encoding='utf-8') as f:\n f.write(\"# Code Analysis Report\\n\\n\")\n f.write(explanation)\n print(f\"Report saved to {output_path}\")\n else:\n print(\"\\n\" + \"=\"*80)\n print(\"CODE ANALYSIS REPORT\")\n print(\"=\"*80)\n print(explanation)\n \n return 0\n \n except Exception as e:\n print(f\"Error: {str(e)}\", file=sys.stderr)\n return 1\n\nif __name__ == \"__main__\":\n sys.exit(main())\n", "size": 4179, "language": "python" }, "src/code_analyzer_ai/document_generator.py": { "content": "\"\"\"\nDocument generation module for creating Word documents from code analysis.\n\"\"\"\nfrom typing import List, Dict, Any, Optional, Tuple\nfrom io import BytesIO\nfrom dataclasses import dataclass\nimport json\nfrom ollama import Client\n\n@dataclass\nclass CodeElementSummary:\n \"\"\"Class to hold simplified information about a code element.\"\"\"\n name: str\n type: str\n description: str\n example: str = \"\"\n\ndef generate_non_tech_explanation(code_elements: List[Dict[str, Any]], \n model: str = 'llama2',\n host: str = 'http://localhost:11434') -> Tuple[str, List[CodeElementSummary]]:\n \"\"\"\n Generate a non-technical explanation of the code using LLM.\n \n Args:\n code_elements: List of code element dictionaries from the analyzer\n model: The Ollama model to use for generation\n host: The Ollama server host\n \n Returns:\n Tuple of (general_explanation, element_descriptions)\n \"\"\"\n try:\n client = Client(host=host)\n \n # Create a simplified version of code elements for the prompt\n simplified_elements = []\n for el in code_elements:\n simplified = {\n 'name': el.get('name', 'unnamed'),\n 'type': el.get('type', 'code'),\n 'docstring': el.get('docstring', ''),\n 'args': el.get('args', []),\n 'has_return': el.get('has_return', False),\n 'source': el.get('source', '')[:500] # Limit source length\n }\n simplified_elements.append(simplified)\n \n # Prepare the prompt\n system_prompt = \"\"\"You are a helpful assistant that explains code in simple, non-technical terms. \n Your audience has little to no programming knowledge. Use analogies and simple language.\n Explain what the code does, not how it does it.\"\"\"\n \n user_prompt = f\"\"\"Please explain the following Python code elements in a way that's easy for non-programmers to understand.\n For each element, provide:\n 1. A simple explanation of what it does\n 2. A real-world analogy\n 3. 
A simple example of how it might be used\n \n Code elements to explain: {json.dumps(simplified_elements, indent=2)}\n \n Format your response as a JSON object with these fields:\n - general_overview: A brief overview of what the code does as a whole\n - elements: A list of objects, each with 'name', 'type', 'explanation', 'analogy', and 'example' fields\n \"\"\"\n \n # Get the LLM response\n response = client.chat(\n model=model,\n messages=[\n {\"role\": \"system\", \"content\": system_prompt},\n {\"role\": \"user\", \"content\": user_prompt}\n ],\n format=\"json\"\n )\n \n # Parse the response\n try:\n result = json.loads(response['message']['content'])\n \n # Format the general explanation\n general = \"# Understanding the Code: A Friendly Guide\\n\\n\"\n general += f\"## What This Code Does\\n\\n{result.get('general_overview', 'This code performs specific operations.')}\\n\\n\"\n \n # Format the elements\n elements = []\n for elem in result.get('elements', []):\n desc = f\"{elem.get('explanation', '')}\\n\\n\"\n if elem.get('analogy'):\n desc += f\"*Think of it like:* {elem['analogy']}\\n\\n\"\n elements.append(CodeElementSummary(\n name=elem.get('name', 'unnamed'),\n type=elem.get('type', 'code').capitalize(),\n description=desc.strip(),\n example=elem.get('example', '')\n ))\n \n # Add to general explanation\n general += f\"### {elem.get('type', 'Element').capitalize()}: {elem.get('name', 'Unnamed')}\\n\"\n general += f\"{desc}\\n\"\n if elem.get('example'):\n general += f\"*Example:* {elem['example']}\\n\\n\"\n \n # Add conclusion\n general += \"\"\"\n## Why This Is Helpful\n\n1. **For Beginners**: Understand code without needing to learn programming first\n2. **For Teams**: Helps non-technical team members understand technical work\n3. **For Documentation**: Creates clear, accessible records of what code does\n\n## Real-World Impact\n\nThis kind of code documentation helps bridge the gap between technical and non-technical stakeholders, making technology more accessible to everyone.\n\"\"\"\n \n return general, elements\n \n except json.JSONDecodeError:\n # Fallback to simple explanation if JSON parsing fails\n return _generate_fallback_explanation(code_elements)\n \n except Exception as e:\n print(f\"Error generating LLM explanation: {e}\")\n return _generate_fallback_explanation(code_elements)\n\ndef _generate_fallback_explanation(code_elements: List[Dict[str, Any]]) -> Tuple[str, List[CodeElementSummary]]:\n \"\"\"Generate a simple fallback explanation without LLM.\"\"\"\n elements = []\n general = \"# Code Explanation\\n\\nThis code contains the following components:\\n\\n\"\n \n for el in code_elements:\n el_type = el.get('type', 'code')\n name = el.get('name', 'unnamed')\n desc = f\"{el_type.capitalize()} '{name}'\"\n if el.get('docstring'):\n desc += f\": {el['docstring']}\"\n \n elements.append(CodeElementSummary(\n name=name,\n type=el_type,\n description=desc,\n example=\"\"\n ))\n \n general += f\"- {desc}\\n\"\n \n general += \"\\nFor a more detailed explanation, please check the technical documentation.\"\n return general, elements\n\ndef generate_codebase_summary(code_elements: List[Dict[str, Any]],\n model: str = 'llama2',\n host: str = 'http://localhost:11434') -> Dict[str, Any]:\n \"\"\"\n Generate a comprehensive summary of the codebase using LLM.\n \n Args:\n code_elements: List of code element dictionaries from the analyzer\n model: The Ollama model to use for generation\n host: The Ollama server host\n \n Returns:\n Dictionary containing the summary with the 
following keys:\n - overview: General overview of the codebase\n - architecture: High-level architecture description\n - key_components: List of main components\n - data_flow: Description of how data moves through the system\n - dependencies: External libraries and services used\n - setup_instructions: How to set up the project\n - usage_examples: Example usage of the main components\n \"\"\"\n try:\n client = Client(host=host)\n \n # Prepare the prompt\n system_prompt = \"\"\"You are a senior software architect analyzing a codebase. \n Provide a comprehensive summary in JSON format with the following structure:\n {\n \"overview\": \"Brief overview of what the codebase does\",\n \"architecture\": \"High-level architecture description\",\n \"key_components\": [\"List\", \"of\", \"main\", \"components\"],\n \"data_flow\": \"Description of how data moves through the system\",\n \"dependencies\": [\"List\", \"of\", \"main\", \"dependencies\"],\n \"setup_instructions\": \"Step-by-step setup instructions\",\n \"usage_examples\": \"Example usage of the main components\"\n }\"\"\"\n \n # Create a simplified version of code elements for the prompt\n simplified_elements = []\n for el in code_elements:\n simplified = {\n 'name': el.get('name', 'unnamed'),\n 'type': el.get('type', 'code'),\n 'docstring': el.get('docstring', ''),\n 'file': el.get('file', 'unknown')\n }\n simplified_elements.append(simplified)\n \n user_prompt = f\"\"\"Analyze the following code elements and provide a comprehensive summary:\n {json.dumps(simplified_elements, indent=2)}\n \n Focus on:\n 1. The overall purpose of the codebase\n 2. How the different components interact\n 3. The main data flows\n 4. Key dependencies\n 5. How to set up and use the system\n \"\"\"\n \n # Get the LLM response\n response = client.chat(\n model=model,\n messages=[\n {\"role\": \"system\", \"content\": system_prompt},\n {\"role\": \"user\", \"content\": user_prompt}\n ],\n format=\"json\"\n )\n \n # Parse and return the response\n try:\n return json.loads(response['message']['content'])\n except json.JSONDecodeError:\n return {\n \"error\": \"Failed to parse the LLM response\",\n \"raw_response\": response['message']['content']\n }\n \n except Exception as e:\n return {\n \"error\": f\"Error generating codebase summary: {str(e)}\"\n }\n\ntry:\n from docx import Document\n from docx.shared import Pt\n from docx.enum.text import WD_PARAGRAPH_ALIGNMENT\n DOCX_AVAILABLE = True\nexcept ImportError:\n DOCX_AVAILABLE = False\n\ndef create_document(code_elements: List[Dict[str, Any]], explanation: str, \n title: str = \"Python Code Analysis\",\n include_non_tech: bool = True,\n include_summary: bool = True,\n model: str = 'llama2',\n host: str = 'http://localhost:11434') -> Optional[BytesIO]:\n \"\"\"\n Create a Word document from code analysis and explanation.\n \n Args:\n code_elements: List of code element dictionaries, each can include 'file' key\n explanation: Generated explanation text\n title: Title for the document\n include_non_tech: Whether to include non-technical explanation\n include_summary: Whether to include a comprehensive codebase summary\n model: The Ollama model to use for non-technical explanations\n host: The Ollama server host\n \n Returns:\n BytesIO buffer containing the document, or None if docx is not available\n \"\"\"\n if not DOCX_AVAILABLE:\n return None\n \n # Generate non-technical explanation if requested\n non_tech_explanation = \"\"\n if include_non_tech and code_elements:\n non_tech_explanation, _ = 
generate_non_tech_explanation(\n code_elements,\n model=model,\n host=host\n )\n \n # Generate codebase summary if requested\n codebase_summary = {}\n if include_summary and code_elements:\n codebase_summary = generate_codebase_summary(\n code_elements,\n model=model,\n host=host\n )\n \n def _get_or_create_code_style(doc):\n \"\"\"Get the Code style or create it if it doesn't exist.\"\"\"\n try:\n return doc.styles['Code']\n except KeyError:\n code_style = doc.styles.add_style('Code', 1) # 1 = WD_STYLE_TYPE.PARAGRAPH\n code_style.font.name = 'Courier New'\n code_style.font.size = Pt(10)\n code_style.paragraph_format.space_after = Pt(6)\n return code_style\n \n try:\n doc = Document()\n \n # Add title\n title_para = doc.add_heading(level=0)\n title_run = title_para.add_run(title)\n title_run.bold = True\n title_para.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER\n \n # Ensure we have the Code style\n _get_or_create_code_style(doc)\n \n # Add non-technical explanation if available\n if non_tech_explanation:\n doc.add_heading(\"Non-Technical Overview\", level=1)\n for line in non_tech_explanation.split('\\n'):\n if line.startswith('#### '):\n doc.add_heading(line[5:].strip(), level=4)\n elif line.startswith('### '):\n doc.add_heading(line[4:].strip(), level=3)\n elif line.startswith('## '):\n doc.add_heading(line[3:].strip(), level=2)\n elif line.startswith('# '):\n # Top-level markdown headings become level-2 headings under this section\n doc.add_heading(line[2:].strip(), level=2)\n elif line.strip():\n doc.add_paragraph(line)\n \n doc.add_page_break()\n \n # Render the codebase summary if it was generated successfully\n # (previously it was computed but never written into the document)\n if codebase_summary and 'error' not in codebase_summary:\n doc.add_heading(\"Codebase Summary\", level=1)\n for key, value in codebase_summary.items():\n doc.add_heading(key.replace('_', ' ').title(), level=2)\n if isinstance(value, list):\n for item in value:\n doc.add_paragraph(str(item), style='List Bullet')\n else:\n doc.add_paragraph(str(value))\n \n # Add technical summary section\n doc.add_heading(\"Technical Summary\", level=1)\n \n # Count elements by type and file\n elements_by_file = {}\n elements_by_type = {}\n \n for el in code_elements:\n file_name = el.get('file', 'unknown.py')\n el_type = el.get('type', 'Unknown')\n \n # Count by file\n if file_name not in elements_by_file:\n elements_by_file[file_name] = {'functions': 0, 'classes': 0, 'async_functions': 0}\n \n if el_type == 'Function':\n elements_by_file[file_name]['functions'] += 1\n elif el_type == 'AsyncFunction':\n elements_by_file[file_name]['async_functions'] += 1\n elif el_type == 'Class':\n elements_by_file[file_name]['classes'] += 1\n \n # Count by type\n if el_type not in elements_by_type:\n elements_by_type[el_type] = 0\n elements_by_type[el_type] += 1\n \n # Add summary of analyzed files\n if elements_by_file:\n doc.add_heading(\"Files Analyzed\", level=2)\n for file, counts in elements_by_file.items():\n parts = []\n if counts['classes']:\n parts.append(f\"{counts['classes']} classes\")\n if counts['functions']:\n parts.append(f\"{counts['functions']} functions\")\n if counts['async_functions']:\n parts.append(f\"{counts['async_functions']} async functions\")\n doc.add_paragraph(f\"• {file}: {', '.join(parts) if parts else 'No elements found'}\")\n \n # Add element type summary\n if elements_by_type:\n doc.add_paragraph(f\"Total elements found: {sum(elements_by_type.values())}\")\n for el_type, count in elements_by_type.items():\n doc.add_paragraph(f\"• {el_type}s: {count}\", style='List Bullet')\n \n # Add code structure section\n doc.add_heading(\"Code Structure\", level=1)\n \n # Group elements by file\n elements_by_file = {}\n for el in code_elements:\n file_name = el.get('file', 'unknown.py')\n if file_name not in elements_by_file:\n elements_by_file[file_name] = []\n elements_by_file[file_name].append(el)\n \n # Process each file\n for file_name, elements in elements_by_file.items():\n # Add file header\n 
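# (Layout: each file becomes a level-2 heading; its functions and classes\n # follow as level-3 subsections with args, docstring, and source.)\n 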
doc.add_heading(f\"File: {file_name}\", level=2)\n \n for el in elements:\n # Add element header\n el_header = f\"{el['type']}: {el['name']} (Lines {el['start_line']}-{el['end_line']})\"\n doc.add_heading(el_header, level=3)\n \n # Add metadata\n if el['args']:\n doc.add_paragraph(f\"Arguments: {', '.join(el['args'])}\")\n \n if el['type'] != 'Class' and el['has_return']:\n doc.add_paragraph(\"Returns: Yes\")\n \n if el['docstring']:\n doc.add_paragraph(\"Documentation:\")\n doc.add_paragraph(el['docstring'], style='Intense Quote')\n \n # Add source code\n doc.add_paragraph(\"Source Code:\")\n doc.add_paragraph(el['source'], style='Code')\n \n # Add a small separator between elements\n doc.add_paragraph()\n doc.add_paragraph(\"-\" * 40)\n doc.add_paragraph()\n \n # Add explanation section\n doc.add_heading(\"AI Explanation\", level=1)\n doc.add_paragraph(explanation)\n \n # Save to buffer\n buffer = BytesIO()\n doc.save(buffer)\n buffer.seek(0)\n \n return buffer\n \n except Exception as e:\n raise RuntimeError(f\"Error generating document: {str(e)}\")\n", "size": 16711, "language": "python" }, "src/code_analyzer_ai/code_analyzer.py": { "content": "\n\"\"\"\nCode analysis module for extracting structure from Python source code.\n\"\"\"\nimport ast\nfrom typing import List, Dict, Any\n\ndef extract_elements(code: str) -> List[Dict[str, Any]]:\n \"\"\"\n Extract all top-level classes and functions with their metadata from Python code.\n \n Args:\n code: Python source code as a string\n \n Returns:\n List of dictionaries containing information about each code element\n \"\"\"\n try:\n tree = ast.parse(code)\n elements = []\n \n for node in tree.body:\n if isinstance(node, (ast.FunctionDef, ast.ClassDef, ast.AsyncFunctionDef)):\n element_type = node.__class__.__name__.replace('Def', '')\n name = node.name\n doc = ast.get_docstring(node) or \"\"\n src = ast.get_source_segment(code, node) or \"\"\n \n start_line = getattr(node, 'lineno', 0)\n end_line = getattr(node, 'end_lineno', start_line)\n \n args = []\n if hasattr(node, 'args') and hasattr(node.args, 'args'):\n args = [arg.arg for arg in node.args.args]\n \n elements.append({\n 'type': element_type,\n 'name': name,\n 'docstring': doc,\n 'source': src,\n 'start_line': start_line,\n 'end_line': end_line,\n 'args': args if args else None,\n 'has_return': any(isinstance(n, ast.Return) for n in ast.walk(node))\n })\n \n return elements\n \n except SyntaxError as e:\n raise ValueError(f\"Error parsing Python code: {e}\")\n", "size": 1762, "language": "python" }, "src/code_analyzer_ai/utils/__init__.py": { "content": "\"\"\"Utility functions for the Python Code Explainer application.\"\"\"\n\nfrom .file_utils import (\n ensure_directory_exists,\n find_files_by_extension,\n read_file_safely,\n write_file_safely,\n chunk_file\n)\n\nfrom .llm_utils import (\n LLMClient,\n LLMResponse,\n get_llm_client\n)\n\n__all__ = [\n 'ensure_directory_exists',\n 'find_files_by_extension',\n 'read_file_safely',\n 'write_file_safely',\n 'chunk_file',\n 'LLMClient',\n 'LLMResponse',\n 'get_llm_client'\n]\n", "size": 494, "language": "python" }, "src/code_analyzer_ai/utils/llm_utils.py": { "content": "\"\"\"Utility functions for interacting with language models.\"\"\"\nimport json\nimport logging\nfrom typing import Any, Dict, List, Optional, Union\n\nfrom ollama import Client\nfrom pydantic import BaseModel\n\nfrom ..config import settings\n\nlogger = logging.getLogger(__name__)\n\n\nclass LLMResponse(BaseModel):\n \"\"\"Structured response from LLM.\"\"\"\n 
content: str\n model: str\n prompt_tokens: Optional[int] = None\n completion_tokens: Optional[int] = None\n total_tokens: Optional[int] = None\n \n def to_dict(self) -> Dict[str, Any]:\n \"\"\"Convert to dictionary.\"\"\"\n return self.dict()\n \n @classmethod\n def from_ollama_response(cls, response: Dict[str, Any]) -> \"LLMResponse\":\n \"\"\"Create from Ollama response.\"\"\"\n return cls(\n content=response.get(\"message\", {}).get(\"content\", \"\"),\n model=response.get(\"model\", \"\"),\n prompt_tokens=response.get(\"prompt_eval_count\"),\n completion_tokens=response.get(\"eval_count\"),\n total_tokens=(response.get(\"prompt_eval_count\", 0) + \n response.get(\"eval_count\", 0))\n )\n\n\nclass LLMClient:\n \"\"\"Client for interacting with language models.\"\"\"\n \n def __init__(self, base_url: Optional[str] = None, model: Optional[str] = None):\n \"\"\"Initialize the LLM client.\n \n Args:\n base_url: Base URL of the Ollama server\n model: Default model to use\n \"\"\"\n self.base_url = base_url or str(settings.OLLAMA_HOST)\n self.default_model = model or settings.OLLAMA_MODEL\n self.client = Client(host=self.base_url)\n \n def generate(\n self,\n prompt: str,\n model: Optional[str] = None,\n system_prompt: Optional[str] = None,\n format: str = \"json\",\n **kwargs\n ) -> LLMResponse:\n \"\"\"Generate text from a prompt.\n \n Args:\n prompt: The prompt to send to the model\n model: Model to use (defaults to the instance default)\n system_prompt: System prompt to set the behavior of the assistant\n format: Format of the response ('json' or 'text')\n **kwargs: Additional arguments to pass to the Ollama API\n \n Returns:\n LLMResponse object with the generated content and metadata\n \"\"\"\n model = model or self.default_model\n messages = []\n \n if system_prompt:\n messages.append({\"role\": \"system\", \"content\": system_prompt})\n \n messages.append({\"role\": \"user\", \"content\": prompt})\n \n try:\n response = self.client.chat(\n model=model,\n messages=messages,\n stream=False,\n format=format,\n **kwargs\n )\n return LLMResponse.from_ollama_response(response)\n except Exception as e:\n logger.error(f\"Error generating text with model {model}: {e}\")\n raise\n \n def generate_json(\n self,\n prompt: str,\n model: Optional[str] = None,\n system_prompt: Optional[str] = None,\n **kwargs\n ) -> Dict[str, Any]:\n \"\"\"Generate and parse JSON response from the model.\n \n Args:\n prompt: The prompt to send to the model\n model: Model to use (defaults to the instance default)\n system_prompt: System prompt to set the behavior of the assistant\n **kwargs: Additional arguments to pass to the Ollama API\n \n Returns:\n Parsed JSON response as a dictionary\n \n Raises:\n json.JSONDecodeError: If the response is not valid JSON\n \"\"\"\n response = self.generate(\n prompt=prompt,\n model=model,\n system_prompt=system_prompt,\n format=\"json\",\n **kwargs\n )\n return json.loads(response.content)\n\n\ndef get_llm_client() -> LLMClient:\n \"\"\"Get a configured LLM client.\"\"\"\n return LLMClient()\n", "size": 4055, "language": "python" }, "src/code_analyzer_ai/utils/file_utils.py": { "content": "\"\"\"Utility functions for file operations.\"\"\"\nimport logging\nimport shutil\nfrom pathlib import Path\nfrom typing import List, Optional, Union, Generator, Tuple\n\nlogger = logging.getLogger(__name__)\n\n\ndef ensure_directory_exists(directory: Union[str, Path]) -> Path:\n \"\"\"Ensure a directory exists, create it if it doesn't.\n \n Args:\n directory: Path to the directory\n \n 
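Example (illustrative):\n ensure_directory_exists(\"build/cache\") returns the resolved\n absolute Path, creating any missing parent directories.\n \n 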
Returns:\n Path: The path to the directory\n \"\"\"\n path = Path(directory).resolve()\n path.mkdir(parents=True, exist_ok=True)\n return path\n\n\ndef find_files_by_extension(\n directory: Union[str, Path], \n extensions: Union[str, List[str]],\n recursive: bool = True\n) -> List[Path]:\n \"\"\"Find files by extension in a directory.\n \n Args:\n directory: Directory to search in\n extensions: File extension(s) to search for\n recursive: Whether to search recursively\n \n Returns:\n List of matching file paths\n \"\"\"\n if isinstance(extensions, str):\n extensions = [extensions]\n \n directory = Path(directory).resolve()\n if not directory.exists():\n logger.warning(f\"Directory does not exist: {directory}\")\n return []\n \n pattern = \"**/*\" if recursive else \"*\"\n files = []\n \n for ext in extensions:\n ext = ext.lstrip('.')\n files.extend(directory.glob(f\"{pattern}.{ext}\"))\n \n return sorted(files)\n\n\ndef read_file_safely(file_path: Union[str, Path]) -> Optional[str]:\n \"\"\"Read a file safely with error handling.\n \n Args:\n file_path: Path to the file to read\n \n Returns:\n File contents as string, or None if an error occurs\n \"\"\"\n try:\n with open(file_path, 'r', encoding='utf-8') as f:\n return f.read()\n except Exception as e:\n logger.error(f\"Error reading file {file_path}: {e}\")\n return None\n\n\ndef write_file_safely(\n file_path: Union[str, Path], \n content: str,\n mode: str = 'w',\n backup: bool = True\n) -> bool:\n \"\"\"Write to a file safely with error handling and optional backup.\n \n Args:\n file_path: Path to the file to write\n content: Content to write\n mode: File open mode ('w' for write, 'a' for append)\n backup: Whether to create a backup if the file exists\n \n Returns:\n bool: True if successful, False otherwise\n \"\"\"\n try:\n file_path = Path(file_path).resolve()\n \n # Create backup if file exists and backup is True\n if file_path.exists() and backup:\n backup_path = file_path.with_suffix(f\"{file_path.suffix}.bak\")\n shutil.copy2(file_path, backup_path)\n \n # Ensure parent directory exists\n file_path.parent.mkdir(parents=True, exist_ok=True)\n \n # Write the file\n with open(file_path, mode, encoding='utf-8') as f:\n f.write(content)\n \n return True\n except Exception as e:\n logger.error(f\"Error writing to file {file_path}: {e}\")\n return False\n\n\ndef chunk_file(\n file_path: Union[str, Path], \n chunk_size: int = 1024 * 1024 # 1MB chunks\n) -> Generator[Tuple[bytes, int, int], None, None]:\n \"\"\"Read a file in chunks.\n \n Args:\n file_path: Path to the file\n chunk_size: Size of each chunk in bytes\n \n Yields:\n Tuple of (chunk_data, chunk_number, total_chunks)\n \"\"\"\n file_path = Path(file_path)\n total_size = file_path.stat().st_size\n total_chunks = (total_size + chunk_size - 1) // chunk_size\n \n with open(file_path, 'rb') as f:\n for i in range(total_chunks):\n yield f.read(chunk_size), i + 1, total_chunks\n", "size": 3686, "language": "python" } }, "_cache_metadata": { "url": "https://github.com/ronelsolomon/filesummarize.git", "content_type": "github", "cached_at": "2026-03-02T22:50:02.688455", "cache_key": "fa1d440547004667e76e49276ecd94a0" } }