Unit Test Generator
Run ID: 69cc1b8304066a6c4a16961e · 2026-03-31 · Development
PantheraHive BOS
BOS Dashboard

This deliverable outlines Step 2 of 3 in the "Unit Test Generator" workflow, focusing on leveraging the Gemini API to generate comprehensive and professional unit tests. This step, gemini → generate_code, is designed to take a given source code snippet and produce corresponding unit tests based on specified parameters.


1. Introduction: Generating Unit Tests with Gemini

This section provides the core functionality for automatically generating unit tests using Google's Gemini Pro model. The goal is to produce well-structured, readable, and effective unit tests for a given piece of source code, adhering to specified programming languages and testing frameworks. This automation significantly speeds up the development process, enhances code quality, and ensures better test coverage.

The generated code will interact with the Gemini API to craft intelligent prompts that guide the AI in producing relevant and robust test cases, including typical scenarios, edge cases, and error handling.

2. Core Functionality: generate_unit_tests

The central component of this step is the generate_unit_tests function. This function encapsulates the logic required to communicate with the Gemini API, construct appropriate prompts, and process the AI's response to deliver clean unit test code.

2.1 Function Description

The generate_unit_tests function takes a source code snippet, its programming language, and the desired testing framework as input. It then formulates a detailed prompt for the Gemini model, requesting it to generate unit tests that cover various aspects of the provided code.

2.2 Parameters

2.3 Return Value

2.4 Example Usage

text • 4,448 chars
Please generate the unit tests now:
"""
        logging.info(f"Sending prompt to Gemini model '{model_name}' for {language} code...")
        
        # Generate content using the Gemini model
        # The `generation_config` helps control the output format and creativity.
        response = model.generate_content(
            prompt,
            generation_config=genai.GenerationConfig(
                temperature=0.4,  # Lower temperature for more focused and less creative output
                max_output_tokens=2048 # Adjust based on expected test file size
            )
        )

        # Extract the generated text
        generated_text = response.text.strip()

        if not generated_text:
            logging.warning("Gemini model returned an empty response for unit test generation.")
            return None

        logging.info("Successfully generated unit tests.")
        return generated_text

    except genai.APIError as e:
        logging.error(f"Gemini API error occurred: {e}")
        return None
    except Exception as e:
        logging.error(f"An unexpected error occurred during unit test generation: {e}")
        return None

# --- Example Usage (demonstrates how to integrate) ---
if __name__ == "__main__":
    # For production, always use environment variables or a secure secret
    # management system -- never hard-code API keys in source.
    GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")

    if not GEMINI_API_KEY:
        logging.error("GEMINI_API_KEY environment variable not set. Please set it to run the example.")
    else:
        # Example 1 input: Python function to be tested with pytest.
        python_code_snippet = """
def divide(a, b):
    \"\"\"Divides two numbers. Raises ValueError if b is zero.\"\"\"
    if b == 0:
        raise ValueError("Cannot divide by zero")
    return a / b
"""
        # Example 2 input: JavaScript function to be tested with Jest.
        javascript_code_snippet = """
function factorial(n) {
  if (n < 0) {
    throw new Error("Factorial is not defined for negative numbers");
  }
  if (n === 0 || n === 1) {
    return 1;
  }
  let result = 1;
  for (let i = 2; i <= n; i++) {
    result *= i;
  }
  return result;
}
module.exports = factorial;
"""
        # Example 3 input: Java class to be tested with JUnit.
        java_code_snippet = """
public class Calculator {
    public int add(int a, int b) {
        return a + b;
    }

    public int subtract(int a, int b) {
        return a - b;
    }
}
"""

        # (snippet, language, framework, suggested output filename) tuples.
        # Driving all demos from one loop removes three near-identical
        # copy-pasted blocks and makes adding another language a one-line change.
        examples = [
            (python_code_snippet, "Python", "pytest", "test_divide.py"),
            (javascript_code_snippet, "JavaScript", "Jest", "factorial.test.js"),
            (java_code_snippet, "Java", "JUnit", "CalculatorTest.java"),
        ]

        for snippet, language, framework, out_name in examples:
            logging.info(f"\n--- Generating {framework} tests for {language} code ---")
            generated_tests = generate_unit_tests(
                source_code_snippet=snippet,
                language=language,
                test_framework=framework,
                api_key=GEMINI_API_KEY,
            )

            if generated_tests:
                print(f"\nGenerated {language} ({framework}) tests:\n", generated_tests)
                # You could save the result to a file, e.g.:
                # with open(out_name, "w") as f:
                #     f.write(generated_tests)
                # print(f"Tests saved to {out_name}")
            else:
                print(f"\nFailed to generate {language} ({framework}) tests.")
Sandboxed live preview

Unit Test Generator: Comprehensive Study Plan

This document outlines a detailed and actionable study plan for understanding and potentially designing a "Unit Test Generator." This plan is structured to provide a deep dive into the underlying principles, techniques, and architectural considerations required for such a sophisticated system.

Introduction to the Study Plan

The goal of this study plan is to equip you with the knowledge and skills necessary to comprehend the complexities of automated unit test generation. You will explore fundamental concepts of unit testing, delve into code analysis techniques, understand various test case generation strategies, and finally, be able to conceptualize the architecture of a robust unit test generator. This plan is designed to be completed over four weeks, with each week building upon the previous one.

Weekly Schedule

This schedule assumes approximately 10-15 hours of dedicated study per week, including reading, exercises, and practical application.


Week 1: Fundamentals of Unit Testing & Code Representation

  • Focus Areas:

* What are unit tests, why are they crucial, and best practices (FIRST principles: Fast, Isolated, Repeatable, Self-validating, Timely).

* Introduction to Abstract Syntax Trees (ASTs) and their role in code analysis.

* Basic static code analysis concepts.

  • Activities:

* Day 1-2: Review unit testing principles. Read articles on good unit test design and common pitfalls.

* Day 3-4: Introduction to ASTs. Explore AST parsers for a language of your choice (e.g., Python's ast module, Java's ANTLR/JDT AST, C# Roslyn). Understand how code constructs map to AST nodes.

* Day 5-6: Hands-on: Use an AST parser to extract simple information from a code file (e.g., function names, variable declarations).

* Day 7: Review and consolidate.


Week 2: Code Analysis & Control Flow

  • Focus Areas:

* Advanced static analysis techniques: Control Flow Graphs (CFGs) and Data Flow Analysis.

* Symbolic execution basics.

* Intermediate Representations (IR) for code.

  • Activities:

* Day 1-2: Study Control Flow Graphs (CFGs). Understand how they represent execution paths.

* Day 3-4: Hands-on: Attempt to generate a basic CFG for a simple function using pseudocode or a library if available.

* Day 5-6: Introduction to Data Flow Analysis (e.g., reaching definitions, live variables) and its relevance for identifying test targets and potential issues. Explore symbolic execution concepts as a precursor to test generation.

* Day 7: Review and consolidate.


Week 3: Test Case Generation Strategies

  • Focus Areas:

* Property-Based Testing (PBT).

* Fuzz Testing/Random Test Generation.

* Constraint Solving and Satisfiability Modulo Theories (SMT) solvers for generating inputs.

* Mocking and Stubbing strategies for dependencies.

  • Activities:

* Day 1-2: Deep dive into Property-Based Testing. Understand how properties are defined and how generators work (e.g., Hypothesis for Python, QuickCheck for Haskell).

* Day 3-4: Explore Fuzz Testing principles. Understand how random or semi-random inputs are generated to find edge cases and crashes.

* Day 5-6: Introduction to SMT solvers (e.g., Z3, CVC4) and their application in generating inputs that satisfy specific path conditions or pre-conditions. Understand the role of mocking/stubbing for isolating units under test.

* Day 7: Review and consolidate.


Week 4: Architecture & Integration of a Unit Test Generator

  • Focus Areas:

* Core components of a Unit Test Generator: Input Parser, Code Analyzer, Test Case Generator, Test Framework Integrator, Output Formatter.

* Design considerations: scalability, language independence, extensibility.

* Integration with existing testing frameworks (e.g., JUnit, Pytest, NUnit).

* Evaluation metrics for generated tests.

  • Activities:

* Day 1-2: Study existing automated test generation tools (e.g., EvoSuite, Randoop, KLEE) to understand their architecture and methodologies.

* Day 3-4: Architectural Design Exercise: Propose a high-level architecture for a "Unit Test Generator" for a specific programming language. Define its main modules, their responsibilities, and how they interact.

* Day 5-6: Discuss integration points with common unit testing frameworks. Consider how generated tests are presented to the user (readability, format).

* Day 7: Final review, documentation of architectural plan, and preparation for assessment.


Learning Objectives

Upon successful completion of this study plan, you will be able to:

  • Understand the fundamental principles and best practices of unit testing.
  • Analyze code using Abstract Syntax Trees (ASTs) and Control Flow Graphs (CFGs) to identify testable units and execution paths.
  • Explain and differentiate various test case generation strategies, including property-based testing, fuzzing, and constraint-based generation.
  • Apply basic techniques for extracting information from code via static analysis.
  • Design a high-level architecture for a Unit Test Generator, outlining its core components and their interactions.
  • Evaluate the challenges and considerations in integrating an automated test generator with existing development workflows and testing frameworks.
  • Propose methods for assessing the quality and effectiveness of automatically generated unit tests.

Recommended Resources

  • Books:

* "Working Effectively with Legacy Code" by Michael C. Feathers (for understanding testability and breaking dependencies).

* "The Art of Unit Testing" by Roy Osherove (for unit testing fundamentals).

* "Compilers: Principles, Techniques, and Tools" (The Dragon Book) by Aho, Lam, Sethi, Ullman (for ASTs, CFGs, data flow analysis - focus on relevant chapters).

* "Software Testing and Analysis: Process, Principles, and Techniques" by Mauro Pezzè and Michal Young (for broader testing strategies).

  • Online Courses/Tutorials:

* Coursera/edX courses on compilers, program analysis, or software testing.

* Specific language AST documentation (e.g., Python ast module tutorial, Eclipse JDT AST Parser tutorial).

* Tutorials on Property-Based Testing frameworks (e.g., Hypothesis, QuickCheck).

* Introductions to SMT solvers (e.g., Z3 tutorial).

  • Research Papers/Articles:

* Papers on EvoSuite, Randoop, KLEE, or other automated test generation tools. Search academic databases (ACM Digital Library, IEEE Xplore) for "automated unit test generation," "symbolic execution," "property-based testing."

* Blogs and articles on static analysis, fuzzing, and code instrumentation.

  • Tools/Libraries:

* AST Parsers: Python ast module, Java JDT AST Parser, Roslyn (.NET), ANTLR.

* PBT Frameworks: Hypothesis (Python), QuickCheck (Haskell/various ports).

* SMT Solvers: Z3 (Microsoft Research), CVC4.

* Fuzzers: AFL++, libFuzzer.

Milestones

This plan includes specific checkpoints to track progress and ensure understanding.

  • End of Week 1: Successfully parse a simple code file (e.g., a function with loops and conditionals) into an AST and extract basic information (function names, variable types).
  • End of Week 2: Sketch a basic Control Flow Graph (CFG) for a given function and identify potential paths. Understand the concept of data flow for a simple program fragment.
  • End of Week 3: Implement a small program that uses a Property-Based Testing framework to test a simple function (e.g., sorting algorithm, string manipulation). Articulate how fuzzing and constraint solving differ.
  • End of Week 4: Present a detailed architectural plan (diagrams, component descriptions) for a Unit Test Generator, including its interaction points with a target testing framework.

Assessment Strategies

To ensure comprehensive learning and mastery of the subject matter, the following assessment strategies are recommended:

  • Weekly Self-Assessment Quizzes: Short quizzes covering the week's learning objectives, focusing on definitions, concepts, and basic application.
  • Practical Coding Exercises:

* Week 1: A small script to analyze a given code snippet's AST and report specific metrics (e.g., number of function calls, complexity).

* Week 3: A practical exercise demonstrating the use of a Property-Based Testing library or a simple fuzzer for a given code module.

  • Architectural Design Document (End of Week 4): A formal document outlining the proposed architecture of a Unit Test Generator. This should include:

* High-level component diagram.

* Detailed description of each major component (e.g., Code Parser, Analysis Engine, Test Case Generator, Test Runner Interface).

* Explanation of data flow between components.

* Discussion of design choices, challenges, and potential solutions.

* Considerations for language extensibility and integration.

  • Peer Review/Discussion: Engage with peers or mentors to discuss architectural decisions, challenges, and alternative approaches. This fosters critical thinking and exposes different perspectives.
  • Presentation/Walkthrough (Optional): Present the architectural design to a small group, explaining the rationale and answering questions.

By diligently following this study plan, you will gain a profound understanding of the principles and techniques required to build or effectively utilize a sophisticated Unit Test Generator.

Gemini Output

Unit Test Generator: Review and Documentation - Final Deliverable

This document marks the completion of the "Unit Test Generator" workflow, specifically the "review_and_document" step. The previous "gemini" step successfully generated a suite of unit tests based on your provided input. This final step provides a comprehensive review guide, essential documentation, and actionable next steps to ensure the effective integration and ongoing maintenance of these generated tests within your development lifecycle.


1. Workflow Summary & Deliverable Overview

The "Unit Test Generator" workflow aimed to accelerate your testing efforts by leveraging AI to produce a foundational set of unit tests. This final deliverable focuses on empowering you to critically evaluate, integrate, and maintain these tests, transforming them into a robust part of your codebase.

Key Deliverables in this Step:

  • Review Guidelines: A structured approach to assess the quality, correctness, and completeness of the generated unit tests.
  • Documentation Insights: Understanding the inherent documentation within the generated tests and how to augment it.
  • Best Practices: Recommendations for effective unit testing and test suite maintenance.
  • Actionable Next Steps: A clear roadmap for integrating and utilizing the generated tests.

2. Output from Previous Step: Generated Unit Tests

The "gemini" step has produced a set of unit test files, typically structured to mirror your existing project's architecture or within a dedicated tests/ directory. These tests are designed to cover various aspects of your code, including:

  • Functionality Tests: Verifying the core behavior of individual functions, methods, or components.
  • Edge Case Tests: Exploring boundary conditions, invalid inputs, and error handling.
  • State Verification: Ensuring that components maintain correct internal states after operations.
  • Dependency Mocking/Stubbing: Where applicable, demonstrating how to isolate units by replacing dependencies with mocks or stubs.

Expected Format:

The generated tests will be in the programming language and framework specified or inferred from your input (e.g., Python with pytest, Java with JUnit, JavaScript with Jest, C# with NUnit/xUnit). Each test file or function will typically include:

  • A clear name indicating the unit under test and the scenario.
  • Setup/teardown methods (if applicable to the framework).
  • Assertions to validate expected outcomes.
  • Comments explaining complex logic or test intent.

3. Reviewing the Generated Unit Tests: Actionable Guidance

Thorough review is paramount to ensure the generated tests align with your project's standards and accurately reflect your component's intended behavior. Use the following checklist to guide your review:

3.1. Correctness and Logic Validation

  • Functional Accuracy: Do the tests correctly assert the expected behavior of the unit under test? Run the tests and observe failures/passes.
  • Input/Output Mapping: For given inputs, do the tests expect the correct outputs or state changes?
  • Edge Case Handling: Are the identified edge cases relevant and comprehensively tested? Consider nulls, empty collections, maximum/minimum values, and error conditions.
  • Negative Scenarios: Are there tests for invalid inputs or error paths, ensuring the code handles them gracefully (e.g., throwing expected exceptions)?

3.2. Test Coverage Assessment

  • Code Coverage: While AI aims for good coverage, manually verify that critical paths, branches, and statements are covered. Utilize code coverage tools (e.g., pytest-cov, JaCoCo, Istanbul) to identify gaps.
  • Requirement Coverage: Do the tests cover the essential business requirements or user stories associated with the unit?
  • Missing Scenarios: Identify any scenarios, functionalities, or edge cases that might have been overlooked by the generator.

3.3. Readability and Maintainability

  • Clarity of Test Names: Are test names descriptive and indicative of the scenario being tested (e.g., test_add_two_positive_numbers, test_calculate_discount_for_premium_user)?
  • Test Structure (AAA Pattern): Do tests follow the Arrange-Act-Assert pattern?

* Arrange: Set up the test data, mocks, and environment.

* Act: Invoke the method or function under test.

* Assert: Verify the outcome.

  • Conciseness: Are tests focused on a single responsibility? Avoid overly complex or multi-assertion tests that are hard to debug.
  • Comments: Are comments present where necessary to explain complex test logic or assumptions? Ensure they are accurate and up-to-date.
  • Consistency: Do the tests adhere to your team's existing coding standards and test style guides? Adjust formatting, naming conventions, and assertion styles as needed.

3.4. Dependencies and Isolation

  • Proper Mocking/Stubbing: If the unit has external dependencies (databases, APIs, file systems), are they properly mocked or stubbed to ensure true unit isolation?
  • Test Data Management: Is test data clearly defined and managed within the tests, avoiding reliance on external, mutable states?
  • Idempotency: Can tests be run multiple times in any order without affecting each other or producing different results?

4. Documentation of Generated Unit Tests

The generated tests inherently serve as living documentation of your code's expected behavior. Beyond the tests themselves, here's how to approach their documentation:

4.1. In-Code Documentation

  • Descriptive Test Names: As noted, well-named tests are the primary form of documentation.
  • Comments: Use comments within tests to explain:

* The purpose of a complex test setup.

* Why a specific assertion is made.

* Any known limitations or assumptions.

  • Docstrings/Annotations: Leverage your language's documentation features (e.g., Python docstrings, Javadoc, C# XML comments) for test classes or methods to provide higher-level context.

4.2. External Documentation (Optional, but Recommended)

For critical components or complex modules, consider adding external documentation that references the test suite:

  • README Files: In your tests/ directory, a README.md can explain:

* How to run the tests.

* Any specific setup requirements (e.g., environment variables, test database).

* The overall testing strategy for the module.

  • Design Documents: Reference specific test suites in your software design documents to show how design decisions are validated.

5. Best Practices for Maintaining a Robust Unit Test Suite

Generating tests is a starting point; maintaining them is key to long-term value.

  • Integrate with CI/CD: Ensure unit tests are an integral part of your Continuous Integration/Continuous Deployment pipeline. Every code commit should trigger unit test execution.
  • Run Tests Frequently: Developers should run relevant unit tests locally before committing code.
  • Test Refactoring: When refactoring production code, refactor your tests alongside it. Tests should be as clean and maintainable as production code.
  • Avoid Fragile Tests: Tests that break easily due to minor code changes (e.g., asserting on exact error messages that might change, relying on implementation details) are "fragile." Focus on testing observable behavior.
  • Keep Tests Fast: Slow unit tests discourage frequent execution. Optimize test setup and avoid unnecessary I/O or network calls.
  • Ownership: Treat unit tests as first-class citizens of your codebase. Assign ownership and ensure they are reviewed alongside production code.
  • Review Test Failures: Investigate and fix failing tests immediately. A failing test often indicates a bug or an outdated test.
  • Living Documentation: Continuously update tests as requirements or code change to ensure they remain accurate and relevant.

6. Actionable Next Steps for Implementation

Follow these steps to integrate the generated unit tests into your project effectively:

  1. Locate Generated Tests: Identify the generated unit test files (e.g., tests/your_module_test.py, src/test/java/com/example/YourClassTest.java).
  2. Integrate into Project Structure: Move the generated test files into the appropriate test directory within your project, adhering to your established project structure and naming conventions.
  3. Install Test Runner/Framework: If not already present, ensure you have the necessary test runner and framework dependencies installed (e.g., pytest, JUnit, Jest).
  4. Run Initial Tests: Execute the generated tests.

* Expected Outcome: Some tests might pass, and some might initially fail.

* Troubleshooting:

* Compilation/Syntax Errors: Correct any minor syntax issues or import path problems that might arise from integration into your specific environment.

* Assertion Failures: This is where the review process becomes critical. A failure could indicate:

* A bug in the original code (great, you found one!).

* An incorrect assumption made by the AI during test generation (adjust the assertion).

* Missing setup or mock configuration (add necessary Arrange steps).

  5. Perform Comprehensive Review: Systematically go through each generated test using the "Reviewing the Generated Unit Tests" guidelines (Section 3).

* Refine and Adapt: Modify test names, assertions, and setups to precisely match your code's behavior and your team's standards.

* Expand Coverage: Add any missing test cases or scenarios identified during your review.

  6. Integrate with Version Control: Add the reviewed and refined test files to your version control system (Git, SVN, etc.) as part of your codebase.
  7. Configure CI/CD: Update your CI/CD pipeline to automatically run these unit tests on every code push or pull request.
  8. Monitor and Maintain: Regularly monitor test results and commit to fixing failing tests promptly. Continuously update tests as your application evolves.

7. Support and Feedback

We are committed to helping you maximize the value of the "Unit Test Generator."

  • For Technical Assistance: If you encounter issues during integration or have specific questions about the generated tests, please refer to our dedicated support channels or documentation.
  • For Feedback: Your feedback is invaluable. Please share your experience with the generated tests and this workflow. Your input helps us refine and improve our AI-powered development tools.

Thank you for using PantheraHive's "Unit Test Generator." We hope this comprehensive output empowers your development process.

unit_test_generator.txt
Download source file
Copy all content
Full output as text
Download ZIP
IDE-ready project ZIP
Copy share link
Permanent URL for this run
Get Embed Code
Embed this result on any website
Print / Save PDF
Use browser print dialog
\n\n\n"); var hasSrcMain=Object.keys(extracted).some(function(k){return k.indexOf("src/main")>=0;}); if(!hasSrcMain) zip.file(folder+"src/main."+ext,"import React from 'react'\nimport ReactDOM from 'react-dom/client'\nimport App from './App'\nimport './index.css'\n\nReactDOM.createRoot(document.getElementById('root')!).render(\n \n \n \n)\n"); var hasSrcApp=Object.keys(extracted).some(function(k){return k==="src/App."+ext||k==="App."+ext;}); if(!hasSrcApp) zip.file(folder+"src/App."+ext,"import React from 'react'\nimport './App.css'\n\nfunction App(){\n return(\n
\n
\n

"+slugTitle(pn)+"

\n

Built with PantheraHive BOS

\n
\n
\n )\n}\nexport default App\n"); zip.file(folder+"src/index.css","*{margin:0;padding:0;box-sizing:border-box}\nbody{font-family:system-ui,-apple-system,sans-serif;background:#f0f2f5;color:#1a1a2e}\n.app{min-height:100vh;display:flex;flex-direction:column}\n.app-header{flex:1;display:flex;flex-direction:column;align-items:center;justify-content:center;gap:12px;padding:40px}\nh1{font-size:2.5rem;font-weight:700}\n"); zip.file(folder+"src/App.css",""); zip.file(folder+"src/components/.gitkeep",""); zip.file(folder+"src/pages/.gitkeep",""); zip.file(folder+"src/hooks/.gitkeep",""); Object.keys(extracted).forEach(function(p){ var fp=p.startsWith("src/")?p:"src/"+p; zip.file(folder+fp,extracted[p]); }); zip.file(folder+"README.md","# "+slugTitle(pn)+"\n\nGenerated by PantheraHive BOS.\n\n## Setup\n\`\`\`bash\nnpm install\nnpm run dev\n\`\`\`\n\n## Build\n\`\`\`bash\nnpm run build\n\`\`\`\n\n## Open in IDE\nOpen the project folder in VS Code or WebStorm.\n"); zip.file(folder+".gitignore","node_modules/\ndist/\n.env\n.DS_Store\n*.local\n"); } /* --- Vue (Vite + Composition API + TypeScript) --- */ function buildVue(zip,folder,app,code,panelTxt){ var pn=pkgName(app); var C=cc(pn); var extracted=extractCode(panelTxt); zip.file(folder+"package.json",'{\n "name": "'+pn+'",\n "version": "0.0.0",\n "type": "module",\n "scripts": {\n "dev": "vite",\n "build": "vue-tsc -b && vite build",\n "preview": "vite preview"\n },\n "dependencies": {\n "vue": "^3.5.13",\n "vue-router": "^4.4.5",\n "pinia": "^2.3.0",\n "axios": "^1.7.9"\n },\n "devDependencies": {\n "@vitejs/plugin-vue": "^5.2.1",\n "typescript": "~5.7.3",\n "vite": "^6.0.5",\n "vue-tsc": "^2.2.0"\n }\n}\n'); zip.file(folder+"vite.config.ts","import { defineConfig } from 'vite'\nimport vue from '@vitejs/plugin-vue'\nimport { resolve } from 'path'\n\nexport default defineConfig({\n plugins: [vue()],\n resolve: { alias: { '@': resolve(__dirname,'src') } }\n})\n"); 
zip.file(folder+"tsconfig.json",'{"files":[],"references":[{"path":"./tsconfig.app.json"},{"path":"./tsconfig.node.json"}]}\n'); zip.file(folder+"tsconfig.app.json",'{\n "compilerOptions":{\n "target":"ES2020","useDefineForClassFields":true,"module":"ESNext","lib":["ES2020","DOM","DOM.Iterable"],\n "skipLibCheck":true,"moduleResolution":"bundler","allowImportingTsExtensions":true,\n "isolatedModules":true,"moduleDetection":"force","noEmit":true,"jsxImportSource":"vue",\n "strict":true,"paths":{"@/*":["./src/*"]}\n },\n "include":["src/**/*.ts","src/**/*.d.ts","src/**/*.tsx","src/**/*.vue"]\n}\n'); zip.file(folder+"env.d.ts","/// \n"); zip.file(folder+"index.html","\n\n\n \n \n "+slugTitle(pn)+"\n\n\n
\n \n\n\n"); var hasMain=Object.keys(extracted).some(function(k){return k==="src/main.ts"||k==="main.ts";}); if(!hasMain) zip.file(folder+"src/main.ts","import { createApp } from 'vue'\nimport { createPinia } from 'pinia'\nimport App from './App.vue'\nimport './assets/main.css'\n\nconst app = createApp(App)\napp.use(createPinia())\napp.mount('#app')\n"); var hasApp=Object.keys(extracted).some(function(k){return k.indexOf("App.vue")>=0;}); if(!hasApp) zip.file(folder+"src/App.vue","\n\n\n\n\n"); zip.file(folder+"src/assets/main.css","*{margin:0;padding:0;box-sizing:border-box}body{font-family:system-ui,sans-serif;background:#fff;color:#213547}\n"); zip.file(folder+"src/components/.gitkeep",""); zip.file(folder+"src/views/.gitkeep",""); zip.file(folder+"src/stores/.gitkeep",""); Object.keys(extracted).forEach(function(p){ var fp=p.startsWith("src/")?p:"src/"+p; zip.file(folder+fp,extracted[p]); }); zip.file(folder+"README.md","# "+slugTitle(pn)+"\n\nGenerated by PantheraHive BOS.\n\n## Setup\n\`\`\`bash\nnpm install\nnpm run dev\n\`\`\`\n\n## Build\n\`\`\`bash\nnpm run build\n\`\`\`\n\nOpen in VS Code or WebStorm.\n"); zip.file(folder+".gitignore","node_modules/\ndist/\n.env\n.DS_Store\n*.local\n"); } /* --- Angular (v19 standalone) --- */ function buildAngular(zip,folder,app,code,panelTxt){ var pn=pkgName(app); var C=cc(pn); var sel=pn.replace(/_/g,"-"); var extracted=extractCode(panelTxt); zip.file(folder+"package.json",'{\n "name": "'+pn+'",\n "version": "0.0.0",\n "scripts": {\n "ng": "ng",\n "start": "ng serve",\n "build": "ng build",\n "test": "ng test"\n },\n "dependencies": {\n "@angular/animations": "^19.0.0",\n "@angular/common": "^19.0.0",\n "@angular/compiler": "^19.0.0",\n "@angular/core": "^19.0.0",\n "@angular/forms": "^19.0.0",\n "@angular/platform-browser": "^19.0.0",\n "@angular/platform-browser-dynamic": "^19.0.0",\n "@angular/router": "^19.0.0",\n "rxjs": "~7.8.0",\n "tslib": "^2.3.0",\n "zone.js": "~0.15.0"\n },\n "devDependencies": {\n 
"@angular-devkit/build-angular": "^19.0.0",\n "@angular/cli": "^19.0.0",\n "@angular/compiler-cli": "^19.0.0",\n "typescript": "~5.6.0"\n }\n}\n'); zip.file(folder+"angular.json",'{\n "$schema": "./node_modules/@angular/cli/lib/config/schema.json",\n "version": 1,\n "newProjectRoot": "projects",\n "projects": {\n "'+pn+'": {\n "projectType": "application",\n "root": "",\n "sourceRoot": "src",\n "prefix": "app",\n "architect": {\n "build": {\n "builder": "@angular-devkit/build-angular:application",\n "options": {\n "outputPath": "dist/'+pn+'",\n "index": "src/index.html",\n "browser": "src/main.ts",\n "tsConfig": "tsconfig.app.json",\n "styles": ["src/styles.css"],\n "scripts": []\n }\n },\n "serve": {"builder":"@angular-devkit/build-angular:dev-server","configurations":{"production":{"buildTarget":"'+pn+':build:production"},"development":{"buildTarget":"'+pn+':build:development"}},"defaultConfiguration":"development"}\n }\n }\n }\n}\n'); zip.file(folder+"tsconfig.json",'{\n "compileOnSave": false,\n "compilerOptions": {"baseUrl":"./","outDir":"./dist/out-tsc","forceConsistentCasingInFileNames":true,"strict":true,"noImplicitOverride":true,"noPropertyAccessFromIndexSignature":true,"noImplicitReturns":true,"noFallthroughCasesInSwitch":true,"paths":{"@/*":["src/*"]},"skipLibCheck":true,"esModuleInterop":true,"sourceMap":true,"declaration":false,"experimentalDecorators":true,"moduleResolution":"bundler","importHelpers":true,"target":"ES2022","module":"ES2022","useDefineForClassFields":false,"lib":["ES2022","dom"]},\n "references":[{"path":"./tsconfig.app.json"}]\n}\n'); zip.file(folder+"tsconfig.app.json",'{\n "extends":"./tsconfig.json",\n "compilerOptions":{"outDir":"./dist/out-tsc","types":[]},\n "files":["src/main.ts"],\n "include":["src/**/*.d.ts"]\n}\n'); zip.file(folder+"src/index.html","\n\n\n \n "+slugTitle(pn)+"\n \n \n \n\n\n \n\n\n"); zip.file(folder+"src/main.ts","import { bootstrapApplication } from '@angular/platform-browser';\nimport { appConfig } from 
'./app/app.config';\nimport { AppComponent } from './app/app.component';\n\nbootstrapApplication(AppComponent, appConfig)\n .catch(err => console.error(err));\n"); zip.file(folder+"src/styles.css","* { margin: 0; padding: 0; box-sizing: border-box; }\nbody { font-family: system-ui, -apple-system, sans-serif; background: #f9fafb; color: #111827; }\n"); var hasComp=Object.keys(extracted).some(function(k){return k.indexOf("app.component")>=0;}); if(!hasComp){ zip.file(folder+"src/app/app.component.ts","import { Component } from '@angular/core';\nimport { RouterOutlet } from '@angular/router';\n\n@Component({\n selector: 'app-root',\n standalone: true,\n imports: [RouterOutlet],\n templateUrl: './app.component.html',\n styleUrl: './app.component.css'\n})\nexport class AppComponent {\n title = '"+pn+"';\n}\n"); zip.file(folder+"src/app/app.component.html","
\n
\n

"+slugTitle(pn)+"

\n

Built with PantheraHive BOS

\n
\n \n
\n"); zip.file(folder+"src/app/app.component.css",".app-header{display:flex;flex-direction:column;align-items:center;justify-content:center;min-height:60vh;gap:16px}h1{font-size:2.5rem;font-weight:700;color:#6366f1}\n"); } zip.file(folder+"src/app/app.config.ts","import { ApplicationConfig, provideZoneChangeDetection } from '@angular/core';\nimport { provideRouter } from '@angular/router';\nimport { routes } from './app.routes';\n\nexport const appConfig: ApplicationConfig = {\n providers: [\n provideZoneChangeDetection({ eventCoalescing: true }),\n provideRouter(routes)\n ]\n};\n"); zip.file(folder+"src/app/app.routes.ts","import { Routes } from '@angular/router';\n\nexport const routes: Routes = [];\n"); Object.keys(extracted).forEach(function(p){ var fp=p.startsWith("src/")?p:"src/"+p; zip.file(folder+fp,extracted[p]); }); zip.file(folder+"README.md","# "+slugTitle(pn)+"\n\nGenerated by PantheraHive BOS.\n\n## Setup\n\`\`\`bash\nnpm install\nng serve\n# or: npm start\n\`\`\`\n\n## Build\n\`\`\`bash\nng build\n\`\`\`\n\nOpen in VS Code with Angular Language Service extension.\n"); zip.file(folder+".gitignore","node_modules/\ndist/\n.env\n.DS_Store\n*.local\n.angular/\n"); } /* --- Python --- */ function buildPython(zip,folder,app,code){ var title=slugTitle(app); var pn=pkgName(app); var src=code.replace(/^\`\`\`[\w]*\n?/m,"").replace(/\n?\`\`\`$/m,"").trim(); var reqMap={"numpy":"numpy","pandas":"pandas","sklearn":"scikit-learn","tensorflow":"tensorflow","torch":"torch","flask":"flask","fastapi":"fastapi","uvicorn":"uvicorn","requests":"requests","sqlalchemy":"sqlalchemy","pydantic":"pydantic","dotenv":"python-dotenv","PIL":"Pillow","cv2":"opencv-python","matplotlib":"matplotlib","seaborn":"seaborn","scipy":"scipy"}; var reqs=[]; Object.keys(reqMap).forEach(function(k){if(src.indexOf("import "+k)>=0||src.indexOf("from "+k)>=0)reqs.push(reqMap[k]);}); var reqsTxt=reqs.length?reqs.join("\n"):"# add dependencies here\n"; zip.file(folder+"main.py",src||"# 
"+title+"\n# Generated by PantheraHive BOS\n\nprint(title+\" loaded\")\n"); zip.file(folder+"requirements.txt",reqsTxt); zip.file(folder+".env.example","# Environment variables\n"); zip.file(folder+"README.md","# "+title+"\n\nGenerated by PantheraHive BOS.\n\n## Setup\n\`\`\`bash\npython3 -m venv .venv\nsource .venv/bin/activate\npip install -r requirements.txt\n\`\`\`\n\n## Run\n\`\`\`bash\npython main.py\n\`\`\`\n"); zip.file(folder+".gitignore",".venv/\n__pycache__/\n*.pyc\n.env\n.DS_Store\n"); } /* --- Node.js --- */ function buildNode(zip,folder,app,code){ var title=slugTitle(app); var pn=pkgName(app); var src=code.replace(/^\`\`\`[\w]*\n?/m,"").replace(/\n?\`\`\`$/m,"").trim(); var depMap={"mongoose":"^8.0.0","dotenv":"^16.4.5","axios":"^1.7.9","cors":"^2.8.5","bcryptjs":"^2.4.3","jsonwebtoken":"^9.0.2","socket.io":"^4.7.4","uuid":"^9.0.1","zod":"^3.22.4","express":"^4.18.2"}; var deps={}; Object.keys(depMap).forEach(function(k){if(src.indexOf(k)>=0)deps[k]=depMap[k];}); if(!deps["express"])deps["express"]="^4.18.2"; var pkgJson=JSON.stringify({"name":pn,"version":"1.0.0","main":"src/index.js","scripts":{"start":"node src/index.js","dev":"nodemon src/index.js"},"dependencies":deps,"devDependencies":{"nodemon":"^3.0.3"}},null,2)+"\n"; zip.file(folder+"package.json",pkgJson); var fallback="const express=require(\"express\");\nconst app=express();\napp.use(express.json());\n\napp.get(\"/\",(req,res)=>{\n res.json({message:\""+title+" API\"});\n});\n\nconst PORT=process.env.PORT||3000;\napp.listen(PORT,()=>console.log(\"Server on port \"+PORT));\n"; zip.file(folder+"src/index.js",src||fallback); zip.file(folder+".env.example","PORT=3000\n"); zip.file(folder+".gitignore","node_modules/\n.env\n.DS_Store\n"); zip.file(folder+"README.md","# "+title+"\n\nGenerated by PantheraHive BOS.\n\n## Setup\n\`\`\`bash\nnpm install\n\`\`\`\n\n## Run\n\`\`\`bash\nnpm run dev\n\`\`\`\n"); } /* --- Vanilla HTML --- */ function buildVanillaHtml(zip,folder,app,code){ var 
// NOTE(review): everything from here through the document-workflow section is
// corrupted by extraction — HTML markup inside string literals (the full-doc
// sniffing strings, the index.html wrapper, the markdown->HTML replacement
// templates and the styled-document shell) was stripped, leaving unbalanced
// fragments. The text below is preserved byte-for-byte; restore it from the
// original source before editing. Intent, as far as the surviving code shows:
// buildVanillaHtml wraps `code` in an HTML shell unless it already is a full
// document; MAIN loads JSZip from the CDN, detects the language of the panel
// output, dispatches to the matching build* scaffold, and otherwise converts
// the markdown content to a styled HTML document.
title=slugTitle(app); var isFullDoc=code.trim().toLowerCase().indexOf("=0||code.trim().toLowerCase().indexOf("=0; var indexHtml=isFullDoc?code:"\n\n\n\n\n"+title+"\n\n\n\n"+code+"\n\n\n\n"; zip.file(folder+"index.html",indexHtml); zip.file(folder+"style.css","/* "+title+" — styles */\n*{margin:0;padding:0;box-sizing:border-box}\nbody{font-family:system-ui,-apple-system,sans-serif;background:#fff;color:#1a1a2e}\n"); zip.file(folder+"script.js","/* "+title+" — scripts */\n"); zip.file(folder+"assets/.gitkeep",""); zip.file(folder+"README.md","# "+title+"\n\nGenerated by PantheraHive BOS.\n\n## Open\nDouble-click \`index.html\` in your browser.\n\nOr serve locally:\n\`\`\`bash\nnpx serve .\n# or\npython3 -m http.server 3000\n\`\`\`\n"); zip.file(folder+".gitignore",".DS_Store\nnode_modules/\n.env\n"); } /* ===== MAIN ===== */ var sc=document.createElement("script"); sc.src="https://cdnjs.cloudflare.com/ajax/libs/jszip/3.10.1/jszip.min.js"; sc.onerror=function(){ if(lbl)lbl.textContent="Download ZIP"; alert("JSZip load failed — check connection."); }; sc.onload=function(){ var zip=new JSZip(); var base=(_phFname||"output").replace(/\.[^.]+$/,""); var app=base.toLowerCase().replace(/[^a-z0-9]+/g,"_").replace(/^_+|_+$/g,"")||"my_app"; var folder=app+"/"; var vc=document.getElementById("panel-content"); var panelTxt=vc?(vc.innerText||vc.textContent||""):""; var lang=detectLang(_phCode,panelTxt); if(_phIsHtml){ buildVanillaHtml(zip,folder,app,_phCode); } else if(lang==="flutter"){ buildFlutter(zip,folder,app,_phCode,panelTxt); } else if(lang==="react-native"){ buildReactNative(zip,folder,app,_phCode,panelTxt); } else if(lang==="swift"){ buildSwift(zip,folder,app,_phCode,panelTxt); } else if(lang==="kotlin"){ buildKotlin(zip,folder,app,_phCode,panelTxt); } else if(lang==="react"){ buildReact(zip,folder,app,_phCode,panelTxt); } else if(lang==="vue"){ buildVue(zip,folder,app,_phCode,panelTxt); } else if(lang==="angular"){ buildAngular(zip,folder,app,_phCode,panelTxt); } else 
if(lang==="python"){ buildPython(zip,folder,app,_phCode); } else if(lang==="node"){ buildNode(zip,folder,app,_phCode); } else { /* Document/content workflow */ var title=app.replace(/_/g," "); var md=_phAll||_phCode||panelTxt||"No content"; zip.file(folder+app+".md",md); var h=""+title+""; h+="

"+title+"

"; var hc=md.replace(/&/g,"&").replace(//g,">"); hc=hc.replace(/^### (.+)$/gm,"

$1

"); hc=hc.replace(/^## (.+)$/gm,"

$1

"); hc=hc.replace(/^# (.+)$/gm,"

$1

"); hc=hc.replace(/\*\*(.+?)\*\*/g,"$1"); hc=hc.replace(/\n{2,}/g,"

"); h+="

"+hc+"

Generated by PantheraHive BOS
"; zip.file(folder+app+".html",h); zip.file(folder+"README.md","# "+title+"\n\nGenerated by PantheraHive BOS.\n\nFiles:\n- "+app+".md (Markdown)\n- "+app+".html (styled HTML)\n"); } zip.generateAsync({type:"blob"}).then(function(blob){ var a=document.createElement("a"); a.href=URL.createObjectURL(blob); a.download=app+".zip"; a.click(); URL.revokeObjectURL(a.href); if(lbl)lbl.textContent="Download ZIP"; }); }; document.head.appendChild(sc); } function phShare(){navigator.clipboard.writeText(window.location.href).then(function(){var el=document.getElementById("ph-share-lbl");if(el){el.textContent="Link copied!";setTimeout(function(){el.textContent="Copy share link";},2500);}});}function phEmbed(){var runId=window.location.pathname.split("/").pop().replace(".html","");var embedUrl="https://pantherahive.com/embed/"+runId;var code='';navigator.clipboard.writeText(code).then(function(){var el=document.getElementById("ph-embed-lbl");if(el){el.textContent="Embed code copied!";setTimeout(function(){el.textContent="Get Embed Code";},2500);}});}