Site SEO Auditor
Run ID: 69cb942461b1021a29a8a63d | Date: 2026-03-31 | Category: SEO & Growth
PantheraHive BOS
BOS Dashboard

Step 2 of 5: Data Differencing and Change Analysis

This step focuses on the critical process of analyzing the newly generated SEO audit report against the previous audit. By performing a sophisticated "diff" operation using our hive_db (MongoDB) capabilities, we provide a clear, actionable comparison that highlights changes, improvements, and regressions across your website's SEO health.


1. Purpose and Overview

The primary goal of the hive_db → diff step is to provide a comprehensive "before and after" view of your site's SEO performance. After the headless crawler (Step 1) has completed its audit and generated a new SiteAuditReport, this step retrieves the most recent prior report from MongoDB and intelligently compares the two. The resulting diff document is then stored alongside the new audit, offering immediate insights into the impact of recent changes and the overall trend of your SEO efforts.

This process transforms raw audit data into actionable intelligence, enabling you to quickly identify improvements, regressions, and persistent issues across your site's SEO health.


2. Detailed Diff Generation Process

2.1. Data Retrieval from hive_db

  1. Current Audit Data: The newly generated SiteAuditReport (from the crawler → audit step) is the baseline for the current state.
  2. Previous Audit Retrieval: The system queries hive_db to retrieve the immediately preceding SiteAuditReport for your specific site. This ensures a direct, chronological comparison. If no previous audit exists (e.g., for the very first run), the diff will indicate that all findings are "new."

2.2. Comparison Logic and Granularity

The diffing engine performs a multi-layered comparison, analyzing changes at both the site-wide and individual page levels, and for each of the 12 SEO checklist items:

  1. Site-Wide Aggregates:

* Overall SEO health score.

* Average scores for Core Web Vitals (LCP, CLS, FID).

* Percentage coverage for Image Alt tags, H1 presence, etc.

* Counts of unique meta titles/descriptions, pages with structured data, etc.

* Comparison of total pages crawled, pages with issues, etc.

  2. Page-Level Changes:

* New Pages: Pages found in the current audit but not in the previous one. The full audit details for these pages are included in the diff.

* Removed Pages: Pages found in the previous audit but not in the current one.

* Modified Pages: For pages present in both audits, a granular comparison is performed for each SEO metric:

* Meta Title: Changes in content, length, or uniqueness status.

* Meta Description: Changes in content, length, or uniqueness status.

* H1 Tag: Presence/absence, changes in content, or uniqueness status.

* Image Alt Tags: Changes in coverage percentage for images on the page, or specific missing alt texts identified.

* Internal Link Density: Changes in the count of internal links found on the page.

* Canonical Tag: Presence/absence, or changes in the canonical URL.

* Open Graph Tags: Presence/absence, or changes in specific OG tag values (e.g., og:title, og:image).

* Core Web Vitals: Changes in LCP, CLS, and FID scores for that specific page.

* Structured Data: Presence/absence, or changes in the type/validity of structured data.

* Mobile Viewport: Presence/absence of the viewport meta tag.

  3. Issue Tracking:

* Resolved Issues: Specific SEO issues (e.g., "Missing H1 on X page") that were present in the previous audit but are no longer detected.

* New Issues: Specific SEO issues that were not present previously but are now detected.

* Persistent Issues: Issues that remain unresolved across both audits.

2.3. Diff Structure and Output

The generated diff is a structured JSON object, making it both human-readable and machine-parseable. This diff document is then embedded directly within the SiteAuditReport document for the current audit, providing a complete historical context.

Example Diff Structure (Illustrative):

json • 2,421 chars
{
  "audit_id": "new_audit_report_id_XYZ",
  "previous_audit_id": "old_audit_report_id_ABC",
  "diff_summary": {
    "overall_score": { "old": 85, "new": 88, "change": "+3 (Improved)" },
    "pages_crawled": { "old": 150, "new": 155, "change": "+5" },
    "issues_resolved_count": 12,
    "issues_introduced_count": 5,
    "core_web_vitals_avg": {
      "LCP": { "old": "2.8s", "new": "2.5s", "change": "-0.3s (Improved)" },
      "CLS": { "old": "0.15", "new": "0.10", "change": "-0.05 (Improved)" }
    }
  },
  "page_level_changes": [
    {
      "url": "https://yourdomain.com/product-page-a",
      "status": "modified",
      "changes": {
        "meta_description": { "old": "Generic product description.", "new": "Optimized description with keywords for Product A." },
        "h1_presence": { "old": false, "new": true, "new_value": "Product A - Best in Class" },
        "image_alt_coverage": { "old": "50%", "new": "100%", "change": "+50% (Improved)" }
      },
      "resolved_issues_on_page": ["Missing H1 tag"],
      "new_issues_on_page": []
    },
    {
      "url": "https://yourdomain.com/new-blog-post",
      "status": "added",
      "audit_details_for_new_page": { /* ... full audit data for this new page ... */ }
    },
    {
      "url": "https://yourdomain.com/old-promo-page",
      "status": "removed"
    },
    {
      "url": "https://yourdomain.com/service-page-b",
      "status": "modified",
      "changes": {
        "canonical_tag": { "old": "https://yourdomain.com/service-page-b", "new": "https://yourdomain.com/services/service-page-b (Changed URL)" },
        "core_web_vitals": {
          "LCP": { "old": "2.0s", "new": "3.5s", "change": "+1.5s (Regression)" }
        }
      },
      "resolved_issues_on_page": [],
      "new_issues_on_page": ["High LCP score (Regression)"]
    }
  ],
  "overall_issue_tracking": {
    "resolved": [
      {"issue": "Missing H1 tag", "pages": ["https://yourdomain.com/product-page-a", "https://yourdomain.com/about-us"]},
      {"issue": "Duplicate Meta Description", "pages": ["https://yourdomain.com/category-1"]}
    ],
    "introduced": [
      {"issue": "High LCP score", "pages": ["https://yourdomain.com/service-page-b"]},
      {"issue": "Missing Canonical Tag", "pages": ["https://yourdomain.com/new-landing-page"]}
    ],
    "persistent": [
      {"issue": "Missing Image Alt Text (Overall)", "pages_affected_count": 25}
    ]
  }
}
Sandboxed live preview

Workflow Step 1 of 5: Puppeteer Crawl Initiation (puppeteer → crawl)

This document details the execution and outcomes of the initial phase of your Site SEO Auditor workflow: the headless crawl. This crucial step lays the groundwork for the entire SEO audit by systematically discovering and cataloging all accessible pages on your website.


Workflow Step Overview

Workflow Name: Site SEO Auditor

Step Description: A headless crawler that visits every page on your site (using Puppeteer) and audits it against a 12-point SEO checklist.

Current Step: puppeteer → crawl (Step 1 of 5)

The primary objective of this initial phase is to accurately simulate a user's browser experience to discover all internal pages of your website, including those rendered by JavaScript. This comprehensive page discovery is essential before any SEO audit can commence.


Purpose of Step 1: Headless Crawl

The "Puppeteer Crawl Initiation" step is designed to:

  1. Discover All Internal URLs: Systematically navigate your website starting from the provided base URL to identify all internal links and pages.
  2. Render JavaScript-Driven Content: Utilize a headless browser environment (Puppeteer) to fully render pages, ensuring that dynamically loaded content and links are discovered, just as a real user's browser would.
  3. Establish Crawl Scope: Define the boundaries of the audit by collecting a definitive list of pages that will subsequently undergo the 12-point SEO checklist.
  4. Handle Navigation Robustly: Manage redirects, broken links (though not audited in this step, their existence is noted), and various page load states to ensure maximum page discovery.

Mechanism: How the Headless Crawl Works

Our system employs Puppeteer, a Node.js library, to control a headless Chromium browser. This simulates a real user's visit to your site with high fidelity.

  • Initialization: The process begins by launching a headless Chromium instance.
  • Initial Navigation: The crawler navigates to the specified starting URL (typically your website's homepage).
  • Page Rendering & Link Extraction:

* Upon reaching a page, the browser waits for the page to fully load, including the execution of all client-side JavaScript.

* Once loaded, the crawler extracts all internal <a> (anchor) tags and their href attributes, identifying potential new pages to visit.

  • Queue Management: Discovered URLs are added to a prioritized queue. The crawler ensures that each unique internal URL is visited only once.
  • Depth & Scope Control: The crawler respects configurable parameters such as maximum crawl depth and domain boundaries, ensuring only internal pages within the specified scope are processed.
  • Politeness Policy: To avoid overwhelming your server, the crawler implements a politeness delay between page requests, mimicking natural user browsing behavior and preventing server strain.
  • Resource Management: The headless browser efficiently manages resources, closing pages and browser instances as needed to maintain performance.

Key Capabilities & Features

  • True Browser Simulation: Accurately renders pages, including those heavily reliant on JavaScript frameworks (e.g., React, Angular, Vue.js), ensuring no content or links are missed.
  • Dynamic Content Discovery: Identifies links and content that might only appear after user interaction or specific JavaScript execution.
  • Robust Error Handling: Designed to gracefully handle various network and page-load errors during the crawl, logging issues without halting the entire process.
  • Configurable Crawl Depth: Allows for control over how deeply the crawler navigates your site, ensuring relevance to the audit scope.
  • Session Management: Maintains a consistent browser session for the duration of the crawl, handling cookies and local storage as a real user would.
  • URL Normalization: Cleans and normalizes discovered URLs to prevent duplicate entries and ensure accurate tracking.

Input & Configuration

This step requires the following primary input:

  • Starting URL: The root URL of your website (e.g., https://www.yourwebsite.com).
  • Crawl Depth (Configurable): The maximum number of links deep the crawler will follow from the starting URL. (Default: 3-5, configurable based on site size and needs).
  • Rate Limit (Configurable): The delay in milliseconds between page requests to ensure server politeness.

Output & Deliverables

Upon successful completion of the "Puppeteer Crawl Initiation" step, the following will be produced:

  • Comprehensive List of Discovered URLs: A definitive array of all unique, internal URLs found on your website that are within the specified crawl depth. This list forms the basis for the subsequent SEO audit.
  • Crawl Summary Report:

* Total URLs discovered.

* Total URLs successfully visited.

* Any URLs that could not be reached or encountered errors during the crawl (e.g., 404s, network timeouts), with associated error codes.

* Crawl duration.

  • Initial Crawl Log: Detailed logs capturing the navigation path, resource loading, and any warnings or errors encountered during the discovery phase.

This output is then passed as input to the next stage of the workflow for detailed SEO analysis.


Error Handling & Robustness

The crawler is built with robust error handling mechanisms:

  • Retry Logic: Failed page loads or network errors are retried a configurable number of times.
  • Timeout Management: Pages that take too long to load are gracefully timed out, preventing indefinite hangs.
  • Resource Leak Prevention: Browser pages and instances are meticulously closed to prevent memory leaks and ensure stable long-term operation.
  • Detailed Logging: All critical events, errors, and warnings are logged, providing full transparency and diagnostic capabilities.

Next Steps in Workflow

Once the headless crawl is complete and the comprehensive list of discoverable URLs has been generated, the workflow will proceed to Step 2: Page Audit & Data Extraction. In this subsequent step, each discovered URL will be revisited (or processed from cached content where appropriate) to extract specific SEO attributes and perform the 12-point checklist analysis.


3. Customer Value and Actionable Insights

This detailed diff output provides immense value to our customers:

  • Track Progress and ROI: Clearly demonstrate the impact of SEO changes and development efforts over time. See how your overall site score improves and specific issues get resolved.
  • Identify Regressions Quickly: Pinpoint new issues or performance degradations immediately, allowing for rapid intervention before they significantly impact search rankings.
  • Prioritize Fixes: The diff highlights the most impactful changes, helping you prioritize which issues to address first based on whether they are new, resolved, or persistent.
  • Historical Context: Every audit report now contains its own change log, providing a rich history of your site's SEO evolution without needing to manually compare reports.
  • Informed Decision Making: Gain data-driven insights to make informed decisions about content updates, technical SEO adjustments, and development priorities.
  • Reporting and Communication: Easily generate reports for stakeholders, showcasing tangible improvements and areas requiring further attention.

By integrating this sophisticated diffing capability, the Site SEO Auditor goes beyond simply reporting the current state, offering a dynamic and insightful view into your website's SEO journey.

gemini Output

Step 3 of 5: AI-Powered Fix Generation (Gemini Batch Processing)

This crucial step leverages the advanced capabilities of Google's Gemini AI to automatically generate precise, actionable fixes for all SEO issues identified during the comprehensive site crawl. This ensures that you receive not just a report of problems, but a detailed blueprint for their resolution, significantly streamlining your optimization efforts.

Overview

Following the in-depth audit conducted by our headless crawler, a structured list of SEO deficiencies is compiled. This list, containing specific details about each issue, is then fed into Gemini for intelligent analysis and fix generation. The batch_generate process ensures that hundreds or thousands of issues across your site are addressed efficiently and concurrently.

Input to Gemini

For each identified SEO issue, Gemini receives a rich context to ensure the most accurate and relevant fix. This input typically includes:

  • Page URL: The specific URL where the issue was detected.
  • Issue Type: A clear categorization of the SEO problem (e.g., "Missing H1 Tag", "Duplicate Meta Description", "Image Lacking Alt Text", "Incorrect Canonical Tag").
  • Relevant HTML/DOM Snippet: The exact portion of the page's code where the issue resides, providing crucial contextual information.
  • Current Value/State: The problematic content or attribute (e.g., an empty alt attribute, the duplicated meta description text).
  • Severity Level: An indication of the issue's impact on SEO.
  • Crawler Observations: Any additional notes from the crawler that might aid in diagnosis (e.g., "LCP element is a large, unoptimized image").

Gemini's Fix Generation Process

Gemini processes these inputs through a sophisticated multi-stage approach:

  1. Contextual Understanding: Gemini analyzes the provided HTML and issue description, applying its vast knowledge of web standards, SEO best practices, and semantic understanding of content. It discerns the intent and context of the page to generate appropriate fixes.
  2. Problem Diagnosis: It precisely identifies the root cause of the SEO violation within the given context. For instance, if an H1 is missing, Gemini determines the most logical content to be promoted to an H1 based on surrounding text and page structure.
  3. Solution Synthesis: Gemini then synthesizes exact, ready-to-implement code snippets or configuration recommendations. This is where its ability to generate high-quality code and structured data is paramount.
  4. Batch Processing Efficiency: The batch_generate functionality is critical for large sites. Instead of processing issues one by one, Gemini handles multiple requests in parallel, drastically reducing the time required to generate fixes for an entire site audit. This ensures that even sites with thousands of pages and hundreds of issues receive timely and comprehensive solutions.

Types of Fixes Generated

Gemini is capable of generating a wide range of fixes, tailored to the specific SEO checklist points:

  • Meta Title & Description Uniqueness:

* Fix: Suggestions for unique, compelling <title> and <meta name="description"> tags, often leveraging page content for relevance.

* Example: <title>New Suggested Title for [Page Topic] | Your Brand</title>

  • H1 Presence & Uniqueness:

* Fix: Identification of suitable text to be promoted to an <h1> tag, or suggestions for creating a new, descriptive <h1>.

* Example: <h1>[Proposed Main Heading for Page]</h1>

  • Image Alt Coverage:

* Fix: Generation of descriptive alt text for images based on image filenames, surrounding text, or visual context (if image analysis is enabled).

* Example: <img src="product.jpg" alt="Blue denim jacket, front view">

  • Internal Link Density:

* Fix: Recommendations for adding relevant internal links within content, specifying anchor text and target URLs to improve crawlability and topic authority.

* Example: Consider adding a link to <a href="/related-page">Related Topic</a> within this paragraph.

  • Canonical Tags:

* Fix: Generation of the correct <link rel="canonical"> tag, pointing to the preferred version of a page to prevent duplicate content issues.

* Example: <link rel="canonical" href="https://www.yourdomain.com/preferred-page-url/">

  • Open Graph Tags:

* Fix: Creation of properly formatted <meta property="og:..."> tags (e.g., og:title, og:description, og:image, og:url) to optimize social media sharing.

* Example: <meta property="og:title" content="Your Page Title for Social Media">

  • Core Web Vitals (LCP/CLS/FID):

* Fix: While direct code for performance is complex, Gemini can suggest specific optimizations based on identified bottlenecks:

* LCP: Recommendations for preloading critical images, optimizing image formats, or deferring non-critical CSS/JS.

* CLS: Suggestions for specifying image/video dimensions, using font-display: swap, or reserving space for dynamically injected content.

* FID: Advice on deferring non-critical JavaScript, breaking up long tasks, or optimizing third-party script loading.

* Example: Consider adding <link rel="preload" href="/path/to/lcp-image.jpg" as="image"> to your <head>.

  • Structured Data Presence:

* Fix: Generation of valid JSON-LD snippets for various Schema.org types (e.g., Article, Product, LocalBusiness, FAQPage) relevant to the page content.

* Example: A full JSON-LD script for a product page, including name, description, image, price, and availability.

  • Mobile Viewport:

* Fix: Ensuring the correct <meta name="viewport"> tag is present for optimal mobile responsiveness.

* Example: <meta name="viewport" content="width=device-width, initial-scale=1.0">

Output and Deliverable

The output of this step is a comprehensive set of "exact fixes" that are meticulously structured and ready for implementation. These generated fixes, along with the original audit findings, are then stored in MongoDB as part of the SiteAuditReport. This report will include a clear "before" and "after" diff, showcasing the proposed improvements and providing you with a transparent view of the value generated by the AI.

This deliverable empowers your team with a clear, actionable roadmap to resolve critical SEO issues efficiently, reducing manual effort and accelerating your path to improved search engine rankings and user experience.

hive_db Output

Site SEO Auditor: Database Upsert (hive_db → upsert)

This document details the successful execution of Step 4: hive_db → upsert within your "Site SEO Auditor" workflow. This crucial step is responsible for securely storing all generated SEO audit data into your dedicated MongoDB instance, ensuring data persistence, historical tracking, and the foundation for "before/after" comparisons.


1. Purpose of this Step

The hive_db → upsert step serves as the persistent storage layer for all findings from the headless crawler and the Gemini-powered fix generation. After the crawler has visited every page and performed the 12-point SEO checklist audit, and Gemini has identified and generated fixes for any broken elements, this step takes all that comprehensive data and intelligently stores it within your PantheraHive MongoDB.

Key Objectives:

  • Data Persistence: Ensure all audit results are permanently saved.
  • Historical Tracking: Create a chronological record of your site's SEO performance.
  • Diff Generation Foundation: Store data in a structured way that enables "before/after" comparisons for future audits.
  • Centralized Reporting: Provide a single source of truth for all SEO audit reports.

2. SiteAuditReport Document Structure

The audit results are stored as a SiteAuditReport document within a dedicated collection in MongoDB. This document is meticulously structured to capture all aspects of the audit, from site-wide aggregates to granular page-level details and specific fixes.

Here's a conceptual overview of the SiteAuditReport schema:


{
  "_id": ObjectId, // Unique identifier for the report
  "auditId": "string", // UUID for this specific audit run
  "siteUrl": "string", // The base URL of the audited site (e.g., "https://www.example.com")
  "auditDate": "ISODate", // Timestamp of when the audit was completed
  "status": "string", // "completed", "failed", etc.
  "totalPagesAudited": "number",
  "overallScore": { // Optional: A calculated overall score for the site
    "value": "number", // e.g., 0-100
    "grade": "string" // e.g., "A", "B", "C"
  },
  "siteAggregates": {
    "metaTitleCoverage": { "percentage": "number", "issues": "number" },
    "metaDescriptionCoverage": { "percentage": "number", "issues": "number" },
    "h1PresenceCoverage": { "percentage": "number", "issues": "number" },
    "imageAltCoverage": { "percentage": "number", "issues": "number" },
    "canonicalTagCoverage": { "percentage": "number", "issues": "number" },
    "openGraphTagCoverage": { "percentage": "number", "issues": "number" },
    "structuredDataCoverage": { "percentage": "number", "issues": "number" },
    "mobileViewportCoverage": { "percentage": "number", "issues": "number" },
    "coreWebVitalsSummary": {
      "lcpIssues": "number",
      "clsIssues": "number",
      "fidIssues": "number",
      "pagesWithGoodCWV": "number",
      "pagesWithNeedsImprovementCWV": "number",
      "pagesWithPoorCWV": "number"
    },
    "internalLinkDensitySummary": {
      "avgLinksPerPage": "number",
      "pagesWithLowLinkDensity": "number"
    },
    "uniqueTitlesPercentage": "number", // % of pages with unique meta titles
    "uniqueDescriptionsPercentage": "number" // % of pages with unique meta descriptions
  },
  "pages": [ // Array of individual page audit results
    {
      "url": "string", // The URL of the audited page
      "statusCode": "number", // HTTP status code (e.g., 200, 404)
      "crawlTimeMs": "number", // Time taken to crawl this page
      "seoMetrics": {
        "metaTitle": {
          "value": "string",
          "length": "number",
          "status": "string", // "pass", "fail_missing", "fail_long", "fail_short", "fail_duplicate"
          "isUnique": "boolean",
          "issueDetails": "string" // e.g., "Meta title is too long (75 chars)"
        },
        "metaDescription": {
          "value": "string",
          "length": "number",
          "status": "string", // "pass", "fail_missing", "fail_long", "fail_short", "fail_duplicate"
          "isUnique": "boolean",
          "issueDetails": "string" // e.g., "Meta description is missing"
        },
        "h1Tag": {
          "value": "string", // The content of the H1 tag
          "status": "string", // "pass", "fail_missing", "fail_multiple"
          "issueDetails": "string" // e.g., "Multiple H1 tags found"
        },
        "imageAlts": {
          "totalImages": "number",
          "imagesMissingAlt": [
            { "src": "string", "issueDetails": "string" } // e.g., "Image has no alt text"
          ],
          "coveragePercentage": "number", // (totalImages - imagesMissingAlt.length) / totalImages * 100
          "status": "string" // "pass", "fail_low_coverage"
        },
        "internalLinks": {
          "count": "number",
          "status": "string", // "pass", "fail_low_density"
          "links": [ // Optional: detailed list of links
            { "href": "string", "anchorText": "string", "type": "internal" }
          ]
        },
        "canonicalTag": {
          "value": "string", // The canonical URL specified
          "status": "string", // "pass", "fail_missing", "fail_incorrect", "fail_self_referencing_issue"
          "issueDetails": "string" // e.g., "Canonical tag points to different URL"
        },
        "openGraphTags": {
          "ogTitle": { "value": "string", "status": "string" },
          "ogDescription": { "value": "string", "status": "string" },
          "ogImage": { "value": "string", "status": "string" },
          "status": "string", // "pass", "fail_missing_essential"
          "issueDetails": "string"
        },
        "coreWebVitals": {
          "lcp": { "value": "number", "status": "string" }, // "good", "needs_improvement", "poor"
          "cls": { "value": "number", "status": "string" }, // "good", "needs_improvement", "poor"
          "fid": { "value": "number", "status": "string" }, // "good", "needs_improvement", "poor" (or INP if available)
          "overallStatus": "string" // "pass", "fail_lcp", "fail_cls", "fail_fid"
        },
        "structuredData": {
          "presence": "boolean",
          "types": ["string"], // e.g., ["Article", "BreadcrumbList"]
          "isValid": "boolean", // Based on Google's Structured Data Testing Tool (if integrated)
          "status": "string", // "pass", "fail_missing", "fail_invalid"
          "issueDetails": "string"
        },
        "mobileViewport": {
          "presence": "boolean",
          "status": "string", // "pass", "fail_missing"
          "issueDetails": "string" // e.g., "Viewport meta tag is missing"
        }
      },
      "issuesIdentified": [ // List of specific issues found on this page
        {
          "type": "string", // e.g., "meta_title_long", "h1_missing", "image_alt_missing", "cwv_lcp_poor"
          "severity": "string", // "critical", "high", "medium", "low"
          "description": "string", // Human-readable description of the issue
          "element": "string", // Selector or identifier of the problematic element (e.g., "head > title", "img[src='/img.jpg']")
          "currentValue": "string", // The problematic value
          "geminiFix": { // Gemini's generated fix for this specific issue
            "suggestedChange": "string", // The exact code/text fix
            "rationale": "string", // Explanation from Gemini
            "confidence": "number" // Gemini's confidence score (0-1)
          }
        }
      ]
    }
  ],
  "previousAuditId": "string" // Reference to the _id of the previous audit report for diffing
}

3. Upsert Logic and Diff Generation

The hive_db → upsert operation is designed for efficiency and to facilitate historical comparisons:

  1. Unique Identification: Each audit run generates a unique auditId. The _id of the MongoDB document will also be a unique ObjectId.
  2. Referencing Previous Audit: Before inserting the new SiteAuditReport, the system queries the database for the most recent SiteAuditReport for the siteUrl being audited.

* If a previous report is found, its _id is stored in the previousAuditId field of the new report. This creates a linked list of audit reports, making it trivial to retrieve the current and its immediate predecessor for "before/after" comparisons.

* If no previous report exists (first audit), previousAuditId will be null or omitted.

  3. Atomic Upsert: The SiteAuditReport document is then inserted into the site_audit_reports collection.

* An "upsert" operation is used conceptually. While typically an insert is performed for new reports, if there was a retry mechanism that could lead to duplicate auditIds for the same conceptual run, an upsert (update if exists, insert if not) would ensure idempotency based on auditId. For this workflow, it's primarily an insert to create a new historical record.

  4. No Direct Diff Storage: The "before/after diff" is not stored directly in the database at this step. Instead, the database stores the complete state of each audit. The diff is dynamically computed by the reporting or UI layer by comparing the current SiteAuditReport with the SiteAuditReport referenced by its previousAuditId. This approach keeps the database lean and flexible, as diff logic can evolve without requiring schema changes.

4. Deliverable and Value

Upon completion of this hive_db → upsert step, the following deliverable is achieved, providing significant value:

  • Persistent Audit Record: A complete, detailed SiteAuditReport document for your website, containing all 12 SEO checklist points, Core Web Vitals, and Gemini-generated fixes, is now securely stored in your PantheraHive MongoDB.
  • Historical Data Foundation: You now have a chronological snapshot of your site's SEO health. Each new audit will add to this history, building a valuable dataset for trend analysis and performance tracking.
  • Actionable Data for Reporting: The structured data within MongoDB is immediately available for the next step (reporting) and for any custom integrations you might wish to build.
  • Readiness for "Before/After" Analysis: The linking of previousAuditId ensures that your reports are primed for visual "before/after" comparisons in the final reporting interface, allowing you to quickly see what has improved, deteriorated, or remained the same since the last audit.
  • Scalable Storage: MongoDB's flexible schema allows for future enhancements to the audit checklist without requiring complex database migrations.

5. Next Steps

The data is now securely stored and organized. The final step in the "Site SEO Auditor" workflow will involve:

  • Reporting and Notification: Generating a comprehensive, user-friendly report based on the stored SiteAuditReport data, highlighting key findings, performance trends, and the Gemini-generated fixes. This report will be delivered to you via your preferred notification channel (e.g., email, dashboard).
  • Dashboard Integration: Populating a dedicated dashboard with interactive visualizations of your site's SEO performance over time, including the "before/after" diffs.
hive_db Output

Workflow Step 5 of 5: hive_db → Conditional Update

This final step of the "Site SEO Auditor" workflow is critical for persisting all collected audit data, generated fixes, and historical comparisons into your dedicated PantheraHive database. It ensures that every SEO audit, whether scheduled or on-demand, is meticulously recorded, providing a robust historical record and enabling actionable insights.


Purpose of this Step

The hive_db → conditional_update step serves as the definitive storage mechanism for all outputs generated by the SEO Auditor. Its primary functions are:

  1. Data Persistence: To permanently store the comprehensive SiteAuditReport for each executed audit.
  2. Historical Tracking: To maintain a chronological record of your site's SEO performance, allowing for trend analysis and progress monitoring.
  3. Before/After Diff Storage: To record the exact changes and improvements over time, especially after applying Gemini-generated fixes.
  4. Data Integrity: To ensure that database updates are handled safely and efficiently, preventing data loss or corruption, particularly in scenarios of concurrent operations.

Data Processed and Stored

Upon completion of the crawling, auditing, and fix generation phases, the following structured data is packaged and committed to your MongoDB instance within PantheraHive:

  • SiteAuditReport Document: Each audit run generates a distinct document with a comprehensive schema, including:

* auditId: Unique identifier for each audit run.

* siteUrl: The URL of the site audited.

* auditTimestamp: Date and time of the audit execution.

* status: (e.g., completed, failed, partial).

* overallScore: An aggregate score reflecting the site's SEO health.

* pagesAudited: Count of unique pages successfully crawled and audited.

* pageReports: An array of detailed reports for each individual page crawled, containing:

* pageUrl

* metaTitle (content, uniqueness status)

* metaDescription (content, uniqueness status)

* h1Presence (boolean, content if present)

* imageAltCoverage (percentage, list of missing alt tags)

* internalLinkDensity (count, list of internal links)

* canonicalTag (present/absent, value)

* openGraphTags (presence, key values like og:title, og:description, og:image)

* coreWebVitals (LCP, CLS, FID scores)

* structuredData (presence, detected types)

* mobileViewport (presence, configuration)

* brokenElements (list of identified issues on that specific page).

* globalIssues: Aggregated site-wide issues (e.g., duplicate meta titles across multiple pages).

* geminiFixes: An array of suggested fixes generated by Gemini for identified brokenElements, including:

* issueDescription

* recommendedFix (code snippet, textual instruction)

* targetPageUrl

* fixId (unique identifier for the fix).

* beforeAfterDiff: A structured object comparing the current audit's key metrics and issues against the immediately preceding audit for the same site. This highlights:

* Changes in overallScore.

* New issues detected.

* Previously identified issues that have been resolved.

* Improvements or regressions in Core Web Vitals.


Conditional Update Mechanism

The "conditional update" aspect of this step ensures data integrity and efficient resource utilization within your database. Instead of a simple overwrite, this mechanism typically involves:

  1. Atomic Operations: Updates are performed as atomic operations, meaning they either fully succeed or fully fail, preventing partial or corrupt data states.
  2. Version Control/Timestamping: Each SiteAuditReport document may include version fields or timestamps. The update operation can check these fields to ensure it's operating on the latest version of the data. This is crucial if multiple processes (though unlikely for a single audit workflow) might attempt to update the same record.
  3. Upsert Logic: For new sites or the very first audit, the system will insert a new SiteAuditReport document. For subsequent audits of an existing site, it will update the relevant collection by adding a new audit report document, ensuring the beforeAfterDiff can accurately reference the previous report.
  4. Error Handling & Retries: Robust error handling is implemented to manage potential database connection issues or write conflicts, with configurable retry mechanisms to ensure the data is eventually persisted.

This approach guarantees that your audit history is accurate, complete, and resilient against potential inconsistencies.


Customer Impact and Deliverables

As a customer, this final database step directly translates into the following actionable deliverables and benefits:

  • Accessible Audit Reports: All generated SiteAuditReport documents are stored and immediately available for retrieval via the PantheraHive Dashboard or API.
  • Historical Performance Tracking: You will have a comprehensive history of your site's SEO health, enabling you to:

* Monitor SEO progress over weeks, months, or years.

* Identify long-term trends in performance.

* Validate the impact of SEO changes or development updates.

  • Clear Before/After Comparisons: The beforeAfterDiff data provides an instant overview of what has changed between audit runs, making it easy to see improvements or new issues at a glance.
  • Actionable Fixes Record: The geminiFixes are stored alongside the audit, providing a persistent record of recommended actions directly tied to specific issues. This facilitates tracking the implementation status of these fixes.
  • Scheduled Automation Confirmation: For scheduled audits (every Sunday at 2 AM), this step confirms that the audit has run successfully and the results are ready for review.

Next Steps & Report Availability

Once this hive_db → conditional_update step is successfully completed, your latest SiteAuditReport will be available:

  • Within minutes of the audit completion for on-demand runs.
  • By approximately 2:15 AM every Sunday for scheduled automated audits (allowing for crawl and processing time).

You can access and review these reports through your PantheraHive dashboard, where they will be presented in a user-friendly format, including visualizations, detailed breakdowns, and the exact Gemini-generated fixes.

site_seo_auditor.txt
Download source file
Copy all content
Full output as text
Download ZIP
IDE-ready project ZIP
Copy share link
Permanent URL for this run
Get Embed Code
Embed this result on any website
Print / Save PDF
Use browser print dialog
\n\n\n"); var hasSrcMain=Object.keys(extracted).some(function(k){return k.indexOf("src/main")>=0;}); if(!hasSrcMain) zip.file(folder+"src/main."+ext,"import React from 'react'\nimport ReactDOM from 'react-dom/client'\nimport App from './App'\nimport './index.css'\n\nReactDOM.createRoot(document.getElementById('root')!).render(\n \n \n \n)\n"); var hasSrcApp=Object.keys(extracted).some(function(k){return k==="src/App."+ext||k==="App."+ext;}); if(!hasSrcApp) zip.file(folder+"src/App."+ext,"import React from 'react'\nimport './App.css'\n\nfunction App(){\n return(\n
\n
\n

"+slugTitle(pn)+"

\n

Built with PantheraHive BOS

\n
\n
\n )\n}\nexport default App\n"); zip.file(folder+"src/index.css","*{margin:0;padding:0;box-sizing:border-box}\nbody{font-family:system-ui,-apple-system,sans-serif;background:#f0f2f5;color:#1a1a2e}\n.app{min-height:100vh;display:flex;flex-direction:column}\n.app-header{flex:1;display:flex;flex-direction:column;align-items:center;justify-content:center;gap:12px;padding:40px}\nh1{font-size:2.5rem;font-weight:700}\n"); zip.file(folder+"src/App.css",""); zip.file(folder+"src/components/.gitkeep",""); zip.file(folder+"src/pages/.gitkeep",""); zip.file(folder+"src/hooks/.gitkeep",""); Object.keys(extracted).forEach(function(p){ var fp=p.startsWith("src/")?p:"src/"+p; zip.file(folder+fp,extracted[p]); }); zip.file(folder+"README.md","# "+slugTitle(pn)+"\n\nGenerated by PantheraHive BOS.\n\n## Setup\n\`\`\`bash\nnpm install\nnpm run dev\n\`\`\`\n\n## Build\n\`\`\`bash\nnpm run build\n\`\`\`\n\n## Open in IDE\nOpen the project folder in VS Code or WebStorm.\n"); zip.file(folder+".gitignore","node_modules/\ndist/\n.env\n.DS_Store\n*.local\n"); } /* --- Vue (Vite + Composition API + TypeScript) --- */ function buildVue(zip,folder,app,code,panelTxt){ var pn=pkgName(app); var C=cc(pn); var extracted=extractCode(panelTxt); zip.file(folder+"package.json",'{\n "name": "'+pn+'",\n "version": "0.0.0",\n "type": "module",\n "scripts": {\n "dev": "vite",\n "build": "vue-tsc -b && vite build",\n "preview": "vite preview"\n },\n "dependencies": {\n "vue": "^3.5.13",\n "vue-router": "^4.4.5",\n "pinia": "^2.3.0",\n "axios": "^1.7.9"\n },\n "devDependencies": {\n "@vitejs/plugin-vue": "^5.2.1",\n "typescript": "~5.7.3",\n "vite": "^6.0.5",\n "vue-tsc": "^2.2.0"\n }\n}\n'); zip.file(folder+"vite.config.ts","import { defineConfig } from 'vite'\nimport vue from '@vitejs/plugin-vue'\nimport { resolve } from 'path'\n\nexport default defineConfig({\n plugins: [vue()],\n resolve: { alias: { '@': resolve(__dirname,'src') } }\n})\n"); 
zip.file(folder+"tsconfig.json",'{"files":[],"references":[{"path":"./tsconfig.app.json"},{"path":"./tsconfig.node.json"}]}\n'); zip.file(folder+"tsconfig.app.json",'{\n "compilerOptions":{\n "target":"ES2020","useDefineForClassFields":true,"module":"ESNext","lib":["ES2020","DOM","DOM.Iterable"],\n "skipLibCheck":true,"moduleResolution":"bundler","allowImportingTsExtensions":true,\n "isolatedModules":true,"moduleDetection":"force","noEmit":true,"jsxImportSource":"vue",\n "strict":true,"paths":{"@/*":["./src/*"]}\n },\n "include":["src/**/*.ts","src/**/*.d.ts","src/**/*.tsx","src/**/*.vue"]\n}\n'); zip.file(folder+"env.d.ts","/// \n"); zip.file(folder+"index.html","\n\n\n \n \n "+slugTitle(pn)+"\n\n\n
\n \n\n\n"); var hasMain=Object.keys(extracted).some(function(k){return k==="src/main.ts"||k==="main.ts";}); if(!hasMain) zip.file(folder+"src/main.ts","import { createApp } from 'vue'\nimport { createPinia } from 'pinia'\nimport App from './App.vue'\nimport './assets/main.css'\n\nconst app = createApp(App)\napp.use(createPinia())\napp.mount('#app')\n"); var hasApp=Object.keys(extracted).some(function(k){return k.indexOf("App.vue")>=0;}); if(!hasApp) zip.file(folder+"src/App.vue","\n\n\n\n\n"); zip.file(folder+"src/assets/main.css","*{margin:0;padding:0;box-sizing:border-box}body{font-family:system-ui,sans-serif;background:#fff;color:#213547}\n"); zip.file(folder+"src/components/.gitkeep",""); zip.file(folder+"src/views/.gitkeep",""); zip.file(folder+"src/stores/.gitkeep",""); Object.keys(extracted).forEach(function(p){ var fp=p.startsWith("src/")?p:"src/"+p; zip.file(folder+fp,extracted[p]); }); zip.file(folder+"README.md","# "+slugTitle(pn)+"\n\nGenerated by PantheraHive BOS.\n\n## Setup\n\`\`\`bash\nnpm install\nnpm run dev\n\`\`\`\n\n## Build\n\`\`\`bash\nnpm run build\n\`\`\`\n\nOpen in VS Code or WebStorm.\n"); zip.file(folder+".gitignore","node_modules/\ndist/\n.env\n.DS_Store\n*.local\n"); } /* --- Angular (v19 standalone) --- */ function buildAngular(zip,folder,app,code,panelTxt){ var pn=pkgName(app); var C=cc(pn); var sel=pn.replace(/_/g,"-"); var extracted=extractCode(panelTxt); zip.file(folder+"package.json",'{\n "name": "'+pn+'",\n "version": "0.0.0",\n "scripts": {\n "ng": "ng",\n "start": "ng serve",\n "build": "ng build",\n "test": "ng test"\n },\n "dependencies": {\n "@angular/animations": "^19.0.0",\n "@angular/common": "^19.0.0",\n "@angular/compiler": "^19.0.0",\n "@angular/core": "^19.0.0",\n "@angular/forms": "^19.0.0",\n "@angular/platform-browser": "^19.0.0",\n "@angular/platform-browser-dynamic": "^19.0.0",\n "@angular/router": "^19.0.0",\n "rxjs": "~7.8.0",\n "tslib": "^2.3.0",\n "zone.js": "~0.15.0"\n },\n "devDependencies": {\n 
"@angular-devkit/build-angular": "^19.0.0",\n "@angular/cli": "^19.0.0",\n "@angular/compiler-cli": "^19.0.0",\n "typescript": "~5.6.0"\n }\n}\n'); zip.file(folder+"angular.json",'{\n "$schema": "./node_modules/@angular/cli/lib/config/schema.json",\n "version": 1,\n "newProjectRoot": "projects",\n "projects": {\n "'+pn+'": {\n "projectType": "application",\n "root": "",\n "sourceRoot": "src",\n "prefix": "app",\n "architect": {\n "build": {\n "builder": "@angular-devkit/build-angular:application",\n "options": {\n "outputPath": "dist/'+pn+'",\n "index": "src/index.html",\n "browser": "src/main.ts",\n "tsConfig": "tsconfig.app.json",\n "styles": ["src/styles.css"],\n "scripts": []\n }\n },\n "serve": {"builder":"@angular-devkit/build-angular:dev-server","configurations":{"production":{"buildTarget":"'+pn+':build:production"},"development":{"buildTarget":"'+pn+':build:development"}},"defaultConfiguration":"development"}\n }\n }\n }\n}\n'); zip.file(folder+"tsconfig.json",'{\n "compileOnSave": false,\n "compilerOptions": {"baseUrl":"./","outDir":"./dist/out-tsc","forceConsistentCasingInFileNames":true,"strict":true,"noImplicitOverride":true,"noPropertyAccessFromIndexSignature":true,"noImplicitReturns":true,"noFallthroughCasesInSwitch":true,"paths":{"@/*":["src/*"]},"skipLibCheck":true,"esModuleInterop":true,"sourceMap":true,"declaration":false,"experimentalDecorators":true,"moduleResolution":"bundler","importHelpers":true,"target":"ES2022","module":"ES2022","useDefineForClassFields":false,"lib":["ES2022","dom"]},\n "references":[{"path":"./tsconfig.app.json"}]\n}\n'); zip.file(folder+"tsconfig.app.json",'{\n "extends":"./tsconfig.json",\n "compilerOptions":{"outDir":"./dist/out-tsc","types":[]},\n "files":["src/main.ts"],\n "include":["src/**/*.d.ts"]\n}\n'); zip.file(folder+"src/index.html","\n\n\n \n "+slugTitle(pn)+"\n \n \n \n\n\n \n\n\n"); zip.file(folder+"src/main.ts","import { bootstrapApplication } from '@angular/platform-browser';\nimport { appConfig } from 
'./app/app.config';\nimport { AppComponent } from './app/app.component';\n\nbootstrapApplication(AppComponent, appConfig)\n .catch(err => console.error(err));\n"); zip.file(folder+"src/styles.css","* { margin: 0; padding: 0; box-sizing: border-box; }\nbody { font-family: system-ui, -apple-system, sans-serif; background: #f9fafb; color: #111827; }\n"); var hasComp=Object.keys(extracted).some(function(k){return k.indexOf("app.component")>=0;}); if(!hasComp){ zip.file(folder+"src/app/app.component.ts","import { Component } from '@angular/core';\nimport { RouterOutlet } from '@angular/router';\n\n@Component({\n selector: 'app-root',\n standalone: true,\n imports: [RouterOutlet],\n templateUrl: './app.component.html',\n styleUrl: './app.component.css'\n})\nexport class AppComponent {\n title = '"+pn+"';\n}\n"); zip.file(folder+"src/app/app.component.html","
\n
\n

"+slugTitle(pn)+"

\n

Built with PantheraHive BOS

\n
\n \n
\n"); zip.file(folder+"src/app/app.component.css",".app-header{display:flex;flex-direction:column;align-items:center;justify-content:center;min-height:60vh;gap:16px}h1{font-size:2.5rem;font-weight:700;color:#6366f1}\n"); } zip.file(folder+"src/app/app.config.ts","import { ApplicationConfig, provideZoneChangeDetection } from '@angular/core';\nimport { provideRouter } from '@angular/router';\nimport { routes } from './app.routes';\n\nexport const appConfig: ApplicationConfig = {\n providers: [\n provideZoneChangeDetection({ eventCoalescing: true }),\n provideRouter(routes)\n ]\n};\n"); zip.file(folder+"src/app/app.routes.ts","import { Routes } from '@angular/router';\n\nexport const routes: Routes = [];\n"); Object.keys(extracted).forEach(function(p){ var fp=p.startsWith("src/")?p:"src/"+p; zip.file(folder+fp,extracted[p]); }); zip.file(folder+"README.md","# "+slugTitle(pn)+"\n\nGenerated by PantheraHive BOS.\n\n## Setup\n\`\`\`bash\nnpm install\nng serve\n# or: npm start\n\`\`\`\n\n## Build\n\`\`\`bash\nng build\n\`\`\`\n\nOpen in VS Code with Angular Language Service extension.\n"); zip.file(folder+".gitignore","node_modules/\ndist/\n.env\n.DS_Store\n*.local\n.angular/\n"); } /* --- Python --- */ function buildPython(zip,folder,app,code){ var title=slugTitle(app); var pn=pkgName(app); var src=code.replace(/^\`\`\`[\w]*\n?/m,"").replace(/\n?\`\`\`$/m,"").trim(); var reqMap={"numpy":"numpy","pandas":"pandas","sklearn":"scikit-learn","tensorflow":"tensorflow","torch":"torch","flask":"flask","fastapi":"fastapi","uvicorn":"uvicorn","requests":"requests","sqlalchemy":"sqlalchemy","pydantic":"pydantic","dotenv":"python-dotenv","PIL":"Pillow","cv2":"opencv-python","matplotlib":"matplotlib","seaborn":"seaborn","scipy":"scipy"}; var reqs=[]; Object.keys(reqMap).forEach(function(k){if(src.indexOf("import "+k)>=0||src.indexOf("from "+k)>=0)reqs.push(reqMap[k]);}); var reqsTxt=reqs.length?reqs.join("\n"):"# add dependencies here\n"; zip.file(folder+"main.py",src||"# 
"+title+"\n# Generated by PantheraHive BOS\n\nprint(title+\" loaded\")\n"); zip.file(folder+"requirements.txt",reqsTxt); zip.file(folder+".env.example","# Environment variables\n"); zip.file(folder+"README.md","# "+title+"\n\nGenerated by PantheraHive BOS.\n\n## Setup\n\`\`\`bash\npython3 -m venv .venv\nsource .venv/bin/activate\npip install -r requirements.txt\n\`\`\`\n\n## Run\n\`\`\`bash\npython main.py\n\`\`\`\n"); zip.file(folder+".gitignore",".venv/\n__pycache__/\n*.pyc\n.env\n.DS_Store\n"); } /* --- Node.js --- */ function buildNode(zip,folder,app,code){ var title=slugTitle(app); var pn=pkgName(app); var src=code.replace(/^\`\`\`[\w]*\n?/m,"").replace(/\n?\`\`\`$/m,"").trim(); var depMap={"mongoose":"^8.0.0","dotenv":"^16.4.5","axios":"^1.7.9","cors":"^2.8.5","bcryptjs":"^2.4.3","jsonwebtoken":"^9.0.2","socket.io":"^4.7.4","uuid":"^9.0.1","zod":"^3.22.4","express":"^4.18.2"}; var deps={}; Object.keys(depMap).forEach(function(k){if(src.indexOf(k)>=0)deps[k]=depMap[k];}); if(!deps["express"])deps["express"]="^4.18.2"; var pkgJson=JSON.stringify({"name":pn,"version":"1.0.0","main":"src/index.js","scripts":{"start":"node src/index.js","dev":"nodemon src/index.js"},"dependencies":deps,"devDependencies":{"nodemon":"^3.0.3"}},null,2)+"\n"; zip.file(folder+"package.json",pkgJson); var fallback="const express=require(\"express\");\nconst app=express();\napp.use(express.json());\n\napp.get(\"/\",(req,res)=>{\n res.json({message:\""+title+" API\"});\n});\n\nconst PORT=process.env.PORT||3000;\napp.listen(PORT,()=>console.log(\"Server on port \"+PORT));\n"; zip.file(folder+"src/index.js",src||fallback); zip.file(folder+".env.example","PORT=3000\n"); zip.file(folder+".gitignore","node_modules/\n.env\n.DS_Store\n"); zip.file(folder+"README.md","# "+title+"\n\nGenerated by PantheraHive BOS.\n\n## Setup\n\`\`\`bash\nnpm install\n\`\`\`\n\n## Run\n\`\`\`bash\nnpm run dev\n\`\`\`\n"); } /* --- Vanilla HTML --- */ function buildVanillaHtml(zip,folder,app,code){ var 
title=slugTitle(app); var isFullDoc=code.trim().toLowerCase().indexOf("=0||code.trim().toLowerCase().indexOf("=0; var indexHtml=isFullDoc?code:"\n\n\n\n\n"+title+"\n\n\n\n"+code+"\n\n\n\n"; zip.file(folder+"index.html",indexHtml); zip.file(folder+"style.css","/* "+title+" — styles */\n*{margin:0;padding:0;box-sizing:border-box}\nbody{font-family:system-ui,-apple-system,sans-serif;background:#fff;color:#1a1a2e}\n"); zip.file(folder+"script.js","/* "+title+" — scripts */\n"); zip.file(folder+"assets/.gitkeep",""); zip.file(folder+"README.md","# "+title+"\n\nGenerated by PantheraHive BOS.\n\n## Open\nDouble-click \`index.html\` in your browser.\n\nOr serve locally:\n\`\`\`bash\nnpx serve .\n# or\npython3 -m http.server 3000\n\`\`\`\n"); zip.file(folder+".gitignore",".DS_Store\nnode_modules/\n.env\n"); } /* ===== MAIN ===== */ var sc=document.createElement("script"); sc.src="https://cdnjs.cloudflare.com/ajax/libs/jszip/3.10.1/jszip.min.js"; sc.onerror=function(){ if(lbl)lbl.textContent="Download ZIP"; alert("JSZip load failed — check connection."); }; sc.onload=function(){ var zip=new JSZip(); var base=(_phFname||"output").replace(/\.[^.]+$/,""); var app=base.toLowerCase().replace(/[^a-z0-9]+/g,"_").replace(/^_+|_+$/g,"")||"my_app"; var folder=app+"/"; var vc=document.getElementById("panel-content"); var panelTxt=vc?(vc.innerText||vc.textContent||""):""; var lang=detectLang(_phCode,panelTxt); if(_phIsHtml){ buildVanillaHtml(zip,folder,app,_phCode); } else if(lang==="flutter"){ buildFlutter(zip,folder,app,_phCode,panelTxt); } else if(lang==="react-native"){ buildReactNative(zip,folder,app,_phCode,panelTxt); } else if(lang==="swift"){ buildSwift(zip,folder,app,_phCode,panelTxt); } else if(lang==="kotlin"){ buildKotlin(zip,folder,app,_phCode,panelTxt); } else if(lang==="react"){ buildReact(zip,folder,app,_phCode,panelTxt); } else if(lang==="vue"){ buildVue(zip,folder,app,_phCode,panelTxt); } else if(lang==="angular"){ buildAngular(zip,folder,app,_phCode,panelTxt); } else 
if(lang==="python"){ buildPython(zip,folder,app,_phCode); } else if(lang==="node"){ buildNode(zip,folder,app,_phCode); } else { /* Document/content workflow */ var title=app.replace(/_/g," "); var md=_phAll||_phCode||panelTxt||"No content"; zip.file(folder+app+".md",md); var h=""+title+""; h+="

"+title+"

"; var hc=md.replace(/&/g,"&").replace(//g,">"); hc=hc.replace(/^### (.+)$/gm,"

$1

"); hc=hc.replace(/^## (.+)$/gm,"

$1

"); hc=hc.replace(/^# (.+)$/gm,"

$1

"); hc=hc.replace(/\*\*(.+?)\*\*/g,"$1"); hc=hc.replace(/\n{2,}/g,"

"); h+="

"+hc+"

Generated by PantheraHive BOS
"; zip.file(folder+app+".html",h); zip.file(folder+"README.md","# "+title+"\n\nGenerated by PantheraHive BOS.\n\nFiles:\n- "+app+".md (Markdown)\n- "+app+".html (styled HTML)\n"); } zip.generateAsync({type:"blob"}).then(function(blob){ var a=document.createElement("a"); a.href=URL.createObjectURL(blob); a.download=app+".zip"; a.click(); URL.revokeObjectURL(a.href); if(lbl)lbl.textContent="Download ZIP"; }); }; document.head.appendChild(sc); } function phShare(){navigator.clipboard.writeText(window.location.href).then(function(){var el=document.getElementById("ph-share-lbl");if(el){el.textContent="Link copied!";setTimeout(function(){el.textContent="Copy share link";},2500);}});}function phEmbed(){var runId=window.location.pathname.split("/").pop().replace(".html","");var embedUrl="https://pantherahive.com/embed/"+runId;var code='';navigator.clipboard.writeText(code).then(function(){var el=document.getElementById("ph-embed-lbl");if(el){el.textContent="Embed code copied!";setTimeout(function(){el.textContent="Get Embed Code";},2500);}});}