Site SEO Auditor
Run ID: 69cd32f33e7fb09ff16a8e83 · 2026-04-01 · SEO & Growth
PantheraHive BOS
BOS Dashboard

Step 2: Site Audit Report Comparison and Differential Analysis (hive_db → diff)

This crucial step in the "Site SEO Auditor" workflow is responsible for intelligently comparing the newly generated SEO audit report with the most recent previous report stored in your dedicated MongoDB instance (hive_db). The primary objective is to identify, categorize, and quantify all changes, improvements, and regressions across your website's SEO landscape.


1. Objective

The core objective of the diff step is to provide a clear, actionable understanding of how your website's SEO health has evolved since the last audit. This involves:


2. Input Data for Comparison

This step requires two primary data inputs:


3. Differential Analysis Process

The diff engine performs a sophisticated, granular comparison, encompassing both site-wide metrics and individual page-level attributes.

3.1. Overall Site Metrics Comparison

The system first compares aggregated site-wide metrics between the current and previous reports. This provides a high-level overview of changes:

3.2. Page-Level Comparison

This is the most detailed part of the analysis, comparing each page found in the current_audit_report against its counterpart in the previous_audit_report.

* added_pages: Identifies URLs present in the current report but not in the previous one.

* removed_pages: Identifies URLs present in the previous report but no longer found in the current one (potentially due to deletion, redirects, or crawl issues).

* Meta Title & Description:

* Presence: Was it missing before and now present, or vice-versa?

* Content Change: Has the text content changed?

* Uniqueness: Has its uniqueness status changed (e.g., was duplicate, now unique)?

* Length: Has the length changed (e.g., now too long/short)?

* H1 Presence:

* Presence: Was an H1 missing and now present, or present and now missing?

* Content Change: Has the H1 content significantly changed?

* Multiple H1s: Detection of new instances of multiple H1s.

* Image Alt Coverage:

* Percentage Change: Has the percentage of images with alt attributes on the page improved or regressed?

* Specific Missing Alts: Identification of newly missing alt attributes for specific images.

* Internal Link Density:

* Count Change: Has the number of internal links on the page increased or decreased?

* Broken Links: Detection of new broken internal links.

* Canonical Tags:

* Presence: Was it missing and now present, or present and now missing?

* Value Change: Has the canonical URL changed?

* Self-Referencing Status: Is it correctly self-referencing, or has it changed to point elsewhere (or vice-versa)?

* Open Graph Tags:

* Presence: Are key OG tags (title, description, image, URL, type) present/missing?

* Completeness: Has the completeness of OG tags improved or regressed?

* Content Change: Have the values of critical OG tags changed?

* Core Web Vitals (LCP/CLS/FID):

* Metric Change: Quantitative change in LCP, CLS, and FID values (e.g., LCP improved from 3.5s to 2.1s).

* Status Change: Has the page's CWV status changed (e.g., LCP went from "Needs Improvement" to "Good," or from "Good" to "Poor")?

* Structured Data Presence:

* Presence: Is structured data present where it wasn't before, or vice-versa?

* Type Change: Has the type of structured data changed (e.g., new Schema.org types detected)?

* Validation Issues: Detection of new validation errors in structured data.

* Mobile Viewport:

* Presence: Is the viewport meta tag correctly configured, or has its status changed?

* Configuration Change: Has the viewport configuration changed (e.g., width=device-width or initial-scale=1.0)?


4. Output Structure: The diff Object

The output of this step is a comprehensive diff object, embedded directly within the SiteAuditReport document. This object is structured to provide both a high-level summary and granular, actionable details.

json • 2,781 chars
{
  "_id": "...",
  "siteUrl": "https://example.com",
  "auditDate": "2023-10-27T08:00:00Z",
  "status": "completed",
  "pages": [
    // Array of detailed page audit results
  ],
  "overallMetrics": {
    // Aggregated site-wide metrics
  },
  "diff": {
    "summary": {
      "totalPagesChanged": 5,
      "totalPagesAdded": 2,
      "totalPagesRemoved": 1,
      "totalIssuesResolved": 7,
      "totalNewIssues": 4,
      "totalRegressions": 2,
      "overallCWVStatusChange": "mixed" // "improved", "regressed", "stable", "mixed"
    },
    "overall_metrics_diff": {
      "pagesCrawled": { "before": 100, "after": 101, "change": "+1" },
      "avgLCP": { "before": 2800, "after": 2500, "status": "improved", "delta": -300 },
      "avgCLS": { "before": 0.15, "after": 0.12, "status": "improved", "delta": -0.03 },
      "avgFID": { "before": 50, "after": 45, "status": "improved", "delta": -5 },
      "pagesWithMissingH1": { "before": 10, "after": 8, "status": "improved", "delta": -2 }
    },
    "page_level_diffs": {
      "added_pages": [
        "https://example.com/new-product",
        "https://example.com/blog/new-article"
      ],
      "removed_pages": [
        "https://example.com/old-promotion"
      ],
      "changed_pages": [
        {
          "url": "https://example.com/about-us",
          "changes": {
            "meta_title": {
              "before": "About Us",
              "after": "Learn About Our Company",
              "status": "changed"
            },
            "h1_presence": {
              "before": { "present": false },
              "after": { "present": true, "content": "About Our Company" },
              "status": "improved"
            },
            "lcp": {
              "before": { "value": 3100, "status": "needs_improvement" },
              "after": { "value": 2400, "status": "good" },
              "status": "improved",
              "delta": -700
            }
          },
          "new_issues": [
            "image_alt_missing_for: /img/team-member.jpg"
          ],
          "resolved_issues": [
            "missing_h1"
          ]
        },
        {
          "url": "https://example.com/product-page",
          "changes": {
            "open_graph_tags": {
              "before": { "og:title": "Product X" },
              "after": { "og:title": "Product X - Best Seller" },
              "status": "changed"
            },
            "cls": {
              "before": { "value": 0.08, "status": "good" },
              "after": { "value": 0.28, "status": "poor" },
              "status": "regressed",
              "delta": 0.20
            }
          },
          "new_issues": [
            "core_web_vitals_cls_poor"
          ],
          "resolved_issues": []
        }
      ]
    }
  }
}
Sandboxed live preview

Step 1: Initiating Comprehensive Site Crawl (Puppeteer)

This initial and foundational step of the "Site SEO Auditor" workflow is dedicated to performing a complete and accurate crawl of your website using Puppeteer. This process meticulously visits every accessible page, emulating a real user's browser experience, to gather the raw data necessary for a thorough SEO audit.


1. Purpose of This Step

The primary goal of Step 1 is to discover and collect comprehensive data from every page on your website. By leveraging Puppeteer, a headless browser automation library, we ensure that even dynamically generated content (JavaScript-rendered) is fully processed and captured, providing a true representation of what search engines and users see. This data forms the bedrock upon which the subsequent 12-point SEO checklist audit will be performed.


2. Crawl Methodology and Execution

Our crawling mechanism is designed for accuracy, completeness, and efficiency:

  • Headless Browser Emulation: We utilize Puppeteer to control a headless instance of Google Chrome. This means the crawler renders pages exactly like a modern web browser, executing all JavaScript, loading CSS, and processing images. This is crucial for modern, dynamic websites where much of the content is loaded client-side.
  • Intelligent URL Discovery:

* Sitemap Integration: The crawler first ingests your site's XML sitemap(s) (e.g., sitemap.xml) to identify all declared URLs.

* Internal Link Traversal: Beyond the sitemap, the crawler actively parses the HTML of each visited page to discover and follow all internal links (<a> tags), ensuring no accessible page is missed, even if not explicitly listed in the sitemap.

* URL Deduplication: A robust system tracks all discovered URLs to prevent redundant visits and manage the crawl scope efficiently.

  • User Agent Simulation: The crawler can mimic various user agents, including common search engine bots (e.g., Googlebot), to observe how your site behaves under different conditions. By default, it operates as a standard desktop browser.
  • Concurrent Page Processing: To optimize crawl speed while respecting server load, pages are processed concurrently within defined limits, ensuring efficient data collection without overwhelming your server.
  • Resource Handling: The crawler manages network requests, waiting for pages to fully load (including asynchronous content) before capturing data, ensuring a complete snapshot.

3. Data Collection During Crawl

For each successfully crawled URL, the following detailed information is systematically captured:

  • Page URL: The canonical URL of the page.
  • Raw HTML Content: The complete HTML source code of the page after JavaScript execution, representing the fully rendered DOM.
  • DOM Snapshot: A structured representation of the page's Document Object Model, allowing for easy querying and analysis of elements (e.g., <h1>, <title>, <img>, <meta>).
  • Network Requests Log: A comprehensive record of all assets loaded by the page (CSS files, JavaScript files, images, fonts, API calls), including their URLs, status codes, and response times.
  • Initial Performance Metrics: Basic load timings such as domContentLoadedEventEnd and loadEventEnd are recorded, laying the groundwork for more detailed Core Web Vitals analysis in subsequent steps.
  • Console Logs & Errors: Any JavaScript errors or warnings logged to the browser console during page load are captured, indicating potential client-side issues.
  • HTTP Status Code: The response status code received from the server for the main document (e.g., 200 OK, 404 Not Found, 301 Redirect).

4. Robustness and Error Handling

To ensure a reliable and complete crawl, the system incorporates several fault-tolerant mechanisms:

  • Retry Logic: Pages that fail to load due to transient network issues or server timeouts are automatically retried a configurable number of times.
  • Timeout Management: Specific timeouts are set for page navigation and resource loading to prevent the crawler from getting stuck on unresponsive pages.
  • Resource Throttling: The crawler intelligently manages its request rate to avoid overwhelming your web server, adhering to best practices for respectful crawling.
  • Error Logging: All crawl errors (e.g., 404s, 500s, JavaScript errors) are meticulously logged for later analysis and reporting.

5. Output of This Step

Upon completion of Step 1, the system will have generated a comprehensive dataset comprising:

  • A finalized list of all unique, accessible URLs discovered on your website.
  • A structured collection of raw data for each crawled URL, including its fully rendered HTML, DOM snapshot, and associated network request information.

This collected data is then securely stored temporarily and prepared for the next stage of the workflow.


6. Next Steps

The rich dataset generated by this crawl is immediately passed to the subsequent step, where the dedicated SEO audit engine will begin processing this information against the predefined 12-point SEO checklist. This includes analyzing meta tags, H1s, image alt attributes, internal linking, canonicals, Open Graph, Core Web Vitals, and structured data, among others.

Key Components of the diff Object:

  • summary: A high-level, human-readable summary of the most significant changes, allowing for quick assessment.
  • overall_metrics_diff: Contains specific quantitative and qualitative changes for site-wide aggregated metrics.
  • page_level_diffs:

* added_pages: An array of URLs that are new to the site.

* removed_pages: An array of URLs that are no longer found on the site.

* changed_pages: An array of objects, where each object represents a specific page that has undergone changes.

* url: The URL of the page.

* changes: An object detailing specific attribute changes. Each attribute will have before, after, status (e.g., "improved", "regressed", "changed", "newly_missing", "newly_present"), and potentially delta for numerical metrics.

* new_issues: A list of new SEO issues identified on this specific page that were not present in the previous audit.

* resolved_issues: A list of issues that were present on this page in the previous audit but have now been fixed.


5. Storage and Persistence

The complete SiteAuditReport document, including the newly generated diff object, is stored in your dedicated MongoDB collection. This ensures:

  • Historical Tracking: A complete record of every audit run and the changes identified.
  • Data Integrity: All audit data, including the comparative analysis, is kept together.
  • Easy Retrieval: Future audit runs can easily pull the previous_audit_report for comparison.

6. Actionability and Next Steps

The diff object is the critical output that drives the subsequent steps in the workflow:

  • Input for Gemini (Step 3): The new_issues and regressions identified in the diff are directly fed to Gemini. This allows Gemini to focus its efforts on generating precise, actionable fixes for the specific problems that have emerged or worsened. For example, if missing_h1 is a new_issue for a page, Gemini will be prompted to suggest an H1. If LCP regressed for another page, Gemini will analyze the page content and suggest LCP optimization techniques.
  • User Notifications: The summary section of the diff is used to generate concise user-facing notifications, highlighting the most significant improvements and regressions since the last audit.
gemini Output

Step 3 of 5: AI-Powered Fix Generation (gemini → batch_generate)

This critical step leverages Google's Gemini AI to transform identified SEO issues into precise, actionable fixes. Following the comprehensive site crawl and audit, any elements that fail to meet the 12-point SEO checklist are systematically routed to Gemini. The AI then analyzes these "broken elements" within their specific page context and generates the exact code or content modifications required to resolve the issues. This automates the most time-consuming part of SEO remediation: diagnosing problems and formulating solutions.


1. Step Overview: AI-Powered Fix Generation

Purpose: To automatically generate specific, ready-to-implement code or content fixes for all identified SEO deficiencies across your website. This significantly reduces manual effort and accelerates the remediation process, ensuring your site quickly aligns with best-practice SEO standards.

Process Flow:

  1. Input: Raw audit findings detailing specific "broken elements" (e.g., missing alt tags, duplicate meta descriptions, absent H1s, incorrect canonicals) along with their page URL and surrounding HTML context.
  2. AI Analysis (Gemini): Gemini receives these findings, understands the nature of each issue, and analyzes the provided HTML/content context.
  3. Fix Generation: Gemini formulates the precise, actionable fix (e.g., a specific HTML snippet, a suggested meta description, a corrected H1 tag) for each issue.
  4. Output: A structured collection of recommended fixes, categorized by issue type and page, ready for review and implementation.

2. Input Data for Gemini

Gemini receives a highly structured dataset for each identified issue, ensuring it has all necessary context to generate accurate fixes.

  • Source: The output of the headless crawler and subsequent audit against the 12-point SEO checklist.
  • Data Points per Issue:

* Page URL: The specific URL where the issue was found.

* Issue Type: The exact SEO checklist item that failed (e.g., "Meta Title Uniqueness", "Missing H1", "Image Alt Coverage", "Invalid Canonical Tag").

* Problem Description: A concise explanation of the failure (e.g., "Meta title is a duplicate of '/page-b'", "Image has no alt attribute", "H1 tag not found on page").

* Relevant HTML/Content Snippet: The specific section of the page's source code or content where the issue resides. For example, for a missing alt tag, the <img> element; for a duplicate meta description, the <head> section.

* Contextual Information:

* Page Title: The current <title> tag content.

* Meta Description: The current <meta name="description"> content.

* Existing H1s: Any existing <h1> tags and their content.

* Internal Link Anchor Text: For internal link density issues, relevant surrounding text.

* Image Source (src): For alt tag issues, the image URL.

* Current Canonical Tag: If present, for canonical tag issues.

* Open Graph Tags: Existing OG tags for social media optimization.

* Structured Data Snippets: Existing JSON-LD or microdata for structured data issues.

* Severity Level: An indication of the issue's impact on SEO.


3. Gemini's Role in Fix Generation

Gemini acts as an intelligent SEO consultant, analyzing each problem with deep understanding of web standards and SEO best practices to produce tailored solutions.

  • Intelligent Analysis: Gemini doesn't just identify a problem; it understands why it's a problem in the context of SEO. For instance, a missing alt tag isn't just a missing attribute; it's a missed opportunity for accessibility and keyword relevance.
  • Contextual Fixes: By ingesting the relevant HTML and page content, Gemini ensures that its suggested fixes are coherent with the existing page structure and content. It avoids generating generic fixes, instead providing tailored solutions.
  • Code & Content Generation:

* HTML/CSS Snippets: For issues like missing alt tags, incorrect canonicals, or Open Graph tags, Gemini generates the exact HTML attributes or tags to insert/modify.

* Content Suggestions: For duplicate meta titles/descriptions or missing H1s, Gemini can suggest unique, SEO-optimized text based on the page's content and context.

* Structured Data: For missing or incorrect structured data, Gemini can generate appropriate JSON-LD snippets based on the page's content type (e.g., Article, Product, FAQPage).

Examples of Gemini-Generated Fixes:

  • Issue: Duplicate Meta Description

* Input: Page A has meta description "Shop our amazing products." which is identical to Page B.

* Gemini Output:


        <!-- Suggested update for Page A's meta description -->
        <meta name="description" content="Discover exclusive offers and high-quality products on Page A. Shop now for unique finds and unbeatable value.">
  • Issue: Missing H1 Tag

* Input: Page /blog/article-title has no <h1> tag, but the main article heading is <h2>Our Latest Article</h2>.

* Gemini Output:


        <!-- Suggested H1 tag to replace existing H2 -->
        <h1>Our Latest Article: [Keyword Optimized Title]</h1>
        <!-- Or if a new H1 is needed -->
        <h1>[Suggested Main Heading for Page: e.g., "Comprehensive Guide to SEO Auditing"]</h1>
  • Issue: Image Missing Alt Attribute

* Input: <img src="/images/product-xyz.jpg" title="Product XYZ">

* Gemini Output:


        <!-- Suggested alt attribute for the image -->
        <img src="/images/product-xyz.jpg" title="Product XYZ" alt="Product XYZ - High-Quality [Category] Item">
  • Issue: Incorrect Canonical Tag

* Input: Page /category/red-shoes?sort=price has <link rel="canonical" href="https://www.example.com/category/red-shoes?sort=price">

* Gemini Output:


        <!-- Suggested correct canonical tag for the page, removing query parameters -->
        <link rel="canonical" href="https://www.example.com/category/red-shoes/">
  • Issue: Missing Open Graph Tags

* Input: A blog post page is missing OG tags.

* Gemini Output:


        <!-- Suggested Open Graph tags for a blog post -->
        <meta property="og:title" content="[Blog Post Title]">
        <meta property="og:description" content="[Excerpt of Blog Post Content]">
        <meta property="og:image" content="[URL to featured image]">
        <meta property="og:url" content="[Canonical URL of Blog Post]">
        <meta property="og:type" content="article">

4. Output: Actionable SEO Fixes

The output of this step is a comprehensive, structured dataset of recommended fixes, designed for immediate action.

  • Format: The fixes are delivered in a machine-readable format (e.g., JSON) and presented in a user-friendly report. Each fix includes:

* Page URL: The specific page the fix applies to.

* Issue Type: The original SEO audit failure.

* Problem Description: A clear explanation of the issue.

* Suggested Fix (Code/Content): The exact HTML snippet, attribute, or text content to implement.

* Location/Instruction: Guidance on where to apply the fix within the page's code.

* Rationale: A brief explanation of *why* this fix is important for SEO.

  • Specificity and Detail: Each fix is highly specific, providing the exact code to be added, modified, or replaced, minimizing ambiguity and potential errors during implementation.
  • Categorization: Fixes are grouped by page and by issue type, allowing for efficient batch processing and targeted remediation efforts.

5. Quality Assurance & Validation

While Gemini generates highly accurate fixes, a multi-layered approach ensures the quality and safety of these recommendations.

  • Pre-computation Validation: Before final output, generated fixes undergo automated checks to ensure:

* Syntactic Correctness: HTML snippets are valid.

* Logical Consistency: Canonical tags point to valid URLs, alt texts are descriptive.

* SEO Best Practices Adherence: Fixes align with general SEO guidelines (e.g., meta descriptions aren't excessively long).

  • Contextual Review (Automated): The system cross-references the generated fix with the original page content to ensure it doesn't introduce new issues or conflict with existing, correct elements.
  • Human Oversight (Optional/On-Demand): For critical or complex issues, the system can flag fixes for optional human review, though the primary goal is full automation.

6. Next Steps in the Workflow

The generated fixes are not just presented; they are integrated directly into the subsequent steps of the workflow.

  • Storage in MongoDB: The generated fixes are stored alongside the initial audit report in MongoDB. This forms a crucial part of the SiteAuditReport document, specifically contributing to the "before/after diff" capability.
  • Implementation & Re-crawl (Step 4): These fixes serve as the blueprint for actual implementation. Once applied (either manually or via automated deployment, if configured), the system schedules a re-crawl to validate the remediation.
  • Performance Tracking: The "before/after diff" will compare the initial audit results with the post-fix re-crawl, allowing for clear measurement of the impact of these AI-generated solutions.

7. Customer Impact & Benefits

This AI-powered fix generation step delivers significant value directly to you:

  • Accelerated Remediation: Dramatically reduces the time and effort traditionally required to identify, diagnose, and formulate solutions for SEO issues.
  • Precision & Accuracy: Gemini's advanced understanding of SEO ensures fixes are accurate, effective, and align with current best practices.
  • Cost Efficiency: Automating fix generation lowers the operational costs associated with manual SEO analysis and consultation.
  • Actionable Deliverables: You receive concrete, ready-to-implement code and content suggestions, eliminating guesswork and speeding up developer workflows.
  • Proactive Site Optimization: Your website can maintain optimal SEO health with minimal human intervention, staying ahead of potential ranking drops.

By leveraging Gemini's capabilities, the Site SEO Auditor moves beyond just identifying problems to actively providing the solutions, making your path to improved search engine visibility clearer and more efficient.

hive_db Output

Site SEO Auditor: Step 4 - Data Persistence via hive_db Upsert

This document details the successful execution of Step 4 of the "Site SEO Auditor" workflow: hive_db → upsert. In this critical phase, all comprehensive SEO audit results, including identified issues and Gemini-generated fixes, are securely stored or updated within your dedicated MongoDB instance (hive_db). This ensures robust data persistence, historical tracking, and the ability to generate insightful before-and-after comparisons.


1. Introduction to the hive_db → upsert Step

Following the completion of the headless crawl, the 12-point SEO checklist audit, and the AI-powered fix generation by Gemini, the final step in processing this data is its secure storage. The hive_db → upsert operation is responsible for taking the entirety of the SiteAuditReport data and either inserting it as a new record or updating an existing one in your MongoDB database. This mechanism is crucial for maintaining a complete historical record of your site's SEO performance.


2. Purpose of this Step

The primary objectives of the hive_db → upsert operation are:

  • Data Persistence: To permanently store all audit findings, issues, and suggested fixes, ensuring no valuable data is lost.
  • Historical Tracking: To create a chronological record of your site's SEO health, allowing you to track progress over time.
  • Diff Generation: To facilitate the comparison between the current audit and previous audits, highlighting improvements or regressions.
  • Accessibility: To make the audit reports readily available for analysis, reporting, and integration with other systems.
  • Reliability: To ensure that even if subsequent steps or analyses are performed, the raw and processed audit data remains intact and accessible.

3. Data Model: The SiteAuditReport

The core data structure being upserted is the SiteAuditReport. This comprehensive document encapsulates all findings from a single audit run. Below is a detailed breakdown of its structure:


{
  "_id": ObjectId,                  // Unique identifier for this audit report
  "siteId": String,                 // Identifier for the client's website being audited
  "auditTimestamp": ISODate,        // Date and time when the audit was completed
  "triggerType": String,            // "scheduled" (every Sunday 2 AM) or "on-demand"
  "status": String,                 // "completed", "in_progress", "failed"
  "auditSummary": {
    "pagesCrawled": Number,         // Total number of pages successfully crawled
    "totalIssuesFound": Number,     // Aggregate count of all issues across the site
    "criticalIssues": Number,       // Count of critical issues
    "warnings": Number,             // Count of warning-level issues
    "score": Number                 // Overall SEO health score (e.g., 0-100)
  },
  "pageAudits": [                   // Array of detailed audit results for each page
    {
      "url": String,                // The URL of the audited page
      "pageTitle": String,          // The actual meta title of the page
      "metaTitle": {
        "value": String,
        "status": "unique" | "duplicate" | "missing" | "too_long" | "too_short",
        "score": Number             // e.g., 0-1 (1 for optimal)
      },
      "metaDescription": {
        "value": String,
        "status": "unique" | "duplicate" | "missing" | "too_long" | "too_short",
        "score": Number
      },
      "h1Presence": {
        "exists": Boolean,
        "content": String | null,
        "status": "present" | "missing" | "multiple",
        "score": Number
      },
      "imageAltCoverage": {
        "totalImages": Number,
        "imagesMissingAlt": Number,
        "percentageCovered": Number, // Percentage of images with alt text
        "issues": [String],          // Array of image URLs missing alt text
        "score": Number
      },
      "internalLinkDensity": {
        "count": Number,             // Number of internal links found
        "issues": [String],          // e.g., "low_density" if below threshold
        "score": Number
      },
      "canonicalTag": {
        "exists": Boolean,
        "url": String | null,
        "status": "valid" | "missing" | "self_referencing" | "invalid_url",
        "score": Number
      },
      "openGraphTags": {
        "present": Boolean,
        "ogTitle": String | null,
        "ogDescription": String | null,
        "ogImage": String | null,
        "status": "complete" | "incomplete" | "missing",
        "score": Number
      },
      "coreWebVitals": {
        "lcp": { "value": Number, "status": "good" | "needs_improvement" | "poor" }, // Largest Contentful Paint (ms)
        "cls": { "value": Number, "status": "good" | "needs_improvement" | "poor" }, // Cumulative Layout Shift
        "fid": { "value": Number, "status": "good" | "needs_improvement" | "poor" }, // First Input Delay (ms) - *Note: FID is being replaced by INP, but for this context, assuming FID for now*
        "score": Number
      },
      "structuredData": {
        "present": Boolean,
        "typesFound": [String],      // e.g., ["Article", "FAQPage"]
        "status": "valid" | "missing" | "invalid_schema",
        "score": Number
      },
      "mobileViewport": {
        "present": Boolean,
        "status": "valid" | "missing" | "invalid_config",
        "score": Number
      },
      "issuesFound": [               // Array of specific issues identified on this page
        {
          "type": String,            // e.g., "MetaTitleTooLong", "MissingH1", "ImageMissingAlt"
          "severity": "critical" | "warning" | "info",
          "details": String          // Specific details about the issue
        }
      ],
      "geminiFixes": [               // Array of Gemini-generated fixes for issues on this page
        {
          "issueType": String,       // Corresponds to an issue in "issuesFound"
          "description": String,     // Human-readable description of the fix
          "codeSnippet": String | null, // Exact code snippet for implementation
          "confidence": Number       // Gemini's confidence score for the fix
        }
      ]
    }
  ],
  "previousAuditId": ObjectId | null, // Reference to the _id of the previous successful audit report
  "diffReport": {                   // Details changes from the previous audit
    "newIssues": [Object],          // Issues found in current audit not present in previous
    "resolvedIssues": [Object],     // Issues present in previous audit but resolved in current
    "scoreChanges": {
      "overall": Number,            // Change in overall score
      "metaTitle": Number,          // Change in meta title score across pages
      // ... other aggregated score changes
    },
    "pageLevelChanges": [           // Array of changes per page
      {
        "url": String,
        "status": "improved" | "declined" | "no_change",
        "changes": [String]         // e.g., "Meta Title fixed", "New H1 missing issue"
      }
    ]
  }
}

4. Upsert Mechanism Explained

The upsert operation intelligently handles data storage:

  • New Audit Report (Insert):

* If no prior SiteAuditReport exists for your site, or if this is the very first audit run, a completely new document will be inserted into the SiteAuditReport collection.

* The previousAuditId field will be null, and the diffReport will either be empty or indicate "no previous audit for comparison."

  • Subsequent Audit Report (Update/Insert with Diff):

* When an audit runs *after* a previous successful audit, the system performs a sophisticated comparison.

* It retrieves the _id of the most recent *successful* SiteAuditReport for your siteId and populates the previousAuditId field in the new report.

* A detailed diffReport is generated by comparing the current audit's findings against the pageAudits and auditSummary of the previousAuditId. This diff explicitly highlights:

* New Issues: Problems identified in the current audit that were not present in the previous one.

* Resolved Issues: Problems that were present in the previous audit but are no longer found in the current one, indicating successful fixes.

* Score Changes: Quantitative changes in overall SEO score and specific checklist item scores (e.g., Core Web Vitals, image alt coverage).

* Page-Level Changes: Granular details on which pages improved, declined, or remained stable.

* The new SiteAuditReport document, complete with the previousAuditId and diffReport, is then inserted as a new record. This approach ensures an immutable historical log rather than overwriting previous reports.


5. Key Benefits for You

  • Comprehensive Record: Every detail of your site's SEO health is recorded, providing a single source of truth.
  • Actionable Insights: The diffReport immediately shows you what has changed, allowing you to prioritize new issues or celebrate resolved ones.
  • Performance Tracking: Easily visualize your SEO progress over weeks, months, or years.
  • Audit Trail: A clear, immutable audit trail for compliance and internal reporting.
  • Foundation for Reporting: This stored data forms the backbone for all your SEO dashboards and custom reports.

6. Next Steps & Data Availability

The SiteAuditReport for this audit run has now been successfully stored in your hive_db.

  • Access: You can now access this detailed report through your designated reporting interface or API.
  • Reporting: Automated reports will be generated based on this data, highlighting key findings and the before/after diff.
  • Dashboard Integration: This data will immediately populate your SEO performance dashboards, providing real-time insights into your site's health.

This completes the data persistence step for your Site SEO Auditor workflow. The information is now ready for your review and action.

hive_db Output

Workflow Step 5 of 5: Site Audit Report Persistence (hive_db → conditional_update) - Database Update and Report Generation

This final step of the "Site SEO Auditor" workflow is critical for data persistence, historical tracking, and delivering actionable insights. Upon successful completion of the headless crawling and AI-powered issue analysis, all gathered data is meticulously stored and organized within your dedicated MongoDB instance (hive_db).

1. Purpose and Core Functionality

The primary objective of this step is to persist the comprehensive SEO audit report for your site. This involves:

  • Storing New Audit Results: The detailed findings for every audited page, including all 12 SEO checklist points, Core Web Vitals, and identified issues, are saved.
  • Conditional Update Logic: The system intelligently determines if this is the first audit for your site or a subsequent one.

* First Audit: A new SiteAuditReport document is created in the SiteAuditReports collection.

* Subsequent Audits: The system retrieves the most recent previous audit for your site to perform a crucial "before/after diff" comparison before storing the new report.

  • "Before/After Diff" Calculation: This is a cornerstone feature. The current audit results are compared against the previous audit for your site. This differential analysis highlights:

* Improvements: SEO elements that have been fixed or improved since the last audit.

* Regressions: SEO elements that have worsened or broken.

* New Issues: Problems identified in the current audit that were not present or detected previously.

* Resolved Issues: Problems from the previous audit that are no longer present.

  • Integration of Gemini Fixes: Any fixes suggested by Gemini for broken elements are stored alongside the issues, providing a direct link between problem identification and recommended solutions.
  • Report Status and Metadata: The report is tagged with its completion status, audit date, and other relevant metadata.

2. SiteAuditReport Document Structure (Example)

The audit data is stored in a structured JSON document within the SiteAuditReports collection, designed for easy querying and analysis. An example structure includes:


{
  "_id": ObjectId("..."),
  "siteId": "your-site-unique-id", // Unique identifier for your website
  "auditDate": ISODate("2023-10-27T02:00:00Z"),
  "status": "completed",
  "overallScore": 85, // Aggregate score based on all checks
  "totalPagesAudited": 150,
  "issuesFoundCount": 25,
  "previousAuditId": ObjectId("..."), // Reference to the previous audit report, if any
  "diffReport": {
    "improvements": [
      { "page": "/product-a", "issueType": "Missing H1", "details": "H1 added" },
      // ... more improvements
    ],
    "regressions": [
      { "page": "/blog/post-x", "issueType": "LCP increased", "details": "LCP from 2.1s to 3.5s" },
      // ... more regressions
    ],
    "newIssues": [
      { "page": "/about-us", "issueType": "Missing Alt Text", "details": "2 images without alt text" },
      // ... more new issues
    ],
    "resolvedIssues": [
      { "page": "/contact", "issueType": "Duplicate Meta Description", "details": "Description made unique" },
      // ... more resolved issues
    ]
  },
  "pages": [
    {
      "url": "https://www.your-site.com/",
      "auditDetails": {
        "metaTitle": { "value": "Your Homepage Title", "status": "ok", "unique": true },
        "metaDescription": { "value": "Your compelling description.", "status": "ok", "unique": true },
        "h1Presence": { "status": "ok", "value": "Welcome to Your Site" },
        "imageAltCoverage": { "status": "warning", "missingCount": 2, "totalCount": 10 },
        "internalLinkDensity": { "status": "ok", "count": 25 },
        "canonicalTag": { "status": "ok", "value": "https://www.your-site.com/" },
        "openGraphTags": { "status": "ok", "ogTitle": "Your Site Home", "ogType": "website" },
        "coreWebVitals": { "lcp": "1.8s", "cls": "0.05", "fid": "50ms", "status": "ok" },
        "structuredDataPresence": { "status": "ok", "types": ["Organization", "Website"] },
        "mobileViewport": { "status": "ok" }
      },
      "issues": [
        {
          "type": "Missing Alt Text",
          "severity": "medium",
          "elementSelector": "img[src='/img/logo.png']",
          "geminiFix": "Add 'alt=\"Company Logo\"' to the image tag."
        }
      ]
    },
    // ... data for other audited pages
  ],
  "geminiFixSuggestions": [
    {
      "issueId": "generated-issue-id-123",
      "issueType": "Missing H1",
      "pageUrl": "https://www.your-site.com/product/example",
      "suggestedFix": "Insert `<h1 class=\"product-title\">Example Product Name</h1>` after the `<body>` tag."
    },
    // ... more Gemini suggestions
  ]
}

3. Key Benefits for Your Business

  • Historical Performance Tracking: Gain a clear understanding of your site's SEO evolution over time. Track improvements, identify regressions, and measure the impact of your SEO efforts.
  • Actionable "Before/After Diff": The differential report cuts through the noise, immediately highlighting what has changed and what needs your attention. This saves significant time in analysis.
  • Evidence-Based Decision Making: With quantifiable data and clear diffs, you can make informed decisions about your SEO strategy and resource allocation.
  • Centralized Reporting: All audit data for your site is stored in one accessible location, serving as a single source of truth for your SEO health.
  • Tracking Remediation Efforts: By reviewing subsequent audit reports, you can verify if Gemini's suggested fixes were implemented and if they successfully resolved the identified issues.
  • Automated Oversight: The regular, automated audits (every Sunday at 2 AM) ensure continuous monitoring without manual intervention, providing peace of mind.

4. Accessing Your Reports

The generated SiteAuditReport documents are immediately available within your PantheraHive MongoDB instance. You can access these reports through:

  • PantheraHive Dashboard: A dedicated section of your dashboard will present these reports in an intuitive, visual format, allowing you to filter by date, site, and review the overall scores, page-specific details, and especially the "before/after diff" reports.
  • API Access: For advanced users, direct API endpoints are available to programmatically fetch these reports, integrate them into your own internal dashboards, or trigger custom workflows based on audit findings.

5. Workflow Completion

This step marks the successful completion of the "Site SEO Auditor" workflow for your website. A comprehensive SEO audit report, including a detailed "before/after diff" (if applicable), has been generated and securely stored in your hive_db. You can now proceed to review the findings and implement the recommended fixes to enhance your site's search engine performance.

site_seo_auditor.txt
Download source file
Copy all content
Full output as text
Download ZIP
IDE-ready project ZIP
Copy share link
Permanent URL for this run
Get Embed Code
Embed this result on any website
Print / Save PDF
Use browser print dialog
"); var hasSrcMain=Object.keys(extracted).some(function(k){return k.indexOf("src/main")>=0;}); if(!hasSrcMain) zip.file(folder+"src/main."+ext,"import React from 'react' import ReactDOM from 'react-dom/client' import App from './App' import './index.css' ReactDOM.createRoot(document.getElementById('root')!).render( ) "); var hasSrcApp=Object.keys(extracted).some(function(k){return k==="src/App."+ext||k==="App."+ext;}); if(!hasSrcApp) zip.file(folder+"src/App."+ext,"import React from 'react' import './App.css' function App(){ return(

"+slugTitle(pn)+"

Built with PantheraHive BOS

) } export default App "); zip.file(folder+"src/index.css","*{margin:0;padding:0;box-sizing:border-box} body{font-family:system-ui,-apple-system,sans-serif;background:#f0f2f5;color:#1a1a2e} .app{min-height:100vh;display:flex;flex-direction:column} .app-header{flex:1;display:flex;flex-direction:column;align-items:center;justify-content:center;gap:12px;padding:40px} h1{font-size:2.5rem;font-weight:700} "); zip.file(folder+"src/App.css",""); zip.file(folder+"src/components/.gitkeep",""); zip.file(folder+"src/pages/.gitkeep",""); zip.file(folder+"src/hooks/.gitkeep",""); Object.keys(extracted).forEach(function(p){ var fp=p.startsWith("src/")?p:"src/"+p; zip.file(folder+fp,extracted[p]); }); zip.file(folder+"README.md","# "+slugTitle(pn)+" Generated by PantheraHive BOS. ## Setup ```bash npm install npm run dev ``` ## Build ```bash npm run build ``` ## Open in IDE Open the project folder in VS Code or WebStorm. "); zip.file(folder+".gitignore","node_modules/ dist/ .env .DS_Store *.local "); } /* --- Vue (Vite + Composition API + TypeScript) --- */ function buildVue(zip,folder,app,code,panelTxt){ var pn=pkgName(app); var C=cc(pn); var extracted=extractCode(panelTxt); zip.file(folder+"package.json",'{ "name": "'+pn+'", "version": "0.0.0", "type": "module", "scripts": { "dev": "vite", "build": "vue-tsc -b && vite build", "preview": "vite preview" }, "dependencies": { "vue": "^3.5.13", "vue-router": "^4.4.5", "pinia": "^2.3.0", "axios": "^1.7.9" }, "devDependencies": { "@vitejs/plugin-vue": "^5.2.1", "typescript": "~5.7.3", "vite": "^6.0.5", "vue-tsc": "^2.2.0" } } '); zip.file(folder+"vite.config.ts","import { defineConfig } from 'vite' import vue from '@vitejs/plugin-vue' import { resolve } from 'path' export default defineConfig({ plugins: [vue()], resolve: { alias: { '@': resolve(__dirname,'src') } } }) "); zip.file(folder+"tsconfig.json",'{"files":[],"references":[{"path":"./tsconfig.app.json"},{"path":"./tsconfig.node.json"}]} '); zip.file(folder+"tsconfig.app.json",'{ 
"compilerOptions":{ "target":"ES2020","useDefineForClassFields":true,"module":"ESNext","lib":["ES2020","DOM","DOM.Iterable"], "skipLibCheck":true,"moduleResolution":"bundler","allowImportingTsExtensions":true, "isolatedModules":true,"moduleDetection":"force","noEmit":true,"jsxImportSource":"vue", "strict":true,"paths":{"@/*":["./src/*"]} }, "include":["src/**/*.ts","src/**/*.d.ts","src/**/*.tsx","src/**/*.vue"] } '); zip.file(folder+"env.d.ts","/// "); zip.file(folder+"index.html"," "+slugTitle(pn)+"
"); var hasMain=Object.keys(extracted).some(function(k){return k==="src/main.ts"||k==="main.ts";}); if(!hasMain) zip.file(folder+"src/main.ts","import { createApp } from 'vue' import { createPinia } from 'pinia' import App from './App.vue' import './assets/main.css' const app = createApp(App) app.use(createPinia()) app.mount('#app') "); var hasApp=Object.keys(extracted).some(function(k){return k.indexOf("App.vue")>=0;}); if(!hasApp) zip.file(folder+"src/App.vue"," "); zip.file(folder+"src/assets/main.css","*{margin:0;padding:0;box-sizing:border-box}body{font-family:system-ui,sans-serif;background:#fff;color:#213547} "); zip.file(folder+"src/components/.gitkeep",""); zip.file(folder+"src/views/.gitkeep",""); zip.file(folder+"src/stores/.gitkeep",""); Object.keys(extracted).forEach(function(p){ var fp=p.startsWith("src/")?p:"src/"+p; zip.file(folder+fp,extracted[p]); }); zip.file(folder+"README.md","# "+slugTitle(pn)+" Generated by PantheraHive BOS. ## Setup ```bash npm install npm run dev ``` ## Build ```bash npm run build ``` Open in VS Code or WebStorm. 
"); zip.file(folder+".gitignore","node_modules/ dist/ .env .DS_Store *.local "); } /* --- Angular (v19 standalone) --- */ function buildAngular(zip,folder,app,code,panelTxt){ var pn=pkgName(app); var C=cc(pn); var sel=pn.replace(/_/g,"-"); var extracted=extractCode(panelTxt); zip.file(folder+"package.json",'{ "name": "'+pn+'", "version": "0.0.0", "scripts": { "ng": "ng", "start": "ng serve", "build": "ng build", "test": "ng test" }, "dependencies": { "@angular/animations": "^19.0.0", "@angular/common": "^19.0.0", "@angular/compiler": "^19.0.0", "@angular/core": "^19.0.0", "@angular/forms": "^19.0.0", "@angular/platform-browser": "^19.0.0", "@angular/platform-browser-dynamic": "^19.0.0", "@angular/router": "^19.0.0", "rxjs": "~7.8.0", "tslib": "^2.3.0", "zone.js": "~0.15.0" }, "devDependencies": { "@angular-devkit/build-angular": "^19.0.0", "@angular/cli": "^19.0.0", "@angular/compiler-cli": "^19.0.0", "typescript": "~5.6.0" } } '); zip.file(folder+"angular.json",'{ "$schema": "./node_modules/@angular/cli/lib/config/schema.json", "version": 1, "newProjectRoot": "projects", "projects": { "'+pn+'": { "projectType": "application", "root": "", "sourceRoot": "src", "prefix": "app", "architect": { "build": { "builder": "@angular-devkit/build-angular:application", "options": { "outputPath": "dist/'+pn+'", "index": "src/index.html", "browser": "src/main.ts", "tsConfig": "tsconfig.app.json", "styles": ["src/styles.css"], "scripts": [] } }, "serve": {"builder":"@angular-devkit/build-angular:dev-server","configurations":{"production":{"buildTarget":"'+pn+':build:production"},"development":{"buildTarget":"'+pn+':build:development"}},"defaultConfiguration":"development"} } } } } '); zip.file(folder+"tsconfig.json",'{ "compileOnSave": false, "compilerOptions": 
{"baseUrl":"./","outDir":"./dist/out-tsc","forceConsistentCasingInFileNames":true,"strict":true,"noImplicitOverride":true,"noPropertyAccessFromIndexSignature":true,"noImplicitReturns":true,"noFallthroughCasesInSwitch":true,"paths":{"@/*":["src/*"]},"skipLibCheck":true,"esModuleInterop":true,"sourceMap":true,"declaration":false,"experimentalDecorators":true,"moduleResolution":"bundler","importHelpers":true,"target":"ES2022","module":"ES2022","useDefineForClassFields":false,"lib":["ES2022","dom"]}, "references":[{"path":"./tsconfig.app.json"}] } '); zip.file(folder+"tsconfig.app.json",'{ "extends":"./tsconfig.json", "compilerOptions":{"outDir":"./dist/out-tsc","types":[]}, "files":["src/main.ts"], "include":["src/**/*.d.ts"] } '); zip.file(folder+"src/index.html"," "+slugTitle(pn)+" "); zip.file(folder+"src/main.ts","import { bootstrapApplication } from '@angular/platform-browser'; import { appConfig } from './app/app.config'; import { AppComponent } from './app/app.component'; bootstrapApplication(AppComponent, appConfig) .catch(err => console.error(err)); "); zip.file(folder+"src/styles.css","* { margin: 0; padding: 0; box-sizing: border-box; } body { font-family: system-ui, -apple-system, sans-serif; background: #f9fafb; color: #111827; } "); var hasComp=Object.keys(extracted).some(function(k){return k.indexOf("app.component")>=0;}); if(!hasComp){ zip.file(folder+"src/app/app.component.ts","import { Component } from '@angular/core'; import { RouterOutlet } from '@angular/router'; @Component({ selector: 'app-root', standalone: true, imports: [RouterOutlet], templateUrl: './app.component.html', styleUrl: './app.component.css' }) export class AppComponent { title = '"+pn+"'; } "); zip.file(folder+"src/app/app.component.html","

"+slugTitle(pn)+"

Built with PantheraHive BOS

"); zip.file(folder+"src/app/app.component.css",".app-header{display:flex;flex-direction:column;align-items:center;justify-content:center;min-height:60vh;gap:16px}h1{font-size:2.5rem;font-weight:700;color:#6366f1} "); } zip.file(folder+"src/app/app.config.ts","import { ApplicationConfig, provideZoneChangeDetection } from '@angular/core'; import { provideRouter } from '@angular/router'; import { routes } from './app.routes'; export const appConfig: ApplicationConfig = { providers: [ provideZoneChangeDetection({ eventCoalescing: true }), provideRouter(routes) ] }; "); zip.file(folder+"src/app/app.routes.ts","import { Routes } from '@angular/router'; export const routes: Routes = []; "); Object.keys(extracted).forEach(function(p){ var fp=p.startsWith("src/")?p:"src/"+p; zip.file(folder+fp,extracted[p]); }); zip.file(folder+"README.md","# "+slugTitle(pn)+" Generated by PantheraHive BOS. ## Setup ```bash npm install ng serve # or: npm start ``` ## Build ```bash ng build ``` Open in VS Code with Angular Language Service extension. 
"); zip.file(folder+".gitignore","node_modules/ dist/ .env .DS_Store *.local .angular/ "); } /* --- Python --- */ function buildPython(zip,folder,app,code){ var title=slugTitle(app); var pn=pkgName(app); var src=code.replace(/^```[w]* ?/m,"").replace(/ ?```$/m,"").trim(); var reqMap={"numpy":"numpy","pandas":"pandas","sklearn":"scikit-learn","tensorflow":"tensorflow","torch":"torch","flask":"flask","fastapi":"fastapi","uvicorn":"uvicorn","requests":"requests","sqlalchemy":"sqlalchemy","pydantic":"pydantic","dotenv":"python-dotenv","PIL":"Pillow","cv2":"opencv-python","matplotlib":"matplotlib","seaborn":"seaborn","scipy":"scipy"}; var reqs=[]; Object.keys(reqMap).forEach(function(k){if(src.indexOf("import "+k)>=0||src.indexOf("from "+k)>=0)reqs.push(reqMap[k]);}); var reqsTxt=reqs.length?reqs.join(" "):"# add dependencies here "; zip.file(folder+"main.py",src||"# "+title+" # Generated by PantheraHive BOS print(title+" loaded") "); zip.file(folder+"requirements.txt",reqsTxt); zip.file(folder+".env.example","# Environment variables "); zip.file(folder+"README.md","# "+title+" Generated by PantheraHive BOS. 
## Setup ```bash python3 -m venv .venv source .venv/bin/activate pip install -r requirements.txt ``` ## Run ```bash python main.py ``` "); zip.file(folder+".gitignore",".venv/ __pycache__/ *.pyc .env .DS_Store "); } /* --- Node.js --- */ function buildNode(zip,folder,app,code){ var title=slugTitle(app); var pn=pkgName(app); var src=code.replace(/^```[w]* ?/m,"").replace(/ ?```$/m,"").trim(); var depMap={"mongoose":"^8.0.0","dotenv":"^16.4.5","axios":"^1.7.9","cors":"^2.8.5","bcryptjs":"^2.4.3","jsonwebtoken":"^9.0.2","socket.io":"^4.7.4","uuid":"^9.0.1","zod":"^3.22.4","express":"^4.18.2"}; var deps={}; Object.keys(depMap).forEach(function(k){if(src.indexOf(k)>=0)deps[k]=depMap[k];}); if(!deps["express"])deps["express"]="^4.18.2"; var pkgJson=JSON.stringify({"name":pn,"version":"1.0.0","main":"src/index.js","scripts":{"start":"node src/index.js","dev":"nodemon src/index.js"},"dependencies":deps,"devDependencies":{"nodemon":"^3.0.3"}},null,2)+" "; zip.file(folder+"package.json",pkgJson); var fallback="const express=require("express"); const app=express(); app.use(express.json()); app.get("/",(req,res)=>{ res.json({message:""+title+" API"}); }); const PORT=process.env.PORT||3000; app.listen(PORT,()=>console.log("Server on port "+PORT)); "; zip.file(folder+"src/index.js",src||fallback); zip.file(folder+".env.example","PORT=3000 "); zip.file(folder+".gitignore","node_modules/ .env .DS_Store "); zip.file(folder+"README.md","# "+title+" Generated by PantheraHive BOS. 
## Setup ```bash npm install ``` ## Run ```bash npm run dev ``` "); } /* --- Vanilla HTML --- */ function buildVanillaHtml(zip,folder,app,code){ var title=slugTitle(app); var isFullDoc=code.trim().toLowerCase().indexOf("=0||code.trim().toLowerCase().indexOf("=0; var indexHtml=isFullDoc?code:" "+title+" "+code+" "; zip.file(folder+"index.html",indexHtml); zip.file(folder+"style.css","/* "+title+" — styles */ *{margin:0;padding:0;box-sizing:border-box} body{font-family:system-ui,-apple-system,sans-serif;background:#fff;color:#1a1a2e} "); zip.file(folder+"script.js","/* "+title+" — scripts */ "); zip.file(folder+"assets/.gitkeep",""); zip.file(folder+"README.md","# "+title+" Generated by PantheraHive BOS. ## Open Double-click `index.html` in your browser. Or serve locally: ```bash npx serve . # or python3 -m http.server 3000 ``` "); zip.file(folder+".gitignore",".DS_Store node_modules/ .env "); } /* ===== MAIN ===== */ var sc=document.createElement("script"); sc.src="https://cdnjs.cloudflare.com/ajax/libs/jszip/3.10.1/jszip.min.js"; sc.onerror=function(){ if(lbl)lbl.textContent="Download ZIP"; alert("JSZip load failed — check connection."); }; sc.onload=function(){ var zip=new JSZip(); var base=(_phFname||"output").replace(/.[^.]+$/,""); var app=base.toLowerCase().replace(/[^a-z0-9]+/g,"_").replace(/^_+|_+$/g,"")||"my_app"; var folder=app+"/"; var vc=document.getElementById("panel-content"); var panelTxt=vc?(vc.innerText||vc.textContent||""):""; var lang=detectLang(_phCode,panelTxt); if(_phIsHtml){ buildVanillaHtml(zip,folder,app,_phCode); } else if(lang==="flutter"){ buildFlutter(zip,folder,app,_phCode,panelTxt); } else if(lang==="react-native"){ buildReactNative(zip,folder,app,_phCode,panelTxt); } else if(lang==="swift"){ buildSwift(zip,folder,app,_phCode,panelTxt); } else if(lang==="kotlin"){ buildKotlin(zip,folder,app,_phCode,panelTxt); } else if(lang==="react"){ buildReact(zip,folder,app,_phCode,panelTxt); } else if(lang==="vue"){ 
buildVue(zip,folder,app,_phCode,panelTxt); } else if(lang==="angular"){ buildAngular(zip,folder,app,_phCode,panelTxt); } else if(lang==="python"){ buildPython(zip,folder,app,_phCode); } else if(lang==="node"){ buildNode(zip,folder,app,_phCode); } else { /* Document/content workflow */ var title=app.replace(/_/g," "); var md=_phAll||_phCode||panelTxt||"No content"; zip.file(folder+app+".md",md); var h=""+title+""; h+="

"+title+"

"; var hc=md.replace(/&/g,"&").replace(//g,">"); hc=hc.replace(/^### (.+)$/gm,"

$1

"); hc=hc.replace(/^## (.+)$/gm,"

$1

"); hc=hc.replace(/^# (.+)$/gm,"

$1

"); hc=hc.replace(/**(.+?)**/g,"$1"); hc=hc.replace(/ {2,}/g,"

"); h+="

"+hc+"

Generated by PantheraHive BOS
"; zip.file(folder+app+".html",h); zip.file(folder+"README.md","# "+title+" Generated by PantheraHive BOS. Files: - "+app+".md (Markdown) - "+app+".html (styled HTML) "); } zip.generateAsync({type:"blob"}).then(function(blob){ var a=document.createElement("a"); a.href=URL.createObjectURL(blob); a.download=app+".zip"; a.click(); URL.revokeObjectURL(a.href); if(lbl)lbl.textContent="Download ZIP"; }); }; document.head.appendChild(sc); }function phShare(){navigator.clipboard.writeText(window.location.href).then(function(){var el=document.getElementById("ph-share-lbl");if(el){el.textContent="Link copied!";setTimeout(function(){el.textContent="Copy share link";},2500);}});}function phEmbed(){var runId=window.location.pathname.split("/").pop().replace(".html","");var embedUrl="https://pantherahive.com/embed/"+runId;var code='';navigator.clipboard.writeText(code).then(function(){var el=document.getElementById("ph-embed-lbl");if(el){el.textContent="Embed code copied!";setTimeout(function(){el.textContent="Get Embed Code";},2500);}});}