Site SEO Auditor
Run ID: 69cd16c43e7fb09ff16a7ddf · Date: 2026-04-01 · Category: SEO & Growth
PantheraHive BOS
BOS Dashboard

Step 4: Database Upsert (hive_db) - Site SEO Audit Report Storage

This step is crucial for persisting the comprehensive SEO audit results, including all identified issues and Gemini-generated fixes, into our secure MongoDB database. This ensures that your site's SEO performance history is meticulously recorded, enabling trend analysis and the "before/after" differential reporting.


Purpose

The primary purpose of this hive_db → upsert operation is to store the complete SiteAuditReport document generated in the previous steps. This document encapsulates all findings from the headless crawl, the 12-point SEO checklist evaluation, Core Web Vitals measurements, structured data analysis, mobile viewport checks, and the AI-powered fix suggestions from Gemini.

By performing an upsert, we ensure:

  1. Data Persistence: All audit data is permanently saved and accessible.
  2. Historical Tracking: Each new audit is stored as a distinct report, allowing for a timeline of your site's SEO health.
  3. Differential Reporting: The new report calculates and stores the differences compared to the most recent previous audit for the same site, providing a clear "before/after" view of changes.

Database & Collection

Each document in the SiteAuditReports collection represents a single, complete SEO audit performed for a specific website at a given timestamp.


Data Model: SiteAuditReport Schema

The following details the structure of the SiteAuditReport document that will be upserted into MongoDB. This schema is designed for comprehensive reporting, historical comparison, and actionable insights.

Example document structure (JSON with explanatory comments):
{
  "_id": ObjectId, // Unique identifier for this audit report (e.g., generated by MongoDB)
  "siteUrl": String, // The full URL of the site that was audited (e.g., "https://www.example.com")
  "auditTimestamp": ISODate, // Date and time when the audit was completed
  "auditType": String, // "scheduled" (every Sunday 2 AM) or "on-demand"
  "status": String, // "completed", "failed", "processing"
  "pagesAuditedCount": Number, // Total number of unique pages successfully crawled and audited
  "pagesWithIssuesCount": Number, // Number of pages where at least one SEO issue was detected
  "overallSeoScore": { // An aggregate score reflecting overall SEO health (e.g., 0-100)
    "value": Number,
    "grade": String // e.g., "Excellent", "Good", "Fair", "Poor"
  },
  "summaryIssues": { // Aggregated summary of issues across the entire site
    "metaTitle": { "failCount": Number, "pagesAffected": [String, ...] },
    "metaDescription": { "failCount": Number, "pagesAffected": [String, ...] },
    "h1Presence": { "failCount": Number, "pagesAffected": [String, ...] },
    "imageAltCoverage": { "failCount": Number, "pagesAffected": [String, ...] },
    "internalLinkDensity": { "failCount": Number, "pagesAffected": [String, ...] },
    "canonicalTags": { "failCount": Number, "pagesAffected": [String, ...] },
    "openGraphTags": { "failCount": Number, "pagesAffected": [String, ...] },
    "coreWebVitals": { 
      "lcpFailCount": Number, "clsFailCount": Number, "fidFailCount": Number,
      "pagesAffected": [String, ...] 
    },
    "structuredDataPresence": { "failCount": Number, "pagesAffected": [String, ...] },
    // Note: no trailing comma after the last entry — keeps the example valid JSON.
    "mobileViewport": { "failCount": Number, "pagesAffected": [String, ...] }
    // ... other checks as needed
  },
  "previousAuditReportId": ObjectId, // Reference to the _id of the immediately preceding audit report for this site (null if first audit)
  "diffReport": { // Detailed comparison with the previous audit
    "hasChanged": Boolean, // True if any significant change (new issue, resolved issue, metric change)
    "newIssues": [ // Issues found in THIS audit that were NOT present in the previous one
      {
        "pageUrl": String,
        "issueType": String, // e.g., "Missing H1", "Duplicate Meta Title", "Poor LCP"
        "description": String,
        "geminiFix": String // The exact fix generated by Gemini
      }
    ],
    "resolvedIssues": [ // Issues from the PREVIOUS audit that are NO LONGER present in this one
      {
        "pageUrl": String,
        "issueType": String,
        "description": String
      }
    ],
    "changedMetrics": [ // Significant changes in key metrics (e.g., overall score, Core Web Vitals)
      {
        "metricName": String, // e.g., "overallSeoScore", "LCP_average"
        "oldValue": Any,
        "newValue": Any,
        "change": String // e.g., "+5", "-10ms"
      }
    ]
  },
  "pageDetails": [ // An array containing detailed audit results for each page crawled
    {
      "pageUrl": String, // The URL of the specific page
      "statusCode": Number, // HTTP status code of the page (e.g., 200, 404, 301)
      "hasIssues": Boolean, // True if this page has any identified SEO issues
      "seoChecks": {
        "metaTitle": {
          "status": String, // "PASS", "FAIL", "N/A"
          "value": String, // The actual meta title found
          "issues": [String], // e.g., ["Too long", "Not unique"]
          "geminiFix": String // Exact fix generated by Gemini for this specific issue
        },
        "metaDescription": {
          "status": String,
          "value": String,
          "issues": [String],
          "geminiFix": String
        },
        "h1Presence": {
          "status": String,
          "value": String, // The actual H1 text found (or "N/A")
          "issues": [String], // e.g., ["Missing H1", "Multiple H1s"]
          "geminiFix": String
        },
        "imageAltCoverage": {
          "status": String,
          "totalImages": Number,
          "imagesMissingAlt": Number,
          "issues": [String], // e.g., ["3 images missing alt text"]
          "geminiFix": String
        },
        "internalLinkDensity": {
          "status": String,
          "totalInternalLinks": Number,
          "issues": [String], // e.g., ["Low internal link count"]
          "geminiFix": String
        },
        "canonicalTags": {
          "status": String,
          "value": String, // The canonical URL found (or "N/A")
          "issues": [String], // e.g., ["Missing canonical", "Self-referencing canonical incorrect"]
          "geminiFix": String
        },
        "openGraphTags": {
          "status": String,
          "issues": [String], // e.g., ["Missing og:title", "Missing og:image"]
          "geminiFix": String
        },
        "structuredDataPresence": {
          "status": String,
          "detectedTypes": [String], // e.g., ["Schema.org/Article", "Schema.org/Product"]
          "issues": [String], // e.g., ["Missing required fields for Article schema"]
          "geminiFix": String
        },
        // Note: no trailing comma after the last entry — keeps the example valid JSON.
        "mobileViewport": {
          "status": String,
          "issues": [String], // e.g., ["Viewport meta tag missing"]
          "geminiFix": String
        }
        // ... (other 12-point checks as defined)
      },
      "coreWebVitals": {
        "lcp": { // Largest Contentful Paint
          "value": Number, // in ms
          "status": String, // "PASS", "FAIL"
          "issues": [String] // e.g., ["LCP too slow (2.8s)"]
        },
        "cls": { // Cumulative Layout Shift
          "value": Number,
          "status": String,
          "issues": [String]
        },
        "fid": { // First Input Delay (or INP if available)
          "value": Number, // in ms
          "status": String,
          "issues": [String]
        }
      },
      "allIssuesFoundOnPage": [ // Consolidated list of all issues for this page
        {
          "type": String, // e.g., "Meta Title", "H1", "LCP"
          "description": String, // Specific issue description
          "severity": String, // "Critical", "High", "Medium", "Low"
          "geminiFix": String // The exact fix generated by Gemini
        }
      ]
    }
  ]
}
Sandboxed live preview

Step 1 of 5: Site Crawl Initiation (puppeteer → crawl)

This document details the successful execution of the initial crawling phase for your "Site SEO Auditor" workflow. This crucial first step leverages a headless browser to comprehensively discover and capture the state of every page on your website, laying the foundation for a thorough SEO audit.


1. Step Overview: Comprehensive Site Discovery

Objective: The primary goal of this step is to systematically visit and gather raw data from every discoverable page on your website. Unlike traditional HTTP crawlers, our approach simulates a real user's browser experience, ensuring that dynamic content, JavaScript-rendered elements, and single-page applications (SPAs) are fully processed and available for subsequent analysis.

Outcome: A comprehensive inventory of all unique URLs on your site, along with their fully rendered HTML content and associated network requests. This data forms the input for the detailed 12-point SEO checklist audit.


2. Technology & Methodology

We utilize Puppeteer, a Node.js library, to control a headless Chromium browser. This technology choice is critical for several reasons:

  • Headless Browser Simulation: Puppeteer operates a full-fledged browser environment without a graphical user interface. This allows it to execute JavaScript, render CSS, and interact with the DOM exactly as a user's browser would.
  • Dynamic Content Handling: Many modern websites rely heavily on JavaScript for content loading, navigation, and user interaction. Puppeteer ensures that all content, regardless of how it's loaded (client-side rendering, AJAX calls, etc.), is visible and available for the audit. This prevents "blind spots" that traditional, static-content-only crawlers might encounter.
  • Realistic Page Rendering: By rendering each page, we capture the exact state a search engine bot (like Googlebot) or a user would see, providing an accurate basis for evaluating Core Web Vitals and other visual elements.

Crawl Strategy:

  1. Starting Point: The crawl initiates from your provided root URL.
  2. Sitemap Integration (if available): If a sitemap.xml is discoverable, it is prioritized to ensure rapid discovery of known URLs.
  3. Internal Link Traversal: The crawler then systematically follows all unique internal links found within the rendered HTML of each visited page, recursively discovering new pages until no new unique internal URLs are found.
  4. Politeness Policy: The crawler adheres to a politeness policy, including rate limiting and respecting robots.txt directives, to avoid overwhelming your server and ensure a responsible crawl.

3. Crawl Execution Details

During this phase, the headless browser performs the following actions for each discovered URL:

  • Page Navigation: The browser navigates to the target URL.
  • Full Page Load & Rendering: It waits for the page to fully load, including the execution of all JavaScript and the rendering of all visual elements. This ensures that the Document Object Model (DOM) is complete and stable.
  • DOM Snapshot Capture: The complete, rendered HTML content of the page's DOM is extracted and stored.
  • Network Request Logging: All network requests made by the page (e.g., for images, CSS, JavaScript files, API calls) are logged. This data is valuable for identifying potential issues like broken resources or performance bottlenecks.
  • Resource Error Detection: Basic detection of failed resource loads (e.g., 404s for images or scripts) is performed at this stage.
  • Viewport Emulation: The crawl emulates a standard desktop viewport by default, with a subsequent check for mobile viewport meta tags as part of the audit.

4. Initial Data Collection (Raw Output of this Step)

The successful completion of the crawl step yields the following raw data, which is then prepared for the subsequent auditing phase:

  • Discovered URLs List: A comprehensive, de-duplicated list of every unique URL found on your website.
  • Rendered HTML Content: For each discovered URL, the complete HTML of the fully rendered page (after all JavaScript execution) is stored. This is the "source of truth" for the SEO audit.
  • Page Load Metrics (Initial): Basic timing information for page load, laying the groundwork for Core Web Vitals analysis.
  • Network Request Logs: A record of all HTTP/S requests made by each page, including status codes and URLs.
  • Console Logs & Errors: Any console messages or JavaScript errors encountered during page loading are captured.

5. Transition to Auditing Phase

The data gathered in this crawling step is now securely stored and serves as the direct input for Step 2: SEO Audit Execution. The subsequent step will iterate through each discovered URL and its associated rendered content to apply the detailed 12-point SEO checklist, identifying specific areas for improvement.


6. Customer Benefits

  • Complete Coverage: Ensures every discoverable page on your site, including those with dynamic content, is included in the audit.
  • Accurate Representation: Provides an audit based on what a real user (and a modern search engine bot) would actually experience and see.
  • Foundation for Deeper Analysis: The rich dataset captured during the crawl is essential for a precise and actionable SEO audit, leading to more effective fixes.
  • Proactive Issue Detection: Early identification of inaccessible pages or fundamental rendering issues.

This concludes Step 1: Site Crawl Initiation. We are now proceeding to Step 2: SEO Audit Execution, where the captured data will be analyzed against the 12-point SEO checklist.

hive_db Output

Step 2: Database Comparison and Diff Generation (hive_db → diff)

This crucial step in the Site SEO Auditor workflow is responsible for comparing the newly generated, comprehensive SEO audit report with the previously stored audit data within your dedicated MongoDB instance (hive_db). The primary objective is to identify and highlight changes, improvements, and regressions across your website's SEO health, providing a clear "before and after" snapshot.


1. Purpose of Diff Generation

The "diff" generation serves several vital purposes for your SEO strategy:

  • Performance Tracking: Understand the impact of recent website changes (e.g., new content, code deployments, design updates) on your SEO performance.
  • Regression Detection: Swiftly identify any new SEO issues or regressions that may have been introduced, allowing for proactive intervention.
  • Improvement Validation: Confirm the positive effects of SEO optimizations or fixes that have been implemented since the last audit.
  • Historical Context: Maintain a comprehensive historical record of your site's SEO evolution, aiding in long-term strategic planning.
  • Actionable Insights: Pinpoint specific areas that require immediate attention or further optimization, driving the next steps in the workflow.

2. Inputs for Comparison

To generate an accurate and meaningful diff, this step utilizes two primary data sources:

  • New Audit Report (Current State): The complete set of SEO data gathered by the headless crawler (Puppeteer) during the latest run. This includes page-level metrics for all 12 SEO checklist points across every visited URL.
  • Previous Audit Report (Baseline State): The most recent SiteAuditReport document retrieved from your hive_db (MongoDB). This serves as the reference point for comparison. If no previous report exists (e.g., for the very first audit), the "before" state will be considered empty, and the "diff" will effectively be the initial report itself.

3. Diff Generation Process

The diff generation process involves a meticulous, page-by-page and site-wide comparison across all 12 SEO checklist points.

3.1. Page-Level Comparison

For each URL audited in the current run, the system performs a direct comparison with its corresponding data from the previous audit. Key aspects compared at the page level include:

  • Existence:

* New Pages: URLs found in the current audit but not in the previous one.

* Removed Pages: URLs found in the previous audit but not in the current one (e.g., due to redirects, deletions, or crawler blockages).

  • Metric Changes: For each of the 12 SEO checklist items, the system compares the specific values, statuses, or counts for that URL between the two audit reports.

3.2. Site-Wide Aggregation and Trend Analysis

Beyond individual page comparisons, the system aggregates changes to provide a high-level overview of site-wide trends:

  • Overall Score/Health: A calculated weighted score reflecting the general SEO health of the site, with a diff showing its movement.
  • Metric-Specific Totals: Comparison of total counts or percentages for each SEO metric across the entire site (e.g., total pages with missing H1s, total images without alt text).
  • Category-Based Changes: Summaries of changes within broader categories like "On-Page SEO," "Technical SEO," and "Performance."

3.3. Identification of "Broken Elements" and Significant Changes

A core function of this step is to flag "broken elements" or significant deviations. This involves:

  • Threshold-Based Detection: Defining thresholds for what constitutes a "significant" change (e.g., a drop of 0.1 seconds in LCP, an increase of 5% in pages with duplicate meta descriptions).
  • Issue Categorization: Categorizing changes into:

* Improvements: Metrics that moved towards an optimal state.

* Regressions: Metrics that moved away from an optimal state, indicating a new or worsened issue.

* No Change: Metrics that remained consistent.

  • Specific Element Identification: Pinpointing the exact element on a page that is "broken" or has changed (e.g., "Meta Description on /page-x is now a duplicate," "Image /img/banner.jpg on /page-y is missing alt text," "LCP on /product-page-z increased from 2.0s to 3.5s").

4. Detailed Diff Output Structure

The generated diff is a structured JSON object that becomes an integral part of the new SiteAuditReport document. It is designed for clarity and actionability, presenting data at both a summary and granular level.

4.1. Overall Site Health Summary Diff

A high-level overview of the site's SEO performance changes:

  • overall_score_diff:

* before: [Previous overall score]

* after: [Current overall score]

* change: [Difference: +X (improvement), -X (regression)]

  • page_count_diff:

* total_pages_before: [Number of pages in previous audit]

* total_pages_after: [Number of pages in current audit]

* new_pages_detected: [List of new URLs]

* pages_no_longer_found: [List of URLs from previous audit not found now]

  • summary_of_changes_by_category:

* on_page_seo: { improvements: X, regressions: Y, no_change: Z }

* technical_seo: { improvements: X, regressions: Y, no_change: Z }

* performance_core_web_vitals: { improvements: X, regressions: Y, no_change: Z }

4.2. Page-Specific Changes

A detailed breakdown of changes for individual URLs where significant differences were detected:

  • page_changes: [Array of objects]

* url: https://yourdomain.com/example-page

* status: improved | regressed | no_significant_change | new_page | page_removed

* metric_diffs: [Array of objects for specific metric changes on this URL]

* metric_name: meta_title_uniqueness

* before: duplicate

* after: unique

* change_type: improvement

* details: Meta title is now unique across the site.

* broken_element: false (or true if it's a regression)

* element_locator: document.head > title (if applicable)

* current_value: Your Unique Title

* previous_value: Another Duplicate Title

4.3. Key Metric Diffs (Aggregated)

A specific comparison for each of the 12 SEO checklist items, showing site-wide trends:

  • meta_title_description_diff:

* unique_titles_before: X, unique_titles_after: Y, change: Z

* duplicate_titles_before: X, duplicate_titles_after: Y, change: Z (and list of URLs affected)

* missing_titles_before: X, missing_titles_after: Y, change: Z (and list of URLs affected)

(A similar structure applies for meta descriptions.)

  • h1_presence_diff:

* pages_with_h1_before: X, pages_with_h1_after: Y, change: Z

* pages_missing_h1_before: X, pages_missing_h1_after: Y, change: Z (and list of URLs affected, marked as broken_element: true if newly missing)

* pages_multiple_h1_before: X, pages_multiple_h1_after: Y, change: Z (and list of URLs affected)

  • image_alt_coverage_diff:

* images_total_before: X, images_total_after: Y

* images_with_alt_before: X, images_with_alt_after: Y, change: Z

* images_missing_alt_before: X, images_missing_alt_after: Y, change: Z (and list of specific image URLs + page URLs, marked as broken_element: true if newly missing)

* alt_coverage_percentage_before: X%, alt_coverage_percentage_after: Y%

  • internal_link_density_diff:

* avg_internal_links_per_page_before: X, avg_internal_links_per_page_after: Y, change: Z

* pages_with_low_links_before: X, pages_with_low_links_after: Y, change: Z (and list of URLs)

  • canonical_tags_diff:

* pages_with_canonical_before: X, pages_with_canonical_after: Y, change: Z

* pages_missing_canonical_before: X, pages_missing_canonical_after: Y, change: Z (and list of URLs, marked as broken_element: true if newly missing)

* pages_incorrect_canonical_before: X, pages_incorrect_canonical_after: Y, change: Z (and list of URLs with details)

  • open_graph_tags_diff:

* pages_with_og_tags_before: X, pages_with_og_tags_after: Y, change: Z

* pages_missing_og_tags_before: X, pages_missing_og_tags_after: Y, change: Z (and list of URLs, marked as broken_element: true if newly missing)

* pages_incomplete_og_tags_before: X, pages_incomplete_og_tags_after: Y, change: Z (and list of URLs with details)

  • core_web_vitals_diff:

* lcp_diff: { avg_before: X, avg_after: Y, change: Z, pages_regressed: [URLs], pages_improved: [URLs] }

* cls_diff: { avg_before: X, avg_after: Y, change: Z, pages_regressed: [URLs], pages_improved: [URLs] }

* fid_diff: { avg_before: X, avg_after: Y, change: Z, pages_regressed: [URLs], pages_improved: [URLs] }

(Specific pages where CWV crossed "good" or "needs improvement" thresholds will be highlighted as broken elements if regressed.)

  • structured_data_diff:

* pages_with_sd_before: X, pages_with_sd_after: Y, change: Z

* pages_missing_sd_before: X, pages_missing_sd_after: Y, change: Z (and list of URLs, marked as broken_element: true if newly missing)

* pages_sd_errors_before: X, pages_sd_errors_after: Y, change: Z (and list of URLs with error details)

  • mobile_viewport_diff:

* pages_with_viewport_before: X, pages_with_viewport_after: Y, change: Z

* pages_missing_viewport_before: X, pages_missing_viewport_after: Y, change: Z (and list of URLs, marked as broken_element: true if newly missing)


5. Actionable Insights from the Diff

The generated diff is not just a comparison; it's a direct input for actionable steps. All identified "broken elements" or significant regressions (e.g., a page losing its H1, an image losing its alt text, a Core Web Vital score dipping below recommended thresholds) are specifically flagged within this diff. This precise identification allows for targeted remediation.


6. Storage of Diff Report

The complete diff data, structured as described above, is then stored as a nested object within the newly created SiteAuditReport document in your hive_db (MongoDB). This ensures that each audit report contains its own historical comparison, making it easy to retrieve and analyze past changes.


7. Next Steps

The diff generated in this step is immediately passed to the next stage of the workflow. Specifically, the identified "broken elements" and regressions will be sent to Gemini, which will leverage its AI capabilities to generate exact, actionable fixes for these issues. This ensures a seamless transition from detection to resolution within your SEO workflow.

gemini Output

Step 3 of 5: Gemini AI - Batch Fix Generation

This step focuses on leveraging Google's Gemini AI to automatically generate precise, actionable fixes for all identified SEO issues across your website. Following the comprehensive audit by our headless crawler, any "broken elements" or non-compliant SEO attributes are systematically fed into Gemini for intelligent remediation.


Process Overview: AI-Powered Fix Generation

Our system aggregates all detected SEO deficiencies from the initial crawling phase. This includes, but is not limited to, missing meta descriptions, duplicate titles, images without alt text, missing H1s, or incorrect canonical tags. These issues are then batched and sent to Gemini, which analyzes each specific problem in context and provides a tailored solution.

1. Input Data Aggregation

  • Source Data: The output from the headless crawler (Puppeteer) detailing every identified SEO issue.
  • Data Points per Issue: For each broken element, Gemini receives:

* The specific URL where the issue was found.

* The type of SEO issue (e.g., "Missing Meta Description", "Duplicate H1", "Image without Alt Text", "Incorrect Canonical Tag").

* The relevant HTML snippet or contextual information (e.g., the <img> tag for missing alt text, the <head> section for meta tags).

* Any associated audit metrics (e.g., the duplicate title it's clashing with, the specific image URL).

2. Gemini AI Analysis and Fix Generation

  • Contextual Understanding: Gemini processes each issue, understanding the specific SEO best practice being violated and the surrounding HTML/page context.
  • Prescriptive Solutions: Utilizing its advanced natural language processing and code generation capabilities, Gemini formulates an "exact fix" for each identified problem. This isn't just a generic recommendation but a specific code snippet or instruction.
  • Batch Processing Efficiency: Instead of generating fixes one by one, Gemini handles all identified issues across your entire site in a highly efficient batch process. This ensures that even large sites with thousands of pages and hundreds of issues receive comprehensive, timely remediation suggestions.
  • Example Fix Generation:

* Issue: Image on /about-us has no alt attribute: <img src="/img/team.jpg">

* Gemini Fix: Add descriptive alt text: <img src="/img/team.jpg" alt="Our dedicated team working together">

* Issue: Meta Description missing on /products/item-123

* Gemini Fix: Add unique meta description: <meta name="description" content="Discover the features and benefits of product X, designed for optimal performance and user satisfaction.">

* Issue: Duplicate H1 tag on /blog/post-title

* Gemini Fix: Revise secondary H1 to H2: Change <h1>Related Posts</h1> to <h2>Related Posts</h2>

3. Output Format and Structure

  • Structured Fixes: Gemini's output for each issue is a structured JSON object containing:

* url: The page URL where the fix applies.

* issue_type: The original SEO issue identified.

* original_element_html: The problematic HTML snippet.

* recommended_fix_html: The exact HTML or code snippet to replace/add.

* fix_description: A human-readable explanation of the fix and why it's recommended.

* severity: The SEO severity level of the issue (e.g., Critical, High, Medium, Low).

* fix_status: (Initially) "Generated", indicating it's ready for review/implementation.

4. Post-Generation Processing

  • Storage: All generated fixes are stored securely in MongoDB as part of the SiteAuditReport. This ensures a complete record of issues and their proposed solutions.
  • Association: Each fix is directly linked to the specific audit report and the corresponding problematic element, allowing for easy traceability.
  • Preparation for Reporting: The structured fixes are now ready to be presented in your comprehensive audit report, providing clear, actionable steps for your development or content teams.

Benefits of AI-Driven Fix Generation

  • Accuracy and Precision: Gemini provides highly accurate and contextually relevant fixes, minimizing errors and ensuring compliance with best practices.
  • Efficiency at Scale: Automatically generating fixes for potentially thousands of issues across hundreds of pages dramatically reduces manual effort and time.
  • Actionable Insights: Instead of just pointing out problems, the system delivers ready-to-implement solutions, accelerating the remediation process.
  • Consistency: AI-generated fixes ensure a consistent application of SEO best practices across your entire digital footprint.
  • Developer-Ready Output: The fixes are provided in a format that developers can easily understand and integrate into your website's codebase.

This step transforms raw audit data into actionable intelligence, providing you with a clear roadmap to optimize your website's SEO performance effectively and efficiently.


Upsert Mechanism

  1. Identify Site: The siteUrl field acts as the primary identifier for tracking a specific website's audit history.
  2. Retrieve Previous Report: Before storing the current report, the system queries the SiteAuditReports collection to find the most recent SiteAuditReport document for the given siteUrl where status is "completed".
  3. Calculate Diff: If a previous report is found, a differential analysis is performed between the current audit results and the previous one. This calculation identifies newIssues, resolvedIssues, and changedMetrics.
  4. Set previousAuditReportId: The _id of the retrieved previous report is assigned to the previousAuditReportId field in the current SiteAuditReport document.
  5. Insert New Document: A new SiteAuditReport document, incorporating the calculated diffReport and previousAuditReportId, is then inserted into the SiteAuditReports collection. Since each audit is a snapshot in time, we are always inserting a new document rather than updating an existing one, ensuring a complete historical record.

Data Integrity & Security

  • Schema Enforcement: MongoDB's flexible schema is leveraged, but internal validation ensures that all critical fields conform to the expected data types and structures.
  • Indexing: Key fields such as siteUrl, auditTimestamp, and _id are indexed to ensure efficient querying and retrieval of audit reports, especially for historical lookup and differential reporting.
  • Access Control: All interactions with hive_db are authenticated and authorized, ensuring that only the "Site SEO Auditor" workflow has the necessary permissions to perform this upsert operation.
  • Backup & Recovery: The MongoDB database is subject to our standard enterprise-grade backup and recovery protocols, ensuring the safety and availability of your audit data.

Expected Outcome & Accessibility

Upon successful completion of this step, a new SiteAuditReport document will be available in the SiteAuditReports collection. This report is immediately accessible:

  • PantheraHive Dashboard: The data will power your SEO audit dashboard, displaying the latest results, historical trends, and the "before/after" diff.
  • API Access: You can programmatically access this data via the PantheraHive API, allowing integration with your internal systems or custom reporting tools.
  • Alerting: The system can trigger alerts based on significant changes identified in the diffReport (e.g., new critical issues, a drop in overall SEO score).

Next Steps

With the audit report successfully stored in hive_db, the workflow proceeds to the final step:

  • Step 5: Notification & Reporting: The system will generate a user-friendly report based on the stored data and notify relevant stakeholders (e.g., via email, dashboard alert) about the audit results, highlighting key changes, new issues, and the suggested fixes.
hive_db Output

Step 5 of 5: hive_db → conditional_update - Site SEO Auditor Database Update

This deliverable confirms the successful execution of the final step in your "Site SEO Auditor" workflow. This crucial step involves persisting all gathered audit data, SEO recommendations, and performance metrics into our secure, scalable MongoDB database.


1. Step Execution Confirmation

Status: COMPLETED

Step 5 of 5: hive_db → conditional_update for the "Site SEO Auditor" workflow has been successfully executed. All audit findings, including the 12-point SEO checklist results, Core Web Vitals, Gemini-generated fixes, and the before/after differential report, have been securely stored in your dedicated MongoDB instance.


2. Purpose of this Step: Data Persistence & Historical Tracking

The hive_db → conditional_update step is fundamental to the value proposition of the Site SEO Auditor. Its primary purposes are:

  • Data Persistence: To permanently store all comprehensive audit results, ensuring no data is lost and providing a single source of truth for your site's SEO performance.
  • Historical Tracking: By storing each audit report, we enable robust historical analysis, allowing you to track changes, identify trends, and measure the impact of your SEO efforts over time.
  • Before/After Diff Tracking: This step intelligently compares the latest audit with the most recent prior audit, highlighting specific improvements or regressions, which is critical for understanding performance evolution.
  • Foundation for Reporting: The stored data serves as the backbone for your SEO dashboard, custom reports, and automated alerts, transforming raw data into actionable insights.

3. Comprehensive Data Stored in MongoDB

Each audit run generates a SiteAuditReport document in MongoDB, structured to capture every detail of your site's SEO health. This includes:

A. Audit Metadata & Overview

  • auditId: Unique identifier for each audit report.
  • siteUrl: The root URL of the audited website.
  • auditTimestamp: Date and time when the audit was completed.
  • auditType: scheduled (e.g., weekly) or on-demand.
  • status: completed, in_progress, failed.
  • totalPagesAudited: Total number of unique pages crawled and audited.
  • overallSeoHealthScore: An aggregated score reflecting the site's general SEO health.
  • totalIssuesIdentified: Count of all unique SEO issues found across the site.

B. Page-Level Audit Details (Array of pageAudits)

For each page crawled by Puppeteer, the following detailed information is stored:

  • url: The specific URL of the audited page.
  • httpStatus: HTTP status code returned (e.g., 200, 301, 404).
  • loadTimeMs: Page load time in milliseconds.
  • puppeteerMetrics: Raw metrics from Puppeteer (e.g., DOMContentLoaded, FirstContentfulPaint).
  • seoChecks: Detailed results for each of the 12 SEO checklist points:

* metaTitle:

* value: The actual meta title.

* isPresent: Boolean.

* isUnique: Boolean (site-wide).

* lengthStatus: PASS/WARNING/FAIL based on character count.

* metaDescription:

* value: The actual meta description.

* isPresent: Boolean.

* isUnique: Boolean (site-wide).

* lengthStatus: PASS/WARNING/FAIL.

* h1:

* value: The H1 content.

* isPresent: Boolean.

* count: Number of H1s found (should be 1).

* status: PASS/FAIL (e.g., missing or multiple H1s).

* imageAltCoverage:

* percentage: Percentage of images with alt text.

* missingAlts: Array of image src URLs missing alt text.

* status: PASS/WARNING/FAIL.

* internalLinkDensity:

* count: Total number of internal links.

* links: Array of internal link href values.

* status: PASS/WARNING (e.g., very low density).

* canonicalTag:

* isPresent: Boolean.

* value: The canonical URL, if present.

* isValid: Boolean (e.g., self-referencing, valid URL).

* openGraphTags:

* isPresent: Boolean.

* properties: Object containing key OG properties (e.g., og:title, og:image).

* status: PASS/WARNING (e.g., missing essential properties).

* coreWebVitals:

* lcp (Largest Contentful Paint): score, status (PASS/WARNING/FAIL).

* cls (Cumulative Layout Shift): score, status.

* fid (First Input Delay): score, status.

* structuredData:

* isPresent: Boolean.

* types: Array of detected schema types (e.g., Article, Product).

* isValid: Boolean (based on basic validation).

* mobileViewport:

* isPresent: Boolean (meta viewport tag).

* configuration: String (e.g., width=device-width, initial-scale=1.0).

* status: PASS/FAIL.

C. Issues Identified & Gemini-Generated Fixes (issuesIdentified)

For each detected SEO issue on a specific page:

  • check: The SEO check that failed (e.g., metaTitle, h1, imageAltCoverage).
  • description: A human-readable description of the issue.
  • severity: CRITICAL, MAJOR, MINOR.
  • geminiFix:

* originalIssue: Detailed problem statement provided to Gemini.

* suggestedFix: Exact, actionable code or content fix generated by Gemini.

* fixConfidence: Gemini's confidence score for the fix.

D. Before/After Differential Report (diffReport)

This critical section provides a concise overview of changes since the last audit:

  • previousAuditId: The _id of the prior audit report for comparison (null if this is the first audit).
  • changes: An array detailing specific metric changes:

* pageUrl: The URL where the change occurred.

* metric: The specific SEO metric that changed (e.g., pagesAudited[0].seoChecks.metaTitle.isUnique).

* oldValue: Value from the previous audit.

* newValue: Value from the current audit.

* changeType: improved, regressed, new_issue, issue_resolved.


4. Example SiteAuditReport Document Structure (Simplified — shown in MongoDB shell syntax; `ObjectId(...)`, `ISODate(...)`, and `//` comments are shell extensions, not strict JSON)


{
  "_id": ObjectId("653b6d2e6a7b8c9d0e1f2a3b"),
  "auditId": "seo-audit-20231027-020000",
  "siteUrl": "https://www.example.com",
  "auditTimestamp": ISODate("2023-10-27T02:00:00Z"),
  "auditType": "scheduled",
  "status": "completed",
  "totalPagesAudited": 150,
  "overallSeoHealthScore": 85,
  "totalIssuesIdentified": 12,
  "pagesAudited": [
    {
      "url": "https://www.example.com/",
      "httpStatus": 200,
      "loadTimeMs": 1250,
      "seoChecks": {
        "metaTitle": { "value": "Example Home Page", "isUnique": true, "status": "PASS", "lengthStatus": "PASS" },
        "h1": { "value": "Welcome to Example!", "isPresent": true, "count": 1, "status": "PASS" },
        "imageAltCoverage": { "percentage": 95, "missingAlts": ["/images/logo.png"], "status": "WARNING" },
        "coreWebVitals": {
          "lcp": { "score": 2.1, "status": "PASS" },
          "cls": { "score": 0.05, "status": "PASS" },
          "fid": { "score": 50, "status": "PASS" }
        }
        // ... other checks
      },
      "issuesIdentified": [
        {
          "check": "imageAltCoverage",
          "description": "Image '/images/logo.png' is missing alt text.",
          "severity": "MINOR",
          "geminiFix": {
            "originalIssue": "The <img> tag for '/images/logo.png' lacks an 'alt' attribute.",
            "suggestedFix": "Add `alt=\"Example Company Logo\"` to the `<img>` tag for `/images/logo.png`.",
            "fixConfidence": 0.95
          }
        }
      ]
    },
    {
      "url": "https://www.example.com/blog/latest-post",
      "httpStatus": 200,
      "loadTimeMs": 2800,
      "seoChecks": {
        "metaTitle": { "value": "Latest Blog Post", "isUnique": false, "status": "FAIL", "lengthStatus": "PASS" },
        "h1": { "value": "Latest Blog Post Title", "isPresent": true, "count": 1, "status": "PASS" },
        "coreWebVitals": {
          "lcp": { "score": 3.5, "status": "FAIL" }
          // ... remainder of this page's audit truncated in source; structures closed for validity
        }
      }
    }
  ]
}
site_seo_auditor.txt
Download source file
Copy all content
Full output as text
Download ZIP
IDE-ready project ZIP
Copy share link
Permanent URL for this run
Get Embed Code
Embed this result on any website
Print / Save PDF
Use browser print dialog
"); var hasSrcMain=Object.keys(extracted).some(function(k){return k.indexOf("src/main")>=0;}); if(!hasSrcMain) zip.file(folder+"src/main."+ext,"import React from 'react' import ReactDOM from 'react-dom/client' import App from './App' import './index.css' ReactDOM.createRoot(document.getElementById('root')!).render( ) "); var hasSrcApp=Object.keys(extracted).some(function(k){return k==="src/App."+ext||k==="App."+ext;}); if(!hasSrcApp) zip.file(folder+"src/App."+ext,"import React from 'react' import './App.css' function App(){ return(

"+slugTitle(pn)+"

Built with PantheraHive BOS

) } export default App "); zip.file(folder+"src/index.css","*{margin:0;padding:0;box-sizing:border-box} body{font-family:system-ui,-apple-system,sans-serif;background:#f0f2f5;color:#1a1a2e} .app{min-height:100vh;display:flex;flex-direction:column} .app-header{flex:1;display:flex;flex-direction:column;align-items:center;justify-content:center;gap:12px;padding:40px} h1{font-size:2.5rem;font-weight:700} "); zip.file(folder+"src/App.css",""); zip.file(folder+"src/components/.gitkeep",""); zip.file(folder+"src/pages/.gitkeep",""); zip.file(folder+"src/hooks/.gitkeep",""); Object.keys(extracted).forEach(function(p){ var fp=p.startsWith("src/")?p:"src/"+p; zip.file(folder+fp,extracted[p]); }); zip.file(folder+"README.md","# "+slugTitle(pn)+" Generated by PantheraHive BOS. ## Setup ```bash npm install npm run dev ``` ## Build ```bash npm run build ``` ## Open in IDE Open the project folder in VS Code or WebStorm. "); zip.file(folder+".gitignore","node_modules/ dist/ .env .DS_Store *.local "); } /* --- Vue (Vite + Composition API + TypeScript) --- */ function buildVue(zip,folder,app,code,panelTxt){ var pn=pkgName(app); var C=cc(pn); var extracted=extractCode(panelTxt); zip.file(folder+"package.json",'{ "name": "'+pn+'", "version": "0.0.0", "type": "module", "scripts": { "dev": "vite", "build": "vue-tsc -b && vite build", "preview": "vite preview" }, "dependencies": { "vue": "^3.5.13", "vue-router": "^4.4.5", "pinia": "^2.3.0", "axios": "^1.7.9" }, "devDependencies": { "@vitejs/plugin-vue": "^5.2.1", "typescript": "~5.7.3", "vite": "^6.0.5", "vue-tsc": "^2.2.0" } } '); zip.file(folder+"vite.config.ts","import { defineConfig } from 'vite' import vue from '@vitejs/plugin-vue' import { resolve } from 'path' export default defineConfig({ plugins: [vue()], resolve: { alias: { '@': resolve(__dirname,'src') } } }) "); zip.file(folder+"tsconfig.json",'{"files":[],"references":[{"path":"./tsconfig.app.json"},{"path":"./tsconfig.node.json"}]} '); zip.file(folder+"tsconfig.app.json",'{ 
"compilerOptions":{ "target":"ES2020","useDefineForClassFields":true,"module":"ESNext","lib":["ES2020","DOM","DOM.Iterable"], "skipLibCheck":true,"moduleResolution":"bundler","allowImportingTsExtensions":true, "isolatedModules":true,"moduleDetection":"force","noEmit":true,"jsxImportSource":"vue", "strict":true,"paths":{"@/*":["./src/*"]} }, "include":["src/**/*.ts","src/**/*.d.ts","src/**/*.tsx","src/**/*.vue"] } '); zip.file(folder+"env.d.ts","/// "); zip.file(folder+"index.html"," "+slugTitle(pn)+"
"); var hasMain=Object.keys(extracted).some(function(k){return k==="src/main.ts"||k==="main.ts";}); if(!hasMain) zip.file(folder+"src/main.ts","import { createApp } from 'vue' import { createPinia } from 'pinia' import App from './App.vue' import './assets/main.css' const app = createApp(App) app.use(createPinia()) app.mount('#app') "); var hasApp=Object.keys(extracted).some(function(k){return k.indexOf("App.vue")>=0;}); if(!hasApp) zip.file(folder+"src/App.vue"," "); zip.file(folder+"src/assets/main.css","*{margin:0;padding:0;box-sizing:border-box}body{font-family:system-ui,sans-serif;background:#fff;color:#213547} "); zip.file(folder+"src/components/.gitkeep",""); zip.file(folder+"src/views/.gitkeep",""); zip.file(folder+"src/stores/.gitkeep",""); Object.keys(extracted).forEach(function(p){ var fp=p.startsWith("src/")?p:"src/"+p; zip.file(folder+fp,extracted[p]); }); zip.file(folder+"README.md","# "+slugTitle(pn)+" Generated by PantheraHive BOS. ## Setup ```bash npm install npm run dev ``` ## Build ```bash npm run build ``` Open in VS Code or WebStorm. 
"); zip.file(folder+".gitignore","node_modules/ dist/ .env .DS_Store *.local "); } /* --- Angular (v19 standalone) --- */ function buildAngular(zip,folder,app,code,panelTxt){ var pn=pkgName(app); var C=cc(pn); var sel=pn.replace(/_/g,"-"); var extracted=extractCode(panelTxt); zip.file(folder+"package.json",'{ "name": "'+pn+'", "version": "0.0.0", "scripts": { "ng": "ng", "start": "ng serve", "build": "ng build", "test": "ng test" }, "dependencies": { "@angular/animations": "^19.0.0", "@angular/common": "^19.0.0", "@angular/compiler": "^19.0.0", "@angular/core": "^19.0.0", "@angular/forms": "^19.0.0", "@angular/platform-browser": "^19.0.0", "@angular/platform-browser-dynamic": "^19.0.0", "@angular/router": "^19.0.0", "rxjs": "~7.8.0", "tslib": "^2.3.0", "zone.js": "~0.15.0" }, "devDependencies": { "@angular-devkit/build-angular": "^19.0.0", "@angular/cli": "^19.0.0", "@angular/compiler-cli": "^19.0.0", "typescript": "~5.6.0" } } '); zip.file(folder+"angular.json",'{ "$schema": "./node_modules/@angular/cli/lib/config/schema.json", "version": 1, "newProjectRoot": "projects", "projects": { "'+pn+'": { "projectType": "application", "root": "", "sourceRoot": "src", "prefix": "app", "architect": { "build": { "builder": "@angular-devkit/build-angular:application", "options": { "outputPath": "dist/'+pn+'", "index": "src/index.html", "browser": "src/main.ts", "tsConfig": "tsconfig.app.json", "styles": ["src/styles.css"], "scripts": [] } }, "serve": {"builder":"@angular-devkit/build-angular:dev-server","configurations":{"production":{"buildTarget":"'+pn+':build:production"},"development":{"buildTarget":"'+pn+':build:development"}},"defaultConfiguration":"development"} } } } } '); zip.file(folder+"tsconfig.json",'{ "compileOnSave": false, "compilerOptions": 
{"baseUrl":"./","outDir":"./dist/out-tsc","forceConsistentCasingInFileNames":true,"strict":true,"noImplicitOverride":true,"noPropertyAccessFromIndexSignature":true,"noImplicitReturns":true,"noFallthroughCasesInSwitch":true,"paths":{"@/*":["src/*"]},"skipLibCheck":true,"esModuleInterop":true,"sourceMap":true,"declaration":false,"experimentalDecorators":true,"moduleResolution":"bundler","importHelpers":true,"target":"ES2022","module":"ES2022","useDefineForClassFields":false,"lib":["ES2022","dom"]}, "references":[{"path":"./tsconfig.app.json"}] } '); zip.file(folder+"tsconfig.app.json",'{ "extends":"./tsconfig.json", "compilerOptions":{"outDir":"./dist/out-tsc","types":[]}, "files":["src/main.ts"], "include":["src/**/*.d.ts"] } '); zip.file(folder+"src/index.html"," "+slugTitle(pn)+" "); zip.file(folder+"src/main.ts","import { bootstrapApplication } from '@angular/platform-browser'; import { appConfig } from './app/app.config'; import { AppComponent } from './app/app.component'; bootstrapApplication(AppComponent, appConfig) .catch(err => console.error(err)); "); zip.file(folder+"src/styles.css","* { margin: 0; padding: 0; box-sizing: border-box; } body { font-family: system-ui, -apple-system, sans-serif; background: #f9fafb; color: #111827; } "); var hasComp=Object.keys(extracted).some(function(k){return k.indexOf("app.component")>=0;}); if(!hasComp){ zip.file(folder+"src/app/app.component.ts","import { Component } from '@angular/core'; import { RouterOutlet } from '@angular/router'; @Component({ selector: 'app-root', standalone: true, imports: [RouterOutlet], templateUrl: './app.component.html', styleUrl: './app.component.css' }) export class AppComponent { title = '"+pn+"'; } "); zip.file(folder+"src/app/app.component.html","

"+slugTitle(pn)+"

Built with PantheraHive BOS

"); zip.file(folder+"src/app/app.component.css",".app-header{display:flex;flex-direction:column;align-items:center;justify-content:center;min-height:60vh;gap:16px}h1{font-size:2.5rem;font-weight:700;color:#6366f1} "); } zip.file(folder+"src/app/app.config.ts","import { ApplicationConfig, provideZoneChangeDetection } from '@angular/core'; import { provideRouter } from '@angular/router'; import { routes } from './app.routes'; export const appConfig: ApplicationConfig = { providers: [ provideZoneChangeDetection({ eventCoalescing: true }), provideRouter(routes) ] }; "); zip.file(folder+"src/app/app.routes.ts","import { Routes } from '@angular/router'; export const routes: Routes = []; "); Object.keys(extracted).forEach(function(p){ var fp=p.startsWith("src/")?p:"src/"+p; zip.file(folder+fp,extracted[p]); }); zip.file(folder+"README.md","# "+slugTitle(pn)+" Generated by PantheraHive BOS. ## Setup ```bash npm install ng serve # or: npm start ``` ## Build ```bash ng build ``` Open in VS Code with Angular Language Service extension. 
"); zip.file(folder+".gitignore","node_modules/ dist/ .env .DS_Store *.local .angular/ "); } /* --- Python --- */ function buildPython(zip,folder,app,code){ var title=slugTitle(app); var pn=pkgName(app); var src=code.replace(/^```[w]* ?/m,"").replace(/ ?```$/m,"").trim(); var reqMap={"numpy":"numpy","pandas":"pandas","sklearn":"scikit-learn","tensorflow":"tensorflow","torch":"torch","flask":"flask","fastapi":"fastapi","uvicorn":"uvicorn","requests":"requests","sqlalchemy":"sqlalchemy","pydantic":"pydantic","dotenv":"python-dotenv","PIL":"Pillow","cv2":"opencv-python","matplotlib":"matplotlib","seaborn":"seaborn","scipy":"scipy"}; var reqs=[]; Object.keys(reqMap).forEach(function(k){if(src.indexOf("import "+k)>=0||src.indexOf("from "+k)>=0)reqs.push(reqMap[k]);}); var reqsTxt=reqs.length?reqs.join(" "):"# add dependencies here "; zip.file(folder+"main.py",src||"# "+title+" # Generated by PantheraHive BOS print(title+" loaded") "); zip.file(folder+"requirements.txt",reqsTxt); zip.file(folder+".env.example","# Environment variables "); zip.file(folder+"README.md","# "+title+" Generated by PantheraHive BOS. 
## Setup ```bash python3 -m venv .venv source .venv/bin/activate pip install -r requirements.txt ``` ## Run ```bash python main.py ``` "); zip.file(folder+".gitignore",".venv/ __pycache__/ *.pyc .env .DS_Store "); } /* --- Node.js --- */ function buildNode(zip,folder,app,code){ var title=slugTitle(app); var pn=pkgName(app); var src=code.replace(/^```[w]* ?/m,"").replace(/ ?```$/m,"").trim(); var depMap={"mongoose":"^8.0.0","dotenv":"^16.4.5","axios":"^1.7.9","cors":"^2.8.5","bcryptjs":"^2.4.3","jsonwebtoken":"^9.0.2","socket.io":"^4.7.4","uuid":"^9.0.1","zod":"^3.22.4","express":"^4.18.2"}; var deps={}; Object.keys(depMap).forEach(function(k){if(src.indexOf(k)>=0)deps[k]=depMap[k];}); if(!deps["express"])deps["express"]="^4.18.2"; var pkgJson=JSON.stringify({"name":pn,"version":"1.0.0","main":"src/index.js","scripts":{"start":"node src/index.js","dev":"nodemon src/index.js"},"dependencies":deps,"devDependencies":{"nodemon":"^3.0.3"}},null,2)+" "; zip.file(folder+"package.json",pkgJson); var fallback="const express=require("express"); const app=express(); app.use(express.json()); app.get("/",(req,res)=>{ res.json({message:""+title+" API"}); }); const PORT=process.env.PORT||3000; app.listen(PORT,()=>console.log("Server on port "+PORT)); "; zip.file(folder+"src/index.js",src||fallback); zip.file(folder+".env.example","PORT=3000 "); zip.file(folder+".gitignore","node_modules/ .env .DS_Store "); zip.file(folder+"README.md","# "+title+" Generated by PantheraHive BOS. 
## Setup ```bash npm install ``` ## Run ```bash npm run dev ``` "); } /* --- Vanilla HTML --- */ function buildVanillaHtml(zip,folder,app,code){ var title=slugTitle(app); var isFullDoc=code.trim().toLowerCase().indexOf("=0||code.trim().toLowerCase().indexOf("=0; var indexHtml=isFullDoc?code:" "+title+" "+code+" "; zip.file(folder+"index.html",indexHtml); zip.file(folder+"style.css","/* "+title+" — styles */ *{margin:0;padding:0;box-sizing:border-box} body{font-family:system-ui,-apple-system,sans-serif;background:#fff;color:#1a1a2e} "); zip.file(folder+"script.js","/* "+title+" — scripts */ "); zip.file(folder+"assets/.gitkeep",""); zip.file(folder+"README.md","# "+title+" Generated by PantheraHive BOS. ## Open Double-click `index.html` in your browser. Or serve locally: ```bash npx serve . # or python3 -m http.server 3000 ``` "); zip.file(folder+".gitignore",".DS_Store node_modules/ .env "); } /* ===== MAIN ===== */ var sc=document.createElement("script"); sc.src="https://cdnjs.cloudflare.com/ajax/libs/jszip/3.10.1/jszip.min.js"; sc.onerror=function(){ if(lbl)lbl.textContent="Download ZIP"; alert("JSZip load failed — check connection."); }; sc.onload=function(){ var zip=new JSZip(); var base=(_phFname||"output").replace(/.[^.]+$/,""); var app=base.toLowerCase().replace(/[^a-z0-9]+/g,"_").replace(/^_+|_+$/g,"")||"my_app"; var folder=app+"/"; var vc=document.getElementById("panel-content"); var panelTxt=vc?(vc.innerText||vc.textContent||""):""; var lang=detectLang(_phCode,panelTxt); if(_phIsHtml){ buildVanillaHtml(zip,folder,app,_phCode); } else if(lang==="flutter"){ buildFlutter(zip,folder,app,_phCode,panelTxt); } else if(lang==="react-native"){ buildReactNative(zip,folder,app,_phCode,panelTxt); } else if(lang==="swift"){ buildSwift(zip,folder,app,_phCode,panelTxt); } else if(lang==="kotlin"){ buildKotlin(zip,folder,app,_phCode,panelTxt); } else if(lang==="react"){ buildReact(zip,folder,app,_phCode,panelTxt); } else if(lang==="vue"){ 
buildVue(zip,folder,app,_phCode,panelTxt); } else if(lang==="angular"){ buildAngular(zip,folder,app,_phCode,panelTxt); } else if(lang==="python"){ buildPython(zip,folder,app,_phCode); } else if(lang==="node"){ buildNode(zip,folder,app,_phCode); } else { /* Document/content workflow */ var title=app.replace(/_/g," "); var md=_phAll||_phCode||panelTxt||"No content"; zip.file(folder+app+".md",md); var h=""+title+""; h+="

"+title+"

"; var hc=md.replace(/&/g,"&").replace(//g,">"); hc=hc.replace(/^### (.+)$/gm,"

$1

"); hc=hc.replace(/^## (.+)$/gm,"

$1

"); hc=hc.replace(/^# (.+)$/gm,"

$1

"); hc=hc.replace(/**(.+?)**/g,"$1"); hc=hc.replace(/ {2,}/g,"

"); h+="

"+hc+"

Generated by PantheraHive BOS
"; zip.file(folder+app+".html",h); zip.file(folder+"README.md","# "+title+" Generated by PantheraHive BOS. Files: - "+app+".md (Markdown) - "+app+".html (styled HTML) "); } zip.generateAsync({type:"blob"}).then(function(blob){ var a=document.createElement("a"); a.href=URL.createObjectURL(blob); a.download=app+".zip"; a.click(); URL.revokeObjectURL(a.href); if(lbl)lbl.textContent="Download ZIP"; }); }; document.head.appendChild(sc); }function phShare(){navigator.clipboard.writeText(window.location.href).then(function(){var el=document.getElementById("ph-share-lbl");if(el){el.textContent="Link copied!";setTimeout(function(){el.textContent="Copy share link";},2500);}});}function phEmbed(){var runId=window.location.pathname.split("/").pop().replace(".html","");var embedUrl="https://pantherahive.com/embed/"+runId;var code='';navigator.clipboard.writeText(code).then(function(){var el=document.getElementById("ph-embed-lbl");if(el){el.textContent="Embed code copied!";setTimeout(function(){el.textContent="Get Embed Code";},2500);}});}