"+slugTitle(pn)+"
Built with PantheraHive BOS
This crucial step in the Site SEO Auditor workflow is responsible for comparing the newly generated SEO audit data with the historical data stored in your dedicated MongoDB instance (hive_db). The primary objective is to identify, quantify, and categorize all changes, improvements, and regressions across your website's SEO landscape since the last audit. This "diff" process provides a clear, actionable overview of your site's evolving SEO health.
The "hive_db → diff" step is fundamental for a proactive and data-driven SEO strategy. It transforms raw audit data into actionable insights by:
The "hive_db → diff" process involves a systematic comparison of the current audit snapshot against the most recent comprehensive audit report stored in MongoDB.
Upon completion of Step 1 (the headless crawling and initial audit), a complete set of SEO metrics for every crawled page is available. This data includes:
* Meta Title (content, length, uniqueness)
* Meta Description (content, length, uniqueness)
* H1 Tag (presence, content, uniqueness)
* Image Alt Attributes (coverage, missing alts)
* Internal Link Density (count, distribution)
* Canonical Tags (presence, correctness)
* Open Graph Tags (presence, correctness for social sharing)
* Core Web Vitals (LCP, CLS, FID scores and status)
* Structured Data (presence, type, validation status)
* Mobile Viewport (correct meta tag presence)
The system queries your hive_db (MongoDB) to retrieve the most recent SiteAuditReport document that corresponds to your site. This historical report serves as the baseline for comparison. If no prior report exists, the current audit is stored as the initial baseline, and no diff is generated for this first run.
A sophisticated comparison algorithm then performs a detailed "diff" operation at multiple levels:
* New Pages: Identifies any URLs found in the current crawl that were not present in the previous audit.
* Removed Pages: Detects URLs present in the previous audit but no longer found or accessible in the current crawl.
* Existing Pages: For pages present in both audits, a deeper metric-by-metric comparison is initiated.
* Boolean Checks: Has an H1 appeared/disappeared? Is a canonical tag now present/missing?
* Content Checks: Has the meta title/description content changed? Has an image alt text been added/modified?
* Quantitative Checks: Has the internal link count changed? Have Core Web Vitals scores improved or regressed?
* Uniqueness Checks: Are meta titles/descriptions that were previously unique now duplicated, or vice-versa?
The comparison engine categorizes each identified change for clarity and actionability:
The primary output of the "hive_db → diff" step is a structured diff object, which is then integrated directly into the SiteAuditReport document being prepared for storage in MongoDB. This diff object provides a concise summary of all critical changes.
Example of diff output structure (simplified):
{
"auditId": "current_audit_id_xyz",
"previousAuditId": "previous_audit_id_abc",
"summary": {
"totalPagesAudited": 500,
"newIssuesDetected": 25,
"issuesResolved": 15,
"performanceImprovements": 8,
"performanceRegressions": 3,
"pagesWithSignificantChanges": 40
},
"pageChanges": [
{
"url": "https://www.yourdomain.com/product-page-a",
"type": "modified",
"changes": [
{
"metric": "metaTitle",
"status": "changed",
"oldValue": "Old Product Title",
"newValue": "New Optimized Product Title",
"issueType": "content_change"
},
{
"metric": "h1Tag",
"status": "new_issue",
"description": "H1 tag is now missing.",
"issueType": "missing_h1"
},
{
"metric": "coreWebVitals.LCP",
"status": "improved",
"oldValue": "3.5s",
"newValue": "2.2s",
"delta": "-1.3s",
"issueType": "performance_improvement"
}
]
},
{
"url": "https://www.yourdomain.com/blog/new-article",
"type": "new_page",
"initialStatus": {
"metaTitle": "OK",
"h1Tag": "OK",
"imageAltCoverage": "80% (2 missing)",
"coreWebVitals.LCP": "3.1s (Needs Improvement)"
}
},
{
"url": "https://www.yourdomain.com/old-promotion",
"type": "removed_page",
"status": "404 Not Found"
}
],
"globalIssues": [
{
"metric": "duplicateMetaTitles",
"status": "new_issue",
"description": "5 new instances of duplicate meta titles found across the site.",
"affectedPages": ["/page1", "/page2", "/page3"]
}
]
}
This marks the crucial first phase of your Site SEO Auditor workflow. In this step, our headless crawler, powered by Puppeteer, systematically navigates and captures the content of every discoverable page on your website. This foundational crawl ensures that subsequent audit steps have a complete and accurate dataset to analyze.
The primary objective of this step is to meticulously traverse your entire website, identify all unique, crawlable URLs, and collect the fully rendered HTML content for each. This process simulates a real user's browser experience, which is vital for accurately assessing modern, JavaScript-heavy websites.
alt attributes, and internal links that might be dynamically injected. We leverage Puppeteer, a Node.js library, to control a headless Chrome or Chromium instance. This choice is deliberate and offers significant advantages over traditional HTTP request-based crawlers:
The crawl is executed with the following parameters and methodologies to ensure thoroughness and accuracy:
* The crawl initiates from your provided primary domain URL (e.g., https://www.yourdomain.com).
* Before starting the full crawl, an attempt is made to locate and parse your sitemap.xml file(s) (e.g., https://www.yourdomain.com/sitemap.xml). URLs from the sitemap are prioritized and added to the crawl queue.
* Internal Link Following: As each page is visited, Puppeteer extracts all valid internal <a> tags (hyperlinks) within the same domain. These newly discovered URLs are added to a unique queue for subsequent crawling.
* Sitemap Integration: URLs discovered from the sitemap are cross-referenced with those found via internal linking to ensure comprehensive coverage and prevent redundant crawls.
The crawler is strictly confined to the specified domain. It will not follow external links unless explicitly configured otherwise (which is not the default for an SEO site audit).
* Parameters are used to ignore specific URL patterns (e.g., ?utm_source=, _ga=) to prevent crawling duplicate content and reduce crawl time.
* User Agent: The crawler will emulate a standard desktop user agent by default. Optionally, it can simulate a mobile user agent and viewport to assess mobile-specific rendering and content.
* Viewport: A standard desktop viewport (e.g., 1920x1080) is used for rendering unless mobile emulation is activated.
* To prevent overwhelming your server, the crawler operates with controlled concurrency (e.g., X simultaneous browser instances/pages).
* Rate limiting is applied to introduce small delays between page requests, further reducing server load. These parameters are adjustable based on site size and server capacity.
* URL: The canonical URL of the page.
* Status Code: The HTTP status code received (e.g., 200, 301, 404).
* Redirect Chain: If a redirect occurs, the full chain of URLs from the initial request to the final destination.
* Raw HTML: The complete HTML content of the page after all JavaScript has executed and the DOM is stable.
* DOM Snapshot: A representation of the final DOM structure.
* Initial Core Web Vitals: First Contentful Paint (FCP), Largest Contentful Paint (LCP), and Cumulative Layout Shift (CLS) are measured during the page load event.
* Console Logs & Network Requests: Captured for debugging and advanced analysis (e.g., identifying broken resources or JavaScript errors).
Upon completion of the crawl, this step generates a structured dataset that serves as the input for the subsequent audit phases:
* The full, rendered HTML content.
* The HTTP status code and any redirect paths.
* Initial Core Web Vitals metrics (FCP, LCP, CLS values).
* A timestamp of when the page was crawled.
The crawler is designed with resilience in mind:
The crawler honors your robots.txt file, ensuring that pages disallowed for crawling are not accessed. The comprehensive dataset generated in this step will be passed to Step 2: SEO Audit Execution, where each page's content will be systematically analyzed against the 12-point SEO checklist.
This detailed crawl provides the robust and accurate foundation necessary for a truly insightful and actionable SEO audit of your website.
The output of the "hive_db → diff" step is critical for driving the subsequent stages of the Site SEO Auditor workflow:
The diff will be sent to Gemini for generating specific, actionable fixes. This ensures that Gemini focuses its efforts on current problems requiring immediate attention, rather than re-analyzing already resolved or unchanged issues. The full SiteAuditReport, including this detailed diff object, is saved to your hive_db (MongoDB). This creates a persistent record of your site's SEO evolution, allowing for long-term trend analysis and historical lookup. By meticulously comparing audit results, this step empowers you with:
This robust diffing capability ensures that your SEO monitoring is not just a snapshot, but a continuous, intelligent evolution of your site's search engine performance.
This document details the execution of Step 3 in your "Site SEO Auditor" workflow: leveraging Google Gemini's advanced AI capabilities for batch generation of precise, actionable fixes for identified SEO issues.
Following the comprehensive site crawl and audit performed by our headless crawler (Step 2), this crucial phase focuses on transforming identified SEO deficiencies into concrete, executable solutions. Rather than simply reporting problems, we utilize Google Gemini to intelligently analyze each broken element and automatically generate the exact code-level fixes or content recommendations required. This process is executed in a batch, ensuring efficient and scalable remediation across your entire site.
The primary objective of this step is to automate and accelerate the remediation of SEO issues. Manually identifying and crafting fixes for numerous pages and diverse SEO problems can be time-consuming and prone to human error. Gemini's role is to:
For each identified SEO issue, Gemini receives a structured input package containing comprehensive context from the audit. This typically includes:
Example Input for a Missing H1:
URL: https://www.yourdomain.com/blog/article-titleIssue Type: Missing H1 TagSeverity: HighCurrent HTML Snippet: <h2>Article Title</h2><p>...</p>Context: Page content suggests "Article Title" is the main heading.Upon receiving the detailed audit findings, Gemini employs its advanced reasoning and code generation capabilities:
The output from this step is a comprehensive collection of actionable fixes, categorized and structured for ease of implementation. Each fix includes:
Here are examples of the types of fixes generated for common SEO issues:
* Fix: Suggested unique <title> and <meta name="description"> tags, dynamically generated based on page content and target keywords.
* Example: <title>New Unique Product Name - Shop Now | YourBrand</title>
* Fix: HTML snippet for a single, descriptive <h1> tag, derived from the page's primary content.
* Example: <h1>Your Blog Post Title Here</h1>
* Fix: Descriptive alt attribute values for images, generated by analyzing image context and surrounding text.
* Example: <img src="product.jpg" alt="[Descriptive Alt Text for Product Image]">
* Fix: Correct <link rel="canonical" href="[Canonical URL]"> tag, ensuring proper indexing.
* Example: <link rel="canonical" href="https://www.yourdomain.com/preferred-version-of-page">
* Fix: Complete set of Open Graph tags (og:title, og:description, og:image, og:type) tailored for social media sharing.
* Example: <meta property="og:title" content="Page Title for Social">
* Fix: Code snippets for image optimization (e.g., loading="lazy", srcset), font preloading (<link rel="preload">), specifying image/video dimensions, or deferring non-critical JavaScript.
* Example (CLS): <img src="image.jpg" width="600" height="400" alt="description">
* Example (LCP/FID): <link rel="preload" href="/font.woff2" as="font" type="font/woff2" crossorigin>
* Fix: Valid JSON-LD schema markup (e.g., Article, Product, LocalBusiness, FAQPage) generated based on page content, ready to be embedded.
* Example:
<script type="application/ld+json">
{
"@context": "https://schema.org",
"@type": "Article",
"headline": "...",
"author": { "@type": "Person", "name": "..." }
}
</script>
* Fix: Correct <meta name="viewport" content="width=device-width, initial-scale=1"> tag.
All generated fixes are meticulously validated by Gemini to minimize syntax errors and ensure compatibility.
The output from this batch_generate step is directly consumable by your development or content teams. These fixes are then stored within your MongoDB SiteAuditReport as part of the "after" state, allowing for a clear "before/after" diff. This historical record enables you to track the impact of implemented changes over time and measure the effectiveness of the remediation efforts.
The generated fixes are now ready for review and implementation. In the subsequent steps of the workflow, these fixes will be presented in a consolidated report, allowing your team to prioritize and apply them to your website, ultimately enhancing your site's SEO performance.
hive_db → upsert - Data Persistence and ReportingThis step is critical for storing all audit findings, generated fixes, and historical data within your dedicated MongoDB instance (hive_db). It ensures that every aspect of the SEO audit, from individual page issues to overall site performance, is meticulously recorded and available for future analysis and reporting.
The primary goal of the hive_db → upsert operation is to:
SiteAuditReport DocumentAll audit data is consolidated into a single SiteAuditReport document in a dedicated MongoDB collection (e.g., site_audit_reports). This document is designed for comprehensive tracking and easy retrieval.
_id (ObjectId): MongoDB's unique document identifier.auditId (UUID): A unique identifier for this specific audit run.siteUrl (String): The canonical URL of the website audited (e.g., https://www.example.com).timestamp (Date): The exact date and time when this audit was completed.status (String): Current status of the audit (e.g., completed, processing, failed).totalPagesAudited (Number): The total number of unique pages crawled and audited.overallScore (Number): A calculated aggregate score (0-100) reflecting the overall SEO health based on the 12-point checklist.pages (Array of Objects): An array containing detailed audit results for each individual page. * pageUrl (String): The URL of the specific page.
* pageStatus (Number): HTTP status code encountered (e.g., 200, 404, 301).
* seoIssues (Array of Objects): Issues identified on this page that failed the 12-point checklist.
* checklistItem (String): The specific SEO checklist item that failed (e.g., meta_title_uniqueness, h1_presence, image_alt_coverage).
* severity (String): The impact level of the issue (critical, high, medium, low, info).
* description (String): A human-readable explanation of the issue.
* elementLocator (String, Optional): CSS selector or XPath to locate the problematic element on the page (e.g., img[src="broken.jpg"]).
* currentValue (String/Object): The value or state found during the audit (e.g., "" for missing meta description, false for no H1).
* recommendedValue (String/Object): The ideal or target value/state (e.g., "Unique, descriptive title").
* geminiFix (String): The exact, actionable code snippet or instruction generated by Gemini to resolve this specific issue.
* coreWebVitals (Object):
* LCP (Number): Largest Contentful Paint (in ms).
* CLS (Number): Cumulative Layout Shift.
* FID (Number): First Input Delay (in ms).
* metaData (Object):
* title (String): The page's <title> tag content.
* description (String): The page's <meta name="description"> content.
* canonical (String): The page's <link rel="canonical"> URL.
* ogTags (Object): Open Graph tags (e.g., og:title, og:description, og:image).
* h1Presence (Boolean): true if an <h1> tag is present, false otherwise.
* imageAltCoverage (Object):
* totalImages (Number): Total images on the page.
* imagesWithAlt (Number): Images with non-empty alt attributes.
* coveragePercentage (Number): Percentage of images with alt attributes.
* internalLinkDensity (Number): Count of internal links found on the page.
* structuredDataPresence (Boolean): true if any structured data (JSON-LD, Microdata, RDFa) is detected, false otherwise.
* mobileViewport (Boolean): true if a <meta name="viewport"> tag is correctly configured, false otherwise.
diffReport (Object): This object captures the "before/after" changes compared to the previous audit run for the same siteUrl. * previousAuditId (UUID): The auditId of the previous audit run used for comparison.
* newIssues (Array of Objects): Issues identified in this audit that were not present in the previous one. Each object contains pageUrl, checklistItem, description, severity, and geminiFix.
* resolvedIssues (Array of Objects): Issues from the previous audit that are no longer present in this audit. Each object contains pageUrl, checklistItem, description.
* changedMetrics (Array of Objects): Significant changes in key metrics (e.g., overall score, Core Web Vitals).
* metric (String): The metric that changed (e.g., overallScore, LCP, CLS).
* pageUrl (String, Optional): If the metric is page-specific.
* previousValue (Number/String): Value from the previous audit.
* currentValue (Number/String): Value from the current audit.
* change (Number): The difference (current - previous).
nextScheduledRun (Date): The timestamp for the next automatically scheduled audit for this site.The upsert operation works as follows:
SiteAuditReport for the given siteUrl that represents the immediately preceding audit run.diffReport is generated by comparing the current audit's findings against the previous one. This includes identifying new issues, resolved issues, and changes in key metrics. * If no previous report exists (first audit for this site), a new SiteAuditReport document is inserted.
* If a previous report exists, a new SiteAuditReport document is inserted, incorporating the generated diffReport. This maintains a historical record of each audit run rather than overwriting. This approach allows for full historical analysis.
siteUrl, timestamp, and status to ensure fast query performance for reporting and historical lookups.Upon completion of this step, the following value is delivered:
hive_db. The diffReport provides an immediate understanding of what has changed since the last audit. You can quickly see:
* What new issues have emerged.
* Which previous issues have been successfully resolved.
* How key performance metrics (like Core Web Vitals or overall score) have evolved.
hive_db now contains a chronological series of SiteAuditReport documents, enabling long-term trend analysis and demonstrating the ROI of your SEO efforts. This stored data is now ready to be consumed by other services for visualization, notification, or further analysis.
hive_db Conditional Update - Site SEO Auditor Report FinalizationThis final step in the "Site SEO Auditor" workflow is critical for persisting the comprehensive audit results into your dedicated MongoDB database (hive_db). It ensures that all findings, proposed fixes, and crucial historical comparisons are securely stored and readily accessible for your review.
The hive_db → conditional_update operation focuses on:
SiteAuditReportEach successful audit run generates a new, comprehensive SiteAuditReport document in your hive_db MongoDB instance. This document is meticulously structured to capture every detail of the audit.
Key Fields within a SiteAuditReport Document:
audit_id (String, Unique): A unique identifier for each specific audit run.site_url (String): The URL of the website that was audited.audit_date (Date/Timestamp): The exact date and time the audit was completed.audit_trigger (String): Indicates how the audit was initiated (e.g., "scheduled_weekly", "on_demand").current_audit_results (Object): * pages_audited_count (Number): Total number of pages successfully crawled and audited.
* seo_checklist_summary (Object): Aggregated pass/fail status for the 12-point checklist.
* page_details (Array of Objects): Detailed findings for each individual page crawled, including:
* page_url
* meta_title (content, length, uniqueness check)
* meta_description (content, length, uniqueness check)
* h1_presence (boolean, content)
* image_alt_coverage (percentage, list of missing alt texts)
* internal_link_density (count, list of links)
* canonical_tag (present, correct URL)
* open_graph_tags (presence, key properties)
* core_web_vitals (LCP, CLS, FID scores)
* structured_data_presence (boolean, type detected)
* mobile_viewport (presence, configuration)
* broken_elements (Array of Objects: type, selector, issue description)
gemini_fixes_proposed (Object): * summary (String): A high-level overview of critical issues and recommended fixes.
* page_specific_fixes (Array of Objects): Detailed, actionable fixes for specific broken elements on individual pages, generated by Gemini. Each object will include:
* page_url
* element_type
* issue_description
* recommended_fix (exact code or content change)
* severity
previous_audit_results_ref (String/Object ID, Optional): A reference to the audit_id or a summarized version of the immediately preceding audit report for the same site_url. This is crucial for the diff. audit_diff_summary (Object, Optional): new_issues_identified (Array): List of SEO issues present in the current_audit_results that were not present in the previous_audit_results_ref.
* issues_resolved (Array): List of SEO issues present in the previous_audit_results_ref that are no longer present in the current_audit_results.
* performance_changes (Object): Summary of changes in Core Web Vitals (e.g., LCP improved by X ms).
* overall_score_change (Number): A calculated change in an aggregated SEO health score (if applicable).
The "conditional_update" aspect of this step ensures that each new audit report is correctly contextualized:
SiteAuditReport, the system first queries hive_db to find the most recently completed SiteAuditReport for the site_url being audited.previous_audit_results_ref Population: If a prior report is found: The previous_audit_results_ref field in the new* SiteAuditReport document is populated with a reference to this immediately preceding audit. A detailed audit_diff_summary is then calculated by comparing current_audit_results with the retrieved previous results.
* If no prior report exists (first audit): The previous_audit_results_ref and audit_diff_summary fields will be null or empty, indicating this is the baseline audit.
SiteAuditReport document, complete with current findings, Gemini fixes, and (if applicable) a reference to the previous state and a diff summary, is then inserted as a new document into the SiteAuditReports collection in hive_db. This ensures a complete, immutable history of all audit runs.The detailed storage in hive_db empowers you with several critical capabilities:
hive_db is perfectly suited for integration with reporting tools and custom dashboards, providing a visual overview of your site's SEO performance.Your SiteAuditReport documents are now securely stored. You can:
hive_db MongoDB instance to retrieve the raw SiteAuditReport documents for custom analysis or integration with your internal systems. This final step completes a powerful workflow, transforming raw crawl data into actionable intelligence and a clear historical record of your website's SEO health.
No content
";}fr.dataset.loaded="1";}}}function phCopyCode(){navigator.clipboard.writeText(_phCode).then(function(){var b=document.getElementById("tab-code");if(b){var o=b.innerHTML;b.innerHTML=' Copied!';setTimeout(function(){b.innerHTML=o;},2000);}});}function phCopyAll(){var txt=_phAll;if(!txt){var vc=document.getElementById("panel-content");if(vc)txt=vc.innerText||vc.textContent||"";}navigator.clipboard.writeText(txt).then(function(){alert("Content copied to clipboard!");});}function phDownload(){var content=_phCode||_phAll;if(!content){var vc=document.getElementById("panel-content");if(vc)content=vc.innerText||vc.textContent||"";}if(!content){alert("No content to download.");return;}var fn=_phFname;if(!_phCode&&fn.endsWith(".txt"))fn=fn.replace(/\.txt$/,".md");var a=document.createElement("a");a.href="data:text/plain;charset=utf-8,"+encodeURIComponent(content);a.download=fn;a.click();}function phDownloadZip(){ var lbl=document.getElementById("ph-zip-lbl"); if(lbl)lbl.textContent="Preparing…"; /* ===== HELPERS ===== */ function cc(s){ return s.replace(/[_-s]+([a-z])/g,function(m,c){return c.toUpperCase();}) .replace(/^[a-z]/,function(m){return m.toUpperCase();}); } function pkgName(app){ return app.toLowerCase().replace(/[^a-z0-9]+/g,"_").replace(/^_+|_+$/g,"")||"my_app"; } function slugTitle(app){ return app.replace(/_/g," "); } /* Generic code block extractor. Finds marker comments like: // lib/main.dart or # lib/main.dart or ## lib/main.dart and collects lines until the next marker. Also strips markdown fences (```lang ... ```) from each block. 
*/ function extractFiles(txt, pathRe){ var files={}, cur=null, buf=[]; function flush(){ if(cur&&buf.length){ files[cur]=buf.join(" ").trim(); } } txt.split(" ").forEach(function(line){ var m=line.trim().match(pathRe); if(m){ flush(); cur=m[1]; buf=[]; return; } if(cur) buf.push(line); }); flush(); // Strip ```...``` fences from each file Object.keys(files).forEach(function(k){ files[k]=files[k].replace(/^```[a-z]* ?/,"").replace(/ ?```$/,"").trim(); }); return files; } /* General path extractor that covers most languages */ function extractCode(txt){ var re=/^(?://|#|##)s*((?:lib|src|test|tests|Sources?|app|components?|screens?|views?|hooks?|routes?|store|services?|models?|pages?)/[w/-.]+.w+|pubspec.yaml|Package.swift|angular.json|babel.config.(?:js|ts)|vite.config.(?:js|ts)|tsconfig.(?:json|app.json)|app.json|App.(?:tsx|jsx|vue|kt|swift)|MainActivity(?:.kt)?|ContentView.swift)/i; return extractFiles(txt, re); } /* Detect language from combined code+panel text */ function detectLang(code, panel){ var t=(code+" "+panel).toLowerCase(); if(t.indexOf("import 'package:flutter")>=0||t.indexOf('import "package:flutter')>=0) return "flutter"; if(t.indexOf("statelesswidget")>=0||t.indexOf("statefulwidget")>=0) return "flutter"; if((t.indexOf(".dart")>=0)&&(t.indexOf("pubspec")>=0||t.indexOf("flutter:")>=0)) return "flutter"; if(t.indexOf("react-native")>=0||t.indexOf("react_native")>=0) return "react-native"; if(t.indexOf("stylesheet.create")>=0||t.indexOf("view, text, touchableopacity")>=0) return "react-native"; if(t.indexOf("expo(")>=0||t.indexOf(""expo":")>=0||t.indexOf("from 'expo")>=0) return "react-native"; if(t.indexOf("import swiftui")>=0||t.indexOf("import uikit")>=0) return "swift"; if(t.indexOf(".swift")>=0&&(t.indexOf("func body")>=0||t.indexOf("@main")>=0||t.indexOf("var body: some view")>=0)) return "swift"; if(t.indexOf("import android.")>=0||t.indexOf("package com.example")>=0) return "kotlin"; if(t.indexOf("@composable")>=0||t.indexOf("fun 
mainactivity")>=0||(t.indexOf(".kt")>=0&&t.indexOf("androidx")>=0)) return "kotlin"; if(t.indexOf("@ngmodule")>=0||t.indexOf("@component")>=0) return "angular"; if(t.indexOf("angular.json")>=0||t.indexOf("from '@angular")>=0) return "angular"; if(t.indexOf(".vue")>=0||t.indexOf("")>=0||t.indexOf("definecomponent")>=0) return "vue"; if(t.indexOf("createapp(")>=0&&t.indexOf("vue")>=0) return "vue"; if(t.indexOf("import react")>=0||t.indexOf("reactdom")>=0||(t.indexOf("jsx.element")>=0)) return "react"; if((t.indexOf("usestate")>=0||t.indexOf("useeffect")>=0)&&t.indexOf("from 'react'")>=0) return "react"; if(t.indexOf(".dart")>=0) return "flutter"; if(t.indexOf(".kt")>=0) return "kotlin"; if(t.indexOf(".swift")>=0) return "swift"; if(t.indexOf("import numpy")>=0||t.indexOf("import pandas")>=0||t.indexOf("#!/usr/bin/env python")>=0) return "python"; if(t.indexOf("const express")>=0||t.indexOf("require('express')")>=0||t.indexOf("app.listen(")>=0) return "node"; return "generic"; } /* ===== PLATFORM BUILDERS ===== */ /* --- Flutter --- */ function buildFlutter(zip,folder,app,code,panelTxt){ var pn=pkgName(app); var all=code+" "+panelTxt; var extracted=extractCode(panelTxt); var treeFiles=(code.match(/[w_]+.dart/g)||[]).filter(function(f,i,a){return a.indexOf(f)===i;}); if(!extracted["lib/main.dart"]) extracted["lib/main.dart"]="import 'package:flutter/material.dart'; void main()=>runApp(const "+cc(pn)+"App()); class "+cc(pn)+"App extends StatelessWidget{ const "+cc(pn)+"App({super.key}); @override Widget build(BuildContext context)=>MaterialApp( title: '"+slugTitle(pn)+"', debugShowCheckedModeBanner: false, theme: ThemeData( colorScheme: ColorScheme.fromSeed(seedColor: Colors.deepPurple), useMaterial3: true, ), home: Scaffold(appBar: AppBar(title: const Text('"+slugTitle(pn)+"')), body: const Center(child: Text('Welcome!'))), ); } "; // pubspec.yaml — sniff deps var deps=[" flutter: sdk: flutter"]; var devDeps=[" flutter_test: sdk: flutter"," flutter_lints: ^5.0.0"]; 
var knownPkg={"go_router":"^14.0.0","flutter_riverpod":"^2.6.1","riverpod_annotation":"^2.6.1","shared_preferences":"^2.3.4","http":"^1.2.2","dio":"^5.7.0","firebase_core":"^3.12.1","firebase_auth":"^5.5.1","cloud_firestore":"^5.6.5","get_it":"^8.0.3","flutter_bloc":"^9.1.0","provider":"^6.1.2","cached_network_image":"^3.4.1","url_launcher":"^6.3.1","intl":"^0.19.0","google_fonts":"^6.2.1","equatable":"^2.0.7","freezed_annotation":"^2.4.4","json_annotation":"^4.9.0","path_provider":"^2.1.5","image_picker":"^1.1.2","uuid":"^4.4.2","flutter_svg":"^2.0.17","lottie":"^3.2.0","hive_flutter":"^1.1.0"}; var knownDev={"build_runner":"^2.4.14","freezed":"^2.5.7","json_serializable":"^6.8.0","riverpod_generator":"^2.6.3","hive_generator":"^2.0.1"}; Object.keys(knownPkg).forEach(function(p){if(all.indexOf("package:"+p)>=0)deps.push(" "+p+": "+knownPkg[p]);}); Object.keys(knownDev).forEach(function(p){if(all.indexOf(p)>=0)devDeps.push(" "+p+": "+knownDev[p]);}); zip.file(folder+"pubspec.yaml","name: "+pn+" description: Flutter app — PantheraHive BOS. version: 1.0.0+1 environment: sdk: '>=3.3.0 <4.0.0' dependencies: "+deps.join(" ")+" dev_dependencies: "+devDeps.join(" ")+" flutter: uses-material-design: true assets: - assets/images/ "); zip.file(folder+"analysis_options.yaml","include: package:flutter_lints/flutter.yaml "); zip.file(folder+".gitignore",".dart_tool/ .flutter-plugins .flutter-plugins-dependencies /build/ .pub-cache/ *.g.dart *.freezed.dart .idea/ .vscode/ "); zip.file(folder+"README.md","# "+slugTitle(pn)+" Generated by PantheraHive BOS. 
## Setup ```bash flutter pub get flutter run ``` ## Build ```bash flutter build apk # Android flutter build ipa # iOS flutter build web # Web ``` "); zip.file(folder+"assets/images/.gitkeep",""); Object.keys(extracted).forEach(function(p){ zip.file(folder+p,extracted[p]); }); treeFiles.forEach(function(fn){ if(fn.indexOf("_test.dart")>=0) return; var found=Object.keys(extracted).some(function(p){return p.endsWith("/"+fn)||p===fn;}); if(!found){ var path="lib/"+fn; var cls=cc(fn.replace(".dart","")); var isScr=fn.indexOf("screen")>=0||fn.indexOf("page")>=0||fn.indexOf("view")>=0; var stub=isScr?"import 'package:flutter/material.dart'; class "+cls+" extends StatelessWidget{ const "+cls+"({super.key}); @override Widget build(BuildContext ctx)=>Scaffold( appBar: AppBar(title: const Text('"+fn.replace(/_/g," ").replace(".dart","")+"')), body: const Center(child: Text('"+cls+" — TODO')), ); } ":"// TODO: implement class "+cls+"{ // "+fn+" } "; zip.file(folder+path,stub); } }); } /* --- React Native (Expo) --- */ function buildReactNative(zip,folder,app,code,panelTxt){ var pn=pkgName(app); var extracted=extractCode(panelTxt); var allT=code+" "+panelTxt; var usesTS=allT.indexOf(".tsx")>=0||allT.indexOf(": React.")>=0||allT.indexOf("interface ")>=0; var ext=usesTS?"tsx":"jsx"; zip.file(folder+"package.json",'{ "name": "'+pn+'", "version": "1.0.0", "main": "expo-router/entry", "scripts": { "start": "expo start", "android": "expo run:android", "ios": "expo run:ios", "web": "expo start --web" }, "dependencies": { "expo": "~52.0.0", "expo-router": "~4.0.0", "expo-status-bar": "~2.0.1", "expo-font": "~13.0.1", "react": "18.3.1", "react-native": "0.76.7", "react-native-safe-area-context": "4.12.0", "react-native-screens": "~4.3.0", "@react-navigation/native": "^7.0.14" }, "devDependencies": { "@babel/core": "^7.25.0", "typescript": "~5.3.3", "@types/react": "~18.3.12" } } '); zip.file(folder+"app.json",'{ "expo": { "name": "'+slugTitle(pn)+'", "slug": "'+pn+'", "version": 
"1.0.0", "orientation": "portrait", "scheme": "'+pn+'", "platforms": ["ios","android","web"], "icon": "./assets/icon.png", "splash": {"image": "./assets/splash.png","resizeMode":"contain","backgroundColor":"#ffffff"}, "ios": {"supportsTablet": true}, "android": {"package": "com.example.'+pn+'"}, "newArchEnabled": true } } '); zip.file(folder+"tsconfig.json",'{ "extends": "expo/tsconfig.base", "compilerOptions": { "strict": true, "paths": {"@/*": ["./src/*"]} } } '); zip.file(folder+"babel.config.js","module.exports=function(api){ api.cache(true); return {presets:['babel-preset-expo']}; }; "); var hasApp=Object.keys(extracted).some(function(k){return k.toLowerCase().indexOf("app.")>=0;}); if(!hasApp) zip.file(folder+"App."+ext,"import React from 'react'; import {View,Text,StyleSheet,StatusBar,SafeAreaView} from 'react-native'; export default function App(){ return(Built with PantheraHive BOS
Built with PantheraHive BOS
Built with PantheraHive BOS
"); h+="
"+hc+"