// Shared content data: blog posts, whitepapers, glossary entries, competitor matrix rows.

const BLOG_POSTS = [
  {
    slug: 'geo-is-the-new-seo',
    title: 'GEO is the new SEO — and the measurement rules just changed',
    excerpt: 'For two decades, rank on page one was the proxy for attention. In 2026, rank inside the answer is the only metric that maps to pipeline. Here\'s how to re-ground your measurement stack.',
    category: 'Strategy',
    author: 'Josh Bernstein',
    role: 'Co-Founder',
    date: 'April 14, 2026',
    read: '9 min',
    glyph: 'GEO',
    color: '#F08A2B',
    featured: true,
  },
  {
    slug: 'why-chatgpt-keeps-recommending-your-competitor',
    title: 'Why ChatGPT keeps recommending your competitor (and how to fix it in 14 days)',
    excerpt: 'A reverse-engineered playbook from three beta customers who moved from rank 06 to rank 01 in under a month. Page rewrites, citation seeding, and the Reddit thread that did 70% of the work.',
    category: 'Playbook',
    author: 'Tyler Truffi',
    role: 'Co-Founder',
    date: 'April 04, 2026',
    read: '12 min',
    glyph: '06→01',
    color: '#1A6B6B',
  },
  {
    slug: 'sentiment-scoring-under-the-hood',
    title: 'How we score sentiment across four models (without making it up)',
    excerpt: 'A look at our rubric, our inter-model calibration, and why "+0.42" means something specific — not a vibe. Plus the prompts we use to grade the graders.',
    category: 'Engineering',
    author: 'Tyler Truffi',
    role: 'Co-Founder',
    date: 'March 22, 2026',
    read: '14 min',
    glyph: '+.42',
    color: '#3DA874',
  },
  {
    slug: 'the-answer-is-the-funnel',
    title: 'The answer is the funnel now',
    excerpt: '58% of searches end without a click. If the answer is the funnel, then the content that ends up inside the answer is the only content that compounds.',
    category: 'Strategy',
    author: 'Josh Bernstein',
    role: 'Co-Founder',
    date: 'March 11, 2026',
    read: '7 min',
    glyph: '58%',
    color: '#4A8FBD',
  },
  {
    slug: 'reddit-is-training-data',
    title: 'Reddit is training data. Act like it.',
    excerpt: 'Why a pinned thread in r/SaaS moved our rank on 11 prompts in three weeks — and the mechanics of seeding credible UGC without getting burned as spam.',
    category: 'Playbook',
    author: 'Josh Bernstein',
    role: 'Co-Founder',
    date: 'February 28, 2026',
    read: '11 min',
    glyph: 'R/',
    color: '#D14F3C',
  },
  {
    slug: 'five-prompts-every-brand-should-track',
    title: 'The five prompts every brand should track (before the big ones)',
    excerpt: 'Category, comparison, objection, use-case, and alternative — the prompt taxonomy that uncovers every competitive threat in under a week.',
    category: 'Playbook',
    author: 'Tyler Truffi',
    role: 'Co-Founder',
    date: 'February 12, 2026',
    read: '6 min',
    glyph: '5P',
    color: '#8B6BAE',
  },
  {
    slug: 'state-of-llm-search-q1-2026',
    title: 'State of LLM search — Q1 2026',
    excerpt: 'Our quarterly read of the four-engine landscape: who is gaining share of voice, where sentiment is softening, and what categories are most contested right now.',
    category: 'Research',
    author: 'Tyler Truffi',
    role: 'Co-Founder',
    date: 'January 30, 2026',
    read: '18 min',
    glyph: 'Q1',
    color: '#1E1A14',
  },
  {
    slug: 'agencies-running-aio-audits',
    title: 'The agency playbook for running AI-visibility audits',
    excerpt: 'How three founder-led agencies productized AIO audits into $4–8k engagements, what they sell, and the four-deliverable template you can lift today.',
    category: 'Agencies',
    author: 'Josh Bernstein',
    role: 'Co-Founder',
    date: 'January 15, 2026',
    read: '10 min',
    glyph: 'A+',
    color: '#F08A2B',
  },
];
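
// Illustrative consumer sketch (an assumption, not part of the shipped page
// code): one way a listing page could use BLOG_POSTS is to pull the hero card
// out via the `featured` flag and bucket the rest by category. Assumes at most
// one post is flagged featured, falling back to the newest (first) post.
function splitBlogPosts(posts) {
  const featured = posts.find((p) => p.featured) || posts[0];
  const byCategory = {};
  for (const post of posts) {
    if (post === featured) continue; // the hero card renders separately
    if (!byCategory[post.category]) byCategory[post.category] = [];
    byCategory[post.category].push(post);
  }
  return { featured, byCategory };
}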

const WHITEPAPERS = [
  {
    slug: 'measuring-the-answer-layer',
    title: 'Measuring the Answer Layer',
    subtitle: 'A 42-page framework for operationalizing GEO inside a marketing org',
    excerpt: 'The first end-to-end framework for measuring brand presence inside AI answers. Data model, scoring rubric, reporting cadence, cross-functional ownership map.',
    pages: 42,
    date: 'April 2026',
    category: 'Framework',
    glyph: 'MAL',
    color: '#1E1A14',
    featured: true,
  },
  {
    slug: 'citation-economy',
    title: 'The Citation Economy',
    subtitle: 'Where the four AI engines actually pull their answers from — and how to earn a slot',
    excerpt: 'We analyzed 1.2M citations across ChatGPT, Claude, Gemini, and Perplexity. 68% of all citations point to just 400 domains. Here\'s the ranked list and what it means for your content plan.',
    pages: 28,
    date: 'March 2026',
    category: 'Research',
    glyph: 'CIT',
    color: '#3DA874',
  },
  {
    slug: 'rewriting-for-llms',
    title: 'Rewriting for LLMs',
    subtitle: 'A technical guide to restructuring your content for the answer layer',
    excerpt: 'Concrete before/afters from twelve page rewrites that moved rank. Covers schema, entity clarity, competitive positioning language, and the "definition-first" pattern that wins list prompts.',
    pages: 34,
    date: 'February 2026',
    category: 'Playbook',
    glyph: 'RWL',
    color: '#F08A2B',
  },
  {
    slug: 'enterprise-governance-aio',
    title: 'Governing AI Visibility at Scale',
    subtitle: 'How four Fortune 500 marketing orgs operationalized LLM monitoring',
    excerpt: 'Workflow, escalation paths, vendor selection criteria, and the org-chart changes that made it stick. Includes a vendor RFP template you can adapt.',
    pages: 38,
    date: 'January 2026',
    category: 'Enterprise',
    glyph: 'GOV',
    color: '#4A8FBD',
  },
];
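
// Illustrative sketch: detail routes resolve items by `slug`, so a Map index
// makes lookup O(1). Assumes slugs are unique within a collection; the guard
// throws during development if that assumption ever breaks.
// e.g. indexBySlug(WHITEPAPERS, 'whitepaper').get('citation-economy')
function indexBySlug(items, label) {
  const index = new Map();
  for (const item of items) {
    if (index.has(item.slug)) {
      throw new Error('Duplicate ' + label + ' slug: ' + item.slug);
    }
    index.set(item.slug, item);
  }
  return index;
}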

const GLOSSARY = [
  { term: 'AI-first content', aka: null, def: 'Content written primarily to perform inside generative answers rather than to rank in the ten blue links. Structurally it leads with a tight definition, names entities explicitly, and answers a clear question within the first 50 words.', related: ['Definition-first pattern', 'GEO'] },
  { term: 'AI Overview', aka: 'SGE · AIO', def: 'Google\'s synthesized answer card that appears above the ten blue links. In 2026 it appears on roughly 58% of informational queries in English-language US traffic.', related: ['Answer Layer'] },
  { term: 'AI Visibility', aka: 'Share of voice · LLM presence', def: 'The frequency with which a brand is cited, mentioned, or recommended inside generative answers across engines. Promptive\'s default scoring dimension.', related: ['Position', 'Visibility Score'] },
  { term: 'Alternative prompt', aka: 'Alt prompt', def: 'A prompt phrased as "alternatives to [product]" or "competitors of [brand]." Brands that don\'t appear in their own alternative-prompt results have a blind spot that\'s usually recoverable.', related: ['Prompt Taxonomy', 'Competitive displacement'] },
  { term: 'Answer eligibility', aka: null, def: 'Whether a brand\'s content meets the implicit bar an engine sets for appearing in a given answer type. Eligibility depends on entity clarity, source trust, and topical depth — not on backlinks.', related: ['Domain Authority (LLM)', 'Citation'] },
  { term: 'Answer Layer', aka: 'AI Overview · SGE · AIO', def: 'The slab of generative content that AI engines assemble at the top of a results page. It\'s increasingly where purchase consideration begins and ends — and the unit most brands now try to rank inside of.', related: ['GEO', 'AI Visibility', 'Citation'] },
  { term: 'Answer rank fluctuation', aka: null, def: 'Week-over-week variance in where a brand appears within an answer. High fluctuation without a clear cause is a common signal that competing content has been recently published or seeded.', related: ['Rank flip', 'Position'] },
  { term: 'Attribution window', aka: null, def: 'The lag between a content or seeding action and its measurable effect on AI visibility. Typically 2–8 weeks depending on engine crawl frequency and how quickly the content earns inbound links.', related: ['Source seeding', 'Training signal'] },
  { term: 'Backfill', aka: null, def: 'Historical data pulled for a brand before it was first tracked. Useful to establish a pre-launch baseline or recover after onboarding a new competitor set.', related: [] },
  { term: 'Benchmark prompt', aka: null, def: 'A stable, unchanging prompt used as a control to measure how a brand\'s position changes over time independent of query drift. Every prompt set should include at least three.', related: ['Prompt Set', 'Rank flip'] },
  { term: 'Brand authority (LLM)', aka: null, def: 'The degree to which a model treats a brand as a definitive source in its category. Expressed in practice as how often the brand is mentioned unprompted in category-level answers.', related: ['Topic authority', 'Domain Authority (LLM)'] },
  { term: 'Brand entity', aka: null, def: 'The discrete, machine-readable identity a model holds for a company — including its name variants, product names, founding context, and competitive set. Poorly defined entities hallucinate more.', related: ['Named entity recognition', 'Hallucination'] },
  { term: 'Brand Mention', aka: 'LLM mention', def: 'A single instance of a brand appearing inside a generative answer. Mentions are scored for position, sentiment, and whether the engine cited a source URL to justify it.', related: ['Position', 'Sentiment score'] },
  { term: 'Category prompt', aka: null, def: 'A prompt framed around a space, not a product — e.g. "best tools for AI visibility tracking." Category prompts reveal which brands own the mental model for the whole category in a given engine.', related: ['Prompt Taxonomy', 'Share of Voice'] },
  { term: 'Citation', aka: 'Grounding source', def: 'A URL an AI engine references when producing an answer. Citations are the closest proxy to "what the engine trusts" and are the leverage point for anyone trying to influence what it says.', related: ['Domain Authority (LLM)'] },
  { term: 'Citation velocity', aka: null, def: 'The rate at which a domain accumulates new citations across a tracked prompt set over a rolling window. Accelerating citation velocity is an early indicator of a competitor gaining ground.', related: ['Citation', 'Mention velocity'] },
  { term: 'Comparison prompt', aka: null, def: 'A prompt that pits two or more named products against each other — e.g. "Promptive vs. Profound." High-intent traffic; also the prompt type most likely to surface inaccurate competitive framing.', related: ['Prompt Taxonomy', 'Competitive displacement'] },
  { term: 'Competitive displacement', aka: null, def: 'When a competitor\'s brand mention pushes yours down in a list answer or replaces you entirely in a recommendation. The most common alert reason in Promptive dashboards.', related: ['Latent competitor', 'Rank flip'] },
  { term: 'Context window', aka: null, def: 'The finite token budget inside which a model sees its retrieved sources and builds an answer. Content that is too long to fit gets truncated — shorter, denser pages win at inference time.', related: ['LLM-readable content'] },
  { term: 'Corpus coverage', aka: null, def: 'The breadth of a domain\'s representation inside a model\'s training data. Higher coverage means the model has more signal about a brand and is less likely to hallucinate or omit it.', related: ['Training-data window', 'Training signal'] },
  { term: 'Dark mention', aka: null, def: 'A brand reference inside a generative answer that carries no citation URL. The engine is drawing from training data, not live retrieval — harder to influence and harder to trace.', related: ['Grounded answer', 'Citation'] },
  { term: 'Definition-first pattern', aka: null, def: 'A content structure where the first sentence answers "what is X" before any context or argument. Engines overwhelmingly pull from definition-first copy when assembling category answers.', related: ['AI-first content', 'GEO'] },
  { term: 'Direct citation', aka: null, def: 'A citation where the engine links directly to a brand\'s own domain. More valuable than a third-party citation because it signals the engine trusts the primary source, not just coverage of the brand.', related: ['Citation', 'Source seeding'] },
  { term: 'Domain Authority (LLM)', aka: 'Source trust', def: 'Our internal weight for how often a given engine cites a domain across all prompts in a category. Unlike SEO DA, it is engine-specific and updates weekly.', related: ['Citation'] },
  { term: 'Engine', aka: 'LLM · AI search', def: 'One of the four systems Promptive samples: ChatGPT, Claude, Gemini, Perplexity. Each has its own prompt behavior, citation policy, and reasoning style.', related: [] },
  { term: 'Engine drift', aka: null, def: 'A systematic shift in how one engine answers a class of prompts — often caused by a model update, a policy change, or a new data partnership. Requires recalibrating baselines.', related: ['Model drift', 'Engine weighting'] },
  { term: 'Engine weighting', aka: null, def: 'The multiplier applied to each engine\'s signal when computing aggregate scores. Weights can be adjusted in Promptive to reflect the traffic mix that matters to your business.', related: ['Visibility Score', 'Engine'] },
  { term: 'Entity disambiguation', aka: null, def: 'The process of ensuring models can distinguish your brand from identically named companies, people, or concepts. Critical for brands with generic or shared names.', related: ['Brand entity', 'Named entity recognition'] },
  { term: 'First-mention bias', aka: null, def: 'The empirical tendency for the first brand listed in an AI answer to receive disproportionate clicks and recall. Drives the outsized weight placed on position 1 in Promptive\'s scoring.', related: ['Position', 'AI Visibility'] },
  { term: 'Forum signal', aka: 'Community signal', def: 'Mentions, threads, and discussions on Reddit, Quora, Stack Overflow, and niche forums that AI engines consistently treat as credibility evidence. One of the highest-ROI seeding surfaces in GEO.', related: ['Source seeding', 'UGC signal'] },
  { term: 'Frequency score', aka: null, def: 'How often a brand appears across the full prompt set — without weighting for position or sentiment. A brand can have high frequency and poor position if it\'s consistently mentioned late in lists.', related: ['AI Visibility', 'Position'] },
  { term: 'GEO', aka: 'Generative Engine Optimization', def: 'The practice of shaping content, pages, and off-site mentions so that AI engines reference you well. The LLM-era analog of SEO, with a different reward function.', related: ['Answer Layer', 'AI Visibility'] },
  { term: 'GEO audit', aka: 'AI visibility audit', def: 'A structured review of a brand\'s current position, citation footprint, and sentiment across a defined prompt set. Standard deliverable: current baseline, gap analysis, and a prioritized action list.', related: ['GEO', 'Visibility Score'] },
  { term: 'Grounded answer', aka: null, def: 'An AI response that cites at least one source URL. Grounded answers are heavier, slower, and harder to influence — but they\'re also where high-intent queries live.', related: ['Citation'] },
  { term: 'Grounding confidence', aka: null, def: 'An informal measure of how consistently an engine cites sources for a given prompt type. High-confidence topics (product comparisons, factual questions) are more citation-heavy and more gameable.', related: ['Grounded answer', 'Citation'] },
  { term: 'Hallucination', aka: null, def: 'An answer the model presents confidently but that isn\'t supported by a citation or by its training distribution. Brand hallucinations (wrong pricing, discontinued products) are a tracked alert type.', related: ['Brand entity'] },
  { term: 'High-intent prompt', aka: null, def: 'A prompt that signals purchase or evaluation intent — "best CRM for mid-market SaaS," "pricing for X vs. Y." These prompts are weighted most heavily because they are closest to pipeline.', related: ['Recommendation prompt', 'Prompt Taxonomy'] },
  { term: 'Inference-time citation', aka: null, def: 'A citation the engine retrieves live at query time via RAG or web search, as opposed to knowledge baked into training weights. Inference-time citations are faster to influence with new content.', related: ['Citation', 'RAG'] },
  { term: 'Intent coverage', aka: null, def: 'The percentage of a buyer\'s evaluation journey — awareness, comparison, objection, alternative, decision — that your tracked prompt set covers. Gaps in intent coverage leave blind spots.', related: ['Prompt Taxonomy', 'Prompt Set'] },
  { term: 'Intent mapping', aka: null, def: 'The exercise of pairing buyer journey stages (awareness, comparison, objection) to prompt types, then verifying you have coverage at each stage. Output feeds directly into prompt set design.', related: ['Prompt Set', 'High-intent prompt'] },
  { term: 'Knowledge cutoff', aka: 'Training cutoff', def: 'The date after which no new information was incorporated into a model\'s base weights. Brands that launched or pivoted after the cutoff are invisible to that model unless they earn inference-time citations.', related: ['Training-data window', 'Inference-time citation'] },
  { term: 'Knowledge graph entity', aka: null, def: 'A structured node representing a brand, person, or concept inside Google\'s or Wikidata\'s knowledge graph. Strong entity presence in these graphs correlates with more consistent AI Overview mentions.', related: ['Brand entity', 'Entity disambiguation'] },
  { term: 'Latent competitor', aka: null, def: 'A brand that doesn\'t appear on your radar in traditional SEO but consistently occupies top positions inside AI answers for your key prompts. Promptive surfaces these via competitor diff reporting.', related: ['Competitive displacement', 'Share of Voice'] },
  { term: 'LLM-readable content', aka: null, def: 'Pages structured so a language model can reliably extract a clear claim, entity, and supporting evidence within a short passage. Short paragraphs, explicit subject labeling, and schema markup all help.', related: ['Definition-first pattern', 'AI-first content'] },
  { term: 'Long-tail prompt', aka: null, def: 'A highly specific prompt with lower monthly search volume but higher conversion intent. Long-tail prompts often have less competition in AI answers and are a faster path to rank-1 visibility.', related: ['Prompt Set', 'High-intent prompt'] },
  { term: 'Mention velocity', aka: null, def: 'The rate of change in a brand\'s mention count week-over-week. Sudden spikes or drops are typically the first signal of a PR event, a competitor\'s launch, or an algorithm update.', related: ['Brand Mention', 'Velocity alert'] },
  { term: 'Model drift', aka: null, def: 'A gradual, unannounced change in a model\'s behavior — answer style, citation habits, brand preferences — that isn\'t tied to a versioned release. Detectable only through consistent time-series sampling.', related: ['Engine drift', 'Refresh cadence'] },
  { term: 'Multi-brand workspace', aka: null, def: 'A Promptive workspace configured to track multiple brands under one account — typical for agencies managing several clients. Each brand gets isolated prompt sets, scores, and reports.', related: ['White-label dashboard'] },
  { term: 'Multi-engine consensus', aka: null, def: 'Agreement across all four sampled engines on a brand\'s position or sentiment. When all four agree, the signal is strong; divergence between engines usually means one has stale training data.', related: ['Engine', 'Visibility Score'] },
  { term: 'Named entity recognition', aka: 'NER', def: 'The model\'s ability to identify and classify proper nouns (brands, products, people) inside a passage. Clean, consistent brand naming across your content improves NER accuracy and reduces hallucinations.', related: ['Brand entity', 'Entity disambiguation'] },
  { term: 'Negative sentiment drift', aka: null, def: 'A multi-engine, multi-week slide in how models describe a brand. Usually the downstream signal of a Reddit thread or a bad review cementing into training data.', related: ['Sentiment score'] },
  { term: 'No-citation answer', aka: 'Ungrounded answer', def: 'An AI response with no linked sources — the model answered from memory. Brand mentions in no-citation answers are harder to earn and harder to verify, but they exist at very high volume.', related: ['Dark mention', 'Grounded answer'] },
  { term: 'Objection prompt', aka: null, def: 'A prompt framed around a concern or drawback — "downsides of [product]," "is [brand] worth it." Brands that lose sentiment on objection prompts are leaking deal-stage evaluations.', related: ['Prompt Taxonomy', 'Sentiment score'] },
  { term: 'Organic AI mention', aka: null, def: 'A brand appearance in an answer the brand didn\'t explicitly optimize for. These are the highest-quality mentions because they signal the engine independently associates the brand with a topic.', related: ['Brand Mention', 'Topic authority'] },
  { term: 'Perplexity rank', aka: null, def: 'A brand\'s position inside Perplexity\'s generated answers specifically. Perplexity is citation-heavy by design and tends to rank brands with stronger domain citation footprints higher than ChatGPT does.', related: ['Position', 'Citation'] },
  { term: 'Position', aka: 'Answer rank · List rank', def: 'Where a brand mention lands inside an answer — first listed, top-three, or buried below the fold. Weighted heavily in visibility scoring because first-mention bias is real.', related: ['AI Visibility'] },
  { term: 'Proactive mention', aka: null, def: 'A brand recommendation an engine volunteers without the prompt explicitly asking for suggestions. The most commercially valuable mention type — it means the engine treats the brand as a default association for the topic.', related: ['Organic AI mention', 'Recommendation prompt'] },
  { term: 'Prompt', aka: 'Query · Question', def: 'The input Promptive sends to each engine on a schedule. Prompts are the unit of billing and the unit of measurement — you pick what you want tracked.', related: ['Prompt Set'] },
  { term: 'Prompt coverage', aka: null, def: 'The percentage of a brand\'s realistic buyer queries that are represented in its tracked prompt set. Gaps in coverage produce blind spots — whole intent segments you\'re not measuring.', related: ['Intent coverage', 'Prompt Set'] },
  { term: 'Prompt Set', aka: 'Prompt library', def: 'A curated bundle of prompts associated with a brand. A well-built set covers category, comparison, objection, use-case, and alternative queries.', related: ['Prompt'] },
  { term: 'Prompt Taxonomy', aka: null, def: 'The five-type classification used to design comprehensive prompt sets: category, comparison, objection, use-case, and alternative. Full taxonomy coverage prevents intent blind spots.', related: ['Prompt Set', 'Intent mapping'] },
  { term: 'RAG', aka: 'Retrieval-augmented generation', def: 'An architecture where a model retrieves external documents at query time and uses them to ground its answer. Promptive monitors the outputs of RAG-powered engines (e.g. Perplexity) separately from base-model engines.', related: ['Inference-time citation', 'Grounded answer'] },
  { term: 'Rank flip', aka: null, def: 'A single event where a brand moves from outside the top-three to inside it, or vice versa. Rank flips in either direction are the default trigger for Promptive\'s real-time alerts.', related: ['Position', 'Webhook alert'] },
  { term: 'Recommendation prompt', aka: null, def: 'A prompt that asks the engine to recommend a tool, vendor, or product. The highest-signal prompt type because it maps directly to purchase intent.', related: ['Prompt'] },
  { term: 'Reddit signal', aka: 'UGC citation', def: 'A Reddit thread or comment that an AI engine cites or incorporates into its answer. Reddit\'s high indexing velocity and community trust make it one of the most reliable short-term GEO levers.', related: ['Forum signal', 'Source seeding'] },
  { term: 'Refresh cadence', aka: null, def: 'How often Promptive re-samples each engine for your tracked prompts. Weekly / daily / 12h / 6h depending on plan.', related: [] },
  { term: 'Retrieval freshness', aka: null, def: 'How recently an engine\'s live retrieval layer has indexed a piece of content. Fresh pages can appear in grounded answers within days; the base model weights update far less frequently.', related: ['Inference-time citation', 'RAG'] },
  { term: 'Sentiment calibration', aka: null, def: 'The process of validating that a sentiment-scoring model agrees with human raters across a representative sample. Promptive publishes its calibration results quarterly so customers can audit the rubric.', related: ['Sentiment score'] },
  { term: 'Sentiment score', aka: 'Sentiment index', def: 'A −1.0 to +1.0 rating of the tone an engine uses to describe a brand. Graded by an LLM against a published rubric and calibrated across engines.', related: ['Negative sentiment drift'] },
  { term: 'Share of Voice', aka: 'SoV', def: 'Your percentage of relevant brand mentions inside a given category\'s prompt set. The single number most teams end up tracking at the exec level.', related: ['AI Visibility'] },
  { term: 'Snapshot', aka: null, def: 'A point-in-time capture of all scores and raw answers for a brand\'s full prompt set. Snapshots are automatically stored at each refresh so you can diff any two moments in time.', related: ['Backfill', 'Refresh cadence'] },
  { term: 'Soft mention', aka: null, def: 'A brand reference that appears in supporting context rather than as a direct recommendation — "companies like X" or "some teams use Y." Scored separately from hard recommendations.', related: ['Brand Mention', 'Position'] },
  { term: 'Source seeding', aka: 'Citation seeding', def: 'The practice of placing credible content on domains the engines already trust (Reddit, Quora, high-DA blogs) to influence future answers. Not the same as manipulation — quality bar matters.', related: ['Citation'] },
  { term: 'Structured data (LLM)', aka: 'Schema · Entity markup', def: 'HTML schema markup (Organization, Product, FAQ, HowTo) that helps engines parse entity relationships without reading the full page. FAQ schema in particular correlates with answer-layer inclusions.', related: ['LLM-readable content', 'Brand entity'] },
  { term: 'Topic authority', aka: null, def: 'The degree to which a brand is perceived by AI engines as a go-to source for a specific subject. Built through depth of coverage, citation density, and co-occurrence with high-trust domains.', related: ['Brand authority (LLM)', 'Corpus coverage'] },
  { term: 'Tracked prompt', aka: null, def: 'Any prompt in a brand\'s active prompt set that Promptive samples on a recurring schedule. Tracked prompts are the billable unit and the source of all score data.', related: ['Prompt', 'Refresh cadence'] },
  { term: 'Training signal', aka: null, def: 'Any piece of content — article, forum post, documentation, press coverage — that, once indexed, can influence a model\'s future outputs. The GEO analog of a link signal in traditional SEO.', related: ['Corpus coverage', 'Source seeding'] },
  { term: 'Training-data window', aka: null, def: 'The approximate cutoff inside a model\'s training corpus. Matters because anything authored after the window has to earn citations at inference time instead of being absorbed directly.', related: ['Knowledge cutoff'] },
  { term: 'UGC signal', aka: 'User-generated content signal', def: 'Reviews, forum threads, Q&A posts, and social content that engines treat as third-party evidence for or against a brand. High-volume, authentic UGC outperforms brand-owned content for sentiment influence.', related: ['Reddit signal', 'Forum signal'] },
  { term: 'Use-case prompt', aka: null, def: 'A prompt that names a specific problem or workflow — "tool for tracking AI mentions in B2B SaaS." These prompts are the highest-signal discovery surface for bottom-funnel buyers.', related: ['Prompt Taxonomy', 'High-intent prompt'] },
  { term: 'Velocity alert', aka: null, def: 'A notification triggered when a brand\'s mention count or rank changes faster than a defined threshold within a rolling 7-day window. Useful for catching competitor launches before they compound.', related: ['Mention velocity', 'Webhook alert'] },
  { term: 'Verbatim citation', aka: null, def: 'A citation where the engine quotes or closely paraphrases a specific sentence from a source page. The strongest form of citation — it means the engine found the exact passage useful enough to reproduce.', related: ['Citation', 'LLM-readable content'] },
  { term: 'Visibility Score', aka: null, def: 'Promptive\'s aggregate score for a brand, combining frequency, position, and engine weight into a single 0–100 number. Moves slowly by design.', related: ['AI Visibility'] },
  { term: 'Watermark prompt', aka: null, def: 'A synthetic test prompt seeded with unique phrasing to verify whether a specific piece of content has been ingested and cited by a given engine. Used to validate seeding efforts.', related: ['Benchmark prompt', 'Inference-time citation'] },
  { term: 'Webhook alert', aka: null, def: 'An HTTP POST we fire when a tracked metric crosses a threshold. Typical uses: rank flip, sentiment drop, new competitor overtake.', related: ['Velocity alert'] },
  { term: 'White-label dashboard', aka: null, def: 'A Promptive report view with custom branding — logo, colors, domain — delivered to clients under an agency\'s name. Available on agency plans; supports per-client login and isolated data.', related: ['Multi-brand workspace'] },
  { term: 'Zero-click answer', aka: null, def: 'A generative response so complete that the user has no reason to visit any linked source. The dominant answer type for informational queries — and the main reason AI visibility now matters more than organic CTR.', related: ['Answer Layer', 'AI Overview'] },
];
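
// Dev-time sanity check (illustrative): every `related` entry should resolve
// to a defined glossary term, or the cross-links on the glossary page go
// nowhere. Returns the broken pairs; an empty array means the list is consistent.
function unresolvedGlossaryLinks(entries) {
  const terms = new Set(entries.map((e) => e.term));
  const broken = [];
  for (const entry of entries) {
    for (const rel of entry.related) {
      if (!terms.has(rel)) broken.push(entry.term + ' -> ' + rel);
    }
  }
  return broken;
}

// Illustrative only, NOT Promptive's published formula. A minimal sketch of
// how the Visibility Score ingredients defined above (frequency, position,
// engine weighting) could fold into a single 0–100 number. The 1/position
// decay and the default weight of 1 are assumptions made for this example.
function visibilityScoreSketch(mentions, promptCount, engineWeights = {}) {
  // mentions: e.g. [{ engine: 'chatgpt', position: 2 }, ...] from one snapshot
  if (!promptCount) return 0;
  let total = 0;
  for (const m of mentions) {
    const weight = engineWeights[m.engine] ?? 1; // engine weighting
    total += weight / m.position; // first-mention bias: position 1 gets full credit
  }
  // Frequency enters via promptCount: prompts answered with no mention dilute
  // the score. Clamped so boosted engine weights can't push past 100.
  return Math.min(100, Math.round((100 * total) / promptCount));
}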

const COMPETITORS = [
  {
    slug: 'peec',
    url: '/promptive-vs-peec',
    name: 'Peec',
    tagline: 'Clean dashboards, three engines',
    color: '#8B6BAE',
    sigil: 'Pe',
    domain: 'peec.ai',
    reviewed: 'April 2026',
    tldr: 'Peec is a polished AI search analytics tool built for marketing teams — solid prompt management, AI-suggested prompts, and clean dashboards. The coverage gap is real though: Peec tracks ChatGPT, Perplexity, and Gemini. Claude is not included. Promptive covers all four major engines on every plan.',
    quote: {
      who: '[VP of Marketing]',
      role: '[B2B SaaS · switched from Peec]',
      text: 'Peec is where we started — it\'s genuinely easy to learn. But once we needed Claude data alongside the others, we hit a wall. Promptive covers everything in one plan without needing to piece it together.',
      avatar: 'VP'
    },
    pillars: [
      ['Engines covered', 'ChatGPT, Perplexity, Gemini', 'ChatGPT, Claude, Gemini, Perplexity'],
      ['Claude included', '—', '✓'],
      ['Sentiment scoring', '—', '−1 to +1, calibrated'],
      ['Citation tracking', 'Source frequency only', 'Full URL attribution'],
      ['Competitor tracking', 'Limited by tier', 'Unlimited on paid plans'],
      ['Agency / multi-brand', '—', 'White-label + pooled credits'],
      ['Pricing', 'Not publicly disclosed', 'From $69/mo'],
    ],
  },
  {
    slug: 'profound',
    url: '/promptive-vs-profound',
    name: 'Profound',
    tagline: 'Broadest engine coverage, pricing by sales only',
    color: '#4A8FBD',
    sigil: 'Pr',
    domain: 'tryprofound.com',
    reviewed: 'April 2026',
    tldr: 'Profound tracks one of the broadest engine lists in the category — ChatGPT, Claude, Gemini, Perplexity, Grok, Copilot, Meta AI, DeepSeek, and Google AI Overviews. Their autonomous marketing agents and Conversation Explorer are genuinely differentiated. The tradeoff: pricing is entirely sales-gated with no public tiers, and onboarding typically takes weeks. Promptive is live in fifteen minutes.',
    quote: {
      who: '[Director of Content Strategy]',
      role: '[E-commerce brand · evaluated Profound]',
      text: 'Profound is impressive — nine engines and the agent layer are real differentiators. But getting a number out of them took two weeks of calls. We needed something running now. Promptive was live in fifteen minutes.',
      avatar: 'DC'
    },
    pillars: [
      ['Engines covered', 'ChatGPT, Claude, Gemini, Perplexity, Copilot, Grok, Meta AI, DeepSeek, Google AI Overviews', 'ChatGPT, Claude, Gemini, Perplexity'],
      ['Refresh cadence', 'Not publicly disclosed', 'Daily'],
      ['Sentiment scoring', '✓', '✓ w/ published rubric'],
      ['Citation tracking', '✓', '✓'],
      ['Autonomous marketing agents', '✓', '—'],
      ['Time to value', 'Weeks (sales onboarding)', '< 15 minutes'],
      ['Agency mode', '—', 'White-label + pooled credits'],
      ['Pricing', 'Sales-gated, not disclosed', 'From $69/mo'],
    ],
  },
  {
    slug: 'scrunch',
    url: '/promptive-vs-scrunch',
    name: 'Scrunch',
    tagline: 'Deck-ready reports, enterprise pricing',
    color: '#D14F3C',
    sigil: 'Sc',
    domain: 'scrunch.com',
    reviewed: 'April 2026',
    tldr: 'Scrunch (scrunch.com) is an AI Customer Experience Platform tracking how brands appear across six LLMs. Their reports are genuinely polished — persona-based tracking, citation analysis, and deck-ready exports make it popular with agencies. The Core plan starts at $250/month with four engines; Claude and Gemini require upgrading to Enterprise. Promptive starts at $69/month with all four major engines included.',
    quote: {
      who: '[SEO Lead]',
      role: '[Marketing agency · switched from Scrunch]',
      text: 'The reports looked great in client decks. But $250 a month to track only four engines, with Claude locked behind Enterprise, pushed us to look at alternatives. Promptive gave us everything we needed at a fraction of the price.',
      avatar: 'SL'
    },
    pillars: [
      ['Engines (Core plan)', 'ChatGPT, Perplexity, Google AI Overviews, Copilot', 'ChatGPT, Claude, Gemini, Perplexity'],
      ['Claude + Gemini included', 'Enterprise only', '✓ All plans'],
      ['Sentiment scoring', 'Positive / neutral / negative', 'Continuous −1 to +1'],
      ['Citation tracking', '✓', '✓'],
      ['Persona-based tracking', '✓', '✓'],
      ['Agency mode', '✓ ($500/mo Agency plan)', '✓ White-label + pooled credits'],
      ['Free trial', '✓ 7 days (card required)', '✓ 7 days, no card'],
      ['Starting price', '$250/mo', '$69/mo'],
    ],
  },
  {
    slug: 'athena',
    url: '/promptive-vs-athena',
    name: 'Athena HQ',
    tagline: 'Deep AEO platform, credit-based billing',
    color: '#5B3FA6',
    sigil: 'At',
    domain: 'athenahq.ai',
    reviewed: 'April 2026',
    tldr: 'Athena HQ is a serious AEO/GEO platform — 8+ AI engines, Shopify integration, an AI copilot ("Ask Athena"), and multi-brand portfolio tracking. It\'s built for teams that want depth. The friction points: self-serve starts at $295/month on a credit-based model that makes costs hard to predict, and there\'s no clear free trial. Promptive\'s flat-rate plans start at $69/month with a 7-day free trial, no card required.',
    quote: {
      who: '[Head of Growth]',
      role: '[DTC brand · evaluated Athena HQ]',
      text: 'The platform is genuinely deep — the Shopify integration and multi-brand tracking were exactly what our CFO wanted to see. But the credit system made it hard to forecast costs month to month. Promptive\'s flat pricing made the budget conversation simple.',
      avatar: 'HG'
    },
    pillars: [
      ['Engines covered', 'ChatGPT, Claude, Gemini, Perplexity, Copilot, Grok, Google AI Overviews + more', 'ChatGPT, Claude, Gemini, Perplexity'],
      ['Sentiment scoring', '✓', '✓ w/ published rubric'],
      ['Citation tracking', '✓', '✓'],
      ['Shopify / e-commerce integration', '✓', '—'],
      ['AI copilot (Ask Athena)', '✓', '—'],
      ['Pricing model', 'Credit-based', 'Flat monthly · no tokens'],
      ['Free trial', 'Not clearly offered', '✓ 7 days, no card'],
      ['Starting price', '$295/mo', '$69/mo'],
    ],
  },
  {
    slug: 'otterly',
    url: '/promptive-vs-otterly',
    name: 'Otterly',
    tagline: 'Affordable monitoring + GEO URL audits',
    color: '#2E86AB',
    sigil: 'Ot',
    domain: 'otterly.ai',
    reviewed: 'April 2026',
    tldr: 'Otterly is a legitimate AI visibility monitoring tool — tracking how brands appear across ChatGPT, Gemini, Perplexity, Google AI Overviews, Google AI Mode, and Copilot. It\'s the most affordable entry point in the category at $29/month, and adds GEO URL auditing that most competitors don\'t offer. The gap: no Claude coverage, no sentiment scoring beyond basic mention detection, and no agency white-labeling. Promptive adds Claude, calibrated sentiment (−1 to +1), full citation attribution, and agency features.',
    quote: {
      who: '[Content Strategist]',
      role: '[SaaS startup · evaluated Otterly]',
      text: 'Otterly was the easiest way to start — $29, no credit card, live in minutes. When we needed sentiment depth and Claude data to bring to leadership, we moved to Promptive. The GEO audit feature is genuinely useful though, and nothing else at that price point comes close.',
      avatar: 'CS'
    },
    pillars: [
      ['Engines covered', 'ChatGPT, Gemini, Perplexity, Google AI Overviews, Google AI Mode, Copilot', 'ChatGPT, Claude, Gemini, Perplexity'],
      ['Claude included', '—', '✓'],
      ['Sentiment scoring', '—', '−1 to +1, calibrated'],
      ['Citation tracking', '✓ (URL-level)', '✓'],
      ['GEO URL auditing', '✓ (1k–10k/mo by plan)', '—'],
      ['Competitor benchmarking', '✓', '✓'],
      ['Agency white-label', '—', '✓ pooled credits + custom domain'],
      ['Free trial', '✓ no card required', '✓ 7 days, no card'],
      ['Starting price', '$29/mo', '$69/mo'],
    ],
  },
  {
    slug: 'airops',
    url: '/promptive-vs-airops',
    name: 'AirOps',
    tagline: 'Content engineering platform, not visibility monitoring',
    color: '#E07B39',
    sigil: 'Ao',
    domain: 'airops.com',
    reviewed: 'April 2026',
    tldr: 'AirOps is a content engineering platform — it helps teams create, optimize, and publish AI search-ready content at scale using agentic workflows and a task-based model. It does offer basic prompt tracking (100–250 prompts on paid plans) but that\'s a secondary feature. It doesn\'t provide sentiment scoring, citation attribution, or competitor benchmarking. If you need to measure how your brand currently performs in AI answers and compare against competitors, Promptive is purpose-built for that.',
    quote: {
      who: '[Head of Content]',
      role: '[Growth-stage SaaS · evaluated AirOps]',
      text: 'AirOps is excellent for producing content that\'s optimized for AI search — the workflow tooling is genuinely powerful. But we needed to measure where we actually stood in AI answers before we could know what to fix. That\'s what Promptive does.',
      avatar: 'HC'
    },
    pillars: [
      ['AI visibility monitoring', '—', '✓'],
      ['Sentiment scoring', '—', '−1 to +1, calibrated'],
      ['Citation attribution', '—', '✓'],
      ['Competitor benchmarking', '—', '✓'],
      ['Prompt tracking', '✓ (100–250 prompts, basic)', '✓ (20–400 prompts, full analytics)'],
      ['Content creation workflows', '✓', '—'],
      ['Agentic content pipelines', '✓', '—'],
      ['Pricing model', 'Task-based (usage overages)', 'Flat monthly · no tokens'],
      ['Free trial', '✓ 14 days, no card', '✓ 7 days, no card'],
      ['Starting price', 'Free tier available', '$69/mo'],
    ],
  },
];
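
// Illustrative sketch: each `pillars` row is a [feature, competitor value,
// Promptive value] triple, so a comparison matrix maps straight onto row
// objects. '—' marks a capability the product lacks.
function pillarsToRows(competitor) {
  return competitor.pillars.map(([feature, theirs, ours]) => ({
    feature,
    competitor: theirs, // rendered under competitor.name in the matrix header
    promptive: ours,
  }));
}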

window.Promptive = Object.assign(window.Promptive || {}, {
  BLOG_POSTS, WHITEPAPERS, GLOSSARY, COMPETITORS
});
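
// Downstream bundles read the shared data off the namespace, e.g.:
//   const { GLOSSARY, COMPETITORS } = window.Promptive;
// Object.assign merges these keys into whatever another data file already
// registered on window.Promptive, so script load order doesn't matter.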
