// Essay — The Design Mistake Behind Most AI Transformations
// Outside-In Triad. Unlinked preview route. Mirrors POV editorial scaffolding.
function EssayOutsideInTriad() {
  const { go } = useRoute();
  const [progress, setProgress] = React.useState(0);
  const [activeSection, setActiveSection] = React.useState('i');
  const articleRef = React.useRef(null);

  const sections = [
    { id: 'i',    numeral: 'I',    title: 'Three conversations I keep having' },
    { id: 'ii',   numeral: 'II',   title: 'One root cause' },
    { id: 'iii',  numeral: 'III',  title: 'How this differs from existing frameworks' },
    { id: 'iv',   numeral: 'IV',   title: 'In practice: a customer address change' },
    { id: 'v',    numeral: 'V',    title: 'Lens 1 — Governance, compliance & risk' },
    { id: 'vi',   numeral: 'VI',   title: 'Lens 2 — People & internal ops' },
    { id: 'vii',  numeral: 'VII',  title: 'Lens 3 — Customer journey' },
    { id: 'viii', numeral: 'VIII', title: 'Measurement: the layer every framework skips' },
    { id: 'ix',   numeral: 'IX',   title: 'Cross-industry signals' },
    { id: 'x',    numeral: 'X',    title: 'When all three hold' },
  ];

  React.useEffect(() => {
    const onScroll = () => {
      const el = articleRef.current;
      if (!el) return;
      const top = el.getBoundingClientRect().top;
      const height = el.scrollHeight - window.innerHeight;
      const scrolled = Math.min(1, Math.max(0, (-top) / height));
      setProgress(scrolled);
      for (let i = sections.length - 1; i >= 0; i--) {
        const s = document.getElementById('sec-' + sections[i].id);
        if (s && s.getBoundingClientRect().top < 120) { setActiveSection(sections[i].id); break; }
      }
    };
    window.addEventListener('scroll', onScroll, { passive: true });
    onScroll();
    return () => window.removeEventListener('scroll', onScroll);
  }, []);

  return (
    <main ref={articleRef} className="mx-auto max-w-[1320px] px-6 md:px-10 pt-10 md:pt-16 pb-20">
      <div className="rule-bottom pb-4 flex items-center justify-between smallcaps text-muted">
        <span>Essay · Preview</span>
        <span className="hidden md:inline">The design mistake behind most AI transformations</span>
        <span className="tabular">May 2026 · draft</span>
      </div>

      <header className="mt-10 md:mt-16 grid md:grid-cols-12 gap-6 md:gap-10">
        <div className="md:col-span-2">
          <div className="smallcaps text-muted">Essay</div>
        </div>
        <div className="md:col-span-10">
          <h1 className="font-display font-medium tracking-tight text-balance leading-[0.98]
                         text-[48px] sm:text-[64px] md:text-[84px] lg:text-[96px]">
            The design mistake behind most AI transformations.
          </h1>
          <p className="mt-6 font-display text-[22px] md:text-[26px] leading-[1.35] max-w-[52ch] text-ink2">
            Why governance, people, and customer must be held simultaneously — not sequenced.
          </p>
          <div className="mt-8 smallcaps text-muted">
            By <span className="text-ink">Prathyusha Vemula</span> · May 2026 · ~14 min read
          </div>
        </div>
      </header>

      <div className="mt-16 grid md:grid-cols-12 gap-6 md:gap-10">
        <aside className="md:col-span-3 order-2 md:order-1">
          <div className="md:sticky md:top-28">
            <div className="smallcaps text-muted">Contents</div>
            <ol className="mt-4 space-y-3">
              {sections.map(s => (
                <li key={s.id}>
                  <a href={`#sec-${s.id}`} className={`flex items-baseline gap-3 transition-colors ${activeSection === s.id ? 'text-sienna' : 'text-ink2 hover:text-ink'}`}>
                    <span className="font-display text-sm w-7 text-right tabular">{s.numeral}</span>
                    <span className="leading-tight">{s.title}</span>
                  </a>
                </li>
              ))}
            </ol>
            <div className="mt-10 rule-top pt-5">
              <div className="smallcaps text-muted">Progress</div>
              <div className="mt-3 h-[2px] bg-ink/10 relative">
                <div className="absolute left-0 top-0 h-full bg-sienna transition-all duration-200" style={{ width: `${Math.round(progress * 100)}%` }} />
              </div>
              <div className="mt-2 tabular text-xs text-muted">{Math.round(progress * 100)}%</div>
            </div>
            <div className="mt-10 rule-top pt-5">
              <div className="smallcaps text-muted">Review</div>
              <p className="mt-2 text-sm text-ink2 leading-relaxed">
                This is a preview link for reviewers. Feedback to <a className="link-underline text-ink" href="mailto:vemula.prathyusha@gmail.com">vemula.prathyusha@gmail.com</a>.
              </p>
            </div>
          </div>
        </aside>

        <article className="md:col-span-9 order-1 md:order-2">
          <div className="measure font-display text-[20px] md:text-[21px] leading-[1.65] text-ink">

            {/* Prologue */}
            <div className="mb-12">
              <div className="smallcaps text-sienna">For leaders making the decision</div>
              <p className="mt-4 font-display text-[24px] md:text-[28px] leading-[1.35] text-ink2">
                Most enterprise AI investments do not fail because of the technology. They fail because of the design sequence. That failure shows up in P&amp;L, in regulatory exposure, and in customer churn at a multiplier the dashboard cannot see.
              </p>
              <p className="mt-8">
                Boards approve AI programmes that look successful in execution and produce disappointing results in the business. Audit findings arrive 12–18 months post-launch. NPS drifts downward in ways nobody connects to the transformation. Productivity gains stay on the slide deck.
              </p>
              <p>
                The cause sits at the design layer. AI transformation is still being designed inside-out — starting with what operations wants to fix, with governance and customer experience added later as separate workstreams.
              </p>
              <p>
                This article makes the case for a different discipline. The Outside-In Triad. Three constraints held simultaneously from the first workflow decision, rather than sequenced through the programme: governance and risk, people and operations, customer journey.
              </p>
              <p>
                The strategic implication is direct. The next wave of AI value will be won by the organisations whose design discipline survives contact with their actual business — regulatory, operational, and customer-facing — all at once.
              </p>
            </div>

            {/* I */}
            <h2 id="sec-i" className="scroll-mt-24 mt-16">
              <span className="block smallcaps text-sienna mb-2">I · Three conversations I keep having</span>
              <span className="font-display text-[34px] md:text-[44px] leading-[1.05] tracking-tight">Three different organisations. Three different failure modes. One root cause.</span>
            </h2>
            <p className="mt-8">
              A leader pulls up the transformation dashboard. Eight projects. All green. Twelve months in, benefits haven't materialised. Teams are duplicating effort. Vendors operate in parallel without visibility into each other's work. Data is handled differently across lines of business.
            </p>
            <p>
              Nobody can point to what's wrong, because individually nothing is. The problem isn't in any single project. It's the absence of structure holding them together.
            </p>
            <p>
              I've seen this across five to eight client implementations. The pattern is identical: multiple workstreams, separate ownership, no central governance. Projects optimise locally and the organisation drifts from its strategic goal. Goldratt called it out in <em>The Goal</em>: local optimisation is the enemy of global performance. In AI transformation, this shows up in the P&amp;L eighteen months after go-live.
            </p>
            <div className="mt-12 smallcaps text-sienna">A second conversation</div>
            <p>
              A different organisation. Design was right. Implementation was smooth. Six months later, adoption sits at 20%. Employees route around the new system. Leadership is confused — the technology works, the logic is sound.
            </p>
            <p>
              What's missing is the answer to a question nobody asked: what does it feel like to work inside this process every day?
            </p>
            <p>
              The AI handled the standard case. Every exception landed on the human — harder, more ambiguous, higher cognitive load than before. The human role was never redesigned for that reality. In rule-based automations this is manageable. In AI implementations it is significantly worse, because outputs are probabilistic and the human now has to judge when to trust the system and when to override it.
            </p>
            <div className="mt-12 smallcaps text-sienna">A third conversation</div>
            <p>
              Costs came down. Average handle time improved. Ops hit its targets. Then NPS dropped six points.
            </p>
            <p>
              The IVR that saved thirty seconds per agent added three minutes of friction per customer. The chatbot that replaced it couldn't understand context — different technology, same design failure. Customers didn't switch channels. They switched providers.
            </p>
            <PullQuote>
              A positive customer experience drives roughly 3× uplift. A negative one drives 9× damage. Optimising internal operations at the expense of the customer journey destroys value at a multiplier.
            </PullQuote>

            {/* II */}
            <h2 id="sec-ii" className="scroll-mt-24 mt-16">
              <span className="block smallcaps text-sienna mb-2">II · One root cause</span>
              <span className="font-display text-[34px] md:text-[44px] leading-[1.05] tracking-tight">Three failure modes, one design sequence — inside out, with the customer downstream.</span>
            </h2>
            <p className="mt-6">
              All three designed AI transformation from the inside out — starting with what's slow, what's costly, what operations wants to fix. The customer was downstream. Governance was a final-stage review. People were expected to adapt.
            </p>
            <p>
              That sequence is the failure mode.
            </p>
            <p>
              Transformation that lasts starts from the collision point of three forces held together: the regulatory and governance frame that determines what is sustainable, the people and operational reality that determines what is adoptable, and the customer journey that determines what is actually valuable.
            </p>
            <p className="mt-6 font-display text-[24px] md:text-[28px] leading-[1.25] text-sienna">This is the Outside-In Triad.</p>

            {/* III */}
            <h2 id="sec-iii" className="scroll-mt-24 mt-16">
              <span className="block smallcaps text-sienna mb-2">III · How this differs from existing frameworks</span>
              <span className="font-display text-[34px] md:text-[44px] leading-[1.05] tracking-tight">BCG, McKinsey, and Forrester are right about the ingredients. The Triad adds three things they don't.</span>
            </h2>
            <p className="mt-6">
              BCG's 10-20-70 rule correctly identifies that 70% of transformation value lives in people and process. McKinsey addresses strategy and culture. Forrester covers governance and customer-obsessed operating models. These frameworks are right about the ingredients.
            </p>
            <h3 className="font-display text-2xl mt-8 text-sienna">Simultaneity, not sequence</h3>
            <p className="mt-3">
              Existing frameworks still imply an order — governance first, change management second, customer measurement third. The Triad treats all three as co-equal constraints from day one. A decision that satisfies governance and people but ignores the customer journey is a future problem that has not surfaced yet.
            </p>
            <h3 className="font-display text-2xl mt-8 text-sienna">Ground-level, not strategy-level</h3>
            <p className="mt-3">
              BCG and McKinsey operate at portfolio altitude. The Triad applies at the workflow level — loan origination, fraud alerts, returns automation, field engineer escalation. This is where the three lenses actually collide. Modern enterprise teams have no separate strategy team, implementation team, or middle-coordination layer. The person operating the workflow carries all three lenses simultaneously.
            </p>
            <h3 className="font-display text-2xl mt-8 text-sienna">Measurement redesign, not measurement addition</h3>
            <p className="mt-3">
              Most frameworks bolt measurement on at the end. The Triad treats it as a design question: are the metrics being tracked still valid for the new process, or did the transformation change what needs to be measured? Carrying old metrics into a new process is one of the most common and least discussed failure modes.
            </p>

            {/* IV */}
            <h2 id="sec-iv" className="scroll-mt-24 mt-16">
              <span className="block smallcaps text-sienna mb-2">IV · In practice: a customer address change</span>
              <span className="font-display text-[34px] md:text-[44px] leading-[1.05] tracking-tight">Same technology, same target metrics, two different 24-month outcomes.</span>
            </h2>
            <p className="mt-6">
              Consider one of the simplest workflows in any bank.
            </p>
            <p>
              A customer wants to update their address. They upload a supporting document — utility bill, ID, rental agreement. An associate downloads it, validates the document type, visually checks for forgery, extracts the new address, updates downstream systems.
            </p>
            <p>
              For a clean English-language document: about two minutes. For everything else: not.
            </p>
            <p>
              A document in an unfamiliar language — is this a utility bill, a tax notice, a government letter? Classification alone takes five minutes. A non-direct case can run 10–20 minutes.
            </p>
            <p>
              Most banks are now applying AI to this workflow. The two approaches diverge sharply.
            </p>
            <p>
              <strong>Inside-out redesign:</strong> Deploy AI document processing. Auto-approve clean cases. Route exceptions to human review. Target reduced AHT, reduced headcount, higher straight-through processing. The dashboard improves.
            </p>
            <p>
              <strong>Outside-In Triad redesign:</strong> the three lenses change the design from the first decision.
            </p>

            <figure className="my-12">
              <div className="smallcaps text-muted mb-3">Figure · The same workflow under two design disciplines</div>
              <div className="rule-top rule-bottom overflow-x-auto">
                <table className="w-full text-base font-sans">
                  <thead>
                    <tr className="border-b hairline">
                      <th className="text-left py-3 pr-6 smallcaps text-muted w-1/3">Governance</th>
                      <th className="text-left py-3 pr-6 smallcaps text-muted w-1/3">People</th>
                      <th className="text-left py-3 smallcaps text-muted w-1/3">Customer</th>
                    </tr>
                  </thead>
                  <tbody className="text-ink2">
                    <tr className="border-b hairline align-top">
                      <td className="py-4 pr-6">Information-security pipeline for document extraction designed before deployment — where the raw document sits, who accesses it, how it's disposed of.</td>
                      <td className="py-4 pr-6">Before automation, the associate handled a mix — direct cases and exceptions distributed across the day.</td>
                      <td className="py-4">The customer doesn't see internal complexity. They see — did it work, when will I know, what do I do if something is wrong.</td>
                    </tr>
                    <tr className="border-b hairline align-top">
                      <td className="py-4 pr-6">The model's reasoning for approving a document is captured in a form a human auditor can read, not just an outcome flag.</td>
                      <td className="py-4 pr-6">After automation, the associate handles only exceptions, all day — ambiguous types, suspicious legitimacy calls. Every case is cognitively heavy.</td>
                      <td className="py-4">An immediate, meaningful acknowledgement — not a status email that says nothing.</td>
                    </tr>
                    <tr className="border-b hairline align-top">
                      <td className="py-4 pr-6">When the AI eventually approves a forged document, the failure-capture mechanism already exists.</td>
                      <td className="py-4 pr-6">A new task is added: reading the AI's reasoning on flagged cases and judging whether the model's judgment was right. A different skill with its own learning curve.</td>
                      <td className="py-4">A rejection reason that is specific and actionable, not "document not accepted."</td>
                    </tr>
                    <tr className="align-top">
                      <td className="py-4 pr-6">Address change is also a KYC and AML signal — the governance design connects those events instead of treating them as separate workstreams.</td>
                      <td className="py-4 pr-6">Capacity expectations, training, and override criteria are designed for that reality.</td>
                      <td className="py-4">A realistic timeline when manual review is needed. Certainty built into the design.</td>
                    </tr>
                  </tbody>
                </table>
              </div>
            </figure>

            <PullQuote secondary>
              Inside-out produces a faster process. It also produces an unmeasured information-security gap, an associate role that burns out within months, and a customer updated by status emails that say nothing useful. Triad design produces a faster process too. It adds a defensible audit trail, a sustainable associate role, and a customer who feels the difference from the first touchpoint.
            </PullQuote>

            <p>
              This is the framework at workflow altitude.
            </p>

            {/* V */}
            <h2 id="sec-v" className="scroll-mt-24 mt-16">
              <span className="block smallcaps text-sienna mb-2">V · Lens 1 — Governance, compliance &amp; risk</span>
              <span className="font-display text-[34px] md:text-[44px] leading-[1.05] tracking-tight">Treated as oversight, governance fails slowly. Treated as architecture, it makes the rest of the portfolio work.</span>
            </h2>
            <p className="mt-6">
              Governance is consistently the most underdesigned element of enterprise AI transformation — not because organisations don't know it matters, but because it is treated as an oversight layer rather than a design input.
            </p>
            <p>
              In rule-based automation, governance was bounded. In an AI world — employees accessing GenAI tools directly, LLMs processing customer data, agentic systems making decisions without human review at each step — governance becomes significantly more consequential. Data-privacy regulations, AI-specific legislation, financial-services compliance, and information-security frameworks create hard boundaries on what AI systems can do, what data they touch, how decisions must be documented.
            </p>
            <p>
              In a regulated industry, these are non-negotiable constraints that must be in the room before the first workflow decision.
            </p>
            <p>
              Missing governance produces slow, systemic damage. Projects multiply without coordination. Vendors work in parallel without visibility. Data is handled inconsistently. Security risks accumulate without a clear owner. Benefits are tracked differently, or not at all. The organisation has activity but no architecture.
            </p>
            <h3 className="font-display text-2xl mt-8 text-sienna">The CoE as architecture, not oversight</h3>
            <p className="mt-3">
              The CoE is the architectural layer that makes reusability, visibility, and dependency management possible across the AI portfolio. Without it, every project rebuilds components from scratch, makes its own data decisions, and operates in a silo. The document AI built for address change verification is built again for KYC onboarding, again for loan application processing, again for claims. Three teams, three vendors, three different decisions on how the same data is handled, no visibility into which workflow is dependent on which capability.
            </p>
            <p className="mt-3">
              A CoE designed for reusability changes this. One capability built once, governed once, and consumed across workflows. Dependencies are explicit. The portfolio is visible in one place. Broken, isolated solutions become streamlined, connected ones — because someone designed for that outcome from the start.
            </p>
            <p className="mt-8 smallcaps text-sienna">Governance as a design input means:</p>
            <ul className="mt-4 space-y-2 list-disc pl-6 text-ink2">
              <li>A CoE architecture built for reusability — capabilities consumed across workflows, dependencies visible, silos eliminated by design.</li>
              <li>Compliance and information security in the design brief, not the review checklist.</li>
              <li>GenAI access governance — which tools, on which data, with which oversight — treated as a governance decision, not an IT ticket.</li>
              <li>Benefit tracking that connects portfolio outputs to strategic goals.</li>
              <li>Audit and escalation paths designed before deployment, not after the first incident.</li>
            </ul>
            <p className="mt-8 smallcaps text-sienna">Before finalising any workflow, Lens 1 must answer:</p>
            <ul className="mt-4 space-y-2 list-disc pl-6 text-ink2">
              <li>What does this governance control require the ops team to do differently every day?</li>
              <li>Does this structure make it harder or easier for the customer to get a resolution?</li>
              <li>How will we know it's working at 30, 90, and 180 days — and who owns the signal?</li>
              <li>Have we designed for AI-specific regulatory and information-security constraints, or only the old process ones?</li>
            </ul>

            {/* VI */}
            <h2 id="sec-vi" className="scroll-mt-24 mt-16">
              <span className="block smallcaps text-sienna mb-2">VI · Lens 2 — People &amp; internal ops</span>
              <span className="font-display text-[34px] md:text-[44px] leading-[1.05] tracking-tight">Adoption is a design problem treated as a communication one.</span>
            </h2>
            <p className="mt-6">
              The most common mistake in transformation change management is treating adoption as a communication problem. Send the training. Run the workshops. Measure adoption at ninety days.
            </p>
            <p>
              When AI surrounds a human role without redesigning it, the employee becomes the catch-all for everything the AI cannot handle. In rule-based automation, exceptions are predictable. In AI implementations, the exception landscape is different — cases the model hasn't seen, outputs it got wrong, edge cases requiring human judgment without defined criteria. The human inherits the hard cases, without the role redesign or training to handle them well.
            </p>
            <p>
              Before any AI transformation is finalised, the people question must be answered at the workflow level. Which exceptions are handled by humans after this transformation, how complex are they, what is the cognitive load, and what does the training plan actually cover? The answer cannot be generic AI literacy. It is specific exception types, override criteria, and escalation paths.
            </p>
            <p>
              The other failure mode is metrics. Organisations measure adoption at 90 days using the same KPIs as before. But the transformation changed what the work is. AHT goes down — looks like success. The cases remaining with humans are harder. Error rates on exceptions are up. Employee satisfaction is down. The old metric reported a win that wasn't there.
            </p>
            <h3 className="font-display text-2xl mt-8 text-sienna">Readiness is built during development, not announced at go-live</h3>
            <p className="mt-3">
              Another common change-management mistake is sequencing readiness after build. The app is developed in one stream. Training is scheduled in another. Go-live is the moment those streams are expected to meet. They rarely do — because the team has not had the time, exposure, or structured literacy to be genuinely ready for new ways of working.
            </p>
            <p className="mt-3">
              The discipline that works is parallel readiness. While the app is being designed, the team is being brought along — AI-literacy programmes calibrated to the specific exception landscape they will face, exposure to model outputs before they have to act on them, structured familiarisation with the new metrics and override criteria. Readiness is verified, not assumed. The gap between technical capability and human capability is closed before deployment, not after.
            </p>
            <p className="mt-3">
              This is also where the value of a multi-lens perspective becomes visible. A consultant or transformation lead who carries only the automation view will optimise the build. One who carries the governance, people, and customer lenses simultaneously will design the build, the readiness, the customer touchpoints, and the visibility architecture as one connected solution.
            </p>
            <p className="mt-8 smallcaps text-sienna">What actually works:</p>
            <ul className="mt-4 space-y-2 list-disc pl-6 text-ink2">
              <li>Exception workflow designed before go-live, not discovered after.</li>
              <li>Cognitive load mapped against what the human actually has to manage in the new process.</li>
              <li>Readiness built in parallel with development — AI literacy, model-output familiarisation, and override training delivered before go-live, not after.</li>
              <li>New metrics built for the new process, not inherited from the old one.</li>
              <li>Gradual outcome ramp — building toward target performance over realistic timelines.</li>
              <li>Literacy that addresses AI-specific uncertainty: when to trust, when to question, who's accountable.</li>
              <li>Deliberate unlearning time before people are measured on new performance.</li>
            </ul>
            <p className="mt-8 smallcaps text-sienna">Before finalising any workflow, Lens 2 must answer:</p>
            <ul className="mt-4 space-y-2 list-disc pl-6 text-ink2">
              <li>Which exceptions land on the human, how complex are they, what is the cognitive load?</li>
              <li>What is the training plan for those specific exceptions?</li>
              <li>If the employee follows this new process perfectly, what does the customer experience?</li>
              <li>Is the adoption metric still valid for the new process, or are we measuring the old world?</li>
            </ul>

            {/* VII */}
            <h2 id="sec-vii" className="scroll-mt-24 mt-16">
              <span className="block smallcaps text-sienna mb-2">VII · Lens 3 — Customer journey</span>
              <span className="font-display text-[34px] md:text-[44px] leading-[1.05] tracking-tight">The outside-in starts here. The customer doesn't experience the transformation. They experience the outcome of it.</span>
            </h2>
            <p className="mt-6">
              Every transformation exists to serve an end customer. The customer doesn't experience the transformation. They experience the outcome of it.
            </p>
            <h3 className="font-display text-2xl mt-8 text-sienna">Three layers of customer understanding, not one</h3>
            <p className="mt-3">
              Most workflow redesigns operate at one level: the operational touchpoint. How does the customer experience this address change, this claim, this loan application? That level matters, but it is incomplete.
            </p>
            <p className="mt-3">
              The Triad demands three layers of customer understanding before workflow redesign begins. Who is this customer at segment and life-stage level — a relationship-banking client where every interaction is an opportunity for proactive service, a digital-first customer who wants zero human contact, or someone in the middle of a life event where the process touchpoint signals something larger? Which product is in play — a mortgage, a savings account, a wealth offering — because the same workflow lands differently across products, and an optimisation that fits one product may add operational burden to another. And does this process fit seamlessly into the product team's ownership, or does it need a separate operational team to run well? That structural question is a design decision the Triad surfaces.
            </p>
            <p className="mt-3">
              Without these three layers, operational redesign produces a workflow that is efficient but generic — treating every customer as the same customer, every product as the same product, and every process as a standalone operational concern.
            </p>
            <p>
              When organisations redesign workflows for internal efficiency, they optimise for what they can measure internally: handle time, cost, processing speed, error rate. These metrics are valid. They don't map to what the customer experiences.
            </p>
            <p>
              The IVR failure is a documented pattern. A channel that is cheaper to operate is not automatically better to use. A self-service option that reduces agent load is not automatically one customers will choose, or choose again. Many organisations responded to IVR dissatisfaction by deploying AI chatbots — and repeated the same mistake at higher technical sophistication. The chatbot couldn't understand context, misread intent, looped customers through irrelevant flows.
            </p>
            <p>
              The reason is the same in both cases: the channel was designed for operational efficiency, not for what resolution feels like from the customer's side. Certainty, clarity, minimal effort — a sense of being understood rather than processed.
            </p>
            <p className="mt-8 smallcaps text-sienna">Designing from the customer journey backwards means:</p>
            <ul className="mt-4 space-y-2 list-disc pl-6 text-ink2">
              <li>Starting with the customer's moment of truth — what does success feel like at the end?</li>
              <li>Mapping the exception experience — the standard case usually works; the exception is where loyalty is won or lost.</li>
              <li>Testing channel transitions — when customers move to a new channel, design that transition rather than assume it.</li>
              <li>Designing AI interactions for comprehension, not just deflection.</li>
              <li>Measuring what the customer measures — resolution rate, effort, certainty — not just throughput.</li>
            </ul>
            <p className="mt-8 smallcaps text-sienna">Before finalising any workflow, Lens 3 must answer:</p>
            <ul className="mt-4 space-y-2 list-disc pl-6 text-ink2">
              <li>What does success feel like for the customer at the end — not on the dashboard?</li>
              <li>Who owns the exception when this journey breaks down?</li>
              <li>If the customer moves to a new channel, has the transition been designed?</li>
              <li>What does customer success at 30 days look like, and how does it connect back to our KPIs?</li>
            </ul>

            {/* VIII */}
            <h2 id="sec-viii" className="scroll-mt-24 mt-16">
              <span className="block smallcaps text-sienna mb-2">VIII · Measurement: the layer every framework skips</span>
              <span className="font-display text-[34px] md:text-[44px] leading-[1.05] tracking-tight">The transformation changes what needs to be measured. Most measurement frameworks don't notice.</span>
            </h2>
            <p className="mt-6">
              Most frameworks treat measurement as something you add at the end. The Triad treats it as a design question.
            </p>
            <p>
              The old process had metrics that made sense for the old process. After AI transformation, the work is different and the customer interaction is different — but the measurement framework is often unchanged. Organisations measure the new process with instruments designed for the old one.
            </p>
            <p>
              The result is false confidence. AHT drops while the human cases get harder and more error-prone. Call volume drops while digital-channel satisfaction sits unmeasured. Cost per transaction improves while customer effort score was never tracked in the first place.
            </p>
            <h3 className="font-display text-2xl mt-8 text-sienna">Three stages, one question at each</h3>
            <p className="mt-4">
              <strong>Baseline.</strong> Not just what we measure today, but whether those metrics reflect the health of this process — or just what has historically been easy to count.
            </p>
            <p className="mt-3">
              <strong>Post-implementation.</strong> Are the same metrics still valid? What new signals has the AI process created — exception rates, model confidence, override frequency, customer effort on new channels?
            </p>
            <p className="mt-3">
              <strong>Operational.</strong> Daily metrics designed for the new process, not inherited. Each lens has its own signals, and those signals must validate each other. A governance metric that looks healthy while a customer metric is deteriorating is a warning sign, not a split result.
            </p>
            <PullQuote secondary>
              The central question across all three: are we measuring what matters in the new world, or what we could measure in the old one?
            </PullQuote>

            {/* IX */}
            <h2 id="sec-ix" className="scroll-mt-24 mt-16">
              <span className="block smallcaps text-sienna mb-2">IX · Cross-industry signals</span>
              <span className="font-display text-[34px] md:text-[44px] leading-[1.05] tracking-tight">Which lens is most commonly underweighted varies by sector. The failure is never in the lens that received investment.</span>
            </h2>
            <p className="mt-6">
              The Triad applies wherever AI transformation meets operational complexity and human behaviour. The pattern of which lens is most commonly underweighted varies by sector.
            </p>
            <h3 className="font-display text-2xl mt-8 text-sienna">BFSI</h3>
            <p className="mt-3">
              Governance is strongest. Customer journey is weakest — loan origination redesigned for compliance, not for the applicant. People come next — fraud analysts reviewing hundreds of AI alerts daily, a volume the role was never designed for.
            </p>
            <h3 className="font-display text-2xl mt-8 text-sienna">Telecom</h3>
            <p className="mt-3">
              Customer journey is highly visible. The gaps are in people — field engineers who won't act on AI predictions they don't understand or trust — and governance — AI in customer service deployed without accountability design for handoff failures.
            </p>
            <h3 className="font-display text-2xl mt-8 text-sienna">Retail &amp; e-commerce</h3>
            <p className="mt-3">
              Customer-journey investment is high in acquisition. The gaps appear in operations — supply-chain AI that creates accountability vacuums when forecasts are wrong, and returns automation that fails visibly on the exception.
            </p>
            <h3 className="font-display text-2xl mt-8 text-sienna">Manufacturing</h3>
            <p className="mt-3">
              Governance and technical investment are strong on the factory floor. The gap is in customer journey — AI improves production without connecting to order visibility, delivery reliability, or exception communication downstream.
            </p>
            <p className="mt-8 text-ink2">
              The failure is rarely in the lens that received investment. It sits in the one or two that were treated as secondary.
            </p>

            {/* X */}
            <h2 id="sec-x" className="scroll-mt-24 mt-16">
              <span className="block smallcaps text-sienna mb-2">X · When all three hold</span>
              <span className="font-display text-[34px] md:text-[44px] leading-[1.05] tracking-tight">A design discipline. Three perspectives in tension at every decision point.</span>
            </h2>
            <p className="mt-6">
              This is a design discipline. It requires holding three perspectives in tension at every decision point — and slowing down when one is being ignored.
            </p>
            <p>
              The organisations that do this consistently produce transformation that lands in the P&amp;L, in employee performance data, and in customer satisfaction scores — simultaneously.
            </p>
            <p>
              The ones that don't will keep having the three conversations at the top of this article.
            </p>
            <PullQuote>
              Organisations capable of designing governance, operational reality, and customer experience as one integrated system will win the wave.
            </PullQuote>

            <hr className="mt-16 hairline" />

            <p className="mt-10 text-base text-muted">
              Prathyusha Vemula is a Senior Consultant at Concentrix specialising in AI transformation, agentic AI, GenAI, RPA, and Automation CoE strategy. Twelve years across BFSI, Telecom, FMCG, and Manufacturing.
            </p>
            <p className="mt-6 text-sm text-muted">
              Tags · AI Transformation · Enterprise AI · Outside-In Triad · Governance · Compliance · Change Management · Customer Journey · Automation CoE · Agentic AI · Measurement
            </p>
          </div>

          <div className="mt-16 rule-top pt-6 flex flex-wrap items-baseline justify-between gap-4 smallcaps text-muted">
            <span className="tabular">Revised v2 · May 2026</span>
            <a className="link-underline text-ink" href="mailto:vemula.prathyusha@gmail.com">vemula.prathyusha@gmail.com</a>
            <button onClick={() => go('writing')} className="link-underline">Back to writing →</button>
          </div>
        </article>
      </div>
    </main>
  );
}

// Expose the component on the global object — this file appears to be loaded as a
// plain <script> with no module system, so window is the shared namespace.
// NOTE(review): assumes a route table elsewhere looks this up by name — confirm.
window.EssayOutsideInTriad = EssayOutsideInTriad;
