提交 1a516b26 authored 作者: testing's avatar testing

kafka

上级 ddab9835
...@@ -173,6 +173,7 @@ RUN echo @testing http://nl.alpinelinux.org/alpine/edge/testing >> /etc/apk/repo ...@@ -173,6 +173,7 @@ RUN echo @testing http://nl.alpinelinux.org/alpine/edge/testing >> /etc/apk/repo
apk update && apk upgrade &&\ apk update && apk upgrade &&\
apk add --no-cache \ apk add --no-cache \
bash \ bash \
librdkafka-dev \
openssh-client \ openssh-client \
wget \ wget \
supervisor \ supervisor \
...@@ -206,7 +207,6 @@ RUN echo @testing http://nl.alpinelinux.org/alpine/edge/testing >> /etc/apk/repo ...@@ -206,7 +207,6 @@ RUN echo @testing http://nl.alpinelinux.org/alpine/edge/testing >> /etc/apk/repo
imap-dev \ imap-dev \
libjpeg-turbo-dev \ libjpeg-turbo-dev \
postgresql-dev && \ postgresql-dev && \
librdkafka-dev \
docker-php-ext-install gd && \ docker-php-ext-install gd && \
docker-php-ext-configure gd \ docker-php-ext-configure gd \
--with-freetype \ --with-freetype \
...@@ -282,6 +282,11 @@ RUN cp /usr/local/etc/php/php.ini-development /usr/local/etc/php/php.ini && \ ...@@ -282,6 +282,11 @@ RUN cp /usr/local/etc/php/php.ini-development /usr/local/etc/php/php.ini && \
/usr/local/etc/php/php.ini /usr/local/etc/php/php.ini
ADD php-rdkafka /usr/local/php-rdkafka
RUN cd /usr/local/php-rdkafka && /usr/local/bin/phpize && ./configure --with-php-config=/usr/local/bin/php-config && make && make install
# Add Scripts # Add Scripts
ADD scripts/start.sh /start.sh ADD scripts/start.sh /start.sh
ADD scripts/pull /usr/bin/pull ADD scripts/pull /usr/bin/pull
......
# php-fpm 7.4 on Alpine with build deps for nginx/lua modules and the
# php-rdkafka extension compiled from the bundled source tree.
#FROM php:8.1.6-fpm-alpine3.15
FROM php:7.4.30-fpm-alpine3.15
LABEL maintainer="Ric Harvey <ric@squarecows.com>"

# Paths consumed at runtime by the start script (key=value form; the
# legacy space-separated ENV form is deprecated).
ENV php_conf=/usr/local/etc/php-fpm.conf
ENV fpm_conf=/usr/local/etc/php-fpm.d/www.conf
ENV php_vars=/usr/local/etc/php/conf.d/docker-vars.ini

ENV NGINX_VERSION=1.21.6
ENV LUA_MODULE_VERSION=0.10.14
ENV DEVEL_KIT_MODULE_VERSION=0.3.1
ENV GEOIP2_MODULE_VERSION=3.3
ENV LUAJIT_LIB=/usr/lib
ENV LUAJIT_INC=/usr/include/luajit-2.1

RUN echo @testing http://nl.alpinelinux.org/alpine/edge/testing >> /etc/apk/repositories && \
    # print the configured repositories for build-log debugging
    # (original was `echo /etc/apk/respositories`, a typo'd no-op)
    cat /etc/apk/repositories && \
    apk update && apk upgrade &&\
    apk add --no-cache \
    bash \
    librdkafka-dev \
    openssh-client \
    wget \
    supervisor \
    curl \
    libcurl \
    libpq \
    git \
    python3 \
    py3-pip \
    ca-certificates \
    dialog \
    autoconf \
    make \
    openssl-dev \
    libressl-dev \
    libzip-dev \
    bzip2-dev \
    icu-dev \
    gcc && \
    apk add --no-cache --virtual .sys-deps \
    musl-dev \
    linux-headers \
    augeas-dev \
    libmcrypt-dev \
    libpng-dev \
    libxslt-dev \
    python3-dev \
    libffi-dev \
    freetype-dev \
    sqlite-dev \
    imap-dev \
    libjpeg-turbo-dev \
    postgresql-dev && \
    # configure gd BEFORE installing it: in the original the install ran
    # first, so the freetype/jpeg flags were ignored and gd was built
    # without those backends
    docker-php-ext-configure gd \
        --with-freetype \
        --with-jpeg && \
    docker-php-ext-install gd && \
    pip install --upgrade pip && \
    #curl iconv session
    #docker-php-ext-install pdo_mysql pdo_sqlite mysqli mcrypt gd exif intl xsl json soap dom zip opcache && \
    # docker-php-ext-install iconv pdo_mysql pdo_sqlite pgsql pdo_pgsql mysqli gd exif intl xsl json soap dom zip opcache && \
    docker-php-ext-install pdo_mysql mysqli pdo_sqlite pgsql pdo_pgsql exif intl xsl soap zip

# COPY is preferred over ADD for plain local directories (no extraction
# or URL semantics needed here).
COPY php-rdkafka /usr/local/php-rdkafka
RUN cd /usr/local/php-rdkafka && /usr/local/bin/phpize && ./configure --with-php-config=/usr/local/bin/php-config && make && make install

# Documentation only — ports are published at `docker run` time.
EXPOSE 443 80 8080
WORKDIR "/var/www/html"
CMD ["/start.sh"]
# general configuration
version: '{branch}.{build}'
# environment configuration
image: Visual Studio 2017
clone_folder: C:\projects\rdkafka
environment:
BIN_SDK_VER: 2.2.0
DEP: librdkafka-1.5.3
matrix:
- PHP_VER: 7.4
TS: 0
VC: vc15
ARCH: x64
OPCACHE: 0
- PHP_VER: 7.4
TS: 1
VC: vc15
ARCH: x64
OPCACHE: 1
- PHP_VER: 8.0
TS: 0
VC: vs16
ARCH: x64
OPCACHE: 0
APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
- PHP_VER: 8.0
TS: 1
VC: vs16
ARCH: x64
OPCACHE: 1
APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
- PHP_VER: 8.1
TS: 0
VC: vs16
ARCH: x64
OPCACHE: 0
APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
- PHP_VER: 8.1
TS: 1
VC: vs16
ARCH: x64
OPCACHE: 1
APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
cache:
- C:\build-cache -> .appveyor.yml, .appveyor\install.ps1
install:
- ps: .appveyor\install.ps1
# build configuration
build_script:
- ps: .appveyor\build.ps1
after_build:
- ps: .appveyor\package.ps1
# AppVeyor build step: compiles php-rdkafka inside the Windows PHP SDK
# environment shell for the matrix-selected VC toolset and architecture.
$ErrorActionPreference = "Stop"
Set-Location 'C:\projects\rdkafka'
# The build commands are written to a batch file so they execute inside
# the phpsdk shell (which configures the VC toolchain and PATH).
$task = New-Item 'task.bat' -Force
Add-Content $task "call phpize 2>&1"
Add-Content $task "call configure --with-php-build=C:\build-cache\deps --with-rdkafka --enable-debug-pack 2>&1"
Add-Content $task "nmake /nologo 2>&1"
# Propagate the real compiler exit status out of the batch file.
Add-Content $task "exit %errorlevel%"
& "C:\build-cache\php-sdk-$env:BIN_SDK_VER\phpsdk-$env:VC-$env:ARCH.bat" -t $task
if (-not $?) {
throw "build failed with errorlevel $LastExitCode"
}
# AppVeyor install step: provisions the PHP SDK tools, the PHP devel pack,
# the PHP binaries and the librdkafka dependency package, all cached under
# C:\build-cache so subsequent builds reuse the downloads.
$ErrorActionPreference = "Stop"

if (-not (Test-Path 'C:\build-cache')) {
    [void](New-Item 'C:\build-cache' -ItemType 'directory')
}

# --- PHP SDK binary tools --------------------------------------------------
$bname = "php-sdk-$env:BIN_SDK_VER.zip"
if (-not (Test-Path "C:\build-cache\$bname")) {
    Invoke-WebRequest "https://github.com/Microsoft/php-sdk-binary-tools/archive/$bname" -OutFile "C:\build-cache\$bname"
}
$dname0 = "php-sdk-binary-tools-php-sdk-$env:BIN_SDK_VER"
$dname1 = "php-sdk-$env:BIN_SDK_VER"
if (-not (Test-Path "C:\build-cache\$dname1")) {
    Expand-Archive "C:\build-cache\$bname" 'C:\build-cache'
    Move-Item "C:\build-cache\$dname0" "C:\build-cache\$dname1"
}

# --- Select newest PHP release (GA vs QA) ----------------------------------
$gareleases = Invoke-WebRequest "https://windows.php.net/downloads/releases/releases.json" | ConvertFrom-Json
$qareleases = Invoke-WebRequest "https://windows.php.net/downloads/qa/releases.json" | ConvertFrom-Json
# Third token of the version string is the patch revision. Cast to [int]:
# [regex]::split returns strings, and PowerShell's -gt on strings compares
# lexicographically ("9" -gt "10" is $true), which would pick the wrong
# release branch once a revision reaches two digits.
$garev = [int][regex]::split($gareleases.$env:PHP_VER.version, '[^\d]')[2]
$qarev = [int][regex]::split($qareleases.$env:PHP_VER.version, '[^\d]')[2]
if ($qarev -gt $garev) {
    $phpversion = $qareleases.$env:PHP_VER.version
    $phprelease = 'QA'
} else {
    $phpversion = $gareleases.$env:PHP_VER.version
    $phprelease = 'GA'
}

# nts builds carry a '-nts' marker in the download file names.
$ts_part = ''
if ($env:TS -eq '0') {
    $ts_part += '-nts'
}

# --- PHP development pack (headers, phpize, build scripts) -----------------
$bname = "php-devel-pack-$phpversion$ts_part-Win32-$env:VC-$env:ARCH.zip"
if (-not (Test-Path "C:\build-cache\$bname")) {
    if ($phprelease -eq "GA") {
        Invoke-WebRequest "https://windows.php.net/downloads/releases/$bname" -OutFile "C:\build-cache\$bname"
    } else {
        Invoke-WebRequest "https://windows.php.net/downloads/qa/$bname" -OutFile "C:\build-cache\$bname"
    }
}
$dname0 = "php-$phpversion-devel-$env:VC-$env:ARCH"
$dname1 = "php-$phpversion$ts_part-devel-$env:VC-$env:ARCH"
if (-not (Test-Path "C:\build-cache\$dname1")) {
    Expand-Archive "C:\build-cache\$bname" 'C:\build-cache'
    if ($dname0 -ne $dname1) {
        Move-Item "C:\build-cache\$dname0" "C:\build-cache\$dname1"
    }
}
$env:PATH = "C:\build-cache\$dname1;$env:PATH"

# --- PHP runtime binaries --------------------------------------------------
$bname = "php-$phpversion$ts_part-Win32-$env:VC-$env:ARCH.zip"
if (-not (Test-Path "C:\build-cache\$bname")) {
    if ($phprelease -eq "GA") {
        Invoke-WebRequest "https://windows.php.net/downloads/releases/$bname" -OutFile "C:\build-cache\$bname"
    } else {
        Invoke-WebRequest "https://windows.php.net/downloads/qa/$bname" -OutFile "C:\build-cache\$bname"
    }
}
$dname = "php-$phpversion$ts_part-$env:VC-$env:ARCH"
if (-not (Test-Path "C:\build-cache\$dname")) {
    Expand-Archive "C:\build-cache\$bname" "C:\build-cache\$dname"
}
$env:PATH = "c:\build-cache\$dname;$env:PATH"

# --- librdkafka PECL dependency package ------------------------------------
$bname = "$env:DEP-$env:VC-$env:ARCH.zip"
if (-not (Test-Path "C:\build-cache\$bname")) {
    Invoke-WebRequest "http://windows.php.net/downloads/pecl/deps/$bname" -OutFile "C:\build-cache\$bname"
    Expand-Archive "C:\build-cache\$bname" 'C:\build-cache\deps'
    # Keep the license under a dependency-specific name for the release zip.
    Copy-Item "C:\build-cache\deps\LICENSE" "C:\build-cache\deps\LICENSE.LIBRDKAFKA"
}
# AppVeyor after_build step: assembles the binary release zip and
# publishes it as a build artifact.
$ErrorActionPreference = "Stop"
# nts/ts marker used in the artifact file name.
if ($env:TS -eq '0') {
$ts_part = 'nts'
} else {
$ts_part = 'ts';
}
# Tag builds are named after the tag; other builds after the short commit hash.
if ($env:APPVEYOR_REPO_TAG -eq "true") {
$bname = "php_rdkafka-$env:APPVEYOR_REPO_TAG_NAME-$env:PHP_VER-$ts_part-$env:VC-$env:ARCH"
} else {
$bname = "php_rdkafka-$($env:APPVEYOR_REPO_COMMIT.substring(0, 8))-$env:PHP_VER-$ts_part-$env:VC-$env:ARCH"
}
$zip_bname = "$bname.zip"
# Locate the nmake output directory: [x64\]Release[_TS] depending on the
# matrix architecture and thread-safety setting.
$dir = 'C:\projects\rdkafka\';
if ($env:ARCH -eq 'x64') {
$dir += 'x64\'
}
$dir += 'Release'
if ($env:TS -eq '1') {
$dir += '_TS'
}
# Contents of the release archive: the extension DLL with debug symbols,
# project docs, and the bundled librdkafka runtime plus its license.
$files = @(
"$dir\php_rdkafka.dll",
"$dir\php_rdkafka.pdb",
"C:\projects\rdkafka\CREDITS",
"C:\projects\rdkafka\LICENSE",
"C:\projects\rdkafka\README.md",
"C:\build-cache\deps\bin\librdkafka.dll",
"C:\build-cache\deps\bin\librdkafka.pdb",
"C:\build-cache\deps\LICENSE.LIBRDKAFKA"
)
Compress-Archive $files "C:\$zip_bname"
Push-AppveyorArtifact "C:\$zip_bname"
root = true
[*]
insert_final_newline = true
[*.{c,h}]
indent_style = space
indent_size = 4
[.travis.yml]
indent_style = space
indent_size = 4
[*.md]
trim_trailing_whitespace = false
[*.phpt]
trim_trailing_whitespace = true
indent_style = space
indent_size = 4
[package.xml]
indent_style = space
indent_size = 1
name: Bug report
description: Create a bug report
labels: ["bug"]
body:
- type: textarea
attributes:
label: Description
description: "Please provide a minimal way to reproduce the problem and describe what the expected vs actual behavior is."
value: |
The following code:
```php
<?php
```
Resulted in this output:
```
```
But I expected this output instead:
```
```
validations:
required: true
- type: input
attributes:
label: php-rdkafka Version
description: "The used php-rdkafka version (if installed from the repository, please specify the commit number)."
placeholder: "php-rdkafka 6.0.1 (or commit number)"
validations:
required: true
- type: input
attributes:
label: librdkafka Version
description: "The used librdkafka version, if relevant."
placeholder: "librdkafka 1.7.0"
- type: input
attributes:
label: PHP Version
description: "The used PHP version, if relevant."
placeholder: "PHP 8.1.0"
- type: input
attributes:
label: Operating System
description: "The used operating system, if relevant."
placeholder: "Ubuntu 20.04"
- type: input
attributes:
label: Kafka Version
description: "The used Kafka Version, if relevant."
blank_issues_enabled: false
contact_links:
- name: Support / Question
url: https://gitter.im/arnaud-lb/php-rdkafka
about: Please join gitter for support / questions.
name: Feature request
description: Create a feature request
labels: ["feature"]
body:
- type: textarea
attributes:
label: Description
description: "Please describe the requested feature and why it should be included in php-rdkafka."
validations:
required: true
# https://docs.github.com/en/repositories/releasing-projects-on-github/automatically-generated-release-notes
changelog:
exclude:
labels:
- ignore-for-release
- doc
authors:
- octocat
categories:
- title: Improvements
labels:
- enhancement
- feature
- title: Deprecations
labels:
- deprecations
- title: Bugfixes
labels:
- bug
- bugfix
- title: Other Changes
labels:
- "*"
name: 'Package'
on:
pull_request:
jobs:
package:
name: 'Package'
runs-on: 'ubuntu-20.04'
steps:
- name: 'Check out repository'
uses: 'actions/checkout@v2'
with:
path: 'php-rdkafka'
- name: 'Install dependencies'
run: 'sudo apt-get -y install php7.4-dev librdkafka-dev'
- name: 'Package and verify package'
run: './php-rdkafka/.github/workflows/package/package.sh'
- name: 'Archive package'
uses: 'actions/upload-artifact@v2'
with:
path: 'php-rdkafka/rdkafka.tgz'
#!/bin/sh
# CI helper: builds the PECL package, verifies the version declared in
# php_rdkafka.h matches package.xml, installs the package, and checks
# that every file under tests/ made it into the package.
set -e
cd php-rdkafka
echo "Checking version consistency"
# Version string from the PHP_RDKAFKA_VERSION define (2nd quoted field).
CODE_VERSION="$(grep PHP_RDKAFKA_VERSION php_rdkafka.h|cut -d'"' -f2)"
# First <release> element of package.xml.
PACKAGE_VERSION="$(grep -m 1 '<release>' package.xml|cut -d'>' -f2|cut -d'<' -f1)"
if ! [ "$CODE_VERSION" = "$PACKAGE_VERSION" ]; then
printf "Version in php_rdkafka.h does not match version in package.xml: '%s' vs '%s'" "$CODE_VERSION" "$PACKAGE_VERSION" >&2
exit 1
fi
echo "Packaging"
pecl package
echo "Installing package.xml"
mv "./rdkafka-$PACKAGE_VERSION.tgz" rdkafka.tgz
sudo pecl install ./rdkafka.tgz
echo "Checking that all test files was included"
# Compare the test files pecl installed against the repository's tests/
# directory; any difference means package.xml is out of date.
sudo pecl list-files rdkafka|grep ^test|sed 's@.*/tests/@@'|sort > installed-test-files
find tests/ -type f|sed 's@^tests/@@'|sort > repository-test-files
if ! diff -u repository-test-files installed-test-files; then
echo "Some test files are missing from package.xml (see diff above)" >&2
exit 1
fi
# https://help.github.com/en/categories/automating-your-workflow-with-github-actions
name: "Create release"
on:
push:
tags:
- '*'
concurrency: release
jobs:
create_release:
name: "Create release"
runs-on: "ubuntu-latest"
steps:
- name: "Checkout"
uses: actions/checkout@v3
- name: "Generate release notes"
run: ./tools/extract-release-notes.php > ${{ github.workspace }}-CHANGELOG.txt
- name: "Create release"
uses: softprops/action-gh-release@v1
with:
# token: ${{ secrets.BOT_TOKEN }}
draft: true
discussion_category_name: "General"
body_path: ${{ github.workspace }}-CHANGELOG.txt
name: 'Tests'
on:
push:
branches:
- '6.x'
pull_request:
schedule:
- cron: '30 8 * * 1'
jobs:
tests:
name: 'Tests'
strategy:
matrix:
include:
- php: '8.1.0'
librdkafka: 'v1.7.0'
memcheck: '1'
- php: '8.0.0'
librdkafka: 'v1.7.0'
memcheck: '1'
- php: '7.4.0'
librdkafka: 'v1.7.0'
memcheck: '1'
- php: '8.1.0'
librdkafka: 'v1.7.0'
- php: '8.0.0'
librdkafka: 'v1.7.0'
- php: '7.4.0'
librdkafka: 'v1.7.0'
- php: '7.3.0'
librdkafka: 'v1.7.0'
- php: '8.1.0'
librdkafka: 'v1.6.1'
- php: '8.0.0'
librdkafka: 'v1.6.1'
- php: '7.4.0'
librdkafka: 'v1.6.1'
- php: '7.3.0'
librdkafka: 'v1.6.1'
- php: '8.1.0'
librdkafka: 'v1.5.3'
- php: '8.0.0'
librdkafka: 'v1.5.3'
- php: '7.4.0'
librdkafka: 'v1.5.3'
- php: '7.3.0'
librdkafka: 'v1.5.3'
- php: '8.1.0'
librdkafka: 'v1.4.4'
- php: '8.0.0'
librdkafka: 'v1.4.4'
- php: '7.4.0'
librdkafka: 'v1.4.4'
- php: '7.3.0'
librdkafka: 'v1.4.4'
- php: '8.1.0'
librdkafka: 'v1.0.1'
- php: '8.0.0'
librdkafka: 'v1.0.1'
- php: '7.4.0'
librdkafka: 'v1.0.1'
- php: '7.3.0'
librdkafka: 'v1.0.1'
- php: '8.1.0'
librdkafka: 'v0.11.6'
- php: '8.0.0'
librdkafka: 'v0.11.6'
- php: '7.4.0'
librdkafka: 'v0.11.6'
- php: '7.3.0'
librdkafka: 'v0.11.6'
- php: '7.2.0'
librdkafka: 'v0.11.6'
- php: '7.1.0'
librdkafka: 'v0.11.6'
- php: '7.0.0'
librdkafka: 'v0.11.6'
- php: '8.1.0'
librdkafka: 'master'
experimental: true
- php: '8.0.0'
librdkafka: 'master'
experimental: true
- php: '7.4.0'
librdkafka: 'master'
experimental: true
- php: '7.3.0'
librdkafka: 'master'
experimental: true
runs-on: 'ubuntu-20.04'
continue-on-error: ${{ !!matrix.experimental }}
env:
PHP_VERSION: ${{ matrix.php }}
LIBRDKAFKA_VERSION: ${{ matrix.librdkafka }}
MEMORY_CHECK: ${{ matrix.memcheck }}
TEST_KAFKA_BROKERS: kafka:9092
TEST_KAFKA_BROKER_VERSION: 2.6
steps:
- name: 'Check out repository'
uses: 'actions/checkout@v2'
with:
path: 'php-rdkafka'
- uses: actions/cache@v2
with:
path: ~/build-cache/php
key: ${{ runner.os }}-${{ matrix.php }}-${{ matrix.memcheck }}
- uses: actions/cache@v2
with:
path: ~/build-cache/librdkafka
key: ${{ runner.os }}-${{ matrix.librdkafka }}
- name: 'Build librdkafka'
run: './php-rdkafka/.github/workflows/test/build-librdkafka.sh'
- name: 'Build PHP'
run: './php-rdkafka/.github/workflows/test/build-php.sh'
- name: 'Build php-rdkafka'
run: './php-rdkafka/.github/workflows/test/build-php-rdkafka.sh'
- name: 'Start Kafka'
run: './php-rdkafka/.github/workflows/test/start-kafka.sh'
- name: 'Run tests'
run: './php-rdkafka/.github/workflows/test/tests.sh'
#!/bin/sh
# CI helper: builds librdkafka (and kafkacat) into ~/build-cache/librdkafka
# unless a previous run already cached them, then syncs the cache into /.
set -ex

if ! [ -f ~/build-cache/librdkafka/usr/local/include/librdkafka/rdkafka.h ] || ! [ -f ~/build-cache/librdkafka/usr/local/bin/kafkacat ]; then
echo "librdkafka build is not cached"

git clone --depth 1 --branch "${LIBRDKAFKA_VERSION:-1.5.0}" "${LIBRDKAFKA_REPOSITORY_URL:-https://github.com/edenhill/librdkafka.git}"
cd librdkafka
./configure
make
mkdir -p ~/build-cache/librdkafka
sudo make install DESTDIR=$HOME/build-cache/librdkafka
# Fail the build if the header did not land in the cache. The original
# only echoed a message and carried on, poisoning the cache for later jobs.
if ! [ -f ~/build-cache/librdkafka/usr/local/include/librdkafka/rdkafka.h ]; then
echo "librdkafka build failed" >&2
exit 1
fi
sudo rsync -a ~/build-cache/librdkafka/ /
sudo ldconfig
cd ..

# kafkacat lives in its own repository, so it gets a dedicated override
# variable. The original fell back through LIBRDKAFKA_REPOSITORY_URL,
# so overriding the librdkafka repo would have cloned librdkafka a second
# time here instead of kafkacat.
git clone --depth 1 --branch "1.6.0" "${KAFKACAT_REPOSITORY_URL:-https://github.com/edenhill/kafkacat.git}"
cd kafkacat
./configure
make
sudo make install DESTDIR=$HOME/build-cache/librdkafka
else
echo "librdkafka build is cached"
fi

sudo rsync -av ~/build-cache/librdkafka/ /
sudo ldconfig
#!/bin/sh
# CI helper: packages php-rdkafka with pecl, installs it against the cached
# PHP build, and enables the extension via a scan-dir ini file.
set -e

echo "Building php-rdkafka with PHP version:"
php --version

cd php-rdkafka
# Release version from the first <release> element of package.xml.
PACKAGE_VERSION="$(grep -m 1 '<release>' package.xml|cut -d'>' -f2|cut -d'<' -f1)"
pecl package
# Default MEMORY_CHECK to 0 and quote it: the original tested an unquoted,
# possibly-unset $MEMORY_CHECK, which expands `[ -eq 1 ]` into an invalid
# test expression. Sibling scripts already use ${MEMORY_CHECK:-0}.
if [ "${MEMORY_CHECK:-0}" -eq 1 ]; then
# Memcheck jobs compile with warnings treated as errors.
PHP_RDKAFKA_CFLAGS="-Wall -Werror -Wno-deprecated-declarations"
fi
sudo CFLAGS="$PHP_RDKAFKA_CFLAGS" pecl install "./rdkafka-$PACKAGE_VERSION.tgz"
echo "extension=rdkafka.so"|sudo tee /usr/local/etc/php/rdkafka.ini >/dev/null
#!/bin/sh
# CI helper: builds (or reuses a cached) PHP with the flags required by the
# test matrix job, then installs the result into /.
set -ex

# Default MEMORY_CHECK to 0 and quote it: the original tested an unquoted,
# possibly-unset $MEMORY_CHECK, which expands `[ -eq 1 ]` into an invalid
# test expression (same fix applied below).
if [ "${MEMORY_CHECK:-0}" -eq 1 ]; then
sudo apt-get -y install valgrind
fi

if ! [ -f ~/build-cache/php/usr/local/bin/php ]; then
echo "PHP build is not cached"
wget https://secure.php.net/distributions/php-${PHP_VERSION}.tar.bz2
tar xjf php-${PHP_VERSION}.tar.bz2
cd php-${PHP_VERSION}
# Minimal CLI/CGI build; extensions are picked up from the scan dir.
PHP_BUILD_FLAGS="--prefix=/usr/local --disable-all --enable-cli --enable-cgi --with-config-file-scan-dir=/usr/local/etc/php --with-zlib"
if [ "${MEMORY_CHECK:-0}" -eq 1 ]; then
# Debug build with valgrind integration for the memcheck jobs.
PHP_BUILD_FLAGS="$PHP_BUILD_FLAGS --enable-debug --with-valgrind"
else
# Non-memcheck jobs exercise the thread-safe build; the configure flag
# was renamed between PHP 7 and PHP 8.
case $PHP_VERSION in
8.*)
PHP_BUILD_FLAGS="$PHP_BUILD_FLAGS --enable-zts"
;;
7.*)
PHP_BUILD_FLAGS="$PHP_BUILD_FLAGS --enable-maintainer-zts"
;;
esac
fi
./configure $PHP_BUILD_FLAGS $PHP_BUILD_EXTRA_FLAGS
make -j $(nproc)
mkdir -p ~/build-cache/php
sudo make install INSTALL_ROOT=$HOME/build-cache/php
else
echo "PHP build is cached"
fi

sudo rsync -av ~/build-cache/php/ /
sudo mkdir -p /usr/local/etc/php
#!/bin/sh
# CI helper: starts single-node ZooKeeper + Kafka containers and waits
# until the broker answers metadata requests.
docker network create kafka_network
docker pull wurstmeister/zookeeper:3.4.6
docker run -d --network kafka_network --name zookeeper wurstmeister/zookeeper:3.4.6
docker pull wurstmeister/kafka:2.13-2.6.0
docker run -d -p 9092:9092 --network kafka_network -e "KAFKA_AUTO_CREATE_TOPICS_ENABLE=true" -e "KAFKA_CREATE_TOPICS=test-topic:1:1:compact" -e "KAFKA_ADVERTISED_HOST_NAME=kafka" -e "KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181" -e "KAFKA_ADVERTISED_PORT=9092" --name kafka wurstmeister/kafka:2.13-2.6.0

# Map the advertised host name locally. tee -a APPENDS: the original
# omitted -a and OVERWROTE /etc/hosts, wiping every existing entry
# (including localhost).
printf "\n127.0.0.1 kafka\n"|sudo tee -a /etc/hosts >/dev/null

echo "Waiting for Kafka to be ready"
for i in $(seq 1 20); do
if kafkacat -b 127.0.0.1 -L; then
echo "Kafka is ready"
exit 0
fi
# Give the broker a moment before retrying; the original busy-looped
# through all 20 attempts without pausing.
sleep 3
done
echo "Timedout waiting for Kafka to be ready"
exit 1
#!/bin/sh
# CI helper: runs the phpt test suite against the installed extension,
# optionally under valgrind memory checking.
set -xve
cd php-rdkafka
# MEMORY_CHECK defaults to 0; when enabled, pass the run-tests.php flags
# that run each test under valgrind (-m) and report memory usage.
if [ ${MEMORY_CHECK:-0} -eq 1 ]; then
echo "Enabling memory checking"
showmem=--show-mem
checkmem=-m
fi
# Provide the connection settings file expected by the integration tests.
cp tests/test_env.php.sample tests/test_env.php
PHP=$(which php)
sudo REPORT_EXIT_STATUS=1 TEST_PHP_EXECUTABLE="$PHP" "$PHP" /usr/local/lib/php/build/run-tests.php -q $checkmem --show-diff $showmem
*.dep
*.la
*.lo
*.swp
.deps
.libs
Makefile
Makefile.fragments
Makefile.global
Makefile.objects
acinclude.m4
aclocal.m4
autom4te.cache
build
config.guess
config.h
config.h.in
config.h.in~
config.log
config.nice
config.status
config.sub
configure
configure.ac
configure.in
include
install-sh
libtool
ltmain.sh
missing
mkinstalldirs
modules
package.xml
rdkafka-*.tgz
run-tests.php
tests/*/*.diff
tests/*/*.exp
tests/*/*.log
tests/*/*.out
tests/*/*.php
tests/*/*.sh
tmp-php.ini
# How to contribute
If you would like to contribute, thank you :)
Here are a few things you need to know before starting:
## Branches
Pull requests should be made against the 5.x branch, which supports both PHP 7 and PHP 8.
## How to make good contributions
- Before starting to work, maybe open an issue to find whether your change would be accepted.
- Create relatively small PRs. This is easier to review, and will be merged faster. Do not send huge PRs with multiple unrelated changes.
- Make sure that you followed the design/style (see below).
- Make sure that your changes do not introduce new compiler warnings or errors.
- Do not make changes that would break existing code.
## Testing
Tests are in phpt file format in the tests directory.
### Using your own machine for building and testing.
Tests can be run by following compilation and installation procedure
and executing `make test`.
To run integration tests, make sure you have Kafka instance running.
Then, rename `test_env.php.sample` to `test_env.php` and adjust it
with values proper for your kafka instance.
## Design / naming things
php-rdkafka's goal is to expose the librdkafka APIs to PHP scripts, without
abstracting it. Rationale:
- Abstractions would be inherently opinionated, which would make the extension
less than ideal or unusable in some cases.
- Abstractions are easily implemented in pure PHP on top of the extension.
- Remaining close to librdkafka in terms of naming/design makes it possible to
refer to librdkafka's documentation and other resources when needed.
As a result, php-rdkafka will:
- Follow librdkafka's naming for everything
- Avoid introducing functions, helpers, classes that do not exist in
librdkafka (these are easy to implement in pure PHP, on top of the
extension).
However, in order to make the API PHP-ish, some transformations have to be done.
Here is the full design/style guide:
- For librdkafka functions that return an error type, or signal errors via
errno, php-rdkafka throws a Rdkafka\Exception
- librdkafka structs are exposed as PHP objects. The object name is derived
from the struct name like this:
- Remove the `rd_kafka_` prefix
- Convert from snake case to camel case
- Add `Rdkafka\` namespace
- `rd_kafka_*_new` functions are implemented as PHP object constructors / object
instantiation
- `rd_kafka_*_destroy` functions are implemented as PHP object free handlers
- librdkafka functions that take a struct as their first argument are implemented as
a method of the struct's related PHP object
- The user should not be required to manage memory (e.g. free something)
- Do not change librdkafka's default behavior
- Be safe: No user error should cause a crash or a memory leak.
The MIT License (MIT)
Copyright (c) 2015 Arnaud Le Blanc
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
# PHP Kafka client - php-rdkafka
[![Join the chat at https://gitter.im/arnaud-lb/php-rdkafka](https://badges.gitter.im/arnaud-lb/php-rdkafka.svg)](https://gitter.im/arnaud-lb/php-rdkafka?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Supported librdkafka versions: >= 0.11](https://img.shields.io/badge/librdkafka-%3E%3D%200.11-blue.svg)](https://github.com/edenhill/librdkafka/releases) [![Supported Kafka versions: >= 0.8](https://img.shields.io/badge/kafka-%3E%3D%200.8-blue.svg)](https://github.com/edenhill/librdkafka/blob/master/INTRODUCTION.md#broker-version-compatibility) ![Supported PHP versions: 7.x .. 8.x](https://img.shields.io/badge/php-7.x%20..%208.x-blue.svg)
PHP-rdkafka is a **stable**, **production-ready**, **long term support**, and **fast** Kafka client for PHP based on [librdkafka](https://github.com/edenhill/librdkafka).
It supports PHP 7, PHP 8, PHP 5 (in older versions), all librdkafka versions since 0.11, all Kafka versions since 0.8. This makes it **easy to deploy** the extension in production.
The goal of the extension is to be a low-level un-opinionated librdkafka binding focused on production and long term support.
The high level and low level *consumers*, *producer*, and *metadata* APIs are supported.
Documentation is available [here](https://arnaud-lb.github.io/php-rdkafka/phpdoc/book.rdkafka.html).
## Sponsors
<table width="100%">
<tr>
<td>
<img width="1000" height="0">
<a href="https://upstash.com/?utm_source=php-rdkafka" >
<img src="https://raw.githubusercontent.com/upstash/sponsorship/master/kafka.png" alt="Upstash" width="260" align="right">
</a>
<h3>Upstash: Serverless Kafka</h3>
<ul>
<li>True Serverless Kafka with per-request-pricing</li>
<li>Managed Apache Kafka, works with all Kafka clients</li>
<li>Built-in REST API designed for serverless and edge functions</li>
</ul>
[Start for free in 30 seconds!](https://upstash.com/?utm_source=php-rdkafka)
</td>
</tr>
</table>
php-rdkafka supports Ukraine. Proceeds from our generous sponsors are currently donated to the [Support Ukraine collective](https://opencollective.com/support-ukraine).
## Table of Contents
1. [Installation](#installation)
2. [Examples](#examples)
3. [Usage](#usage)
* [Producing](#producing)
* [High-level consuming](#high-level-consuming)
* [Low-level consuming (legacy)](#low-level-consuming-legacy)
* [Low-level consuming from multiple topics / partitions (legacy)](#low-level-consuming-from-multiple-topics--partitions-legacy)
* [Using stored offsets](#using-stored-offsets)
* [Interesting configuration parameters](#interesting-configuration-parameters)
* [queued.max.messages.kbytes](#queuedmaxmessageskbytes)
* [topic.metadata.refresh.sparse and topic.metadata.refresh.interval.ms](#topicmetadatarefreshsparse-and-topicmetadatarefreshintervalms)
* [internal.termination.signal](#internalterminationsignal)
4. [Documentation](#documentation)
5. [Credits](#credits)
6. [License](#license)
## Installation
https://arnaud-lb.github.io/php-rdkafka-doc/phpdoc/rdkafka.setup.html
## Examples
https://arnaud-lb.github.io/php-rdkafka-doc/phpdoc/rdkafka.examples.html
## Usage
Configuration parameters used below can be found in [Librdkafka Configuration reference](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md)
### Producing
#### Creating a producer
For producing, we first need to create a producer, and to add brokers (Kafka
servers) to it:
``` php
<?php
$conf = new RdKafka\Conf();
$conf->set('log_level', (string) LOG_DEBUG);
$conf->set('debug', 'all');
$rk = new RdKafka\Producer($conf);
$rk->addBrokers("10.0.0.1:9092,10.0.0.2:9092");
```
#### Producing messages
> **Warning** Make sure that your producer follows proper shutdown (see below) to not lose messages.
Next, we create a topic instance from the producer:
``` php
<?php
$topic = $rk->newTopic("test");
```
From there, we can produce as many messages as we want, using the produce
method:
``` php
<?php
$topic->produce(RD_KAFKA_PARTITION_UA, 0, "Message payload");
```
The first argument is the partition. RD_KAFKA_PARTITION_UA stands for
*unassigned*, and lets librdkafka choose the partition.
The second argument is the message flags and should be either 0
or `RD_KAFKA_MSG_F_BLOCK` to block produce on full queue.
The message payload can be anything.
#### Proper shutdown
This should be done prior to destroying a producer instance
to make sure all queued and in-flight produce requests are completed
before terminating. Use a reasonable value for `$timeout_ms`.
> **Warning** Not calling flush can lead to message loss!
```php
$rk->flush($timeout_ms);
```
In case you don't care about sending messages that haven't been sent yet,
you can use `purge()` before calling `flush()`:
```php
// Forget messages that are not fully sent yet
$rk->purge(RD_KAFKA_PURGE_F_QUEUE);
$rk->flush($timeout_ms);
```
### High-level consuming
The RdKafka\KafkaConsumer class supports automatic partition assignment/revocation. See the example [here](https://arnaud-lb.github.io/php-rdkafka-doc/phpdoc/rdkafka.examples.html#example-1).
### Low-level consuming (legacy)
> **Note** The low-level consumer is a legacy API, please prefer using the high-level consumer
We first need to create a low level consumer, and to add brokers (Kafka
servers) to it:
``` php
<?php
$conf = new RdKafka\Conf();
$conf->set('log_level', (string) LOG_DEBUG);
$conf->set('debug', 'all');
$rk = new RdKafka\Consumer($conf);
$rk->addBrokers("10.0.0.1,10.0.0.2");
```
Next, create a topic instance by calling the `newTopic()` method, and start
consuming on partition 0:
``` php
<?php
$topic = $rk->newTopic("test");
// The first argument is the partition to consume from.
// The second argument is the offset at which to start consumption. Valid values
// are: RD_KAFKA_OFFSET_BEGINNING, RD_KAFKA_OFFSET_END, RD_KAFKA_OFFSET_STORED.
$topic->consumeStart(0, RD_KAFKA_OFFSET_BEGINNING);
```
Next, retrieve the consumed messages:
``` php
<?php
while (true) {
// The first argument is the partition (again).
// The second argument is the timeout.
$msg = $topic->consume(0, 1000);
if (null === $msg || $msg->err === RD_KAFKA_RESP_ERR__PARTITION_EOF) {
// Constant check required by librdkafka 0.11.6. Newer librdkafka versions will return NULL instead.
continue;
} elseif ($msg->err) {
echo $msg->errstr(), "\n";
break;
} else {
echo $msg->payload, "\n";
}
}
```
### Low-level consuming from multiple topics / partitions (legacy)
> **Note** The low-level consumer is a legacy API, please prefer using the high-level consumer
Consuming from multiple topics and/or partitions can be done by telling
librdkafka to forward all messages from these topics/partitions to an internal
queue, and then consuming from this queue:
Creating the queue:
``` php
<?php
$queue = $rk->newQueue();
```
Adding topic partitions to the queue:
``` php
<?php
$topic1 = $rk->newTopic("topic1");
$topic1->consumeQueueStart(0, RD_KAFKA_OFFSET_BEGINNING, $queue);
$topic1->consumeQueueStart(1, RD_KAFKA_OFFSET_BEGINNING, $queue);
$topic2 = $rk->newTopic("topic2");
$topic2->consumeQueueStart(0, RD_KAFKA_OFFSET_BEGINNING, $queue);
```
Next, retrieve the consumed messages from the queue:
``` php
<?php
while (true) {
// The only argument is the timeout.
$msg = $queue->consume(1000);
if (null === $msg || $msg->err === RD_KAFKA_RESP_ERR__PARTITION_EOF) {
// Constant check required by librdkafka 0.11.6. Newer librdkafka versions will return NULL instead.
continue;
} elseif ($msg->err) {
echo $msg->errstr(), "\n";
break;
} else {
echo $msg->payload, "\n";
}
}
```
### Using stored offsets
#### Broker (default)
librdkafka per default stores offsets on the broker.
#### File offsets (deprecated)
If you're using local file for offset storage, then by default the file is created in the current directory, with a
name based on the topic and the partition. The directory can be changed by setting the ``offset.store.path``
[configuration property](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md).
### Consumer settings
#### Low-level consumer: auto commit settings
To manually control the offset, set `enable.auto.offset.store` to `false`.
The settings `auto.commit.interval.ms` and `auto.commit.enable` will control
if the stored offsets will be auto committed to the broker and in which interval.
#### High-level consumer: auto commit settings
To manually control the offset, set `enable.auto.commit` to `false`.
#### High level consumer: max.poll.interval.ms
Maximum allowed time between calls to consume messages for high-level consumers.
If this interval is exceeded the consumer is considered failed and the group will
rebalance in order to reassign the partitions to another consumer group member.
#### Consumer group id (general)
`group.id` is responsible for setting your consumer group ID and it should be unique (and should
not change). Kafka uses it to recognize applications and store offsets for them.
``` php
<?php
$topicConf = new RdKafka\TopicConf();
$topicConf->set("auto.commit.interval.ms", 1e3);
$topic = $rk->newTopic("test", $topicConf);
$topic->consumeStart(0, RD_KAFKA_OFFSET_STORED);
```
### Interesting configuration parameters
[Librdkafka Configuration reference](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md)
#### queued.max.messages.kbytes
librdkafka will buffer up to 1GB of messages for each consumed partition by default. You can lower memory usage by reducing the value of the ``queued.max.messages.kbytes`` parameter on your consumers.
### topic.metadata.refresh.sparse and topic.metadata.refresh.interval.ms
Each consumer and producer instance will fetch topics metadata at an interval defined by the ``topic.metadata.refresh.interval.ms`` parameter. Depending on your librdkafka version, the parameter defaults to 10 seconds, or 600 seconds.
librdkafka fetches the metadata for all topics of the cluster by default. Setting ``topic.metadata.refresh.sparse`` to the string ``"true"`` makes sure that librdkafka fetches only the topics it uses.
Setting ``topic.metadata.refresh.sparse`` to ``"true"``, and ``topic.metadata.refresh.interval.ms`` to 600 seconds (plus some jitter) can reduce the bandwidth a lot, depending on the number of consumers and topics.
### internal.termination.signal
This setting allows librdkafka threads to terminate as soon as librdkafka is done with them. This effectively allows your PHP processes / requests to terminate quickly.
When enabling this, you have to mask the signal like this:
``` php
<?php
// once
pcntl_sigprocmask(SIG_BLOCK, array(SIGIO));
// any time
$conf->set('internal.termination.signal', SIGIO);
```
### socket.blocking.max.ms (librdkafka < 1.0.0)
> Maximum time a broker socket operation may block. A lower value improves responsiveness at the expense of slightly higher CPU usage.
Reducing the value of this setting improves shutdown speed. The value defines the maximum time librdkafka will block in one iteration of a read loop. This also defines how often the main librdkafka thread will check for termination.
### queue.buffering.max.ms
This defines the maximum and default time librdkafka will wait before sending a batch of messages. Reducing this setting to e.g. 1ms ensures that messages are sent ASAP, instead of being batched.
This has been seen to reduce the shutdown time of the rdkafka instance, and of the PHP process / request.
## Performance / Low-latency settings
Here is a configuration optimized for low latency. This allows a PHP process / request to send messages ASAP and to terminate quickly.
``` php
<?php
$conf = new \RdKafka\Conf();
$conf->set('socket.timeout.ms', 50); // or socket.blocking.max.ms, depending on librdkafka version
if (function_exists('pcntl_sigprocmask')) {
pcntl_sigprocmask(SIG_BLOCK, array(SIGIO));
$conf->set('internal.termination.signal', SIGIO);
} else {
$conf->set('queue.buffering.max.ms', 1);
}
$producer = new \RdKafka\Producer($conf);
$consumer = new \RdKafka\Consumer($conf);
```
It is advised to call poll at regular intervals to serve callbacks. In `php-rdkafka:3.x`
poll was also called during shutdown, so not calling it in regular intervals might
lead to a slightly longer shutdown. The example below polls until there are no more events in the queue:
```
$producer->produce(...);
while ($producer->getOutQLen() > 0) {
$producer->poll(1);
}
```
## Documentation
https://arnaud-lb.github.io/php-rdkafka-doc/phpdoc/book.rdkafka.html
The source of the documentation can be found [here](https://github.com/arnaud-lb/php-rdkafka-doc)
## Asking for Help
If the documentation is not enough, feel free to ask a question on the php-rdkafka channels on [Gitter](https://gitter.im/arnaud-lb/php-rdkafka) or [Google Groups](https://groups.google.com/forum/#!forum/php-rdkafka).
## Stubs
Because your IDE is not able to auto-discover the php-rdkafka API, you can consider using an external package providing a set of stubs for php-rdkafka classes, functions and constants: [kwn/php-rdkafka-stubs](https://github.com/kwn/php-rdkafka-stubs)
## Contributing
If you would like to contribute, thank you :)
Before you start, please take a look at the [CONTRIBUTING document](https://github.com/arnaud-lb/php-rdkafka/blob/master/CONTRIBUTING.md) to see how to get your changes merged in.
## Credits
Documentation copied from [librdkafka](https://github.com/edenhill/librdkafka).
Authors: see [contributors](https://github.com/arnaud-lb/php-rdkafka/graphs/contributors).
## License
php-rdkafka is released under the [MIT](https://github.com/arnaud-lb/php-rdkafka/blob/master/LICENSE) license.
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "php.h"
#include "php_ini.h"
#include "ext/standard/info.h"
#include "php_rdkafka.h"
#include "php_rdkafka_priv.h"
#include "librdkafka/rdkafka.h"
#include "Zend/zend_exceptions.h"
#include "ext/spl/spl_exceptions.h"
#include "conf.h"
#include "topic_partition.h"
#include "message.h"
#if PHP_VERSION_ID < 80000
#include "conf_legacy_arginfo.h"
#else
#include "conf_arginfo.h"
#endif
/* Class entries for RdKafka\Conf and RdKafka\TopicConf; populated in kafka_conf_minit(). */
zend_class_entry * ce_kafka_conf;
zend_class_entry * ce_kafka_topic_conf;

/* Object handlers shared by both classes (free_obj/offset set in kafka_conf_minit()). */
static zend_object_handlers handlers;
/* Releases one registered callback slot: drops the reference held on the
 * PHP callable and frees the holder struct. Safe to call with NULL. */
static void kafka_conf_callback_dtor(kafka_conf_callback *cb) /* {{{ */
{
    if (cb == NULL) {
        return;
    }

    zval_ptr_dtor(&cb->fci.function_name);
    efree(cb);
} /* }}} */
/* Destroys every registered callback and resets each slot to NULL so the
 * struct can be safely destroyed (or reused) afterwards. */
void kafka_conf_callbacks_dtor(kafka_conf_callbacks *cbs) /* {{{ */
{
    kafka_conf_callback **slots[] = {
        &cbs->error,
        &cbs->rebalance,
        &cbs->dr_msg,
        &cbs->stats,
        &cbs->consume,
        &cbs->offset_commit,
        &cbs->log,
    };
    size_t i;

    for (i = 0; i < sizeof(slots) / sizeof(slots[0]); i++) {
        kafka_conf_callback_dtor(*slots[i]);
        *slots[i] = NULL;
    }
} /* }}} */
/* Deep-copies one callback slot: allocates a new holder, copies the fci/fcc
 * pair, and takes a fresh reference on the PHP callable. A NULL source
 * leaves the destination untouched. */
static void kafka_conf_callback_copy(kafka_conf_callback **to, kafka_conf_callback *from) /* {{{ */
{
    if (from == NULL) {
        return;
    }

    *to = emalloc(sizeof(**to));
    **to = *from;
    zval_copy_ctor(&(*to)->fci.function_name);
} /* }}} */
/* Deep-copies all callback slots from one callbacks struct to another. */
void kafka_conf_callbacks_copy(kafka_conf_callbacks *to, kafka_conf_callbacks *from) /* {{{ */
{
    kafka_conf_callback **dst[] = {
        &to->error, &to->rebalance, &to->dr_msg, &to->stats,
        &to->consume, &to->offset_commit, &to->log,
    };
    kafka_conf_callback *src[] = {
        from->error, from->rebalance, from->dr_msg, from->stats,
        from->consume, from->offset_commit, from->log,
    };
    size_t i;

    for (i = 0; i < sizeof(src) / sizeof(src[0]); i++) {
        kafka_conf_callback_copy(dst[i], src[i]);
    }
} /* }}} */
/* free_obj handler for both RdKafka\Conf and RdKafka\TopicConf.
 * Destroys the wrapped librdkafka conf handle — the pointer may be NULL
 * (e.g. if __construct() never ran) — and, for Conf only, releases the
 * registered PHP callbacks before the standard object teardown. */
static void kafka_conf_free(zend_object *object) /* {{{ */
{
    kafka_conf_object *intern = php_kafka_from_obj(kafka_conf_object, object);

    switch (intern->type) {
        case KAFKA_CONF:
            if (intern->u.conf) {
                rd_kafka_conf_destroy(intern->u.conf);
            }
            /* Callbacks only exist on the global conf, not the topic conf. */
            kafka_conf_callbacks_dtor(&intern->cbs);
            break;
        case KAFKA_TOPIC_CONF:
            if (intern->u.topic_conf) {
                rd_kafka_topic_conf_destroy(intern->u.topic_conf);
            }
            break;
    }

    zend_object_std_dtor(&intern->std);
}
/* }}} */
/* create_object handler shared by Conf and TopicConf: allocates the
 * kafka_conf_object wrapper, initializes the standard object part, and
 * installs the module's custom handlers (for kafka_conf_free). The
 * type/u fields stay zeroed until __construct() runs. */
static zend_object *kafka_conf_new(zend_class_entry *class_type) /* {{{ */
{
    zend_object* retval;
    kafka_conf_object *intern;

    intern = zend_object_alloc(sizeof(*intern), class_type);
    zend_object_std_init(&intern->std, class_type);
    object_properties_init(&intern->std, class_type);

    retval = &intern->std;
    retval->handlers = &handlers;

    return retval;
}
/* }}} */
/* Fetches the kafka_conf_object behind a Conf/TopicConf zval.
 * Throws (and returns NULL) when `type` is still 0, i.e. the PHP
 * constructor was never invoked, so callers must NULL-check the result. */
kafka_conf_object * get_kafka_conf_object(zval *zconf)
{
    kafka_conf_object *oconf = Z_RDKAFKA_P(kafka_conf_object, zconf);

    if (!oconf->type) {
        zend_throw_exception_ex(NULL, 0, "RdKafka\\Conf::__construct() has not been called");
        return NULL;
    }

    return oconf;
}
/* librdkafka error callback: forwards ($kafka, int $err, string $reason)
 * to the PHP callable registered via Conf::setErrorCb(). `opaque` is the
 * kafka_conf_callbacks struct attached to the handle; bail out when it is
 * missing or no error callback was registered. */
static void kafka_conf_error_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque)
{
    kafka_conf_callbacks *cbs = (kafka_conf_callbacks*) opaque;
    zval args[3];

    if (!opaque) {
        return;
    }

    if (!cbs->error) {
        return;
    }

    /* Pre-initialize so the dtors below are safe on every path. */
    ZVAL_NULL(&args[0]);
    ZVAL_NULL(&args[1]);
    ZVAL_NULL(&args[2]);

    ZVAL_ZVAL(&args[0], &cbs->zrk, 1, 0);
    ZVAL_LONG(&args[1], err);
    ZVAL_STRING(&args[2], reason);

    rdkafka_call_function(&cbs->error->fci, &cbs->error->fcc, NULL, 3, args);

    /* Release our references to the argument zvals. */
    zval_ptr_dtor(&args[0]);
    zval_ptr_dtor(&args[1]);
    zval_ptr_dtor(&args[2]);
}
/* Delivery-report callback: forwards ($kafka, Message $message) to the PHP
 * callable registered via Conf::setDrMsgCb(). The message's _private field
 * carries the per-message opaque (a zend_string, per the cast below); it is
 * released here whether or not a callback is registered, so it never leaks. */
void kafka_conf_dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *msg, void *opaque)
{
    kafka_conf_callbacks *cbs = (kafka_conf_callbacks*) opaque;
    zend_string *msg_opaque = msg->_private;
    zval args[2];

    if (cbs != NULL && cbs->dr_msg) {
        ZVAL_NULL(&args[0]);
        ZVAL_NULL(&args[1]);

        ZVAL_ZVAL(&args[0], &cbs->zrk, 1, 0);
        kafka_message_new(&args[1], msg, msg_opaque);

        rdkafka_call_function(&cbs->dr_msg->fci, &cbs->dr_msg->fcc, NULL, 2, args);

        zval_ptr_dtor(&args[0]);
        zval_ptr_dtor(&args[1]);
    }

    /* Drop the per-message opaque even when no callback consumed it. */
    if (msg_opaque != NULL) {
        zend_string_release(msg_opaque);
    }
}
/* Statistics callback: forwards ($kafka, string $json, int $json_len) to
 * the PHP callable registered via Conf::setStatsCb(). Always returns 0,
 * which per librdkafka's stats_cb contract lets librdkafka free the JSON
 * buffer itself. */
static int kafka_conf_stats_cb(rd_kafka_t *rk, char *json, size_t json_len, void *opaque)
{
    kafka_conf_callbacks *cbs = (kafka_conf_callbacks*) opaque;
    zval args[3];

    if (!opaque) {
        return 0;
    }

    if (!cbs->stats) {
        return 0;
    }

    ZVAL_NULL(&args[0]);
    ZVAL_NULL(&args[1]);
    ZVAL_NULL(&args[2]);

    ZVAL_ZVAL(&args[0], &cbs->zrk, 1, 0);
    ZVAL_STRING(&args[1], json);
    ZVAL_LONG(&args[2], json_len);

    rdkafka_call_function(&cbs->stats->fci, &cbs->stats->fcc, NULL, 3, args);

    zval_ptr_dtor(&args[0]);
    zval_ptr_dtor(&args[1]);
    zval_ptr_dtor(&args[2]);

    return 0;
}
/* Rebalance callback: forwards ($kafka, int $err, array $partitions) to the
 * PHP callable registered via Conf::setRebalanceCb().
 * When no PHP callback is registered, falls back to rd_kafka_assign(rk, NULL).
 * NOTE(review): librdkafka's documented default is to assign `partitions` on
 * ERR__ASSIGN_PARTITIONS and NULL only on revocation; always assigning NULL
 * forfeits the assignment — confirm this is intended. */
static void kafka_conf_rebalance_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque)
{
    kafka_conf_callbacks *cbs = (kafka_conf_callbacks*) opaque;
    zval args[3];

    if (!opaque) {
        return;
    }

    if (!cbs->rebalance) {
        err = rd_kafka_assign(rk, NULL);
        if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
            zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
            return;
        }
        return;
    }

    ZVAL_NULL(&args[0]);
    ZVAL_NULL(&args[1]);
    ZVAL_NULL(&args[2]);

    ZVAL_ZVAL(&args[0], &cbs->zrk, 1, 0);
    ZVAL_LONG(&args[1], err);
    kafka_topic_partition_list_to_array(&args[2], partitions);

    rdkafka_call_function(&cbs->rebalance->fci, &cbs->rebalance->fcc, NULL, 3, args);

    zval_ptr_dtor(&args[0]);
    zval_ptr_dtor(&args[1]);
    zval_ptr_dtor(&args[2]);
}
/* Consume callback (legacy consumer poll path): forwards
 * (Message $message, $kafka) to the PHP callable registered via
 * Conf::setConsumeCb(). Note the argument order: the message comes first. */
static void kafka_conf_consume_cb(rd_kafka_message_t *msg, void *opaque)
{
    kafka_conf_callbacks *cbs = (kafka_conf_callbacks*) opaque;
    zval args[2];

    if (!opaque) {
        return;
    }

    if (!cbs->consume) {
        return;
    }

    ZVAL_NULL(&args[0]);
    ZVAL_NULL(&args[1]);

    kafka_message_new(&args[0], msg, NULL);
    ZVAL_ZVAL(&args[1], &cbs->zrk, 1, 0);

    rdkafka_call_function(&cbs->consume->fci, &cbs->consume->fcc, NULL, 2, args);

    zval_ptr_dtor(&args[0]);
    zval_ptr_dtor(&args[1]);
}
/* Offset-commit callback: forwards ($kafka, int $err, array $partitions)
 * to the PHP callable registered via Conf::setOffsetCommitCb(). */
static void kafka_conf_offset_commit_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque)
{
    kafka_conf_callbacks *cbs = (kafka_conf_callbacks*) opaque;
    zval args[3];

    if (!opaque) {
        return;
    }

    if (!cbs->offset_commit) {
        return;
    }

    ZVAL_NULL(&args[0]);
    ZVAL_NULL(&args[1]);
    ZVAL_NULL(&args[2]);

    ZVAL_ZVAL(&args[0], &cbs->zrk, 1, 0);
    ZVAL_LONG(&args[1], err);
    kafka_topic_partition_list_to_array(&args[2], partitions);

    rdkafka_call_function(&cbs->offset_commit->fci, &cbs->offset_commit->fcc, NULL, 3, args);

    zval_ptr_dtor(&args[0]);
    zval_ptr_dtor(&args[1]);
    zval_ptr_dtor(&args[2]);
}
/* Log callback: forwards ($kafka, int $level, string $facility, string $message)
 * to the PHP callable registered via Conf::setLogCb(). setLogCb() also enables
 * "log.queue", so the callback is served from the application's poll thread.
 * Unlike the other callbacks here the opaque comes from rd_kafka_opaque(rk),
 * which may be NULL for a handle created without php-rdkafka's callback
 * bookkeeping — guard it like the sibling callbacks guard `opaque`. */
static void kafka_conf_log_cb(const rd_kafka_t *rk, int level, const char *facility, const char *message)
{
    zval args[4];
    kafka_conf_callbacks *cbs = (kafka_conf_callbacks*) rd_kafka_opaque(rk);

    /* Bail out when there is no callback bookkeeping or no log callback. */
    if (!cbs || !cbs->log) {
        return;
    }

    ZVAL_NULL(&args[0]);
    ZVAL_NULL(&args[1]);
    ZVAL_NULL(&args[2]);
    ZVAL_NULL(&args[3]);

    ZVAL_ZVAL(&args[0], &cbs->zrk, 1, 0);
    ZVAL_LONG(&args[1], level);
    ZVAL_STRING(&args[2], facility);
    ZVAL_STRING(&args[3], message);

    rdkafka_call_function(&cbs->log->fci, &cbs->log->fcc, NULL, 4, args);

    /* Release our references to the argument zvals. */
    zval_ptr_dtor(&args[0]);
    zval_ptr_dtor(&args[1]);
    zval_ptr_dtor(&args[2]);
    zval_ptr_dtor(&args[3]);
}
/* {{{ proto RdKafka\Conf::__construct()
   Creates the underlying rd_kafka_conf_t and marks the object as a global
   (non-topic) configuration. EH_THROW turns argument-parsing errors into
   InvalidArgumentException for the duration of parsing. */
PHP_METHOD(RdKafka_Conf, __construct)
{
    kafka_conf_object *intern;
    zend_error_handling error_handling;

    zend_replace_error_handling(EH_THROW, spl_ce_InvalidArgumentException, &error_handling);

    if (zend_parse_parameters(ZEND_NUM_ARGS(), "") == FAILURE) {
        zend_restore_error_handling(&error_handling);
        return;
    }

    intern = Z_RDKAFKA_P(kafka_conf_object, getThis());
    intern->type = KAFKA_CONF;
    intern->u.conf = rd_kafka_conf_new();

    zend_restore_error_handling(&error_handling);
}
/* }}} */
/* {{{ proto array RdKafka\Conf::dump()
   Dump the configuration properties and values of `conf` to an array.
   Shared (via ZEND_MALIAS) with TopicConf::dump(); dispatches on the
   wrapper's type. */
PHP_METHOD(RdKafka_Conf, dump)
{
    size_t cntp;
    const char **dump;
    kafka_conf_object *intern;
    size_t i;

    if (zend_parse_parameters(ZEND_NUM_ARGS(), "") == FAILURE) {
        return;
    }

    intern = get_kafka_conf_object(getThis());
    if (!intern) {
        return;
    }

    switch (intern->type) {
        case KAFKA_CONF:
            dump = rd_kafka_conf_dump(intern->u.conf, &cntp);
            break;
        case KAFKA_TOPIC_CONF:
            dump = rd_kafka_topic_conf_dump(intern->u.topic_conf, &cntp);
            break;
        default:
            return;
    }

    array_init(return_value);

    /* The dump is a flat list of alternating name/value strings, hence i += 2. */
    for (i = 0; i < cntp; i+=2) {
        const char *key = dump[i];
        const char *value = dump[i+1];
        add_assoc_string(return_value, (char*)key, (char*)value);
    }

    rd_kafka_conf_dump_free(dump, cntp);
}
/* }}} */
/* {{{ proto void RdKafka\Conf::set(string $name, string $value)
   Sets a configuration property. Shared (via ZEND_MALIAS) with
   TopicConf::set(). Unknown or invalid properties raise an
   RdKafka\Exception carrying librdkafka's error string. */
PHP_METHOD(RdKafka_Conf, set)
{
    char *name;
    size_t name_len;
    char *value;
    size_t value_len;
    kafka_conf_object *intern;
    rd_kafka_conf_res_t ret = 0;
    char errstr[512];

    if (zend_parse_parameters(ZEND_NUM_ARGS(), "ss", &name, &name_len, &value, &value_len) == FAILURE) {
        return;
    }

    intern = get_kafka_conf_object(getThis());
    if (!intern) {
        return;
    }

    errstr[0] = '\0';

    switch (intern->type) {
        case KAFKA_CONF:
            ret = rd_kafka_conf_set(intern->u.conf, name, value, errstr, sizeof(errstr));
            break;
        case KAFKA_TOPIC_CONF:
            ret = rd_kafka_topic_conf_set(intern->u.topic_conf, name, value, errstr, sizeof(errstr));
            break;
    }

    /* Map librdkafka's result code onto PHP exceptions. */
    switch (ret) {
        case RD_KAFKA_CONF_UNKNOWN:
            zend_throw_exception(ce_kafka_exception, errstr, RD_KAFKA_CONF_UNKNOWN);
            return;
        case RD_KAFKA_CONF_INVALID:
            zend_throw_exception(ce_kafka_exception, errstr, RD_KAFKA_CONF_INVALID);
            return;
        case RD_KAFKA_CONF_OK:
            break;
    }
}
/* }}} */
/* {{{ proto RdKafka\Conf::setDefaultTopicConf(RdKafka\TopicConf $topicConf)
   Sets the default topic configuration for topics created from this conf.
   The topic conf is duplicated first because
   rd_kafka_conf_set_default_topic_conf() takes ownership of its argument,
   leaving the caller's TopicConf object usable. */
PHP_METHOD(RdKafka_Conf, setDefaultTopicConf)
{
    zval *ztopic_conf;
    kafka_conf_object *intern;
    kafka_conf_object *topic_conf_intern;
    rd_kafka_topic_conf_t *topic_conf;

    if (zend_parse_parameters(ZEND_NUM_ARGS(), "O", &ztopic_conf, ce_kafka_topic_conf) == FAILURE) {
        return;
    }

    intern = get_kafka_conf_object(getThis());
    if (!intern) {
        return;
    }

    topic_conf_intern = get_kafka_conf_object(ztopic_conf);
    if (!topic_conf_intern) {
        return;
    }

    topic_conf = rd_kafka_topic_conf_dup(topic_conf_intern->u.topic_conf);
    rd_kafka_conf_set_default_topic_conf(intern->u.conf, topic_conf);
}
/* }}} */
/* {{{ proto void RdKafka\Conf::setErrorCb(callable $callback)
   Sets the error callback (bridged through kafka_conf_error_cb). */
PHP_METHOD(RdKafka_Conf, setErrorCb)
{
    zend_fcall_info fci;
    zend_fcall_info_cache fcc;
    kafka_conf_object *intern;

    if (zend_parse_parameters(ZEND_NUM_ARGS(), "f", &fci, &fcc) == FAILURE) {
        return;
    }

    intern = get_kafka_conf_object(getThis());
    if (!intern) {
        return;
    }

    /* Keep the PHP callable alive for as long as the conf holds it. */
    Z_ADDREF_P(&fci.function_name);

    /* Re-registering replaces the slot in place, releasing the old callable. */
    if (intern->cbs.error) {
        zval_ptr_dtor(&intern->cbs.error->fci.function_name);
    } else {
        intern->cbs.error = ecalloc(1, sizeof(*intern->cbs.error));
    }
    intern->cbs.error->fci = fci;
    intern->cbs.error->fcc = fcc;

    rd_kafka_conf_set_error_cb(intern->u.conf, kafka_conf_error_cb);
}
/* }}} */
/* {{{ proto void RdKafka\Conf::setDrMsgCb(callable $callback)
   Sets the delivery report callback (bridged through kafka_conf_dr_msg_cb). */
PHP_METHOD(RdKafka_Conf, setDrMsgCb)
{
    zend_fcall_info fci;
    zend_fcall_info_cache fcc;
    kafka_conf_object *intern;

    if (zend_parse_parameters(ZEND_NUM_ARGS(), "f", &fci, &fcc) == FAILURE) {
        return;
    }

    intern = get_kafka_conf_object(getThis());
    if (!intern) {
        return;
    }

    /* Keep the PHP callable alive for as long as the conf holds it. */
    Z_ADDREF_P(&fci.function_name);

    /* Re-registering replaces the slot in place, releasing the old callable. */
    if (intern->cbs.dr_msg) {
        zval_ptr_dtor(&intern->cbs.dr_msg->fci.function_name);
    } else {
        intern->cbs.dr_msg = ecalloc(1, sizeof(*intern->cbs.dr_msg));
    }
    intern->cbs.dr_msg->fci = fci;
    intern->cbs.dr_msg->fcc = fcc;

    rd_kafka_conf_set_dr_msg_cb(intern->u.conf, kafka_conf_dr_msg_cb);
}
/* }}} */
/* {{{ proto void RdKafka\Conf::setStatsCb(callable $callback)
   Sets the statistics report callback (bridged through kafka_conf_stats_cb). */
PHP_METHOD(RdKafka_Conf, setStatsCb)
{
    zend_fcall_info fci;
    zend_fcall_info_cache fcc;
    kafka_conf_object *intern;

    if (zend_parse_parameters(ZEND_NUM_ARGS(), "f", &fci, &fcc) == FAILURE) {
        return;
    }

    intern = get_kafka_conf_object(getThis());
    if (!intern) {
        return;
    }

    /* Keep the PHP callable alive for as long as the conf holds it. */
    Z_ADDREF_P(&fci.function_name);

    /* Re-registering replaces the slot in place, releasing the old callable. */
    if (intern->cbs.stats) {
        zval_ptr_dtor(&intern->cbs.stats->fci.function_name);
    } else {
        intern->cbs.stats = ecalloc(1, sizeof(*intern->cbs.stats));
    }
    intern->cbs.stats->fci = fci;
    intern->cbs.stats->fcc = fcc;

    rd_kafka_conf_set_stats_cb(intern->u.conf, kafka_conf_stats_cb);
}
/* }}} */
/* {{{ proto void RdKafka\Conf::setRebalanceCb(mixed $callback)
   Set rebalance callback for use with coordinated consumer group balancing
   (bridged through kafka_conf_rebalance_cb). */
PHP_METHOD(RdKafka_Conf, setRebalanceCb)
{
    zend_fcall_info fci;
    zend_fcall_info_cache fcc;
    kafka_conf_object *intern;

    if (zend_parse_parameters(ZEND_NUM_ARGS(), "f", &fci, &fcc) == FAILURE) {
        return;
    }

    intern = get_kafka_conf_object(getThis());
    if (!intern) {
        return;
    }

    /* Keep the PHP callable alive for as long as the conf holds it. */
    Z_ADDREF_P(&fci.function_name);

    /* Re-registering replaces the slot in place, releasing the old callable. */
    if (intern->cbs.rebalance) {
        zval_ptr_dtor(&intern->cbs.rebalance->fci.function_name);
    } else {
        intern->cbs.rebalance = ecalloc(1, sizeof(*intern->cbs.rebalance));
    }
    intern->cbs.rebalance->fci = fci;
    intern->cbs.rebalance->fcc = fcc;

    rd_kafka_conf_set_rebalance_cb(intern->u.conf, kafka_conf_rebalance_cb);
}
/* }}} */
/* {{{ proto void RdKafka\Conf::setConsumeCb(callable $callback)
   Set consume callback to use with poll (bridged through
   kafka_conf_consume_cb). */
PHP_METHOD(RdKafka_Conf, setConsumeCb)
{
    zend_fcall_info fci;
    zend_fcall_info_cache fcc;
    kafka_conf_object *intern;

    if (zend_parse_parameters(ZEND_NUM_ARGS(), "f", &fci, &fcc) == FAILURE) {
        return;
    }

    intern = get_kafka_conf_object(getThis());
    if (!intern) {
        return;
    }

    /* Keep the PHP callable alive for as long as the conf holds it. */
    Z_ADDREF_P(&fci.function_name);

    /* Re-registering replaces the slot in place, releasing the old callable. */
    if (intern->cbs.consume) {
        zval_ptr_dtor(&intern->cbs.consume->fci.function_name);
    } else {
        intern->cbs.consume = ecalloc(1, sizeof(*intern->cbs.consume));
    }
    intern->cbs.consume->fci = fci;
    intern->cbs.consume->fcc = fcc;

    rd_kafka_conf_set_consume_cb(intern->u.conf, kafka_conf_consume_cb);
}
/* }}} */
/* {{{ proto void RdKafka\Conf::setOffsetCommitCb(mixed $callback)
   Set offset commit callback for use with consumer groups (bridged through
   kafka_conf_offset_commit_cb). */
PHP_METHOD(RdKafka_Conf, setOffsetCommitCb)
{
    zend_fcall_info fci;
    zend_fcall_info_cache fcc;
    kafka_conf_object *intern;

    if (zend_parse_parameters(ZEND_NUM_ARGS(), "f", &fci, &fcc) == FAILURE) {
        return;
    }

    intern = get_kafka_conf_object(getThis());
    if (!intern) {
        return;
    }

    /* Keep the PHP callable alive for as long as the conf holds it. */
    Z_ADDREF_P(&fci.function_name);

    /* Re-registering replaces the slot in place, releasing the old callable. */
    if (intern->cbs.offset_commit) {
        zval_ptr_dtor(&intern->cbs.offset_commit->fci.function_name);
    } else {
        intern->cbs.offset_commit = ecalloc(1, sizeof(*intern->cbs.offset_commit));
    }
    intern->cbs.offset_commit->fci = fci;
    intern->cbs.offset_commit->fcc = fcc;

    rd_kafka_conf_set_offset_commit_cb(intern->u.conf, kafka_conf_offset_commit_cb);
}
/* }}} */
/* {{{ proto void RdKafka\Conf::setLogCb(mixed $callback)
   Sets the log callback (bridged through kafka_conf_log_cb). Also enables
   "log.queue" so logs are delivered from the polling thread rather than
   librdkafka's internal threads; the errstr from that set call is ignored
   on purpose (best effort). */
PHP_METHOD(RdKafka_Conf, setLogCb)
{
    zend_fcall_info fci;
    zend_fcall_info_cache fcc;
    kafka_conf_object *conf;
    char errstr[512];

    if (zend_parse_parameters(ZEND_NUM_ARGS(), "f", &fci, &fcc) == FAILURE) {
        return;
    }

    conf = get_kafka_conf_object(getThis());
    if (!conf) {
        return;
    }

    /* Keep the PHP callable alive for as long as the conf holds it. */
    Z_ADDREF_P(&fci.function_name);

    /* Re-registering replaces the slot in place, releasing the old callable. */
    if (conf->cbs.log) {
        zval_ptr_dtor(&conf->cbs.log->fci.function_name);
    } else {
        conf->cbs.log = ecalloc(1, sizeof(*conf->cbs.log));
    }
    conf->cbs.log->fci = fci;
    conf->cbs.log->fcc = fcc;

    rd_kafka_conf_set_log_cb(conf->u.conf, kafka_conf_log_cb);
    rd_kafka_conf_set(conf->u.conf, "log.queue", "true", errstr, sizeof(errstr));
}
/* }}} */
/* {{{ proto RdKafka\TopicConf::__construct()
   Creates the underlying rd_kafka_topic_conf_t and marks the object as a
   topic configuration. EH_THROW turns argument-parsing errors into
   InvalidArgumentException for the duration of parsing. */
PHP_METHOD(RdKafka_TopicConf, __construct)
{
    kafka_conf_object *intern;
    zend_error_handling error_handling;

    zend_replace_error_handling(EH_THROW, spl_ce_InvalidArgumentException, &error_handling);

    if (zend_parse_parameters(ZEND_NUM_ARGS(), "") == FAILURE) {
        zend_restore_error_handling(&error_handling);
        return;
    }

    intern = Z_RDKAFKA_P(kafka_conf_object, getThis());
    intern->type = KAFKA_TOPIC_CONF;
    intern->u.topic_conf = rd_kafka_topic_conf_new();

    zend_restore_error_handling(&error_handling);
}
/* }}} */
/* {{{ proto RdKafka\TopicConf::setPartitioner(int $partitioner)
   Selects one of librdkafka's built-in partitioners by the extension's
   MSG_PARTITIONER_* id. Murmur2 variants are only available when the
   linked librdkafka provides them (HAS_RD_KAFKA_PARTITIONER_MURMUR2).
   Throws InvalidArgumentException for an unknown id. */
PHP_METHOD(RdKafka_TopicConf, setPartitioner)
{
    kafka_conf_object *intern;
    zend_long id;
    int32_t (*partitioner) (const rd_kafka_topic_t * rkt, const void * keydata, size_t keylen, int32_t partition_cnt, void * rkt_opaque, void * msg_opaque);

    if (zend_parse_parameters(ZEND_NUM_ARGS(), "l", &id) == FAILURE) {
        return;
    }

    intern = get_kafka_conf_object(getThis());
    if (!intern) {
        return;
    }

    switch (id) {
        case MSG_PARTITIONER_RANDOM:
            partitioner = rd_kafka_msg_partitioner_random;
            break;
        case MSG_PARTITIONER_CONSISTENT:
            partitioner = rd_kafka_msg_partitioner_consistent;
            break;
        case MSG_PARTITIONER_CONSISTENT_RANDOM:
            partitioner = rd_kafka_msg_partitioner_consistent_random;
            break;
#ifdef HAS_RD_KAFKA_PARTITIONER_MURMUR2
        case MSG_PARTITIONER_MURMUR2:
            partitioner = rd_kafka_msg_partitioner_murmur2;
            break;
        case MSG_PARTITIONER_MURMUR2_RANDOM:
            partitioner = rd_kafka_msg_partitioner_murmur2_random;
            break;
#endif
        default:
            zend_throw_exception_ex(spl_ce_InvalidArgumentException, 0, "Invalid partitioner given");
            return;
    }

    rd_kafka_topic_conf_set_partitioner_cb(intern->u.topic_conf, partitioner);
}
/* }}} */
/* Module init for this translation unit: installs the custom object
 * handlers (free/offset) and registers the RdKafka\Conf and
 * RdKafka\TopicConf classes, both backed by kafka_conf_new. */
void kafka_conf_minit(INIT_FUNC_ARGS)
{
    handlers = kafka_default_object_handlers;
    handlers.free_obj = kafka_conf_free;
    handlers.offset = XtOffsetOf(kafka_conf_object, std);

    ce_kafka_conf = register_class_RdKafka_Conf();
    ce_kafka_conf->create_object = kafka_conf_new;

    ce_kafka_topic_conf = register_class_RdKafka_TopicConf();
    ce_kafka_topic_conf->create_object = kafka_conf_new;
}
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
#ifndef KAFKA_CONF_H
#define KAFKA_CONF_H

/* Partitioner ids accepted by RdKafka\TopicConf::setPartitioner(). */
enum {
    MSG_PARTITIONER_RANDOM = 2,
    MSG_PARTITIONER_CONSISTENT = 3,
    MSG_PARTITIONER_CONSISTENT_RANDOM = 4,
    MSG_PARTITIONER_MURMUR2 = 5,
    MSG_PARTITIONER_MURMUR2_RANDOM = 6
};

/* Discriminates which member of kafka_conf_object.u is active. */
typedef enum {
    KAFKA_CONF = 1,
    KAFKA_TOPIC_CONF
} kafka_conf_type;

/* One registered PHP callable (call info + cache). */
typedef struct _kafka_conf_callback {
    zend_fcall_info fci;
    zend_fcall_info_cache fcc;
} kafka_conf_callback;

/* All PHP callbacks attached to a conf/handle; NULL slots mean
 * "not registered". zrk holds the owning RdKafka instance zval
 * passed as the first argument to most callbacks. */
typedef struct _kafka_conf_callbacks {
    zval zrk;
    kafka_conf_callback *error;
    kafka_conf_callback *rebalance;
    kafka_conf_callback *dr_msg;
    kafka_conf_callback *stats;
    kafka_conf_callback *consume;
    kafka_conf_callback *offset_commit;
    kafka_conf_callback *log;
} kafka_conf_callbacks;

/* PHP object wrapper for RdKafka\Conf / RdKafka\TopicConf.
 * `type` selects the active union member; `std` must stay last
 * (the handlers use XtOffsetOf to recover the wrapper). */
typedef struct _kafka_conf_object {
    kafka_conf_type type;
    union {
        rd_kafka_conf_t *conf;
        rd_kafka_topic_conf_t *topic_conf;
    } u;
    kafka_conf_callbacks cbs;
    zend_object std;
} kafka_conf_object;

kafka_conf_object * get_kafka_conf_object(zval *zconf);
void kafka_conf_minit(INIT_FUNC_ARGS);
void kafka_conf_callbacks_dtor(kafka_conf_callbacks *cbs);
void kafka_conf_callbacks_copy(kafka_conf_callbacks *to, kafka_conf_callbacks *from);
void kafka_conf_dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *msg, void *opaque);

extern zend_class_entry * ce_kafka_conf;
extern zend_class_entry * ce_kafka_topic_conf;

#endif /* KAFKA_CONF_H */
<?php
/**
* @generate-class-entries
* @generate-function-entries
* @generate-legacy-arginfo
*/
namespace RdKafka;
// Stub declaration of RdKafka\Conf (global producer/consumer configuration).
// Bodies are intentionally empty: this file only drives arginfo generation;
// the real implementations live in conf.c.
class Conf
{
    public function __construct() {}

    /** @tentative-return-type */
    public function dump(): array {}

    /** @tentative-return-type */
    public function set(string $name, string $value): void {}

    /**
     * @tentative-return-type
     * @deprecated
     */
    public function setDefaultTopicConf(TopicConf $topic_conf): void {}

    /** @tentative-return-type */
    public function setErrorCb(callable $callback): void {}

    /** @tentative-return-type */
    public function setDrMsgCb(callable $callback): void {}

    /** @tentative-return-type */
    public function setStatsCb(callable $callback): void {}

    /** @tentative-return-type */
    public function setRebalanceCb(callable $callback): void {}

    /** @tentative-return-type */
    public function setConsumeCb(callable $callback): void {}

    /** @tentative-return-type */
    public function setOffsetCommitCb(callable $callback): void {}

    /** @tentative-return-type */
    public function setLogCb(callable $callback): void {}
}
// Stub declaration of RdKafka\TopicConf (per-topic configuration).
// dump()/set() alias the Conf implementations in conf.c.
class TopicConf
{
    public function __construct() {}

    /**
     * @tentative-return-type
     * @implementation-alias RdKafka\Conf::dump
     */
    public function dump(): array {}

    /**
     * @tentative-return-type
     * @implementation-alias RdKafka\Conf::set
     */
    public function set(string $name, string $value): void {}

    /** @tentative-return-type */
    public function setPartitioner(int $partitioner): void {}
}
/* This is a generated file, edit the .stub.php file instead.
* Stub hash: 86e8e9fcd235f3affc4ef30ca0d96395abcad13f */
/* ---- Arginfo for PHP >= 8.0: typed signatures mirroring conf.stub.php ---- */
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_Conf___construct, 0, 0, 0)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_Conf_dump, 0, 0, IS_ARRAY, 0)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_Conf_set, 0, 2, IS_VOID, 0)
    ZEND_ARG_TYPE_INFO(0, name, IS_STRING, 0)
    ZEND_ARG_TYPE_INFO(0, value, IS_STRING, 0)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_Conf_setDefaultTopicConf, 0, 1, IS_VOID, 0)
    ZEND_ARG_OBJ_INFO(0, topic_conf, RdKafka\\TopicConf, 0)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_Conf_setErrorCb, 0, 1, IS_VOID, 0)
    ZEND_ARG_TYPE_INFO(0, callback, IS_CALLABLE, 0)
ZEND_END_ARG_INFO()

/* All the set*Cb methods share the (callable $callback): void signature. */
#define arginfo_class_RdKafka_Conf_setDrMsgCb arginfo_class_RdKafka_Conf_setErrorCb

#define arginfo_class_RdKafka_Conf_setStatsCb arginfo_class_RdKafka_Conf_setErrorCb

#define arginfo_class_RdKafka_Conf_setRebalanceCb arginfo_class_RdKafka_Conf_setErrorCb

#define arginfo_class_RdKafka_Conf_setConsumeCb arginfo_class_RdKafka_Conf_setErrorCb

#define arginfo_class_RdKafka_Conf_setOffsetCommitCb arginfo_class_RdKafka_Conf_setErrorCb

#define arginfo_class_RdKafka_Conf_setLogCb arginfo_class_RdKafka_Conf_setErrorCb

#define arginfo_class_RdKafka_TopicConf___construct arginfo_class_RdKafka_Conf___construct

#define arginfo_class_RdKafka_TopicConf_dump arginfo_class_RdKafka_Conf_dump

#define arginfo_class_RdKafka_TopicConf_set arginfo_class_RdKafka_Conf_set

ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_TopicConf_setPartitioner, 0, 1, IS_VOID, 0)
    ZEND_ARG_TYPE_INFO(0, partitioner, IS_LONG, 0)
ZEND_END_ARG_INFO()

/* Forward declarations of the method implementations in conf.c. */
ZEND_METHOD(RdKafka_Conf, __construct);
ZEND_METHOD(RdKafka_Conf, dump);
ZEND_METHOD(RdKafka_Conf, set);
ZEND_METHOD(RdKafka_Conf, setDefaultTopicConf);
ZEND_METHOD(RdKafka_Conf, setErrorCb);
ZEND_METHOD(RdKafka_Conf, setDrMsgCb);
ZEND_METHOD(RdKafka_Conf, setStatsCb);
ZEND_METHOD(RdKafka_Conf, setRebalanceCb);
ZEND_METHOD(RdKafka_Conf, setConsumeCb);
ZEND_METHOD(RdKafka_Conf, setOffsetCommitCb);
ZEND_METHOD(RdKafka_Conf, setLogCb);
ZEND_METHOD(RdKafka_TopicConf, __construct);
ZEND_METHOD(RdKafka_TopicConf, setPartitioner);

/* ---- Method tables ---- */
static const zend_function_entry class_RdKafka_Conf_methods[] = {
    ZEND_ME(RdKafka_Conf, __construct, arginfo_class_RdKafka_Conf___construct, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Conf, dump, arginfo_class_RdKafka_Conf_dump, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Conf, set, arginfo_class_RdKafka_Conf_set, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Conf, setDefaultTopicConf, arginfo_class_RdKafka_Conf_setDefaultTopicConf, ZEND_ACC_PUBLIC|ZEND_ACC_DEPRECATED)
    ZEND_ME(RdKafka_Conf, setErrorCb, arginfo_class_RdKafka_Conf_setErrorCb, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Conf, setDrMsgCb, arginfo_class_RdKafka_Conf_setDrMsgCb, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Conf, setStatsCb, arginfo_class_RdKafka_Conf_setStatsCb, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Conf, setRebalanceCb, arginfo_class_RdKafka_Conf_setRebalanceCb, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Conf, setConsumeCb, arginfo_class_RdKafka_Conf_setConsumeCb, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Conf, setOffsetCommitCb, arginfo_class_RdKafka_Conf_setOffsetCommitCb, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Conf, setLogCb, arginfo_class_RdKafka_Conf_setLogCb, ZEND_ACC_PUBLIC)
    ZEND_FE_END
};

static const zend_function_entry class_RdKafka_TopicConf_methods[] = {
    ZEND_ME(RdKafka_TopicConf, __construct, arginfo_class_RdKafka_TopicConf___construct, ZEND_ACC_PUBLIC)
    ZEND_MALIAS(RdKafka_Conf, dump, dump, arginfo_class_RdKafka_TopicConf_dump, ZEND_ACC_PUBLIC)
    ZEND_MALIAS(RdKafka_Conf, set, set, arginfo_class_RdKafka_TopicConf_set, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_TopicConf, setPartitioner, arginfo_class_RdKafka_TopicConf_setPartitioner, ZEND_ACC_PUBLIC)
    ZEND_FE_END
};

/* ---- Class registration helpers used by kafka_conf_minit ---- */
static zend_class_entry *register_class_RdKafka_Conf(void)
{
    zend_class_entry ce, *class_entry;

    INIT_NS_CLASS_ENTRY(ce, "RdKafka", "Conf", class_RdKafka_Conf_methods);
    class_entry = zend_register_internal_class_ex(&ce, NULL);

    return class_entry;
}

static zend_class_entry *register_class_RdKafka_TopicConf(void)
{
    zend_class_entry ce, *class_entry;

    INIT_NS_CLASS_ENTRY(ce, "RdKafka", "TopicConf", class_RdKafka_TopicConf_methods);
    class_entry = zend_register_internal_class_ex(&ce, NULL);

    return class_entry;
}
/* This is a generated file, edit the .stub.php file instead.
* Stub hash: 86e8e9fcd235f3affc4ef30ca0d96395abcad13f */
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_Conf___construct, 0, 0, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_Conf_dump, 0, 0, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_Conf_set, 0, 0, 2)
ZEND_ARG_INFO(0, name)
ZEND_ARG_INFO(0, value)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_Conf_setDefaultTopicConf, 0, 0, 1)
ZEND_ARG_INFO(0, topic_conf)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_Conf_setErrorCb, 0, 0, 1)
ZEND_ARG_INFO(0, callback)
ZEND_END_ARG_INFO()
#define arginfo_class_RdKafka_Conf_setDrMsgCb arginfo_class_RdKafka_Conf_setErrorCb
#define arginfo_class_RdKafka_Conf_setStatsCb arginfo_class_RdKafka_Conf_setErrorCb
#define arginfo_class_RdKafka_Conf_setRebalanceCb arginfo_class_RdKafka_Conf_setErrorCb
#define arginfo_class_RdKafka_Conf_setConsumeCb arginfo_class_RdKafka_Conf_setErrorCb
#define arginfo_class_RdKafka_Conf_setOffsetCommitCb arginfo_class_RdKafka_Conf_setErrorCb
#define arginfo_class_RdKafka_Conf_setLogCb arginfo_class_RdKafka_Conf_setErrorCb
#define arginfo_class_RdKafka_TopicConf___construct arginfo_class_RdKafka_Conf___construct
#define arginfo_class_RdKafka_TopicConf_dump arginfo_class_RdKafka_Conf_dump
#define arginfo_class_RdKafka_TopicConf_set arginfo_class_RdKafka_Conf_set
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_TopicConf_setPartitioner, 0, 0, 1)
ZEND_ARG_INFO(0, partitioner)
ZEND_END_ARG_INFO()
ZEND_METHOD(RdKafka_Conf, __construct);
ZEND_METHOD(RdKafka_Conf, dump);
ZEND_METHOD(RdKafka_Conf, set);
ZEND_METHOD(RdKafka_Conf, setDefaultTopicConf);
ZEND_METHOD(RdKafka_Conf, setErrorCb);
ZEND_METHOD(RdKafka_Conf, setDrMsgCb);
ZEND_METHOD(RdKafka_Conf, setStatsCb);
ZEND_METHOD(RdKafka_Conf, setRebalanceCb);
ZEND_METHOD(RdKafka_Conf, setConsumeCb);
ZEND_METHOD(RdKafka_Conf, setOffsetCommitCb);
ZEND_METHOD(RdKafka_Conf, setLogCb);
ZEND_METHOD(RdKafka_TopicConf, __construct);
ZEND_METHOD(RdKafka_TopicConf, setPartitioner);
static const zend_function_entry class_RdKafka_Conf_methods[] = {
ZEND_ME(RdKafka_Conf, __construct, arginfo_class_RdKafka_Conf___construct, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_Conf, dump, arginfo_class_RdKafka_Conf_dump, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_Conf, set, arginfo_class_RdKafka_Conf_set, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_Conf, setDefaultTopicConf, arginfo_class_RdKafka_Conf_setDefaultTopicConf, ZEND_ACC_PUBLIC|ZEND_ACC_DEPRECATED)
ZEND_ME(RdKafka_Conf, setErrorCb, arginfo_class_RdKafka_Conf_setErrorCb, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_Conf, setDrMsgCb, arginfo_class_RdKafka_Conf_setDrMsgCb, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_Conf, setStatsCb, arginfo_class_RdKafka_Conf_setStatsCb, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_Conf, setRebalanceCb, arginfo_class_RdKafka_Conf_setRebalanceCb, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_Conf, setConsumeCb, arginfo_class_RdKafka_Conf_setConsumeCb, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_Conf, setOffsetCommitCb, arginfo_class_RdKafka_Conf_setOffsetCommitCb, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_Conf, setLogCb, arginfo_class_RdKafka_Conf_setLogCb, ZEND_ACC_PUBLIC)
ZEND_FE_END
};
/* Method table for RdKafka\TopicConf. dump/set are aliases onto the
 * RdKafka\Conf implementations (ZEND_MALIAS). */
static const zend_function_entry class_RdKafka_TopicConf_methods[] = {
ZEND_ME(RdKafka_TopicConf, __construct, arginfo_class_RdKafka_TopicConf___construct, ZEND_ACC_PUBLIC)
ZEND_MALIAS(RdKafka_Conf, dump, dump, arginfo_class_RdKafka_TopicConf_dump, ZEND_ACC_PUBLIC)
ZEND_MALIAS(RdKafka_Conf, set, set, arginfo_class_RdKafka_TopicConf_set, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_TopicConf, setPartitioner, arginfo_class_RdKafka_TopicConf_setPartitioner, ZEND_ACC_PUBLIC)
ZEND_FE_END
};
/* Registers the RdKafka\Conf internal class and returns its class entry. */
static zend_class_entry *register_class_RdKafka_Conf(void)
{
zend_class_entry ce;

INIT_NS_CLASS_ENTRY(ce, "RdKafka", "Conf", class_RdKafka_Conf_methods);
return zend_register_internal_class_ex(&ce, NULL);
}
/* Registers the RdKafka\TopicConf internal class and returns its class entry. */
static zend_class_entry *register_class_RdKafka_TopicConf(void)
{
zend_class_entry ce;

INIT_NS_CLASS_ENTRY(ce, "RdKafka", "TopicConf", class_RdKafka_TopicConf_methods);
return zend_register_internal_class_ex(&ce, NULL);
}
dnl $Id$
dnl config.m4 for extension rdkafka
PHP_ARG_WITH(rdkafka, for rdkafka support,
[ --with-rdkafka Include rdkafka support])
if test "$PHP_RDKAFKA" != "no"; then
SEARCH_PATH="/usr/local /usr" # you might want to change this
SEARCH_FOR="/include/librdkafka/rdkafka.h" # you most likely want to change this
if test -r $PHP_RDKAFKA/$SEARCH_FOR; then # path given as parameter
RDKAFKA_DIR=$PHP_RDKAFKA
else # search default path list
dnl BUGFIX: removed a stray double quote that leaked into configure output.
AC_MSG_CHECKING([for librdkafka/rdkafka.h in default path])
for i in $SEARCH_PATH ; do
if test -r $i/$SEARCH_FOR; then
RDKAFKA_DIR=$i
AC_MSG_RESULT(found in $i)
fi
done
fi
if test -z "$RDKAFKA_DIR"; then
AC_MSG_RESULT([not found])
AC_MSG_ERROR([Please reinstall the rdkafka distribution])
fi
PHP_ADD_INCLUDE($RDKAFKA_DIR/include)
SOURCES="rdkafka.c metadata.c metadata_broker.c metadata_topic.c metadata_partition.c metadata_collection.c conf.c topic.c queue.c message.c fun.c kafka_consumer.c topic_partition.c"
LIBNAME=rdkafka
LIBSYMBOL=rd_kafka_new
dnl Verify the library links and exports the core constructor symbol.
PHP_CHECK_LIBRARY($LIBNAME,$LIBSYMBOL,
[
PHP_ADD_LIBRARY_WITH_PATH($LIBNAME, $RDKAFKA_DIR/$PHP_LIBDIR, RDKAFKA_SHARED_LIBADD)
AC_DEFINE(HAVE_RDKAFKALIB,1,[ ])
],[
AC_MSG_ERROR([wrong rdkafka lib version or lib not found])
],[
-L$RDKAFKA_DIR/$PHP_LIBDIR -lm
])
dnl Temporarily point the preprocessor/linker at librdkafka for the
dnl version and feature probes below; restored afterwards.
ORIG_LDFLAGS="$LDFLAGS"
ORIG_CPPFLAGS="$CPPFLAGS"
LDFLAGS="-L$RDKAFKA_DIR/$PHP_LIBDIR -lm"
CPPFLAGS="-I$RDKAFKA_DIR/include"
AC_MSG_CHECKING([for librdkafka version])
AC_EGREP_CPP(yes,[
#include <librdkafka/rdkafka.h>
#if RD_KAFKA_VERSION >= 0x000b0000
yes
#endif
],[
AC_MSG_RESULT([>= 0.11.0])
],[
AC_MSG_ERROR([librdkafka version 0.11.0 or greater required.])
])
dnl Optional librdkafka capabilities: each probe toggles a HAVE_/HAS_ macro.
AC_CHECK_LIB($LIBNAME,[rd_kafka_message_headers],[
AC_DEFINE(HAVE_RD_KAFKA_MESSAGE_HEADERS,1,[ ])
],[
AC_MSG_WARN([no rd_kafka_message_headers, headers support will not be available])
])
AC_CHECK_LIB($LIBNAME,[rd_kafka_purge],[
AC_DEFINE(HAS_RD_KAFKA_PURGE,1,[ ])
],[
AC_MSG_WARN([purge is not available])
])
AC_CHECK_LIB($LIBNAME,[rd_kafka_init_transactions],[
AC_DEFINE(HAS_RD_KAFKA_TRANSACTIONS,1,[ ])
SOURCES="$SOURCES kafka_error_exception.c"
],[
AC_MSG_WARN([transactions are not available])
])
AC_CHECK_LIB($LIBNAME,[rd_kafka_msg_partitioner_murmur2],[
AC_DEFINE(HAS_RD_KAFKA_PARTITIONER_MURMUR2,1,[ ])
],[
AC_MSG_WARN([murmur2 partitioner is not available])
])
LDFLAGS="$ORIG_LDFLAGS"
CPPFLAGS="$ORIG_CPPFLAGS"
PHP_SUBST(RDKAFKA_SHARED_LIBADD)
PHP_NEW_EXTENSION(rdkafka, $SOURCES, $ext_shared)
fi
// $Id$
// vim:ft=javascript
// Windows (config.w32) build rules for the rdkafka extension.
ARG_WITH("rdkafka", "for rdkafka support", "no");
if (PHP_RDKAFKA != "no") {
	if (CHECK_LIB("librdkafka.lib", "rdkafka", PHP_RDKAFKA) &&
		CHECK_HEADER_ADD_INCLUDE("librdkafka/rdkafka.h", "CFLAGS_RDKAFKA")) {
		// NOTE(review): unlike config.m4, these capability macros are defined
		// unconditionally here — presumably the Windows librdkafka build is
		// assumed recent enough to provide all of them; verify when bumping deps.
		AC_DEFINE('HAVE_RD_KAFKA_MESSAGE_HEADERS', 1, '');
		AC_DEFINE('HAS_RD_KAFKA_PURGE', 1, '');
		AC_DEFINE('HAS_RD_KAFKA_TRANSACTIONS', 1, '');
		AC_DEFINE('HAS_RD_KAFKA_PARTITIONER_MURMUR2', 1, '');
		EXTENSION("rdkafka", "rdkafka.c metadata.c metadata_broker.c metadata_topic.c \
			metadata_partition.c metadata_collection.c conf.c \
			topic.c queue.c message.c fun.c kafka_consumer.c topic_partition.c kafka_error_exception.c");
		AC_DEFINE('HAVE_RDKAFKA', 1, '');
	} else {
		WARNING("rdkafka not enabled; libraries and headers not found");
	}
}
For examples, please check the documentation: https://arnaud-lb.github.io/php-rdkafka/phpdoc/rdkafka.examples.html
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "php.h"
#include "php_rdkafka.h"
#include "php_rdkafka_priv.h"
#include "librdkafka/rdkafka.h"
#include "Zend/zend_exceptions.h"
#include "ext/spl/spl_exceptions.h"
/* {{{ proto array rd_kafka_get_err_descs()
 * Returns the full list of librdkafka error codes as an array of
 * ['code' => int, 'name' => ?string, 'desc' => ?string] entries.
 */
PHP_FUNCTION(rd_kafka_get_err_descs)
{
    const struct rd_kafka_err_desc *errdescs;
    size_t cnt;
    size_t i;
    int seen_zero = 0;

    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }

    rd_kafka_get_err_descs(&errdescs, &cnt);

    array_init_size(return_value, cnt);

    for (i = 0; i < cnt; i++) {
        const struct rd_kafka_err_desc *desc = &errdescs[i];
        zval el;

        /* The table contains several entries with code 0; keep only the
         * first one to avoid duplicate rows. */
        if (desc->code == 0) {
            if (seen_zero) {
                continue;
            }
            seen_zero = 1;
        }

        /* BUGFIX: dropped the dead ZVAL_NULL(&el) store — array_init()
         * fully (re)initializes the zval immediately afterwards. */
        array_init(&el);

        add_assoc_long(&el, "code", desc->code);

        if (desc->name) {
            add_assoc_string(&el, "name", (char*) desc->name);
        } else {
            add_assoc_null(&el, "name");
        }

        if (desc->desc) {
            add_assoc_string(&el, "desc", (char*) desc->desc);
        } else {
            add_assoc_null(&el, "desc");
        }

        add_next_index_zval(return_value, &el);
    }
}
/* }}} */
/* {{{ proto string rd_kafka_err2name(int $err)
 * Returns the symbolic name of an error code, or NULL when librdkafka
 * has no name for it (the stub declares the return type as ?string).
 */
PHP_FUNCTION(rd_kafka_err2name)
{
zend_long err;
const char *name;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "l", &err) == FAILURE) {
return;
}
name = rd_kafka_err2name(err);
if (name) {
RETURN_STRING(name);
}
/* Falls through: return_value stays NULL for unknown codes. */
}
/* }}} */
/* {{{ proto string rd_kafka_err2str(int $err)
 * Returns a human readable representation of a kafka error, or NULL when
 * librdkafka returns none (the stub declares the return type as ?string).
 */
PHP_FUNCTION(rd_kafka_err2str)
{
zend_long err;
const char *errstr;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "l", &err) == FAILURE) {
return;
}
errstr = rd_kafka_err2str(err);
if (errstr) {
RETURN_STRING(errstr);
}
/* Falls through: return_value stays NULL when no description exists. */
}
/* }}} */
/* {{{ proto int rd_kafka_errno()
 * Returns the C `errno` of the calling thread.
 * Marked @deprecated in the stub file. */
PHP_FUNCTION(rd_kafka_errno)
{
if (zend_parse_parameters(ZEND_NUM_ARGS(), "") == FAILURE) {
return;
}
RETURN_LONG(errno);
}
/* }}} */
/* {{{ proto int rd_kafka_errno2err(int $errnox)
 * Converts a C `errno` value to an rdkafka error code.
 * Marked @deprecated in the stub file. */
PHP_FUNCTION(rd_kafka_errno2err)
{
zend_long errnox;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "l", &errnox) == FAILURE) {
return;
}
RETURN_LONG(rd_kafka_errno2err(errnox));
}
/* }}} */
/* {{{ proto int rd_kafka_thread_cnt()
 * Retrieve the current number of threads in use by librdkafka.
 */
PHP_FUNCTION(rd_kafka_thread_cnt)
{
if (zend_parse_parameters_none() == FAILURE) {
return;
}
RETURN_LONG(rd_kafka_thread_cnt());
}
/* }}} */
/* {{{ proto int rd_kafka_offset_tail(int $cnt)
 * Returns the special offset value meaning "start consuming `$cnt`
 * messages before the partition's current `.._END` offset"
 * (wraps librdkafka's RD_KAFKA_OFFSET_TAIL macro).
 */
PHP_FUNCTION(rd_kafka_offset_tail)
{
zend_long cnt;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "l", &cnt) == FAILURE) {
return;
}
RETURN_LONG(RD_KAFKA_OFFSET_TAIL(cnt));
}
/* }}} */
<?php
/**
 * @generate-function-entries
 * @generate-legacy-arginfo
 */
// Stub source for the global rdkafka functions: the *_arginfo.h headers
// are generated from these declarations (see the generated-file banners).
function rd_kafka_get_err_descs(): array {}
function rd_kafka_err2name(int $err): ?string {}
function rd_kafka_err2str(int $err): ?string {}
/** @deprecated */
function rd_kafka_errno2err(int $errnox): int {}
/** @deprecated */
function rd_kafka_errno(): int {}
function rd_kafka_offset_tail(int $cnt): int {}
function rd_kafka_thread_cnt(): int {}
/* This is a generated file, edit the .stub.php file instead.
 * Stub hash: 0e1e56d853a47168a1f7f0950b674c2de6a91976 */
/* PHP >= 8.0 arginfo: carries argument and return type information. */
ZEND_BEGIN_ARG_WITH_RETURN_TYPE_INFO_EX(arginfo_rd_kafka_get_err_descs, 0, 0, IS_ARRAY, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_RETURN_TYPE_INFO_EX(arginfo_rd_kafka_err2name, 0, 1, IS_STRING, 1)
ZEND_ARG_TYPE_INFO(0, err, IS_LONG, 0)
ZEND_END_ARG_INFO()
#define arginfo_rd_kafka_err2str arginfo_rd_kafka_err2name
ZEND_BEGIN_ARG_WITH_RETURN_TYPE_INFO_EX(arginfo_rd_kafka_errno2err, 0, 1, IS_LONG, 0)
ZEND_ARG_TYPE_INFO(0, errnox, IS_LONG, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_RETURN_TYPE_INFO_EX(arginfo_rd_kafka_errno, 0, 0, IS_LONG, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_RETURN_TYPE_INFO_EX(arginfo_rd_kafka_offset_tail, 0, 1, IS_LONG, 0)
ZEND_ARG_TYPE_INFO(0, cnt, IS_LONG, 0)
ZEND_END_ARG_INFO()
#define arginfo_rd_kafka_thread_cnt arginfo_rd_kafka_errno
ZEND_FUNCTION(rd_kafka_get_err_descs);
ZEND_FUNCTION(rd_kafka_err2name);
ZEND_FUNCTION(rd_kafka_err2str);
ZEND_FUNCTION(rd_kafka_errno2err);
ZEND_FUNCTION(rd_kafka_errno);
ZEND_FUNCTION(rd_kafka_offset_tail);
ZEND_FUNCTION(rd_kafka_thread_cnt);
/* Registration table; ZEND_DEP_FE marks rd_kafka_errno2err/rd_kafka_errno
 * as deprecated, matching the @deprecated tags in the stub. */
static const zend_function_entry ext_functions[] = {
ZEND_FE(rd_kafka_get_err_descs, arginfo_rd_kafka_get_err_descs)
ZEND_FE(rd_kafka_err2name, arginfo_rd_kafka_err2name)
ZEND_FE(rd_kafka_err2str, arginfo_rd_kafka_err2str)
ZEND_DEP_FE(rd_kafka_errno2err, arginfo_rd_kafka_errno2err)
ZEND_DEP_FE(rd_kafka_errno, arginfo_rd_kafka_errno)
ZEND_FE(rd_kafka_offset_tail, arginfo_rd_kafka_offset_tail)
ZEND_FE(rd_kafka_thread_cnt, arginfo_rd_kafka_thread_cnt)
ZEND_FE_END
};
/* This is a generated file, edit the .stub.php file instead.
 * Stub hash: 0e1e56d853a47168a1f7f0950b674c2de6a91976 */
/* Legacy (pre-PHP-8.0) arginfo: argument names only, no type information.
 * Selected instead of the typed variant via a PHP_VERSION_ID check. */
ZEND_BEGIN_ARG_INFO_EX(arginfo_rd_kafka_get_err_descs, 0, 0, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_rd_kafka_err2name, 0, 0, 1)
ZEND_ARG_INFO(0, err)
ZEND_END_ARG_INFO()
#define arginfo_rd_kafka_err2str arginfo_rd_kafka_err2name
ZEND_BEGIN_ARG_INFO_EX(arginfo_rd_kafka_errno2err, 0, 0, 1)
ZEND_ARG_INFO(0, errnox)
ZEND_END_ARG_INFO()
#define arginfo_rd_kafka_errno arginfo_rd_kafka_get_err_descs
ZEND_BEGIN_ARG_INFO_EX(arginfo_rd_kafka_offset_tail, 0, 0, 1)
ZEND_ARG_INFO(0, cnt)
ZEND_END_ARG_INFO()
#define arginfo_rd_kafka_thread_cnt arginfo_rd_kafka_get_err_descs
ZEND_FUNCTION(rd_kafka_get_err_descs);
ZEND_FUNCTION(rd_kafka_err2name);
ZEND_FUNCTION(rd_kafka_err2str);
ZEND_FUNCTION(rd_kafka_errno2err);
ZEND_FUNCTION(rd_kafka_errno);
ZEND_FUNCTION(rd_kafka_offset_tail);
ZEND_FUNCTION(rd_kafka_thread_cnt);
/* Same registration table as the typed variant. */
static const zend_function_entry ext_functions[] = {
ZEND_FE(rd_kafka_get_err_descs, arginfo_rd_kafka_get_err_descs)
ZEND_FE(rd_kafka_err2name, arginfo_rd_kafka_err2name)
ZEND_FE(rd_kafka_err2str, arginfo_rd_kafka_err2str)
ZEND_DEP_FE(rd_kafka_errno2err, arginfo_rd_kafka_errno2err)
ZEND_DEP_FE(rd_kafka_errno, arginfo_rd_kafka_errno)
ZEND_FE(rd_kafka_offset_tail, arginfo_rd_kafka_offset_tail)
ZEND_FE(rd_kafka_thread_cnt, arginfo_rd_kafka_thread_cnt)
ZEND_FE_END
};
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "php.h"
#include "php_rdkafka.h"
#include "php_rdkafka_priv.h"
#include "librdkafka/rdkafka.h"
#include "Zend/zend_exceptions.h"
#include "ext/spl/spl_exceptions.h"
#include "conf.h"
#include "topic_partition.h"
#include "topic.h"
#include "message.h"
#include "metadata.h"
#if PHP_VERSION_ID < 80000
#include "kafka_consumer_legacy_arginfo.h"
#else
#include "kafka_consumer_arginfo.h"
#endif
/* Per-instance state for RdKafka\KafkaConsumer. */
typedef struct _object_intern {
/* librdkafka consumer handle; NULL before __construct() succeeds and after close(). */
rd_kafka_t *rk;
/* PHP callback zvals copied from the Conf object; passed as the rdkafka opaque. */
kafka_conf_callbacks cbs;
/* Must be last: handlers.offset is computed with XtOffsetOf in minit. */
zend_object std;
} object_intern;
/* Class entry and object handlers, initialized in kafka_kafka_consumer_minit(). */
static zend_class_entry * ce;
static zend_object_handlers handlers;
/* Frees a KafkaConsumer object: leaves the group, destroys the librdkafka
 * handle, then releases the callback zvals and the zend object. */
static void kafka_consumer_free(zend_object *object) /* {{{ */
{
object_intern *intern = php_kafka_from_obj(object_intern, object);
rd_kafka_resp_err_t err;
if (intern->rk) {
err = rd_kafka_consumer_close(intern->rk);
if (err) {
php_error(E_WARNING, "rd_kafka_consumer_close failed: %s", rd_kafka_err2str(err));
}
rd_kafka_destroy(intern->rk);
intern->rk = NULL;
}
/* BUGFIX: the callbacks dtor was also called BEFORE rd_kafka_consumer_close()
 * above; closing can still fire callbacks that reach these zvals through the
 * conf opaque, so the callbacks must only be released here, once. */
kafka_conf_callbacks_dtor(&intern->cbs);
zend_object_std_dtor(&intern->std);
}
/* }}} */
/* create_object handler: allocates and initializes a KafkaConsumer object.
 * intern->rk stays zeroed until __construct() runs. */
static zend_object *kafka_consumer_new(zend_class_entry *class_type) /* {{{ */
{
zend_object* retval;
object_intern *intern;
intern = zend_object_alloc(sizeof(*intern), class_type);
zend_object_std_init(&intern->std, class_type);
object_properties_init(&intern->std, class_type);
retval = &intern->std;
/* Install our handlers so free_obj/offset (set up in minit) take effect. */
retval->handlers = &handlers;
return retval;
}
/* }}} */
/* Fetches the intern struct for a consumer zval. Throws and returns NULL
 * when the handle is missing (never constructed, or already closed). */
static object_intern * get_object(zval *zconsumer) /* {{{ */
{
object_intern *oconsumer = Z_RDKAFKA_P(object_intern, zconsumer);
if (!oconsumer->rk) {
zend_throw_exception_ex(NULL, 0, "RdKafka\\KafkaConsumer::__construct() has not been called, or RdKafka\\KafkaConsumer::close() was already called");
return NULL;
}
return oconsumer;
} /* }}} */
/* Returns 1 when `conf` carries a non-empty "group.id" setting, 0 otherwise. */
static int has_group_id(rd_kafka_conf_t *conf) { /* {{{ */
size_t len;
if (conf == NULL) {
return 0;
}
if (rd_kafka_conf_get(conf, "group.id", NULL, &len) != RD_KAFKA_CONF_OK) {
return 0;
}
/* len counts the trailing NUL, so <= 1 means the value is empty. */
return len > 1;
} /* }}} */
/* {{{ proto RdKafka\KafkaConsumer::__construct(RdKafka\Conf $conf)
   Creates a high-level consumer from the given configuration. Throws
   RdKafka\Exception when "group.id" is not configured or when the
   librdkafka handle cannot be created. */
PHP_METHOD(RdKafka_KafkaConsumer, __construct)
{
zval *zconf;
zend_error_handling error_handling;
char errstr[512];
rd_kafka_t *rk;
object_intern *intern;
kafka_conf_object *conf_intern;
rd_kafka_conf_t *conf = NULL;
/* Surface argument errors as InvalidArgumentException while parsing. */
zend_replace_error_handling(EH_THROW, spl_ce_InvalidArgumentException, &error_handling);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "O", &zconf, ce_kafka_conf) == FAILURE) {
zend_restore_error_handling(&error_handling);
return;
}
intern = Z_RDKAFKA_P(object_intern, getThis());
conf_intern = get_kafka_conf_object(zconf);
if (conf_intern) {
/* Duplicate so later mutation of the Conf object cannot affect us;
 * the callbacks travel through the conf opaque pointer. */
conf = rd_kafka_conf_dup(conf_intern->u.conf);
kafka_conf_callbacks_copy(&intern->cbs, &conf_intern->cbs);
intern->cbs.zrk = *getThis();
rd_kafka_conf_set_opaque(conf, &intern->cbs);
}
if (!has_group_id(conf)) {
if (conf) {
rd_kafka_conf_destroy(conf);
}
/* BUGFIX: restore the error handler before bailing out — the original
 * leaked the EH_THROW handler on this path (every other early-exit
 * path in this function restores it). */
zend_restore_error_handling(&error_handling);
zend_throw_exception(ce_kafka_exception, "\"group.id\" must be configured", 0);
return;
}
/* rd_kafka_new() takes ownership of `conf` from here on. */
rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
if (rk == NULL) {
zend_restore_error_handling(&error_handling);
zend_throw_exception(ce_kafka_exception, errstr, 0);
return;
}
if (intern->cbs.log) {
rd_kafka_set_log_queue(rk, NULL);
}
intern->rk = rk;
/* Route main-queue events to the consumer queue so consume() sees them. */
rd_kafka_poll_set_consumer(rk);
zend_restore_error_handling(&error_handling);
}
/* }}} */
/* {{{ proto void RdKafka\KafkaConsumer::assign([array $topics])
   Atomic assignment of partitions to consume; a NULL/omitted list clears
   the current assignment */
PHP_METHOD(RdKafka_KafkaConsumer, assign)
{
HashTable *htopars = NULL;
rd_kafka_topic_partition_list_t *topics;
rd_kafka_resp_err_t err;
object_intern *intern;
/* "|h!": optional, nullable array argument. */
if (zend_parse_parameters(ZEND_NUM_ARGS(), "|h!", &htopars) == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
if (htopars) {
topics = array_arg_to_kafka_topic_partition_list(1, htopars);
if (!topics) {
/* Conversion failed; presumably it reported the error — TODO confirm. */
return;
}
} else {
/* NULL tells librdkafka to clear the assignment. */
topics = NULL;
}
err = rd_kafka_assign(intern->rk, topics);
if (topics) {
rd_kafka_topic_partition_list_destroy(topics);
}
if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
return;
}
}
/* }}} */
/* {{{ proto array RdKafka\KafkaConsumer::getAssignment()
   Returns the current partition assignment as TopicPartition objects */
PHP_METHOD(RdKafka_KafkaConsumer, getAssignment)
{
rd_kafka_resp_err_t err;
rd_kafka_topic_partition_list_t *topics;
object_intern *intern;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "") == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
/* librdkafka allocates `topics`; we own it and must destroy it below. */
err = rd_kafka_assignment(intern->rk, &topics);
if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
return;
}
kafka_topic_partition_list_to_array(return_value, topics);
rd_kafka_topic_partition_list_destroy(topics);
}
/* }}} */
/* {{{ proto void RdKafka\KafkaConsumer::subscribe(array $topics)
   Update the subscription set to $topics (array of topic-name strings) */
PHP_METHOD(RdKafka_KafkaConsumer, subscribe)
{
HashTable *htopics;
HashPosition pos;
object_intern *intern;
rd_kafka_topic_partition_list_t *topics;
rd_kafka_resp_err_t err;
zval *zv;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "h", &htopics) == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
topics = rd_kafka_topic_partition_list_new(zend_hash_num_elements(htopics));
/* Each topic is added with partition UA; the broker-side group balancer
 * decides the actual partition assignment. */
for (zend_hash_internal_pointer_reset_ex(htopics, &pos);
(zv = zend_hash_get_current_data_ex(htopics, &pos)) != NULL;
zend_hash_move_forward_ex(htopics, &pos)) {
convert_to_string_ex(zv);
rd_kafka_topic_partition_list_add(topics, Z_STRVAL_P(zv), RD_KAFKA_PARTITION_UA);
}
err = rd_kafka_subscribe(intern->rk, topics);
rd_kafka_topic_partition_list_destroy(topics);
if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
return;
}
}
/* }}} */
/* {{{ proto array RdKafka\KafkaConsumer::getSubscription()
   Returns the current subscription as set by subscribe(): a flat array
   of topic-name strings */
PHP_METHOD(RdKafka_KafkaConsumer, getSubscription)
{
rd_kafka_resp_err_t err;
rd_kafka_topic_partition_list_t *topics;
object_intern *intern;
int i;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "") == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
/* librdkafka allocates `topics`; destroyed below after conversion. */
err = rd_kafka_subscription(intern->rk, &topics);
if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
return;
}
array_init_size(return_value, topics->cnt);
for (i = 0; i < topics->cnt; i++) {
add_next_index_string(return_value, topics->elems[i].topic);
}
rd_kafka_topic_partition_list_destroy(topics);
}
/* }}} */
/* {{{ proto void RdKafka\KafkaConsumer::unsubscribe()
   Unsubscribe from the current subscription set */
PHP_METHOD(RdKafka_KafkaConsumer, unsubscribe)
{
object_intern *intern;
rd_kafka_resp_err_t err;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "") == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
err = rd_kafka_unsubscribe(intern->rk);
if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
return;
}
}
/* }}} */
/* {{{ proto Message RdKafka\KafkaConsumer::consume(int $timeout_ms)
   Consume a message or get an error event; triggers callbacks.
   A poll timeout is surfaced as a Message with err == RD_KAFKA_RESP_ERR__TIMED_OUT. */
PHP_METHOD(RdKafka_KafkaConsumer, consume)
{
object_intern *intern;
zend_long timeout_ms;
rd_kafka_message_t *rkmessage, rkmessage_tmp = {0};
if (zend_parse_parameters(ZEND_NUM_ARGS(), "l", &timeout_ms) == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
rkmessage = rd_kafka_consumer_poll(intern->rk, timeout_ms);
if (!rkmessage) {
/* No message within timeout: synthesize a stack-allocated timeout message. */
rkmessage_tmp.err = RD_KAFKA_RESP_ERR__TIMED_OUT;
rkmessage = &rkmessage_tmp;
}
kafka_message_new(return_value, rkmessage, NULL);
/* Only destroy messages that librdkafka allocated, not the stack temp. */
if (rkmessage != &rkmessage_tmp) {
rd_kafka_message_destroy(rkmessage);
}
}
/* }}} */
/* Shared backend for commit()/commitAsync(). `async` selects the librdkafka
 * commit mode. The optional argument may be a Message (commit that message's
 * offset + 1), an array of TopicPartition (commit those offsets), or NULL
 * (commit the current assignment's offsets). */
static void consumer_commit(int async, INTERNAL_FUNCTION_PARAMETERS) /* {{{ */
{
zval *zarg = NULL;
object_intern *intern;
rd_kafka_topic_partition_list_t *offsets = NULL;
rd_kafka_resp_err_t err;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "|z!", &zarg) == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
if (zarg) {
if (Z_TYPE_P(zarg) == IS_OBJECT && instanceof_function(Z_OBJCE_P(zarg), ce_kafka_message)) {
zval *zerr;
zval *ztopic;
zval *zpartition;
zval *zoffset;
rd_kafka_topic_partition_t *rktpar;
/* Refuse to commit an error message (its offsets are meaningless). */
zerr = rdkafka_read_property(NULL, Z_RDKAFKA_PROP_OBJ(zarg), ZEND_STRL("err"), 0);
if (zerr && Z_TYPE_P(zerr) != IS_NULL && (Z_TYPE_P(zerr) != IS_LONG || Z_LVAL_P(zerr) != RD_KAFKA_RESP_ERR_NO_ERROR)) {
zend_throw_exception(ce_kafka_exception, "Invalid argument: Specified Message has an error", RD_KAFKA_RESP_ERR__INVALID_ARG);
return;
}
ztopic = rdkafka_read_property(NULL, Z_RDKAFKA_PROP_OBJ(zarg), ZEND_STRL("topic_name"), 0);
if (!ztopic || Z_TYPE_P(ztopic) != IS_STRING) {
zend_throw_exception(ce_kafka_exception, "Invalid argument: Specified Message's topic_name is not a string", RD_KAFKA_RESP_ERR__INVALID_ARG);
return;
}
zpartition = rdkafka_read_property(NULL, Z_RDKAFKA_PROP_OBJ(zarg), ZEND_STRL("partition"), 0);
if (!zpartition || Z_TYPE_P(zpartition) != IS_LONG) {
zend_throw_exception(ce_kafka_exception, "Invalid argument: Specified Message's partition is not an int", RD_KAFKA_RESP_ERR__INVALID_ARG);
return;
}
zoffset = rdkafka_read_property(NULL, Z_RDKAFKA_PROP_OBJ(zarg), ZEND_STRL("offset"), 0);
if (!zoffset || Z_TYPE_P(zoffset) != IS_LONG) {
zend_throw_exception(ce_kafka_exception, "Invalid argument: Specified Message's offset is not an int", RD_KAFKA_RESP_ERR__INVALID_ARG);
return;
}
offsets = rd_kafka_topic_partition_list_new(1);
rktpar = rd_kafka_topic_partition_list_add(
offsets, Z_STRVAL_P(ztopic),
Z_LVAL_P(zpartition));
/* The committed offset is the NEXT message to consume, hence +1. */
rktpar->offset = Z_LVAL_P(zoffset)+1;
} else if (Z_TYPE_P(zarg) == IS_ARRAY) {
HashTable *ary = Z_ARRVAL_P(zarg);
offsets = array_arg_to_kafka_topic_partition_list(1, ary);
if (!offsets) {
return;
}
} else if (Z_TYPE_P(zarg) != IS_NULL) {
/* Wrong type: fatal, mirroring zend parameter-type errors. */
php_error(E_ERROR,
"RdKafka\\KafkaConsumer::%s() expects parameter %d to be %s, %s given",
get_active_function_name(),
1,
"an instance of RdKafka\\Message or an array of RdKafka\\TopicPartition",
zend_zval_type_name(zarg));
return;
}
}
/* offsets == NULL commits the current assignment's positions. */
err = rd_kafka_commit(intern->rk, offsets, async);
if (offsets) {
rd_kafka_topic_partition_list_destroy(offsets);
}
if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
return;
}
}
/* }}} */
/* {{{ proto void RdKafka\KafkaConsumer::commit([mixed $message_or_offsets])
   Commit offsets synchronously (blocks until the commit is done) */
PHP_METHOD(RdKafka_KafkaConsumer, commit)
{
consumer_commit(0, INTERNAL_FUNCTION_PARAM_PASSTHRU);
}
/* }}} */
/* {{{ proto void RdKafka\KafkaConsumer::commitAsync([mixed $message_or_offsets])
   Commit offsets asynchronously (does not wait for the broker response) */
PHP_METHOD(RdKafka_KafkaConsumer, commitAsync)
{
consumer_commit(1, INTERNAL_FUNCTION_PARAM_PASSTHRU);
}
/* }}} */
/* {{{ proto void RdKafka\KafkaConsumer::close()
Close connection */
PHP_METHOD(RdKafka_KafkaConsumer, close)
{
object_intern *intern;
intern = get_object(getThis());
if (!intern) {
return;
}
rd_kafka_consumer_close(intern->rk);
intern->rk = NULL;
}
/* }}} */
/* {{{ proto Metadata RdKafka\KafkaConsumer::getMetadata(bool $all_topics, RdKafka\Topic $only_topic, int $timeout_ms)
   Request Metadata from broker; $only_topic may be NULL to use $all_topics scope */
PHP_METHOD(RdKafka_KafkaConsumer, getMetadata)
{
zend_bool all_topics;
zval *only_zrkt;
zend_long timeout_ms;
rd_kafka_resp_err_t err;
object_intern *intern;
const rd_kafka_metadata_t *metadata;
kafka_topic_object *only_orkt = NULL;
/* "bO!l": bool, nullable Topic object, timeout in milliseconds. */
if (zend_parse_parameters(ZEND_NUM_ARGS(), "bO!l", &all_topics, &only_zrkt, ce_kafka_topic, &timeout_ms) == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
if (only_zrkt) {
only_orkt = get_kafka_topic_object(only_zrkt);
if (!only_orkt) {
return;
}
}
err = rd_kafka_metadata(intern->rk, all_topics, only_orkt ? only_orkt->rkt : NULL, &metadata, timeout_ms);
if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
return;
}
/* NOTE(review): metadata is not freed here — presumably kafka_metadata_init()
 * takes ownership of it; confirm against metadata.c. */
kafka_metadata_init(return_value, metadata);
}
/* }}} */
/* {{{ proto RdKafka\KafkaConsumerTopic RdKafka\KafkaConsumer::newTopic(string $topic)
   Returns a RdKafka\KafkaConsumerTopic object for $topic, optionally using
   a dedicated TopicConf */
PHP_METHOD(RdKafka_KafkaConsumer, newTopic)
{
char *topic;
size_t topic_len;
rd_kafka_topic_t *rkt;
object_intern *intern;
kafka_topic_object *topic_intern;
zval *zconf = NULL;
rd_kafka_topic_conf_t *conf = NULL;
kafka_conf_object *conf_intern;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "s|O!", &topic, &topic_len, &zconf, ce_kafka_topic_conf) == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
if (zconf) {
conf_intern = get_kafka_conf_object(zconf);
if (conf_intern) {
/* Duplicate: rd_kafka_topic_new() takes ownership of the conf. */
conf = rd_kafka_topic_conf_dup(conf_intern->u.topic_conf);
}
}
rkt = rd_kafka_topic_new(intern->rk, topic, conf);
if (!rkt) {
/* Topic handle creation failed; returns NULL silently. */
return;
}
if (object_init_ex(return_value, ce_kafka_kafka_consumer_topic) != SUCCESS) {
/* NOTE(review): rkt is not destroyed on this path — potential leak. */
return;
}
topic_intern = Z_RDKAFKA_P(kafka_topic_object, return_value);
if (!topic_intern) {
return;
}
topic_intern->rkt = rkt;
}
/* }}} */
/* {{{ proto array RdKafka\KafkaConsumer::getCommittedOffsets(array $topics, int $timeout_ms)
   Retrieve committed offsets for the given topic+partition list; returns a
   new TopicPartition array with the offsets filled in */
PHP_METHOD(RdKafka_KafkaConsumer, getCommittedOffsets)
{
HashTable *htopars = NULL;
zend_long timeout_ms;
object_intern *intern;
rd_kafka_resp_err_t err;
rd_kafka_topic_partition_list_t *topics;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "hl", &htopars, &timeout_ms) == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
topics = array_arg_to_kafka_topic_partition_list(1, htopars);
if (!topics) {
return;
}
/* rd_kafka_committed() fills the offsets in-place. */
err = rd_kafka_committed(intern->rk, topics, timeout_ms);
if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
rd_kafka_topic_partition_list_destroy(topics);
zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
return;
}
kafka_topic_partition_list_to_array(return_value, topics);
rd_kafka_topic_partition_list_destroy(topics);
}
/* }}} */
/* {{{ proto array RdKafka\KafkaConsumer::getOffsetPositions(array $topics)
   Retrieve current consume positions for the given topic+partition list */
PHP_METHOD(RdKafka_KafkaConsumer, getOffsetPositions)
{
HashTable *htopars = NULL;
object_intern *intern;
rd_kafka_resp_err_t err;
rd_kafka_topic_partition_list_t *topics;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "h", &htopars) == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
topics = array_arg_to_kafka_topic_partition_list(1, htopars);
if (!topics) {
return;
}
/* rd_kafka_position() fills the offsets in-place. */
err = rd_kafka_position(intern->rk, topics);
if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
rd_kafka_topic_partition_list_destroy(topics);
zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
return;
}
kafka_topic_partition_list_to_array(return_value, topics);
rd_kafka_topic_partition_list_destroy(topics);
}
/* }}} */
/* {{{ proto array RdKafka\KafkaConsumer::offsetsForTimes(array $topicPartitions, int $timeout_ms)
   Look up the offsets for the given partitions by timestamp; the input
   TopicPartitions carry timestamps in their offset field */
PHP_METHOD(RdKafka_KafkaConsumer, offsetsForTimes)
{
HashTable *htopars = NULL;
object_intern *intern;
rd_kafka_topic_partition_list_t *topicPartitions;
zend_long timeout_ms;
rd_kafka_resp_err_t err;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "hl", &htopars, &timeout_ms) == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
topicPartitions = array_arg_to_kafka_topic_partition_list(1, htopars);
if (!topicPartitions) {
return;
}
/* rd_kafka_offsets_for_times() replaces the timestamps with offsets in-place. */
err = rd_kafka_offsets_for_times(intern->rk, topicPartitions, timeout_ms);
if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
rd_kafka_topic_partition_list_destroy(topicPartitions);
zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
return;
}
kafka_topic_partition_list_to_array(return_value, topicPartitions);
rd_kafka_topic_partition_list_destroy(topicPartitions);
}
/* }}} */
/* {{{ proto void RdKafka\KafkaConsumer::queryWatermarkOffsets(string $topic, int $partition, int &$low, int &$high, int $timeout_ms)
   Query broker for low (oldest/beginning) and high (newest/end) offsets
   for a partition; results are written through the by-reference args */
PHP_METHOD(RdKafka_KafkaConsumer, queryWatermarkOffsets)
{
object_intern *intern;
char *topic;
size_t topic_length;
/* BUGFIX: rd_kafka_query_watermark_offsets() takes int64_t pointers;
 * `long` is only 32 bits on LLP64 platforms (Windows), corrupting the
 * results and producing an incompatible-pointer warning. */
int64_t low, high;
zend_long partition, timeout;
zval *lowResult, *highResult;
rd_kafka_resp_err_t err;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "slzzl", &topic, &topic_length, &partition, &lowResult, &highResult, &timeout) == FAILURE) {
return;
}
/* The by-ref arguments arrive as references; write through them. */
ZVAL_DEREF(lowResult);
ZVAL_DEREF(highResult);
intern = get_object(getThis());
if (!intern) {
return;
}
err = rd_kafka_query_watermark_offsets(intern->rk, topic, partition, &low, &high, timeout);
if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
return;
}
ZVAL_LONG(lowResult, low);
ZVAL_LONG(highResult, high);
}
/* }}} */
/* {{{ proto RdKafka\TopicPartition[] RdKafka\KafkaConsumer::pausePartitions(RdKafka\TopicPartition[] $topicPartitions)
   Pause consumption for the provided list of partitions; returns the list
   with per-partition error codes filled in */
PHP_METHOD(RdKafka_KafkaConsumer, pausePartitions)
{
HashTable *htopars;
rd_kafka_topic_partition_list_t *topars;
rd_kafka_resp_err_t err;
object_intern *intern;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "h", &htopars) == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
topars = array_arg_to_kafka_topic_partition_list(1, htopars);
if (!topars) {
return;
}
err = rd_kafka_pause_partitions(intern->rk, topars);
if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
rd_kafka_topic_partition_list_destroy(topars);
zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
return;
}
kafka_topic_partition_list_to_array(return_value, topars);
rd_kafka_topic_partition_list_destroy(topars);
}
/* }}} */
/* {{{ proto RdKafka\TopicPartition[] RdKafka\KafkaConsumer::resumePartitions(RdKafka\TopicPartition[] $topicPartitions)
   Resume consumption for the provided list of partitions; returns the list
   with per-partition error codes filled in */
PHP_METHOD(RdKafka_KafkaConsumer, resumePartitions)
{
HashTable *htopars;
rd_kafka_topic_partition_list_t *topars;
rd_kafka_resp_err_t err;
object_intern *intern;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "h", &htopars) == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
topars = array_arg_to_kafka_topic_partition_list(1, htopars);
if (!topars) {
return;
}
err = rd_kafka_resume_partitions(intern->rk, topars);
if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
rd_kafka_topic_partition_list_destroy(topars);
zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
return;
}
kafka_topic_partition_list_to_array(return_value, topars);
rd_kafka_topic_partition_list_destroy(topars);
}
/* }}} */
/* Module-init hook: registers the RdKafka\KafkaConsumer class and wires up
 * the custom object handlers (create/free and the zend_object offset). */
void kafka_kafka_consumer_minit(INIT_FUNC_ARGS) /* {{{ */
{
ce = register_class_RdKafka_KafkaConsumer();
ce->create_object = kafka_consumer_new;
handlers = kafka_default_object_handlers;
handlers.free_obj = kafka_consumer_free;
/* Offset of `std` lets php_kafka_from_obj() recover the intern struct. */
handlers.offset = XtOffsetOf(object_intern, std);
} /* }}} */
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
void kafka_kafka_consumer_minit(INIT_FUNC_ARGS);
<?php
// gen_stub.php input describing RdKafka\KafkaConsumer (the high-level,
// group-rebalancing consumer). The *_arginfo.h and *_legacy_arginfo.h
// headers are generated from this stub -- edit this file, not those.
/**
 * @generate-class-entries
 * @generate-function-entries
 * @generate-legacy-arginfo
 */
namespace RdKafka;
class KafkaConsumer
{
// Callback holders; populated from the Conf object by the C implementation
// at construction time, not assigned from PHP userland.
private ?callable $error_cb;
private ?callable $rebalance_cb;
private ?callable $dr_msg_cb;
public function __construct(Conf $conf) {}
// Partition assignment / subscription management.
/** @tentative-return-type */
public function assign(?array $topic_partitions = null): void {}
/** @tentative-return-type */
public function getAssignment(): array {}
/** @tentative-return-type */
public function commit(Message|array|null $message_or_offsets = null): void {}
/** @tentative-return-type */
public function close(): void {}
/** @tentative-return-type */
public function commitAsync(Message|array|null $message_or_offsets = null): void {}
/** @tentative-return-type */
public function consume(int $timeout_ms): Message {}
/** @tentative-return-type */
public function subscribe(array $topics): void {}
/** @tentative-return-type */
public function getSubscription(): array {}
/** @tentative-return-type */
public function unsubscribe(): void {}
// Metadata and offset queries.
/** @tentative-return-type */
public function getMetadata(bool $all_topics, ?Topic $only_topic, int $timeout_ms): Metadata {}
/** @tentative-return-type */
public function newTopic(string $topic_name, ?TopicConf $topic_conf = null): KafkaConsumerTopic {}
/** @tentative-return-type */
public function getCommittedOffsets(array $topic_partitions, int $timeout_ms): array {}
/** @tentative-return-type */
public function getOffsetPositions(array $topic_partitions): array {}
/** @tentative-return-type */
public function queryWatermarkOffsets(string $topic, int $partition, int &$low, int &$high, int $timeout_ms): void {}
/** @tentative-return-type */
public function offsetsForTimes(array $topic_partitions, int $timeout_ms): array {}
// Flow control: pause/resume consumption on specific partitions.
/** @tentative-return-type */
public function pausePartitions(array $topic_partitions): array {}
/** @tentative-return-type */
public function resumePartitions(array $topic_partitions): array {}
}
/* This is a generated file, edit the .stub.php file instead.
 * Stub hash: 47e9238c79f5508833423d31a2e09041754dbffb */
/* Typed arginfo + method table for RdKafka\KafkaConsumer, used on PHP >= 8.0.
 * (An untyped "legacy" variant exists for PHP < 8.0.) Generated by
 * gen_stub.php from the .stub.php file -- do not edit by hand. */
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_KafkaConsumer___construct, 0, 0, 1)
ZEND_ARG_OBJ_INFO(0, conf, RdKafka\\Conf, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_KafkaConsumer_assign, 0, 0, IS_VOID, 0)
ZEND_ARG_TYPE_INFO_WITH_DEFAULT_VALUE(0, topic_partitions, IS_ARRAY, 1, "null")
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_KafkaConsumer_getAssignment, 0, 0, IS_ARRAY, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_KafkaConsumer_commit, 0, 0, IS_VOID, 0)
ZEND_ARG_OBJ_TYPE_MASK(0, message_or_offsets, RdKafka\\Message, MAY_BE_ARRAY|MAY_BE_NULL, "null")
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_KafkaConsumer_close, 0, 0, IS_VOID, 0)
ZEND_END_ARG_INFO()
/* Methods with identical signatures share one arginfo via #define aliases. */
#define arginfo_class_RdKafka_KafkaConsumer_commitAsync arginfo_class_RdKafka_KafkaConsumer_commit
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_OBJ_INFO_EX(arginfo_class_RdKafka_KafkaConsumer_consume, 0, 1, RdKafka\\Message, 0)
ZEND_ARG_TYPE_INFO(0, timeout_ms, IS_LONG, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_KafkaConsumer_subscribe, 0, 1, IS_VOID, 0)
ZEND_ARG_TYPE_INFO(0, topics, IS_ARRAY, 0)
ZEND_END_ARG_INFO()
#define arginfo_class_RdKafka_KafkaConsumer_getSubscription arginfo_class_RdKafka_KafkaConsumer_getAssignment
#define arginfo_class_RdKafka_KafkaConsumer_unsubscribe arginfo_class_RdKafka_KafkaConsumer_close
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_OBJ_INFO_EX(arginfo_class_RdKafka_KafkaConsumer_getMetadata, 0, 3, RdKafka\\Metadata, 0)
ZEND_ARG_TYPE_INFO(0, all_topics, _IS_BOOL, 0)
ZEND_ARG_OBJ_INFO(0, only_topic, RdKafka\\Topic, 1)
ZEND_ARG_TYPE_INFO(0, timeout_ms, IS_LONG, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_OBJ_INFO_EX(arginfo_class_RdKafka_KafkaConsumer_newTopic, 0, 1, RdKafka\\KafkaConsumerTopic, 0)
ZEND_ARG_TYPE_INFO(0, topic_name, IS_STRING, 0)
ZEND_ARG_OBJ_INFO_WITH_DEFAULT_VALUE(0, topic_conf, RdKafka\\TopicConf, 1, "null")
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_KafkaConsumer_getCommittedOffsets, 0, 2, IS_ARRAY, 0)
ZEND_ARG_TYPE_INFO(0, topic_partitions, IS_ARRAY, 0)
ZEND_ARG_TYPE_INFO(0, timeout_ms, IS_LONG, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_KafkaConsumer_getOffsetPositions, 0, 1, IS_ARRAY, 0)
ZEND_ARG_TYPE_INFO(0, topic_partitions, IS_ARRAY, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_KafkaConsumer_queryWatermarkOffsets, 0, 5, IS_VOID, 0)
ZEND_ARG_TYPE_INFO(0, topic, IS_STRING, 0)
ZEND_ARG_TYPE_INFO(0, partition, IS_LONG, 0)
ZEND_ARG_TYPE_INFO(1, low, IS_LONG, 0)
ZEND_ARG_TYPE_INFO(1, high, IS_LONG, 0)
ZEND_ARG_TYPE_INFO(0, timeout_ms, IS_LONG, 0)
ZEND_END_ARG_INFO()
#define arginfo_class_RdKafka_KafkaConsumer_offsetsForTimes arginfo_class_RdKafka_KafkaConsumer_getCommittedOffsets
#define arginfo_class_RdKafka_KafkaConsumer_pausePartitions arginfo_class_RdKafka_KafkaConsumer_getOffsetPositions
#define arginfo_class_RdKafka_KafkaConsumer_resumePartitions arginfo_class_RdKafka_KafkaConsumer_getOffsetPositions
/* Forward declarations of the PHP_METHOD implementations in kafka_consumer.c. */
ZEND_METHOD(RdKafka_KafkaConsumer, __construct);
ZEND_METHOD(RdKafka_KafkaConsumer, assign);
ZEND_METHOD(RdKafka_KafkaConsumer, getAssignment);
ZEND_METHOD(RdKafka_KafkaConsumer, commit);
ZEND_METHOD(RdKafka_KafkaConsumer, close);
ZEND_METHOD(RdKafka_KafkaConsumer, commitAsync);
ZEND_METHOD(RdKafka_KafkaConsumer, consume);
ZEND_METHOD(RdKafka_KafkaConsumer, subscribe);
ZEND_METHOD(RdKafka_KafkaConsumer, getSubscription);
ZEND_METHOD(RdKafka_KafkaConsumer, unsubscribe);
ZEND_METHOD(RdKafka_KafkaConsumer, getMetadata);
ZEND_METHOD(RdKafka_KafkaConsumer, newTopic);
ZEND_METHOD(RdKafka_KafkaConsumer, getCommittedOffsets);
ZEND_METHOD(RdKafka_KafkaConsumer, getOffsetPositions);
ZEND_METHOD(RdKafka_KafkaConsumer, queryWatermarkOffsets);
ZEND_METHOD(RdKafka_KafkaConsumer, offsetsForTimes);
ZEND_METHOD(RdKafka_KafkaConsumer, pausePartitions);
ZEND_METHOD(RdKafka_KafkaConsumer, resumePartitions);
/* Method table wiring each PHP method name to its C implementation + arginfo. */
static const zend_function_entry class_RdKafka_KafkaConsumer_methods[] = {
ZEND_ME(RdKafka_KafkaConsumer, __construct, arginfo_class_RdKafka_KafkaConsumer___construct, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, assign, arginfo_class_RdKafka_KafkaConsumer_assign, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, getAssignment, arginfo_class_RdKafka_KafkaConsumer_getAssignment, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, commit, arginfo_class_RdKafka_KafkaConsumer_commit, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, close, arginfo_class_RdKafka_KafkaConsumer_close, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, commitAsync, arginfo_class_RdKafka_KafkaConsumer_commitAsync, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, consume, arginfo_class_RdKafka_KafkaConsumer_consume, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, subscribe, arginfo_class_RdKafka_KafkaConsumer_subscribe, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, getSubscription, arginfo_class_RdKafka_KafkaConsumer_getSubscription, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, unsubscribe, arginfo_class_RdKafka_KafkaConsumer_unsubscribe, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, getMetadata, arginfo_class_RdKafka_KafkaConsumer_getMetadata, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, newTopic, arginfo_class_RdKafka_KafkaConsumer_newTopic, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, getCommittedOffsets, arginfo_class_RdKafka_KafkaConsumer_getCommittedOffsets, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, getOffsetPositions, arginfo_class_RdKafka_KafkaConsumer_getOffsetPositions, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, queryWatermarkOffsets, arginfo_class_RdKafka_KafkaConsumer_queryWatermarkOffsets, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, offsetsForTimes, arginfo_class_RdKafka_KafkaConsumer_offsetsForTimes, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, pausePartitions, arginfo_class_RdKafka_KafkaConsumer_pausePartitions, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, resumePartitions, arginfo_class_RdKafka_KafkaConsumer_resumePartitions, ZEND_ACC_PUBLIC)
ZEND_FE_END
};
/* Generated class-registration function (PHP >= 8 variant): registers
 * RdKafka\KafkaConsumer and declares its three private callback properties
 * as typed (?callable). ZVAL_UNDEF is the "uninitialized" default used for
 * typed properties; the strings are interned as persistent (last arg 1). */
static zend_class_entry *register_class_RdKafka_KafkaConsumer(void)
{
zend_class_entry ce, *class_entry;
INIT_NS_CLASS_ENTRY(ce, "RdKafka", "KafkaConsumer", class_RdKafka_KafkaConsumer_methods);
class_entry = zend_register_internal_class_ex(&ce, NULL);
zval property_error_cb_default_value;
ZVAL_UNDEF(&property_error_cb_default_value);
zend_string *property_error_cb_name = zend_string_init("error_cb", sizeof("error_cb") - 1, 1);
zend_declare_typed_property(class_entry, property_error_cb_name, &property_error_cb_default_value, ZEND_ACC_PRIVATE, NULL, (zend_type) ZEND_TYPE_INIT_MASK(MAY_BE_CALLABLE|MAY_BE_NULL));
zend_string_release(property_error_cb_name);
zval property_rebalance_cb_default_value;
ZVAL_UNDEF(&property_rebalance_cb_default_value);
zend_string *property_rebalance_cb_name = zend_string_init("rebalance_cb", sizeof("rebalance_cb") - 1, 1);
zend_declare_typed_property(class_entry, property_rebalance_cb_name, &property_rebalance_cb_default_value, ZEND_ACC_PRIVATE, NULL, (zend_type) ZEND_TYPE_INIT_MASK(MAY_BE_CALLABLE|MAY_BE_NULL));
zend_string_release(property_rebalance_cb_name);
zval property_dr_msg_cb_default_value;
ZVAL_UNDEF(&property_dr_msg_cb_default_value);
zend_string *property_dr_msg_cb_name = zend_string_init("dr_msg_cb", sizeof("dr_msg_cb") - 1, 1);
zend_declare_typed_property(class_entry, property_dr_msg_cb_name, &property_dr_msg_cb_default_value, ZEND_ACC_PRIVATE, NULL, (zend_type) ZEND_TYPE_INIT_MASK(MAY_BE_CALLABLE|MAY_BE_NULL));
zend_string_release(property_dr_msg_cb_name);
return class_entry;
}
/* This is a generated file, edit the .stub.php file instead.
 * Stub hash: 47e9238c79f5508833423d31a2e09041754dbffb */
/* Legacy (untyped) arginfo + method table for RdKafka\KafkaConsumer, used on
 * PHP < 8.0 where tentative return types and typed arginfo macros are not
 * available: only argument names and by-ref flags are declared. Generated by
 * gen_stub.php -- do not edit by hand. */
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_KafkaConsumer___construct, 0, 0, 1)
ZEND_ARG_INFO(0, conf)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_KafkaConsumer_assign, 0, 0, 0)
ZEND_ARG_INFO(0, topic_partitions)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_KafkaConsumer_getAssignment, 0, 0, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_KafkaConsumer_commit, 0, 0, 0)
ZEND_ARG_INFO(0, message_or_offsets)
ZEND_END_ARG_INFO()
/* Zero-arg methods all alias the empty getAssignment arginfo. */
#define arginfo_class_RdKafka_KafkaConsumer_close arginfo_class_RdKafka_KafkaConsumer_getAssignment
#define arginfo_class_RdKafka_KafkaConsumer_commitAsync arginfo_class_RdKafka_KafkaConsumer_commit
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_KafkaConsumer_consume, 0, 0, 1)
ZEND_ARG_INFO(0, timeout_ms)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_KafkaConsumer_subscribe, 0, 0, 1)
ZEND_ARG_INFO(0, topics)
ZEND_END_ARG_INFO()
#define arginfo_class_RdKafka_KafkaConsumer_getSubscription arginfo_class_RdKafka_KafkaConsumer_getAssignment
#define arginfo_class_RdKafka_KafkaConsumer_unsubscribe arginfo_class_RdKafka_KafkaConsumer_getAssignment
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_KafkaConsumer_getMetadata, 0, 0, 3)
ZEND_ARG_INFO(0, all_topics)
ZEND_ARG_INFO(0, only_topic)
ZEND_ARG_INFO(0, timeout_ms)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_KafkaConsumer_newTopic, 0, 0, 1)
ZEND_ARG_INFO(0, topic_name)
ZEND_ARG_INFO(0, topic_conf)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_KafkaConsumer_getCommittedOffsets, 0, 0, 2)
ZEND_ARG_INFO(0, topic_partitions)
ZEND_ARG_INFO(0, timeout_ms)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_KafkaConsumer_getOffsetPositions, 0, 0, 1)
ZEND_ARG_INFO(0, topic_partitions)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_KafkaConsumer_queryWatermarkOffsets, 0, 0, 5)
ZEND_ARG_INFO(0, topic)
ZEND_ARG_INFO(0, partition)
ZEND_ARG_INFO(1, low)
ZEND_ARG_INFO(1, high)
ZEND_ARG_INFO(0, timeout_ms)
ZEND_END_ARG_INFO()
#define arginfo_class_RdKafka_KafkaConsumer_offsetsForTimes arginfo_class_RdKafka_KafkaConsumer_getCommittedOffsets
#define arginfo_class_RdKafka_KafkaConsumer_pausePartitions arginfo_class_RdKafka_KafkaConsumer_getOffsetPositions
#define arginfo_class_RdKafka_KafkaConsumer_resumePartitions arginfo_class_RdKafka_KafkaConsumer_getOffsetPositions
/* Forward declarations of the PHP_METHOD implementations. */
ZEND_METHOD(RdKafka_KafkaConsumer, __construct);
ZEND_METHOD(RdKafka_KafkaConsumer, assign);
ZEND_METHOD(RdKafka_KafkaConsumer, getAssignment);
ZEND_METHOD(RdKafka_KafkaConsumer, commit);
ZEND_METHOD(RdKafka_KafkaConsumer, close);
ZEND_METHOD(RdKafka_KafkaConsumer, commitAsync);
ZEND_METHOD(RdKafka_KafkaConsumer, consume);
ZEND_METHOD(RdKafka_KafkaConsumer, subscribe);
ZEND_METHOD(RdKafka_KafkaConsumer, getSubscription);
ZEND_METHOD(RdKafka_KafkaConsumer, unsubscribe);
ZEND_METHOD(RdKafka_KafkaConsumer, getMetadata);
ZEND_METHOD(RdKafka_KafkaConsumer, newTopic);
ZEND_METHOD(RdKafka_KafkaConsumer, getCommittedOffsets);
ZEND_METHOD(RdKafka_KafkaConsumer, getOffsetPositions);
ZEND_METHOD(RdKafka_KafkaConsumer, queryWatermarkOffsets);
ZEND_METHOD(RdKafka_KafkaConsumer, offsetsForTimes);
ZEND_METHOD(RdKafka_KafkaConsumer, pausePartitions);
ZEND_METHOD(RdKafka_KafkaConsumer, resumePartitions);
/* Method table wiring each PHP method name to its C implementation + arginfo. */
static const zend_function_entry class_RdKafka_KafkaConsumer_methods[] = {
ZEND_ME(RdKafka_KafkaConsumer, __construct, arginfo_class_RdKafka_KafkaConsumer___construct, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, assign, arginfo_class_RdKafka_KafkaConsumer_assign, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, getAssignment, arginfo_class_RdKafka_KafkaConsumer_getAssignment, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, commit, arginfo_class_RdKafka_KafkaConsumer_commit, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, close, arginfo_class_RdKafka_KafkaConsumer_close, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, commitAsync, arginfo_class_RdKafka_KafkaConsumer_commitAsync, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, consume, arginfo_class_RdKafka_KafkaConsumer_consume, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, subscribe, arginfo_class_RdKafka_KafkaConsumer_subscribe, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, getSubscription, arginfo_class_RdKafka_KafkaConsumer_getSubscription, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, unsubscribe, arginfo_class_RdKafka_KafkaConsumer_unsubscribe, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, getMetadata, arginfo_class_RdKafka_KafkaConsumer_getMetadata, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, newTopic, arginfo_class_RdKafka_KafkaConsumer_newTopic, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, getCommittedOffsets, arginfo_class_RdKafka_KafkaConsumer_getCommittedOffsets, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, getOffsetPositions, arginfo_class_RdKafka_KafkaConsumer_getOffsetPositions, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, queryWatermarkOffsets, arginfo_class_RdKafka_KafkaConsumer_queryWatermarkOffsets, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, offsetsForTimes, arginfo_class_RdKafka_KafkaConsumer_offsetsForTimes, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, pausePartitions, arginfo_class_RdKafka_KafkaConsumer_pausePartitions, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaConsumer, resumePartitions, arginfo_class_RdKafka_KafkaConsumer_resumePartitions, ZEND_ACC_PUBLIC)
ZEND_FE_END
};
/* Generated class-registration function (legacy, PHP < 8 variant): same class
 * and methods as the typed variant, but properties are declared untyped with
 * NULL defaults (typed properties do not exist before PHP 7.4's engine
 * support used by gen_stub). */
static zend_class_entry *register_class_RdKafka_KafkaConsumer(void)
{
zend_class_entry ce, *class_entry;
INIT_NS_CLASS_ENTRY(ce, "RdKafka", "KafkaConsumer", class_RdKafka_KafkaConsumer_methods);
class_entry = zend_register_internal_class_ex(&ce, NULL);
zval property_error_cb_default_value;
ZVAL_NULL(&property_error_cb_default_value);
zend_string *property_error_cb_name = zend_string_init("error_cb", sizeof("error_cb") - 1, 1);
zend_declare_property_ex(class_entry, property_error_cb_name, &property_error_cb_default_value, ZEND_ACC_PRIVATE, NULL);
zend_string_release(property_error_cb_name);
zval property_rebalance_cb_default_value;
ZVAL_NULL(&property_rebalance_cb_default_value);
zend_string *property_rebalance_cb_name = zend_string_init("rebalance_cb", sizeof("rebalance_cb") - 1, 1);
zend_declare_property_ex(class_entry, property_rebalance_cb_name, &property_rebalance_cb_default_value, ZEND_ACC_PRIVATE, NULL);
zend_string_release(property_rebalance_cb_name);
zval property_dr_msg_cb_default_value;
ZVAL_NULL(&property_dr_msg_cb_default_value);
zend_string *property_dr_msg_cb_name = zend_string_init("dr_msg_cb", sizeof("dr_msg_cb") - 1, 1);
zend_declare_property_ex(class_entry, property_dr_msg_cb_name, &property_dr_msg_cb_default_value, ZEND_ACC_PRIVATE, NULL);
zend_string_release(property_dr_msg_cb_name);
return class_entry;
}
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "php.h"
#ifdef HAS_RD_KAFKA_TRANSACTIONS
#include "php_rdkafka.h"
#include "php_rdkafka_priv.h"
#include "Zend/zend_interfaces.h"
#include "Zend/zend_exceptions.h"
#include "kafka_error_exception.h"
#if PHP_VERSION_ID < 80000
#include "kafka_error_exception_legacy_arginfo.h"
#else
#include "kafka_error_exception_arginfo.h"
#endif
zend_class_entry * ce_kafka_error;
/* Populate return_value with a new RdKafka\KafkaErrorException object that
 * mirrors the fields of a librdkafka rd_kafka_error_t. The error_t itself is
 * not retained: every field is copied out. Note the "message" property is
 * filled from rd_kafka_error_name() (the short symbolic name), while the
 * longer description goes into "error_string". The caller still owns and
 * must eventually destroy `error`. */
void create_kafka_error(zval *return_value, const rd_kafka_error_t *error) /* {{{ */
{
object_init_ex(return_value, ce_kafka_error);
zend_update_property_string(ce_kafka_error, Z_RDKAFKA_PROP_OBJ(return_value), ZEND_STRL("message"), rd_kafka_error_name(error));
zend_update_property_long(ce_kafka_error, Z_RDKAFKA_PROP_OBJ(return_value), ZEND_STRL("code"), rd_kafka_error_code(error));
zend_update_property_string(ce_kafka_error, Z_RDKAFKA_PROP_OBJ(return_value), ZEND_STRL("error_string"), rd_kafka_error_string(error));
zend_update_property_bool(ce_kafka_error, Z_RDKAFKA_PROP_OBJ(return_value), ZEND_STRL("isFatal"), rd_kafka_error_is_fatal(error));
zend_update_property_bool(ce_kafka_error, Z_RDKAFKA_PROP_OBJ(return_value), ZEND_STRL("isRetriable"), rd_kafka_error_is_retriable(error));
zend_update_property_bool(ce_kafka_error, Z_RDKAFKA_PROP_OBJ(return_value), ZEND_STRL("transactionRequiresAbort"), rd_kafka_error_txn_requires_abort(error));
/* NOTE(review): an extra refcount is added before returning -- callers
 * appear to take ownership of that reference (e.g. when throwing the zval);
 * confirm against the call sites before changing. */
Z_ADDREF_P(return_value);
}
/* }}} */
/* {{{ proto RdKafka\KafkaErrorException::__construct(string $message, int $code[, string $error_string, bool $isFatal, bool $isRetriable, bool $transactionRequiresAbort]) */
PHP_METHOD(RdKafka_KafkaErrorException, __construct)
{
char *message, *error_string = "";
size_t message_length = 0, error_string_length = 0;
zend_bool isFatal = 0, isRetriable = 0, transactionRequiresAbort = 0;
zend_long code = 0;
/* "sl|sbbb": required message + code, then optional error_string and the
 * three boolean flags (all defaulting to empty/false). */
if (zend_parse_parameters(ZEND_NUM_ARGS(), "sl|sbbb", &message, &message_length, &code, &error_string, &error_string_length, &isFatal, &isRetriable, &transactionRequiresAbort) == FAILURE) {
return;
}
/* NOTE(review): the parent Exception constructor is not invoked; message
 * and code are written directly as properties -- confirm this matches the
 * engine's expectations for internal Exception subclasses. */
zend_update_property_string(ce_kafka_error, Z_RDKAFKA_PROP_OBJ(getThis()), ZEND_STRL("message"), message);
zend_update_property_long(ce_kafka_error, Z_RDKAFKA_PROP_OBJ(getThis()), ZEND_STRL("code"), code);
zend_update_property_string(ce_kafka_error, Z_RDKAFKA_PROP_OBJ(getThis()), ZEND_STRL("error_string"), error_string);
zend_update_property_bool(ce_kafka_error, Z_RDKAFKA_PROP_OBJ(getThis()), ZEND_STRL("isFatal"), isFatal);
zend_update_property_bool(ce_kafka_error, Z_RDKAFKA_PROP_OBJ(getThis()), ZEND_STRL("isRetriable"), isRetriable);
zend_update_property_bool(ce_kafka_error, Z_RDKAFKA_PROP_OBJ(getThis()), ZEND_STRL("transactionRequiresAbort"), transactionRequiresAbort);
}
/* }}} */
/* {{{ proto string RdKafka\KafkaErrorException::getErrorString()
   Get the human-readable error string of the error */
PHP_METHOD(RdKafka_KafkaErrorException, getErrorString)
{
zval *res;
/* No arguments accepted. */
if (zend_parse_parameters(ZEND_NUM_ARGS(), "") == FAILURE) {
return;
}
res = rdkafka_read_property(ce_kafka_error, Z_RDKAFKA_PROP_OBJ(getThis()), ZEND_STRL("error_string"), 0);
/* Dereference BEFORE the type check: previously the deref came after it,
 * so a reference wrapping a string was rejected and NULL was returned. */
if (res) {
ZVAL_DEREF(res);
}
if (!res || Z_TYPE_P(res) != IS_STRING) {
return;
}
/* Copy (with refcount bump) into the return value. */
ZVAL_COPY(return_value, res);
}
/* }}} */
/* {{{ proto bool RdKafka\KafkaErrorException::isFatal()
   Return true if the error is fatal */
PHP_METHOD(RdKafka_KafkaErrorException, isFatal)
{
zval *res;
/* No arguments accepted. */
if (zend_parse_parameters(ZEND_NUM_ARGS(), "") == FAILURE) {
return;
}
res = rdkafka_read_property(ce_kafka_error, Z_RDKAFKA_PROP_OBJ(getThis()), ZEND_STRL("isFatal"), 0);
/* Dereference BEFORE the type check: previously the deref came after it,
 * so a reference wrapping a bool was rejected and NULL was returned. */
if (res) {
ZVAL_DEREF(res);
}
if (!res || (Z_TYPE_P(res) != IS_TRUE && Z_TYPE_P(res) != IS_FALSE)) {
return;
}
ZVAL_COPY(return_value, res);
}
/* }}} */
/* {{{ proto bool RdKafka\KafkaErrorException::isRetriable()
   Return true if the operation that produced the error may be retried
   (comment previously copy-pasted from isFatal). */
PHP_METHOD(RdKafka_KafkaErrorException, isRetriable)
{
zval *res;
/* No arguments accepted. */
if (zend_parse_parameters(ZEND_NUM_ARGS(), "") == FAILURE) {
return;
}
res = rdkafka_read_property(ce_kafka_error, Z_RDKAFKA_PROP_OBJ(getThis()), ZEND_STRL("isRetriable"), 0);
/* Dereference BEFORE the type check: previously the deref came after it,
 * so a reference wrapping a bool was rejected and NULL was returned. */
if (res) {
ZVAL_DEREF(res);
}
if (!res || (Z_TYPE_P(res) != IS_TRUE && Z_TYPE_P(res) != IS_FALSE)) {
return;
}
ZVAL_COPY(return_value, res);
}
/* }}} */
/* {{{ proto bool RdKafka\KafkaErrorException::transactionRequiresAbort()
   Return true if the ongoing transaction must be aborted
   (comment previously copy-pasted from isFatal). */
PHP_METHOD(RdKafka_KafkaErrorException, transactionRequiresAbort)
{
zval *res;
/* No arguments accepted. */
if (zend_parse_parameters(ZEND_NUM_ARGS(), "") == FAILURE) {
return;
}
res = rdkafka_read_property(ce_kafka_error, Z_RDKAFKA_PROP_OBJ(getThis()), ZEND_STRL("transactionRequiresAbort"), 0);
/* Dereference BEFORE the type check: previously the deref came after it,
 * so a reference wrapping a bool was rejected and NULL was returned. */
if (res) {
ZVAL_DEREF(res);
}
if (!res || (Z_TYPE_P(res) != IS_TRUE && Z_TYPE_P(res) != IS_FALSE)) {
return;
}
ZVAL_COPY(return_value, res);
}
/* }}} */
/* Module-init hook: registers RdKafka\KafkaErrorException as a subclass of
 * the extension's base exception class (ce_kafka_exception). Only compiled
 * when librdkafka provides the transactions / rd_kafka_error_t API. */
void kafka_error_minit() /* {{{ */
{
ce_kafka_error = register_class_RdKafka_KafkaErrorException(ce_kafka_exception);
} /* }}} */
#endif
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
#ifdef HAS_RD_KAFKA_TRANSACTIONS
#include "librdkafka/rdkafka.h"
#include "Zend/zend_interfaces.h"
extern zend_class_entry * ce_kafka_error;
void kafka_error_minit();
void create_kafka_error(zval *return_value, const rd_kafka_error_t *error);
#endif
<?php
// gen_stub.php input describing RdKafka\KafkaErrorException, the exception
// type carrying the extra detail of a librdkafka rd_kafka_error_t. The
// *_arginfo.h / *_legacy_arginfo.h headers are generated from this stub.
/**
 * @generate-class-entries
 * @generate-function-entries
 * @generate-legacy-arginfo
 */
namespace RdKafka;
class KafkaErrorException extends Exception
{
// Error details beyond Exception's message/code; written by the C
// constructor and by create_kafka_error().
private string $error_string;
private bool $isFatal;
private bool $isRetriable;
private bool $transactionRequiresAbort;
public function __construct(string $message, int $code, string $error_string, bool $isFatal, bool $isRetriable, bool $transactionRequiresAbort) {}
/** @tentative-return-type */
public function getErrorString(): string {}
/** @tentative-return-type */
public function isFatal(): bool {}
/** @tentative-return-type */
public function isRetriable(): bool {}
/** @tentative-return-type */
public function transactionRequiresAbort(): bool {}
}
/* This is a generated file, edit the .stub.php file instead.
 * Stub hash: 1a50cd552973f23b01a2d6b4e5464ba14320c393 */
/* Typed arginfo + method table for RdKafka\KafkaErrorException (PHP >= 8.0).
 * Generated by gen_stub.php -- do not edit by hand. */
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_KafkaErrorException___construct, 0, 0, 6)
ZEND_ARG_TYPE_INFO(0, message, IS_STRING, 0)
ZEND_ARG_TYPE_INFO(0, code, IS_LONG, 0)
ZEND_ARG_TYPE_INFO(0, error_string, IS_STRING, 0)
ZEND_ARG_TYPE_INFO(0, isFatal, _IS_BOOL, 0)
ZEND_ARG_TYPE_INFO(0, isRetriable, _IS_BOOL, 0)
ZEND_ARG_TYPE_INFO(0, transactionRequiresAbort, _IS_BOOL, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_KafkaErrorException_getErrorString, 0, 0, IS_STRING, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_KafkaErrorException_isFatal, 0, 0, _IS_BOOL, 0)
ZEND_END_ARG_INFO()
/* The remaining zero-arg bool getters share isFatal's arginfo. */
#define arginfo_class_RdKafka_KafkaErrorException_isRetriable arginfo_class_RdKafka_KafkaErrorException_isFatal
#define arginfo_class_RdKafka_KafkaErrorException_transactionRequiresAbort arginfo_class_RdKafka_KafkaErrorException_isFatal
ZEND_METHOD(RdKafka_KafkaErrorException, __construct);
ZEND_METHOD(RdKafka_KafkaErrorException, getErrorString);
ZEND_METHOD(RdKafka_KafkaErrorException, isFatal);
ZEND_METHOD(RdKafka_KafkaErrorException, isRetriable);
ZEND_METHOD(RdKafka_KafkaErrorException, transactionRequiresAbort);
/* Method table wiring each PHP method name to its C implementation. */
static const zend_function_entry class_RdKafka_KafkaErrorException_methods[] = {
ZEND_ME(RdKafka_KafkaErrorException, __construct, arginfo_class_RdKafka_KafkaErrorException___construct, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaErrorException, getErrorString, arginfo_class_RdKafka_KafkaErrorException_getErrorString, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaErrorException, isFatal, arginfo_class_RdKafka_KafkaErrorException_isFatal, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaErrorException, isRetriable, arginfo_class_RdKafka_KafkaErrorException_isRetriable, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaErrorException, transactionRequiresAbort, arginfo_class_RdKafka_KafkaErrorException_transactionRequiresAbort, ZEND_ACC_PUBLIC)
ZEND_FE_END
};
/* Generated class-registration function (PHP >= 8 variant): registers
 * RdKafka\KafkaErrorException extending the supplied RdKafka\Exception class
 * entry, and declares its four private typed properties (string + 3 bools).
 * ZVAL_UNDEF is the "uninitialized" default used for typed properties. */
static zend_class_entry *register_class_RdKafka_KafkaErrorException(zend_class_entry *class_entry_RdKafka_Exception)
{
zend_class_entry ce, *class_entry;
INIT_NS_CLASS_ENTRY(ce, "RdKafka", "KafkaErrorException", class_RdKafka_KafkaErrorException_methods);
class_entry = zend_register_internal_class_ex(&ce, class_entry_RdKafka_Exception);
zval property_error_string_default_value;
ZVAL_UNDEF(&property_error_string_default_value);
zend_string *property_error_string_name = zend_string_init("error_string", sizeof("error_string") - 1, 1);
zend_declare_typed_property(class_entry, property_error_string_name, &property_error_string_default_value, ZEND_ACC_PRIVATE, NULL, (zend_type) ZEND_TYPE_INIT_MASK(MAY_BE_STRING));
zend_string_release(property_error_string_name);
zval property_isFatal_default_value;
ZVAL_UNDEF(&property_isFatal_default_value);
zend_string *property_isFatal_name = zend_string_init("isFatal", sizeof("isFatal") - 1, 1);
zend_declare_typed_property(class_entry, property_isFatal_name, &property_isFatal_default_value, ZEND_ACC_PRIVATE, NULL, (zend_type) ZEND_TYPE_INIT_MASK(MAY_BE_BOOL));
zend_string_release(property_isFatal_name);
zval property_isRetriable_default_value;
ZVAL_UNDEF(&property_isRetriable_default_value);
zend_string *property_isRetriable_name = zend_string_init("isRetriable", sizeof("isRetriable") - 1, 1);
zend_declare_typed_property(class_entry, property_isRetriable_name, &property_isRetriable_default_value, ZEND_ACC_PRIVATE, NULL, (zend_type) ZEND_TYPE_INIT_MASK(MAY_BE_BOOL));
zend_string_release(property_isRetriable_name);
zval property_transactionRequiresAbort_default_value;
ZVAL_UNDEF(&property_transactionRequiresAbort_default_value);
zend_string *property_transactionRequiresAbort_name = zend_string_init("transactionRequiresAbort", sizeof("transactionRequiresAbort") - 1, 1);
zend_declare_typed_property(class_entry, property_transactionRequiresAbort_name, &property_transactionRequiresAbort_default_value, ZEND_ACC_PRIVATE, NULL, (zend_type) ZEND_TYPE_INIT_MASK(MAY_BE_BOOL));
zend_string_release(property_transactionRequiresAbort_name);
return class_entry;
}
/* This is a generated file, edit the .stub.php file instead.
 * Stub hash: 1a50cd552973f23b01a2d6b4e5464ba14320c393 */
/* Legacy (untyped) arginfo + method table for RdKafka\KafkaErrorException,
 * used on PHP < 8.0: only argument names are declared, no types. Generated
 * by gen_stub.php -- do not edit by hand. */
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_KafkaErrorException___construct, 0, 0, 6)
ZEND_ARG_INFO(0, message)
ZEND_ARG_INFO(0, code)
ZEND_ARG_INFO(0, error_string)
ZEND_ARG_INFO(0, isFatal)
ZEND_ARG_INFO(0, isRetriable)
ZEND_ARG_INFO(0, transactionRequiresAbort)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_KafkaErrorException_getErrorString, 0, 0, 0)
ZEND_END_ARG_INFO()
/* All zero-arg getters share the empty getErrorString arginfo. */
#define arginfo_class_RdKafka_KafkaErrorException_isFatal arginfo_class_RdKafka_KafkaErrorException_getErrorString
#define arginfo_class_RdKafka_KafkaErrorException_isRetriable arginfo_class_RdKafka_KafkaErrorException_getErrorString
#define arginfo_class_RdKafka_KafkaErrorException_transactionRequiresAbort arginfo_class_RdKafka_KafkaErrorException_getErrorString
ZEND_METHOD(RdKafka_KafkaErrorException, __construct);
ZEND_METHOD(RdKafka_KafkaErrorException, getErrorString);
ZEND_METHOD(RdKafka_KafkaErrorException, isFatal);
ZEND_METHOD(RdKafka_KafkaErrorException, isRetriable);
ZEND_METHOD(RdKafka_KafkaErrorException, transactionRequiresAbort);
/* Method table wiring each PHP method name to its C implementation. */
static const zend_function_entry class_RdKafka_KafkaErrorException_methods[] = {
ZEND_ME(RdKafka_KafkaErrorException, __construct, arginfo_class_RdKafka_KafkaErrorException___construct, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaErrorException, getErrorString, arginfo_class_RdKafka_KafkaErrorException_getErrorString, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaErrorException, isFatal, arginfo_class_RdKafka_KafkaErrorException_isFatal, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaErrorException, isRetriable, arginfo_class_RdKafka_KafkaErrorException_isRetriable, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_KafkaErrorException, transactionRequiresAbort, arginfo_class_RdKafka_KafkaErrorException_transactionRequiresAbort, ZEND_ACC_PUBLIC)
ZEND_FE_END
};
/* Generated class-registration function (legacy, PHP < 8 variant): same class
 * and methods as the typed variant, but the four properties are declared
 * untyped with NULL defaults via zend_declare_property_ex. */
static zend_class_entry *register_class_RdKafka_KafkaErrorException(zend_class_entry *class_entry_RdKafka_Exception)
{
zend_class_entry ce, *class_entry;
INIT_NS_CLASS_ENTRY(ce, "RdKafka", "KafkaErrorException", class_RdKafka_KafkaErrorException_methods);
class_entry = zend_register_internal_class_ex(&ce, class_entry_RdKafka_Exception);
zval property_error_string_default_value;
ZVAL_NULL(&property_error_string_default_value);
zend_string *property_error_string_name = zend_string_init("error_string", sizeof("error_string") - 1, 1);
zend_declare_property_ex(class_entry, property_error_string_name, &property_error_string_default_value, ZEND_ACC_PRIVATE, NULL);
zend_string_release(property_error_string_name);
zval property_isFatal_default_value;
ZVAL_NULL(&property_isFatal_default_value);
zend_string *property_isFatal_name = zend_string_init("isFatal", sizeof("isFatal") - 1, 1);
zend_declare_property_ex(class_entry, property_isFatal_name, &property_isFatal_default_value, ZEND_ACC_PRIVATE, NULL);
zend_string_release(property_isFatal_name);
zval property_isRetriable_default_value;
ZVAL_NULL(&property_isRetriable_default_value);
zend_string *property_isRetriable_name = zend_string_init("isRetriable", sizeof("isRetriable") - 1, 1);
zend_declare_property_ex(class_entry, property_isRetriable_name, &property_isRetriable_default_value, ZEND_ACC_PRIVATE, NULL);
zend_string_release(property_isRetriable_name);
zval property_transactionRequiresAbort_default_value;
ZVAL_NULL(&property_transactionRequiresAbort_default_value);
zend_string *property_transactionRequiresAbort_name = zend_string_init("transactionRequiresAbort", sizeof("transactionRequiresAbort") - 1, 1);
zend_declare_property_ex(class_entry, property_transactionRequiresAbort_name, &property_transactionRequiresAbort_default_value, ZEND_ACC_PRIVATE, NULL);
zend_string_release(property_transactionRequiresAbort_name);
return class_entry;
}
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "php.h"
#include "php_rdkafka.h"
#include "php_rdkafka_priv.h"
#include "librdkafka/rdkafka.h"
#include "ext/spl/spl_iterators.h"
#include "Zend/zend_interfaces.h"
#include "Zend/zend_exceptions.h"
#include "topic.h"
#include "message.h"
#if PHP_VERSION_ID < 80000
#include "message_legacy_arginfo.h"
#else
#include "message_arginfo.h"
#endif
/* Class entry for RdKafka\Message, registered in kafka_message_minit(). */
zend_class_entry * ce_kafka_message;

/* Populate return_value as a new RdKafka\Message object mirroring the fields
 * of the given librdkafka message: err, topic_name, partition, timestamp,
 * payload, len, key, offset, headers and opaque.
 *
 * message    - librdkafka message; borrowed, not freed here.
 * msg_opaque - optional per-message opaque string; copied into the "opaque"
 *              property when non-NULL.
 */
void kafka_message_new(zval *return_value, const rd_kafka_message_t *message, zend_string *msg_opaque)
{
    object_init_ex(return_value, ce_kafka_message);

    rd_kafka_timestamp_type_t tstype;
    int64_t timestamp;

    /* tstype is fetched but not exposed as a property */
    timestamp = rd_kafka_message_timestamp(message, &tstype);

    zval headers_array;
#ifdef HAVE_RD_KAFKA_MESSAGE_HEADERS
    rd_kafka_headers_t *message_headers = NULL;
    rd_kafka_resp_err_t header_response;
    const char *header_name = NULL;
    const void *header_value = NULL;
    size_t header_size = 0;
    size_t i;
#endif /* HAVE_RD_KAFKA_MESSAGE_HEADERS */

    zend_update_property_long(NULL, Z_RDKAFKA_PROP_OBJ(return_value), ZEND_STRL("err"), message->err);

    if (message->rkt) {
        zend_update_property_string(NULL, Z_RDKAFKA_PROP_OBJ(return_value), ZEND_STRL("topic_name"), rd_kafka_topic_name(message->rkt));
    }

    zend_update_property_long(NULL, Z_RDKAFKA_PROP_OBJ(return_value), ZEND_STRL("partition"), message->partition);

    /* timestamp/payload/len are only set when the message carries a payload */
    if (message->payload) {
        zend_update_property_long(NULL, Z_RDKAFKA_PROP_OBJ(return_value), ZEND_STRL("timestamp"), timestamp);
        zend_update_property_stringl(NULL, Z_RDKAFKA_PROP_OBJ(return_value), ZEND_STRL("payload"), message->payload, message->len);
        zend_update_property_long(NULL, Z_RDKAFKA_PROP_OBJ(return_value), ZEND_STRL("len"), message->len);
    }

    if (message->key) {
        zend_update_property_stringl(NULL, Z_RDKAFKA_PROP_OBJ(return_value), ZEND_STRL("key"), message->key, message->key_len);
    }

    zend_update_property_long(NULL, Z_RDKAFKA_PROP_OBJ(return_value), ZEND_STRL("offset"), message->offset);

    /* "headers" is always an array, empty when headers are unsupported,
     * the message errored, or extraction failed part-way */
    array_init(&headers_array);
#ifdef HAVE_RD_KAFKA_MESSAGE_HEADERS
    if (message->err == RD_KAFKA_RESP_ERR_NO_ERROR) {
        rd_kafka_message_headers(message, &message_headers);
        if (message_headers != NULL) {
            for (i = 0; i < rd_kafka_header_cnt(message_headers); i++) {
                header_response = rd_kafka_header_get_all(message_headers, i, &header_name, &header_value, &header_size);
                if (header_response != RD_KAFKA_RESP_ERR_NO_ERROR) {
                    /* stop on first failure; already-collected headers are kept */
                    break;
                }
                add_assoc_stringl(&headers_array, header_name, (const char*)header_value, header_size);
            }
        }
    }
#endif
    zend_update_property(NULL, Z_RDKAFKA_PROP_OBJ(return_value), ZEND_STRL("headers"), &headers_array);
    /* property update added its own ref; drop the local one */
    zval_ptr_dtor(&headers_array);

    if (msg_opaque != NULL) {
        zend_update_property_str(NULL, Z_RDKAFKA_PROP_OBJ(return_value), ZEND_STRL("opaque"), msg_opaque);
    }
}
/* Convert `size` librdkafka messages into a PHP array of RdKafka\Message
 * objects, stored in return_value. The raw messages are borrowed; each is
 * wrapped (not consumed) by kafka_message_new(). */
void kafka_message_list_to_array(zval *return_value, rd_kafka_message_t **messages, long size) /* {{{ */
{
    int idx;
    zval zmessage;

    array_init_size(return_value, size);

    for (idx = 0; idx < size; idx++) {
        ZVAL_NULL(&zmessage);
        kafka_message_new(&zmessage, messages[idx], NULL);
        add_next_index_zval(return_value, &zmessage);
    }
} /* }}} */
/* {{{ proto string RdKafka\Message::errstr()
 * Returns the error string for an errored RdKafka\Message or NULL if there was no error.
 */
PHP_METHOD(RdKafka_Message, errstr)
{
    zval *zerr;
    zval *zpayload;
    const char *errstr;

    /* errstr() takes no arguments */
    if (zend_parse_parameters(ZEND_NUM_ARGS(), "") == FAILURE) {
        return;
    }

    /* Returns NULL implicitly when "err" is unset or not an integer code */
    zerr = rdkafka_read_property(NULL, Z_RDKAFKA_PROP_OBJ(getThis()), ZEND_STRL("err"), 0);
    if (!zerr || Z_TYPE_P(zerr) != IS_LONG) {
        return;
    }

    errstr = rd_kafka_err2str(Z_LVAL_P(zerr));

    if (errstr) {
        RETURN_STRING(errstr);
    }

    /* Fallback: return the payload as the error description.
     * NOTE(review): rd_kafka_err2str() appears to always return a non-NULL
     * string, which would make this branch unreachable — confirm against the
     * librdkafka version in use. */
    zpayload = rdkafka_read_property(NULL, Z_RDKAFKA_PROP_OBJ(getThis()), ZEND_STRL("payload"), 0);
    if (zpayload && Z_TYPE_P(zpayload) == IS_STRING) {
        RETURN_ZVAL(zpayload, 1, 0);
    }
}
/* }}} */
/* MINIT handler: registers the RdKafka\Message class entry. */
void kafka_message_minit(INIT_FUNC_ARGS) { /* {{{ */
    ce_kafka_message = register_class_RdKafka_Message();
} /* }}} */
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
void kafka_message_minit(INIT_FUNC_ARGS);
void kafka_message_new(zval *return_value, const rd_kafka_message_t *message, zend_string *msg_opaque);
void kafka_message_list_to_array(zval *return_value, rd_kafka_message_t **messages, long size);
extern zend_class_entry * ce_kafka_message;
<?php
/**
* @generate-class-entries
* @generate-function-entries
* @generate-legacy-arginfo
*/
namespace RdKafka;
class Message
{
    /** librdkafka error code for this message (RD_KAFKA_RESP_ERR_*) */
    public int $err;
    /** Topic name; null when the message has no topic handle attached */
    public ?string $topic_name = null;
    /** Message timestamp; null when the message carries no payload */
    public ?int $timestamp = null;
    /** Partition the message was produced to / consumed from */
    public int $partition;
    /** Raw message payload; null when absent */
    public ?string $payload = null;
    /** Payload length in bytes; null when there is no payload */
    public ?int $len = null;
    /** Message key; null when absent */
    public ?string $key = null;
    /** Message offset within the partition */
    public int $offset;
    /** Message headers as an associative array (empty when unsupported) */
    public array $headers;
    /** Per-message opaque string passed at produce time; null when unset */
    public ?string $opaque = null;

    /** @tentative-return-type */
    public function errstr(): ?string {}
}
/* This is a generated file, edit the .stub.php file instead.
* Stub hash: f642f90b8a3c35c353320c0574902898a3645ee1 */
/* Generated arginfo (PHP >= 8.0): errstr() takes no arguments and declares a
 * tentative ?string return type. */
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_Message_errstr, 0, 0, IS_STRING, 1)
ZEND_END_ARG_INFO()

ZEND_METHOD(RdKafka_Message, errstr);

/* Method table for RdKafka\Message */
static const zend_function_entry class_RdKafka_Message_methods[] = {
    ZEND_ME(RdKafka_Message, errstr, arginfo_class_RdKafka_Message_errstr, ZEND_ACC_PUBLIC)
    ZEND_FE_END
};
/* Generated by gen_stub.php from message.stub.php (PHP >= 8.0 variant):
 * registers RdKafka\Message and declares its typed public properties.
 * ZVAL_UNDEF defaults mark required (uninitialized) typed properties;
 * ZVAL_NULL defaults correspond to `= null` in the stub. */
static zend_class_entry *register_class_RdKafka_Message(void)
{
    zend_class_entry ce, *class_entry;

    INIT_NS_CLASS_ENTRY(ce, "RdKafka", "Message", class_RdKafka_Message_methods);
    class_entry = zend_register_internal_class_ex(&ce, NULL);

    /* int $err */
    zval property_err_default_value;
    ZVAL_UNDEF(&property_err_default_value);
    zend_string *property_err_name = zend_string_init("err", sizeof("err") - 1, 1);
    zend_declare_typed_property(class_entry, property_err_name, &property_err_default_value, ZEND_ACC_PUBLIC, NULL, (zend_type) ZEND_TYPE_INIT_MASK(MAY_BE_LONG));
    zend_string_release(property_err_name);

    /* ?string $topic_name = null */
    zval property_topic_name_default_value;
    ZVAL_NULL(&property_topic_name_default_value);
    zend_string *property_topic_name_name = zend_string_init("topic_name", sizeof("topic_name") - 1, 1);
    zend_declare_typed_property(class_entry, property_topic_name_name, &property_topic_name_default_value, ZEND_ACC_PUBLIC, NULL, (zend_type) ZEND_TYPE_INIT_MASK(MAY_BE_STRING|MAY_BE_NULL));
    zend_string_release(property_topic_name_name);

    /* ?int $timestamp = null */
    zval property_timestamp_default_value;
    ZVAL_NULL(&property_timestamp_default_value);
    zend_string *property_timestamp_name = zend_string_init("timestamp", sizeof("timestamp") - 1, 1);
    zend_declare_typed_property(class_entry, property_timestamp_name, &property_timestamp_default_value, ZEND_ACC_PUBLIC, NULL, (zend_type) ZEND_TYPE_INIT_MASK(MAY_BE_LONG|MAY_BE_NULL));
    zend_string_release(property_timestamp_name);

    /* int $partition */
    zval property_partition_default_value;
    ZVAL_UNDEF(&property_partition_default_value);
    zend_string *property_partition_name = zend_string_init("partition", sizeof("partition") - 1, 1);
    zend_declare_typed_property(class_entry, property_partition_name, &property_partition_default_value, ZEND_ACC_PUBLIC, NULL, (zend_type) ZEND_TYPE_INIT_MASK(MAY_BE_LONG));
    zend_string_release(property_partition_name);

    /* ?string $payload = null */
    zval property_payload_default_value;
    ZVAL_NULL(&property_payload_default_value);
    zend_string *property_payload_name = zend_string_init("payload", sizeof("payload") - 1, 1);
    zend_declare_typed_property(class_entry, property_payload_name, &property_payload_default_value, ZEND_ACC_PUBLIC, NULL, (zend_type) ZEND_TYPE_INIT_MASK(MAY_BE_STRING|MAY_BE_NULL));
    zend_string_release(property_payload_name);

    /* ?int $len = null */
    zval property_len_default_value;
    ZVAL_NULL(&property_len_default_value);
    zend_string *property_len_name = zend_string_init("len", sizeof("len") - 1, 1);
    zend_declare_typed_property(class_entry, property_len_name, &property_len_default_value, ZEND_ACC_PUBLIC, NULL, (zend_type) ZEND_TYPE_INIT_MASK(MAY_BE_LONG|MAY_BE_NULL));
    zend_string_release(property_len_name);

    /* ?string $key = null */
    zval property_key_default_value;
    ZVAL_NULL(&property_key_default_value);
    zend_string *property_key_name = zend_string_init("key", sizeof("key") - 1, 1);
    zend_declare_typed_property(class_entry, property_key_name, &property_key_default_value, ZEND_ACC_PUBLIC, NULL, (zend_type) ZEND_TYPE_INIT_MASK(MAY_BE_STRING|MAY_BE_NULL));
    zend_string_release(property_key_name);

    /* int $offset */
    zval property_offset_default_value;
    ZVAL_UNDEF(&property_offset_default_value);
    zend_string *property_offset_name = zend_string_init("offset", sizeof("offset") - 1, 1);
    zend_declare_typed_property(class_entry, property_offset_name, &property_offset_default_value, ZEND_ACC_PUBLIC, NULL, (zend_type) ZEND_TYPE_INIT_MASK(MAY_BE_LONG));
    zend_string_release(property_offset_name);

    /* array $headers */
    zval property_headers_default_value;
    ZVAL_UNDEF(&property_headers_default_value);
    zend_string *property_headers_name = zend_string_init("headers", sizeof("headers") - 1, 1);
    zend_declare_typed_property(class_entry, property_headers_name, &property_headers_default_value, ZEND_ACC_PUBLIC, NULL, (zend_type) ZEND_TYPE_INIT_MASK(MAY_BE_ARRAY));
    zend_string_release(property_headers_name);

    /* ?string $opaque = null */
    zval property_opaque_default_value;
    ZVAL_NULL(&property_opaque_default_value);
    zend_string *property_opaque_name = zend_string_init("opaque", sizeof("opaque") - 1, 1);
    zend_declare_typed_property(class_entry, property_opaque_name, &property_opaque_default_value, ZEND_ACC_PUBLIC, NULL, (zend_type) ZEND_TYPE_INIT_MASK(MAY_BE_STRING|MAY_BE_NULL));
    zend_string_release(property_opaque_name);

    return class_entry;
}
/* This is a generated file, edit the .stub.php file instead.
* Stub hash: f642f90b8a3c35c353320c0574902898a3645ee1 */
/* Generated legacy arginfo (PHP < 8.0): no argument or return type info. */
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_Message_errstr, 0, 0, 0)
ZEND_END_ARG_INFO()

ZEND_METHOD(RdKafka_Message, errstr);

/* Method table for RdKafka\Message */
static const zend_function_entry class_RdKafka_Message_methods[] = {
    ZEND_ME(RdKafka_Message, errstr, arginfo_class_RdKafka_Message_errstr, ZEND_ACC_PUBLIC)
    ZEND_FE_END
};
/* Generated by gen_stub.php from message.stub.php (legacy PHP < 8.0 variant):
 * registers RdKafka\Message with untyped public properties, all defaulting
 * to null (typed properties require PHP >= 7.4 engine support not used here). */
static zend_class_entry *register_class_RdKafka_Message(void)
{
    zend_class_entry ce, *class_entry;

    INIT_NS_CLASS_ENTRY(ce, "RdKafka", "Message", class_RdKafka_Message_methods);
    class_entry = zend_register_internal_class_ex(&ce, NULL);

    /* $err = null */
    zval property_err_default_value;
    ZVAL_NULL(&property_err_default_value);
    zend_string *property_err_name = zend_string_init("err", sizeof("err") - 1, 1);
    zend_declare_property_ex(class_entry, property_err_name, &property_err_default_value, ZEND_ACC_PUBLIC, NULL);
    zend_string_release(property_err_name);

    /* $topic_name = null */
    zval property_topic_name_default_value;
    ZVAL_NULL(&property_topic_name_default_value);
    zend_string *property_topic_name_name = zend_string_init("topic_name", sizeof("topic_name") - 1, 1);
    zend_declare_property_ex(class_entry, property_topic_name_name, &property_topic_name_default_value, ZEND_ACC_PUBLIC, NULL);
    zend_string_release(property_topic_name_name);

    /* $timestamp = null */
    zval property_timestamp_default_value;
    ZVAL_NULL(&property_timestamp_default_value);
    zend_string *property_timestamp_name = zend_string_init("timestamp", sizeof("timestamp") - 1, 1);
    zend_declare_property_ex(class_entry, property_timestamp_name, &property_timestamp_default_value, ZEND_ACC_PUBLIC, NULL);
    zend_string_release(property_timestamp_name);

    /* $partition = null */
    zval property_partition_default_value;
    ZVAL_NULL(&property_partition_default_value);
    zend_string *property_partition_name = zend_string_init("partition", sizeof("partition") - 1, 1);
    zend_declare_property_ex(class_entry, property_partition_name, &property_partition_default_value, ZEND_ACC_PUBLIC, NULL);
    zend_string_release(property_partition_name);

    /* $payload = null */
    zval property_payload_default_value;
    ZVAL_NULL(&property_payload_default_value);
    zend_string *property_payload_name = zend_string_init("payload", sizeof("payload") - 1, 1);
    zend_declare_property_ex(class_entry, property_payload_name, &property_payload_default_value, ZEND_ACC_PUBLIC, NULL);
    zend_string_release(property_payload_name);

    /* $len = null */
    zval property_len_default_value;
    ZVAL_NULL(&property_len_default_value);
    zend_string *property_len_name = zend_string_init("len", sizeof("len") - 1, 1);
    zend_declare_property_ex(class_entry, property_len_name, &property_len_default_value, ZEND_ACC_PUBLIC, NULL);
    zend_string_release(property_len_name);

    /* $key = null */
    zval property_key_default_value;
    ZVAL_NULL(&property_key_default_value);
    zend_string *property_key_name = zend_string_init("key", sizeof("key") - 1, 1);
    zend_declare_property_ex(class_entry, property_key_name, &property_key_default_value, ZEND_ACC_PUBLIC, NULL);
    zend_string_release(property_key_name);

    /* $offset = null */
    zval property_offset_default_value;
    ZVAL_NULL(&property_offset_default_value);
    zend_string *property_offset_name = zend_string_init("offset", sizeof("offset") - 1, 1);
    zend_declare_property_ex(class_entry, property_offset_name, &property_offset_default_value, ZEND_ACC_PUBLIC, NULL);
    zend_string_release(property_offset_name);

    /* $headers = null */
    zval property_headers_default_value;
    ZVAL_NULL(&property_headers_default_value);
    zend_string *property_headers_name = zend_string_init("headers", sizeof("headers") - 1, 1);
    zend_declare_property_ex(class_entry, property_headers_name, &property_headers_default_value, ZEND_ACC_PUBLIC, NULL);
    zend_string_release(property_headers_name);

    /* $opaque = null */
    zval property_opaque_default_value;
    ZVAL_NULL(&property_opaque_default_value);
    zend_string *property_opaque_name = zend_string_init("opaque", sizeof("opaque") - 1, 1);
    zend_declare_property_ex(class_entry, property_opaque_name, &property_opaque_default_value, ZEND_ACC_PUBLIC, NULL);
    zend_string_release(property_opaque_name);

    return class_entry;
}
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "php.h"
#include "php_rdkafka.h"
#include "php_rdkafka_priv.h"
#include "librdkafka/rdkafka.h"
#include "metadata_collection.h"
#include "metadata_topic.h"
#include "metadata_broker.h"
#include "metadata_partition.h"
#include "Zend/zend_exceptions.h"
#if PHP_VERSION_ID < 80000
#include "metadata_legacy_arginfo.h"
#else
#include "metadata_arginfo.h"
#endif
/* Internal object backing RdKafka\Metadata. */
typedef struct _object_intern {
    const rd_kafka_metadata_t *metadata; /* owned; destroyed in kafka_metadata_free() */
    zend_object std;                     /* must be last (XtOffsetOf-based layout) */
} object_intern;

static HashTable *get_debug_info(Z_RDKAFKA_OBJ *object, int *is_temp);

/* Class entry and custom object handlers, set up in kafka_metadata_minit() */
static zend_class_entry * ce;
static zend_object_handlers handlers;
/* Build an RdKafka\Metadata\Collection of brokers backed by the parent
 * metadata's broker array. */
static void brokers_collection(zval *return_value, Z_RDKAFKA_OBJ *parent, object_intern *intern) { /* {{{ */
    kafka_metadata_collection_init(return_value, parent, intern->metadata->brokers, intern->metadata->broker_cnt, sizeof(*intern->metadata->brokers), kafka_metadata_broker_ctor);
}
/* }}} */
/* Build an RdKafka\Metadata\Collection of topics backed by the parent
 * metadata's topic array. */
static void topics_collection(zval *return_value, Z_RDKAFKA_OBJ *parent, object_intern *intern) { /* {{{ */
    kafka_metadata_collection_init(return_value, parent, intern->metadata->topics, intern->metadata->topic_cnt, sizeof(*intern->metadata->topics), kafka_metadata_topic_ctor);
}
/* }}} */
/* free_obj handler: releases the owned librdkafka metadata (if the object
 * was ever initialized) and the standard object storage. */
static void kafka_metadata_free(zend_object *object) /* {{{ */
{
    object_intern *intern = php_kafka_from_obj(object_intern, object);

    if (intern->metadata) {
        rd_kafka_metadata_destroy(intern->metadata);
    }

    zend_object_std_dtor(&intern->std);
}
/* }}} */
/* create_object handler for RdKafka\Metadata: allocates the intern struct
 * and installs the module's custom object handlers. */
static zend_object *kafka_metadata_new(zend_class_entry *class_type) /* {{{ */
{
    object_intern *obj = zend_object_alloc(sizeof(*obj), class_type);

    zend_object_std_init(&obj->std, class_type);
    object_properties_init(&obj->std, class_type);

    obj->std.handlers = &handlers;

    return &obj->std;
}
/* }}} */
/* Fetch the intern struct from a RdKafka\Metadata zval. Throws (default
 * Exception class, since ce is NULL) and returns NULL when the object was
 * never initialized with metadata. */
static object_intern * get_object(zval *zmetadata)
{
    object_intern *ometadata = Z_RDKAFKA_P(object_intern, zmetadata);

    if (!ometadata->metadata) {
        zend_throw_exception_ex(NULL, 0, "RdKafka\\Metadata::__construct() has not been called");
        return NULL;
    }

    return ometadata;
}
/* get_debug_info handler (var_dump/print_r): exposes brokers, topics and
 * originating-broker info as a temporary array. */
static HashTable *get_debug_info(Z_RDKAFKA_OBJ *object, int *is_temp) /* {{{ */
{
    zval ary;
    object_intern *intern;
    zval brokers;
    zval topics;

    *is_temp = 1; /* tell the engine to free the returned table */

    array_init(&ary);

    intern = rdkafka_get_debug_object(object_intern, object);
    if (!intern) {
        /* uninitialized object: dump as an empty array */
        return Z_ARRVAL(ary);
    }

    ZVAL_NULL(&brokers);
    brokers_collection(&brokers, object, intern);
    add_assoc_zval(&ary, "brokers", &brokers);

    ZVAL_NULL(&topics);
    topics_collection(&topics, object, intern);
    add_assoc_zval(&ary, "topics", &topics);

    add_assoc_long(&ary, "orig_broker_id", intern->metadata->orig_broker_id);
    add_assoc_string(&ary, "orig_broker_name", intern->metadata->orig_broker_name);

    return Z_ARRVAL(ary);
}
/* }}} */
/* {{{ proto long RdKafka\Metadata::getOrigBrokerId()
   Broker originating this metadata */
PHP_METHOD(RdKafka_Metadata, getOrigBrokerId)
{
    object_intern *intern;

    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }

    /* get_object() throws if the wrapper holds no metadata */
    intern = get_object(getThis());
    if (!intern) {
        return;
    }

    RETURN_LONG(intern->metadata->orig_broker_id);
}
/* }}} */
/* {{{ proto string RdKafka\Metadata::getOrigBrokerName()
   Name of originating broker */
PHP_METHOD(RdKafka_Metadata, getOrigBrokerName)
{
    object_intern *intern;

    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }

    /* get_object() throws if the wrapper holds no metadata */
    intern = get_object(getThis());
    if (!intern) {
        return;
    }

    RETURN_STRING(intern->metadata->orig_broker_name);
}
/* }}} */
/* {{{ proto RdKafka\Metadata\Collection RdKafka\Metadata::getBrokers()
   Brokers */
PHP_METHOD(RdKafka_Metadata, getBrokers)
{
    object_intern *intern;

    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }

    /* get_object() throws if the wrapper holds no metadata */
    intern = get_object(getThis());
    if (!intern) {
        return;
    }

    brokers_collection(return_value, Z_RDKAFKA_PROP_OBJ(getThis()), intern);
}
/* }}} */
/* {{{ proto RdKafka\Metadata\Collection RdKafka\Metadata::getTopics()
   Topics */
PHP_METHOD(RdKafka_Metadata, getTopics)
{
    object_intern *intern;

    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }

    /* get_object() throws if the wrapper holds no metadata */
    intern = get_object(getThis());
    if (!intern) {
        return;
    }

    topics_collection(return_value, Z_RDKAFKA_PROP_OBJ(getThis()), intern);
}
/* }}} */
/* MINIT handler: registers RdKafka\Metadata with its custom object handlers,
 * then registers the dependent metadata sub-classes. */
void kafka_metadata_minit(INIT_FUNC_ARGS)
{
    ce = register_class_RdKafka_Metadata();
    ce->create_object = kafka_metadata_new;

    handlers = kafka_default_object_handlers;
    handlers.get_debug_info = get_debug_info;
    handlers.free_obj = kafka_metadata_free;
    handlers.offset = XtOffsetOf(object_intern, std);

    kafka_metadata_topic_minit(INIT_FUNC_ARGS_PASSTHRU);
    kafka_metadata_broker_minit(INIT_FUNC_ARGS_PASSTHRU);
    kafka_metadata_partition_minit(INIT_FUNC_ARGS_PASSTHRU);
    kafka_metadata_collection_minit(INIT_FUNC_ARGS_PASSTHRU);
}
/* Wrap a librdkafka metadata handle in a new RdKafka\Metadata object.
 * Ownership of `metadata` transfers to the object; it is destroyed in
 * kafka_metadata_free(). */
void kafka_metadata_init(zval *return_value, const rd_kafka_metadata_t *metadata)
{
    object_intern *intern;

    if (object_init_ex(return_value, ce) != SUCCESS) {
        return;
    }

    intern = Z_RDKAFKA_P(object_intern, return_value);
    if (!intern) {
        return;
    }

    intern->metadata = metadata;
}
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
void kafka_metadata_minit(INIT_FUNC_ARGS);
void kafka_metadata_init(zval *return_value, const rd_kafka_metadata_t *metadata);
<?php
/**
* @generate-class-entries
* @generate-function-entries
* @generate-legacy-arginfo
*/
namespace RdKafka;
class Metadata
{
    /** Id of the broker that originated this metadata response
     * @tentative-return-type */
    public function getOrigBrokerId(): int {}

    /** Name of the broker that originated this metadata response
     * @tentative-return-type */
    public function getOrigBrokerName(): string {}

    /** Collection of RdKafka\Metadata\Broker entries
     * @tentative-return-type */
    public function getBrokers(): Metadata\Collection {}

    /** Collection of RdKafka\Metadata\Topic entries
     * @tentative-return-type */
    public function getTopics(): Metadata\Collection {}
}
/* This is a generated file, edit the .stub.php file instead.
* Stub hash: 7bdf537cb18915955d6c3f1d4775dcc9fc43eb4a */
/* Generated arginfo (PHP >= 8.0): all getters take no arguments; the two
 * collection getters share one arginfo via #define. */
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_Metadata_getOrigBrokerId, 0, 0, IS_LONG, 0)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_Metadata_getOrigBrokerName, 0, 0, IS_STRING, 0)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_OBJ_INFO_EX(arginfo_class_RdKafka_Metadata_getBrokers, 0, 0, RdKafka\\Metadata\\Collection, 0)
ZEND_END_ARG_INFO()

#define arginfo_class_RdKafka_Metadata_getTopics arginfo_class_RdKafka_Metadata_getBrokers

ZEND_METHOD(RdKafka_Metadata, getOrigBrokerId);
ZEND_METHOD(RdKafka_Metadata, getOrigBrokerName);
ZEND_METHOD(RdKafka_Metadata, getBrokers);
ZEND_METHOD(RdKafka_Metadata, getTopics);

/* Method table for RdKafka\Metadata */
static const zend_function_entry class_RdKafka_Metadata_methods[] = {
    ZEND_ME(RdKafka_Metadata, getOrigBrokerId, arginfo_class_RdKafka_Metadata_getOrigBrokerId, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata, getOrigBrokerName, arginfo_class_RdKafka_Metadata_getOrigBrokerName, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata, getBrokers, arginfo_class_RdKafka_Metadata_getBrokers, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata, getTopics, arginfo_class_RdKafka_Metadata_getTopics, ZEND_ACC_PUBLIC)
    ZEND_FE_END
};
/* Generated by gen_stub.php from metadata.stub.php: registers the
 * RdKafka\Metadata class (no properties, methods only). */
static zend_class_entry *register_class_RdKafka_Metadata(void)
{
    zend_class_entry ce, *class_entry;

    INIT_NS_CLASS_ENTRY(ce, "RdKafka", "Metadata", class_RdKafka_Metadata_methods);
    class_entry = zend_register_internal_class_ex(&ce, NULL);

    return class_entry;
}
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "php.h"
#include "php_rdkafka.h"
#include "php_rdkafka_priv.h"
#include "librdkafka/rdkafka.h"
#include "ext/spl/spl_iterators.h"
#include "Zend/zend_interfaces.h"
#include "Zend/zend_exceptions.h"
#if PHP_VERSION_ID < 80000
#include "metadata_broker_legacy_arginfo.h"
#else
#include "metadata_broker_arginfo.h"
#endif
/* Internal object backing RdKafka\Metadata\Broker. */
typedef struct _object_intern {
    zval zmetadata;                                  /* ref to the owning RdKafka\Metadata (keeps it alive) */
    const rd_kafka_metadata_broker_t *metadata_broker; /* borrowed pointer into the parent metadata */
    zend_object std;                                 /* must be last (XtOffsetOf-based layout) */
} object_intern;

static HashTable *get_debug_info(Z_RDKAFKA_OBJ *object, int *is_temp);

/* Class entry and custom object handlers, set up in kafka_metadata_broker_minit() */
static zend_class_entry * ce;
static zend_object_handlers handlers;
/* free_obj handler: drops the reference to the parent metadata zval (only
 * taken when the ctor ran, hence the metadata_broker check) and frees the
 * standard object storage. */
static void free_object(zend_object *object) /* {{{ */
{
    object_intern *intern = php_kafka_from_obj(object_intern, object);

    if (intern->metadata_broker) {
        zval_dtor(&intern->zmetadata);
    }

    zend_object_std_dtor(&intern->std);
}
/* }}} */
/* create_object handler for RdKafka\Metadata\Broker: allocates the intern
 * struct and installs the module's custom object handlers. */
static zend_object *create_object(zend_class_entry *class_type) /* {{{ */
{
    object_intern *obj = zend_object_alloc(sizeof(*obj), class_type);

    zend_object_std_init(&obj->std, class_type);
    object_properties_init(&obj->std, class_type);

    obj->std.handlers = &handlers;

    return &obj->std;
}
/* }}} */
/* Fetch the intern struct from a RdKafka\Metadata\Broker zval. Throws
 * (default Exception class, since ce is NULL) and returns NULL when the
 * object was never initialized via kafka_metadata_broker_ctor(). */
static object_intern * get_object(zval *zmt)
{
    object_intern *omt = Z_RDKAFKA_P(object_intern, zmt);

    if (!omt->metadata_broker) {
        zend_throw_exception_ex(NULL, 0, "RdKafka\\Metadata\\Broker::__construct() has not been called");
        return NULL;
    }

    return omt;
}
/* get_debug_info handler (var_dump/print_r): exposes id, host and port as a
 * temporary array. */
static HashTable *get_debug_info(Z_RDKAFKA_OBJ *object, int *is_temp) /* {{{ */
{
    zval ary;
    object_intern *intern;

    *is_temp = 1; /* tell the engine to free the returned table */

    array_init(&ary);

    intern = rdkafka_get_debug_object(object_intern, object);
    if (!intern) {
        /* uninitialized object: dump as an empty array */
        return Z_ARRVAL(ary);
    }

    add_assoc_long(&ary, "id", intern->metadata_broker->id);
    add_assoc_string(&ary, "host", intern->metadata_broker->host);
    add_assoc_long(&ary, "port", intern->metadata_broker->port);

    return Z_ARRVAL(ary);
}
/* }}} */
/* {{{ proto int RdKafka\Metadata\Broker::getId()
   Broker id */
PHP_METHOD(RdKafka_Metadata_Broker, getId)
{
    object_intern *intern;

    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }

    /* get_object() throws if the wrapper was never initialized */
    intern = get_object(getThis());
    if (!intern) {
        return;
    }

    RETURN_LONG(intern->metadata_broker->id);
}
/* }}} */
/* {{{ proto string RdKafka\Metadata\Broker::getHost()
   Broker hostname */
PHP_METHOD(RdKafka_Metadata_Broker, getHost)
{
    object_intern *intern;

    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }

    /* get_object() throws if the wrapper was never initialized */
    intern = get_object(getThis());
    if (!intern) {
        return;
    }

    RETURN_STRING(intern->metadata_broker->host);
}
/* }}} */
/* {{{ proto int RdKafka\Metadata\Broker::getPort()
   Broker port */
PHP_METHOD(RdKafka_Metadata_Broker, getPort)
{
    object_intern *intern;

    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }

    /* get_object() throws if the wrapper was never initialized */
    intern = get_object(getThis());
    if (!intern) {
        return;
    }

    RETURN_LONG(intern->metadata_broker->port);
}
/* }}} */
/* MINIT handler: registers RdKafka\Metadata\Broker with its custom object
 * handlers. */
void kafka_metadata_broker_minit(INIT_FUNC_ARGS)
{
    ce = register_class_RdKafka_Metadata_Broker();
    ce->create_object = create_object;

    handlers = kafka_default_object_handlers;
    handlers.get_debug_info = get_debug_info;
    handlers.free_obj = free_object;
    handlers.offset = XtOffsetOf(object_intern, std);
}
/* Wrap one librdkafka broker metadata entry in a new
 * RdKafka\Metadata\Broker object.
 *
 * zmetadata - owning RdKafka\Metadata zval; a reference is stored in
 *             intern->zmetadata so the underlying metadata outlives this
 *             wrapper (released in free_object()).
 * data      - pointer to a rd_kafka_metadata_broker_t inside the parent
 *             metadata; borrowed, never copied or freed here.
 */
void kafka_metadata_broker_ctor(zval *return_value, zval *zmetadata, const void *data)
{
    /* Keep the const qualifier: the entry is read-only and owned by the
     * parent metadata. (The previous cast needlessly dropped const before
     * storing into the const-qualified member.) */
    const rd_kafka_metadata_broker_t *metadata_broker = (const rd_kafka_metadata_broker_t*)data;
    object_intern *intern;

    if (object_init_ex(return_value, ce) != SUCCESS) {
        return;
    }

    intern = Z_RDKAFKA_P(object_intern, return_value);
    if (!intern) {
        return;
    }

    /* Copy with refcount increment so the parent metadata stays alive */
    ZVAL_ZVAL(&intern->zmetadata, zmetadata, 1, 0);
    intern->metadata_broker = metadata_broker;
}
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
void kafka_metadata_broker_minit(INIT_FUNC_ARGS);
void kafka_metadata_broker_ctor(zval *return_value, zval *zmetadata, const void *metadata_broker);
<?php
/**
* @generate-class-entries
* @generate-function-entries
* @generate-legacy-arginfo
*/
namespace RdKafka\Metadata;
class Broker
{
    /** Broker id
     * @tentative-return-type */
    public function getId(): int {}

    /** Broker hostname
     * @tentative-return-type */
    public function getHost(): string {}

    /** Broker port
     * @tentative-return-type */
    public function getPort(): int {}
}
/* This is a generated file, edit the .stub.php file instead.
* Stub hash: 53fa93c8908858d59ab75b39a77efdea9e843675 */
/* Generated arginfo (PHP >= 8.0): all getters take no arguments; getPort()
 * shares getId()'s int arginfo via #define. */
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_Metadata_Broker_getId, 0, 0, IS_LONG, 0)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_Metadata_Broker_getHost, 0, 0, IS_STRING, 0)
ZEND_END_ARG_INFO()

#define arginfo_class_RdKafka_Metadata_Broker_getPort arginfo_class_RdKafka_Metadata_Broker_getId

ZEND_METHOD(RdKafka_Metadata_Broker, getId);
ZEND_METHOD(RdKafka_Metadata_Broker, getHost);
ZEND_METHOD(RdKafka_Metadata_Broker, getPort);

/* Method table for RdKafka\Metadata\Broker */
static const zend_function_entry class_RdKafka_Metadata_Broker_methods[] = {
    ZEND_ME(RdKafka_Metadata_Broker, getId, arginfo_class_RdKafka_Metadata_Broker_getId, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata_Broker, getHost, arginfo_class_RdKafka_Metadata_Broker_getHost, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata_Broker, getPort, arginfo_class_RdKafka_Metadata_Broker_getPort, ZEND_ACC_PUBLIC)
    ZEND_FE_END
};
/* Generated by gen_stub.php from metadata_broker.stub.php (PHP >= 8.0):
 * registers the RdKafka\Metadata\Broker class (no properties). */
static zend_class_entry *register_class_RdKafka_Metadata_Broker(void)
{
    zend_class_entry ce, *class_entry;

    INIT_NS_CLASS_ENTRY(ce, "RdKafka\\Metadata", "Broker", class_RdKafka_Metadata_Broker_methods);
    class_entry = zend_register_internal_class_ex(&ce, NULL);

    return class_entry;
}
/* This is a generated file, edit the .stub.php file instead.
* Stub hash: 53fa93c8908858d59ab75b39a77efdea9e843675 */
/* Generated legacy arginfo (PHP < 8.0): no type info; all three getters
 * share the same zero-argument arginfo via #define. */
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_Metadata_Broker_getId, 0, 0, 0)
ZEND_END_ARG_INFO()

#define arginfo_class_RdKafka_Metadata_Broker_getHost arginfo_class_RdKafka_Metadata_Broker_getId
#define arginfo_class_RdKafka_Metadata_Broker_getPort arginfo_class_RdKafka_Metadata_Broker_getId

ZEND_METHOD(RdKafka_Metadata_Broker, getId);
ZEND_METHOD(RdKafka_Metadata_Broker, getHost);
ZEND_METHOD(RdKafka_Metadata_Broker, getPort);

/* Method table for RdKafka\Metadata\Broker */
static const zend_function_entry class_RdKafka_Metadata_Broker_methods[] = {
    ZEND_ME(RdKafka_Metadata_Broker, getId, arginfo_class_RdKafka_Metadata_Broker_getId, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata_Broker, getHost, arginfo_class_RdKafka_Metadata_Broker_getHost, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata_Broker, getPort, arginfo_class_RdKafka_Metadata_Broker_getPort, ZEND_ACC_PUBLIC)
    ZEND_FE_END
};
/* Generated by gen_stub.php from metadata_broker.stub.php (legacy PHP < 8.0):
 * registers the RdKafka\Metadata\Broker class (no properties). */
static zend_class_entry *register_class_RdKafka_Metadata_Broker(void)
{
    zend_class_entry ce, *class_entry;

    INIT_NS_CLASS_ENTRY(ce, "RdKafka\\Metadata", "Broker", class_RdKafka_Metadata_Broker_methods);
    class_entry = zend_register_internal_class_ex(&ce, NULL);

    return class_entry;
}
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "php.h"
#include "php_rdkafka.h"
#include "php_rdkafka_priv.h"
#include "librdkafka/rdkafka.h"
#include "ext/spl/spl_iterators.h"
#include "Zend/zend_interfaces.h"
#include "metadata_collection.h"
#include "Zend/zend_exceptions.h"
#if PHP_VERSION_ID < 80000
#include "metadata_collection_legacy_arginfo.h"
#else
#include "metadata_collection_arginfo.h"
#endif
/* Internal state of an RdKafka\Metadata\Collection object: a read-only view
 * over a C array that is owned by the metadata object held in zmetadata. */
typedef struct _object_intern {
    zval zmetadata;                        /* ref to the owning metadata zval; released in free_object() */
    const void *items;                     /* borrowed pointer to the raw item array (NULL until init) */
    size_t item_cnt;                       /* number of items in the array */
    size_t item_size;                      /* byte stride between consecutive items */
    size_t position;                       /* Iterator cursor */
    kafka_metadata_collection_ctor_t ctor; /* converts one raw item into a PHP value */
    zend_object std;                       /* embedded zend object; must stay last (see handlers.offset) */
} object_intern;

static HashTable *get_debug_info(Z_RDKAFKA_OBJ *object, int *is_temp);

static zend_class_entry *ce;
static zend_object_handlers handlers;
/* Destructor handler: drop the reference on the owning metadata zval (held
 * only once the collection was initialized) and tear down the std object. */
static void free_object(zend_object *object) /* {{{ */
{
    object_intern *self = php_kafka_from_obj(object_intern, object);

    /* items is set only by kafka_metadata_collection_init(), which is also
     * the only place zmetadata gets populated. */
    if (self->items != NULL) {
        zval_dtor(&self->zmetadata);
    }

    zend_object_std_dtor(&self->std);
}
/* }}} */
/* create_object handler: allocates the intern struct with the zend object
 * embedded at its tail and installs this file's custom handlers. */
static zend_object *create_object(zend_class_entry *class_type) /* {{{ */
{
    object_intern *self = zend_object_alloc(sizeof(object_intern), class_type);

    zend_object_std_init(&self->std, class_type);
    object_properties_init(&self->std, class_type);
    self->std.handlers = &handlers;

    return &self->std;
}
/* }}} */
/* Fetches the intern struct from a zval, throwing if the collection was
 * never initialized (items still NULL).  Returns NULL on error. */
static object_intern * get_object(zval *zmti)
{
    object_intern *self = Z_RDKAFKA_P(object_intern, zmti);

    if (self->items) {
        return self;
    }

    zend_throw_exception_ex(NULL, 0, "RdKafka\\Metadata\\Collection::__construct() has not been called");
    return NULL;
}
/* Debug-dump handler (var_dump): materializes every item through the stored
 * ctor into a plain PHP array. */
static HashTable *get_debug_info(Z_RDKAFKA_OBJ *object, int *is_temp) /* {{{ */
{
    zval ary, item;
    object_intern *self;
    size_t idx;

    *is_temp = 1; /* caller frees the returned table */
    array_init(&ary);

    self = rdkafka_get_debug_object(object_intern, object);
    if (self == NULL) {
        /* uninitialized object: dump as an empty array */
        return Z_ARRVAL(ary);
    }

    for (idx = 0; idx < self->item_cnt; idx++) {
        ZVAL_NULL(&item);
        self->ctor(&item, &self->zmetadata, (char *)self->items + idx * self->item_size);
        add_next_index_zval(&ary, &item);
    }

    return Z_ARRVAL(ary);
}
/* }}} */
/* {{{ proto int RdKafka\Metadata\Collection::count()
*/
PHP_METHOD(RdKafka_Metadata_Collection, count)
{
object_intern *intern;
if (zend_parse_parameters_none() == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
RETURN_LONG(intern->item_cnt);
}
/* }}} */
/* {{{ proto void RdKafka\Metadata\Collection::rewind()
*/
PHP_METHOD(RdKafka_Metadata_Collection, rewind)
{
object_intern *intern;
if (zend_parse_parameters_none() == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
intern->position = 0;
}
/* }}} */
/* {{{ proto mixed RdKafka\Metadata\Collection::current()
*/
PHP_METHOD(RdKafka_Metadata_Collection, current)
{
object_intern *intern;
if (zend_parse_parameters_none() == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
if (intern->position >= intern->item_cnt) {
zend_throw_exception(ce_kafka_exception, "Called current() on invalid iterator", 0);
return;
}
intern->ctor(return_value, &intern->zmetadata, (char *)intern->items + intern->position * intern->item_size);
}
/* }}} */
/* {{{ proto mixed RdKafka\Metadata\Collection::key()
*/
PHP_METHOD(RdKafka_Metadata_Collection, key)
{
object_intern *intern;
if (zend_parse_parameters_none() == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
if (intern->position >= intern->item_cnt) {
zend_throw_exception(ce_kafka_exception, "Called key() on invalid iterator", 0);
return;
}
RETURN_LONG(intern->position);
}
/* }}} */
/* {{{ proto void RdKafka\Metadata\Collection::next()
*/
PHP_METHOD(RdKafka_Metadata_Collection, next)
{
object_intern *intern;
if (zend_parse_parameters_none() == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
intern->position ++;
}
/* }}} */
/* {{{ proto bool RdKafka\Metadata\Collection::valid()
*/
PHP_METHOD(RdKafka_Metadata_Collection, valid)
{
object_intern *intern;
if (zend_parse_parameters_none() == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
RETURN_BOOL(intern->position < intern->item_cnt);
}
/* }}} */
/* MINIT: registers RdKafka\Metadata\Collection and installs the custom
 * object handlers (debug info, destructor, embedded-std offset). */
void kafka_metadata_collection_minit(INIT_FUNC_ARGS)
{
#if PHP_VERSION_ID < 80100
    /* pre-8.1 builds pass the SPL class entries for Countable/Iterator */
    ce = register_class_RdKafka_Metadata_Collection(spl_ce_Countable, spl_ce_Iterator);
#else
    ce = register_class_RdKafka_Metadata_Collection(zend_ce_countable, zend_ce_iterator);
#endif
    ce->create_object = create_object;

    handlers = kafka_default_object_handlers;
    handlers.get_debug_info = get_debug_info;
    handlers.free_obj = free_object;
    /* offset of the embedded zend_object lets the engine recover object_intern */
    handlers.offset = XtOffsetOf(object_intern, std);
}
/* Initializes return_value as a Collection over `items` (item_cnt entries,
 * item_size bytes each).  The array itself is borrowed; zmetadata is copied
 * into the object to keep its owner alive for the collection's lifetime.
 * `ctor` converts a raw item into a PHP value on demand. */
void kafka_metadata_collection_init(zval *return_value, Z_RDKAFKA_OBJ *zmetadata, const void * items, size_t item_cnt, size_t item_size, kafka_metadata_collection_ctor_t ctor)
{
    object_intern *intern;

    if (object_init_ex(return_value, ce) != SUCCESS) {
        return;
    }
    intern = Z_RDKAFKA_P(object_intern, return_value);
    if (!intern) {
        return;
    }

    Z_RDKAFKA_OBJ_COPY(&intern->zmetadata, zmetadata);
    intern->items = items;
    intern->item_cnt = item_cnt;
    intern->item_size = item_size;
    intern->ctor = ctor;
}
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
/* Callback that builds one PHP value from one raw metadata item: receives
 * the destination zval, the owning metadata zval, and a pointer to the raw
 * librdkafka item.  (Fixed typo: `renurn_value` -> `return_value`.) */
typedef void (*kafka_metadata_collection_ctor_t)(zval *return_value, zval *zmetadata, const void *object);

/* MINIT registration for RdKafka\Metadata\Collection. */
void kafka_metadata_collection_minit(INIT_FUNC_ARGS);

/* Wraps a borrowed C array of item_cnt items (item_size bytes each) into a
 * Collection object; ctor converts each raw item on demand. */
void kafka_metadata_collection_init(zval *return_value, Z_RDKAFKA_OBJ *zmetadata, const void * items, size_t item_cnt, size_t item_size, kafka_metadata_collection_ctor_t ctor);
<?php

/**
 * Stub for RdKafka\Metadata\Collection — the source the *arginfo.h headers
 * are generated from (see the @generate-* directives).
 *
 * @generate-class-entries
 * @generate-function-entries
 * @generate-legacy-arginfo
 */

namespace RdKafka\Metadata;

class Collection implements \Countable, \Iterator
{
    /** @tentative-return-type */
    public function count(): int {}

    /** @tentative-return-type */
    public function current(): mixed {}

    /** @tentative-return-type */
    public function key(): int {}

    /** @tentative-return-type */
    public function next(): void {}

    /** @tentative-return-type */
    public function rewind(): void {}

    /** @tentative-return-type */
    public function valid(): bool {}
}
/* PHP >= 8.0 arginfo variant for RdKafka\Metadata\Collection; only one of
 * the two generated variants is compiled (see the PHP_VERSION_ID include
 * guard in metadata_collection.c). */
/* This is a generated file, edit the .stub.php file instead.
 * Stub hash: a84f8bd542d46b72179ff6aa0d9d17bed530a2de */
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_Metadata_Collection_count, 0, 0, IS_LONG, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_Metadata_Collection_current, 0, 0, IS_MIXED, 0)
ZEND_END_ARG_INFO()
/* key() shares count()'s arginfo (no args, int return) */
#define arginfo_class_RdKafka_Metadata_Collection_key arginfo_class_RdKafka_Metadata_Collection_count
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_Metadata_Collection_next, 0, 0, IS_VOID, 0)
ZEND_END_ARG_INFO()
#define arginfo_class_RdKafka_Metadata_Collection_rewind arginfo_class_RdKafka_Metadata_Collection_next
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_Metadata_Collection_valid, 0, 0, _IS_BOOL, 0)
ZEND_END_ARG_INFO()
ZEND_METHOD(RdKafka_Metadata_Collection, count);
ZEND_METHOD(RdKafka_Metadata_Collection, current);
ZEND_METHOD(RdKafka_Metadata_Collection, key);
ZEND_METHOD(RdKafka_Metadata_Collection, next);
ZEND_METHOD(RdKafka_Metadata_Collection, rewind);
ZEND_METHOD(RdKafka_Metadata_Collection, valid);
static const zend_function_entry class_RdKafka_Metadata_Collection_methods[] = {
    ZEND_ME(RdKafka_Metadata_Collection, count, arginfo_class_RdKafka_Metadata_Collection_count, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata_Collection, current, arginfo_class_RdKafka_Metadata_Collection_current, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata_Collection, key, arginfo_class_RdKafka_Metadata_Collection_key, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata_Collection, next, arginfo_class_RdKafka_Metadata_Collection_next, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata_Collection, rewind, arginfo_class_RdKafka_Metadata_Collection_rewind, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata_Collection, valid, arginfo_class_RdKafka_Metadata_Collection_valid, ZEND_ACC_PUBLIC)
    ZEND_FE_END
};
/* Registers the class and binds Countable + Iterator; called from MINIT
 * with version-appropriate interface class entries. */
static zend_class_entry *register_class_RdKafka_Metadata_Collection(zend_class_entry *class_entry_Countable, zend_class_entry *class_entry_Iterator)
{
    zend_class_entry ce, *class_entry;
    INIT_NS_CLASS_ENTRY(ce, "RdKafka\\Metadata", "Collection", class_RdKafka_Metadata_Collection_methods);
    class_entry = zend_register_internal_class_ex(&ce, NULL);
    zend_class_implements(class_entry, 2, class_entry_Countable, class_entry_Iterator);
    return class_entry;
}
/* Legacy (PHP < 8.0) arginfo variant: no return-type info, all six no-arg
 * methods share one arginfo.  Mutually exclusive with the variant above via
 * the PHP_VERSION_ID include guard in metadata_collection.c. */
/* This is a generated file, edit the .stub.php file instead.
 * Stub hash: a84f8bd542d46b72179ff6aa0d9d17bed530a2de */
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_Metadata_Collection_count, 0, 0, 0)
ZEND_END_ARG_INFO()
#define arginfo_class_RdKafka_Metadata_Collection_current arginfo_class_RdKafka_Metadata_Collection_count
#define arginfo_class_RdKafka_Metadata_Collection_key arginfo_class_RdKafka_Metadata_Collection_count
#define arginfo_class_RdKafka_Metadata_Collection_next arginfo_class_RdKafka_Metadata_Collection_count
#define arginfo_class_RdKafka_Metadata_Collection_rewind arginfo_class_RdKafka_Metadata_Collection_count
#define arginfo_class_RdKafka_Metadata_Collection_valid arginfo_class_RdKafka_Metadata_Collection_count
ZEND_METHOD(RdKafka_Metadata_Collection, count);
ZEND_METHOD(RdKafka_Metadata_Collection, current);
ZEND_METHOD(RdKafka_Metadata_Collection, key);
ZEND_METHOD(RdKafka_Metadata_Collection, next);
ZEND_METHOD(RdKafka_Metadata_Collection, rewind);
ZEND_METHOD(RdKafka_Metadata_Collection, valid);
static const zend_function_entry class_RdKafka_Metadata_Collection_methods[] = {
    ZEND_ME(RdKafka_Metadata_Collection, count, arginfo_class_RdKafka_Metadata_Collection_count, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata_Collection, current, arginfo_class_RdKafka_Metadata_Collection_current, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata_Collection, key, arginfo_class_RdKafka_Metadata_Collection_key, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata_Collection, next, arginfo_class_RdKafka_Metadata_Collection_next, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata_Collection, rewind, arginfo_class_RdKafka_Metadata_Collection_rewind, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata_Collection, valid, arginfo_class_RdKafka_Metadata_Collection_valid, ZEND_ACC_PUBLIC)
    ZEND_FE_END
};
static zend_class_entry *register_class_RdKafka_Metadata_Collection(zend_class_entry *class_entry_Countable, zend_class_entry *class_entry_Iterator)
{
    zend_class_entry ce, *class_entry;
    INIT_NS_CLASS_ENTRY(ce, "RdKafka\\Metadata", "Collection", class_RdKafka_Metadata_Collection_methods);
    class_entry = zend_register_internal_class_ex(&ce, NULL);
    zend_class_implements(class_entry, 2, class_entry_Countable, class_entry_Iterator);
    return class_entry;
}
/* Generated legacy arginfo for RdKafka\Metadata (the top-level metadata
 * object exposing brokers/topics); do not hand-edit. */
/* This is a generated file, edit the .stub.php file instead.
 * Stub hash: 7bdf537cb18915955d6c3f1d4775dcc9fc43eb4a */
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_Metadata_getOrigBrokerId, 0, 0, 0)
ZEND_END_ARG_INFO()
/* all four getters take no arguments and share one arginfo */
#define arginfo_class_RdKafka_Metadata_getOrigBrokerName arginfo_class_RdKafka_Metadata_getOrigBrokerId
#define arginfo_class_RdKafka_Metadata_getBrokers arginfo_class_RdKafka_Metadata_getOrigBrokerId
#define arginfo_class_RdKafka_Metadata_getTopics arginfo_class_RdKafka_Metadata_getOrigBrokerId
ZEND_METHOD(RdKafka_Metadata, getOrigBrokerId);
ZEND_METHOD(RdKafka_Metadata, getOrigBrokerName);
ZEND_METHOD(RdKafka_Metadata, getBrokers);
ZEND_METHOD(RdKafka_Metadata, getTopics);
static const zend_function_entry class_RdKafka_Metadata_methods[] = {
    ZEND_ME(RdKafka_Metadata, getOrigBrokerId, arginfo_class_RdKafka_Metadata_getOrigBrokerId, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata, getOrigBrokerName, arginfo_class_RdKafka_Metadata_getOrigBrokerName, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata, getBrokers, arginfo_class_RdKafka_Metadata_getBrokers, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata, getTopics, arginfo_class_RdKafka_Metadata_getTopics, ZEND_ACC_PUBLIC)
    ZEND_FE_END
};
/* Registers the RdKafka\Metadata internal class; called from MINIT. */
static zend_class_entry *register_class_RdKafka_Metadata(void)
{
    zend_class_entry ce, *class_entry;
    INIT_NS_CLASS_ENTRY(ce, "RdKafka", "Metadata", class_RdKafka_Metadata_methods);
    class_entry = zend_register_internal_class_ex(&ce, NULL);
    return class_entry;
}
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "php.h"
#include "php_rdkafka.h"
#include "php_rdkafka_priv.h"
#include "librdkafka/rdkafka.h"
#include "ext/spl/spl_iterators.h"
#include "Zend/zend_interfaces.h"
#include "Zend/zend_exceptions.h"
#include "metadata_collection.h"
#if PHP_VERSION_ID < 80000
#include "metadata_partition_legacy_arginfo.h"
#else
#include "metadata_partition_arginfo.h"
#endif
/* Internal state of an RdKafka\Metadata\Partition object.  (Same name as
 * the struct in metadata_collection.c — each lives in its own translation
 * unit; this SOURCE is a concatenation of those files.) */
typedef struct _object_intern {
    zval zmetadata;                                        /* ref to the owning metadata zval; released in free_object() */
    const rd_kafka_metadata_partition_t *metadata_partition; /* borrowed librdkafka struct (NULL until ctor runs) */
    zend_object std;                                       /* embedded zend object; must stay last */
} object_intern;

static HashTable *get_debug_info(Z_RDKAFKA_OBJ *object, int *is_temp);

static zend_class_entry * ce;
static zend_object_handlers handlers;
/* Destructor handler: release the metadata ref (held only once the ctor
 * populated metadata_partition) and tear down the std object. */
static void free_object(zend_object *object) /* {{{ */
{
    object_intern *self = php_kafka_from_obj(object_intern, object);

    if (self->metadata_partition != NULL) {
        zval_dtor(&self->zmetadata);
    }

    zend_object_std_dtor(&self->std);
}
/* }}} */
/* create_object handler: allocates the intern struct with the zend object
 * embedded at its tail and installs this file's custom handlers. */
static zend_object *create_object(zend_class_entry *class_type) /* {{{ */
{
    object_intern *self = zend_object_alloc(sizeof(object_intern), class_type);

    zend_object_std_init(&self->std, class_type);
    object_properties_init(&self->std, class_type);
    self->std.handlers = &handlers;

    return &self->std;
}
/* }}} */
/* Fetches the intern struct from a zval, throwing if the object was never
 * initialized (metadata_partition still NULL).  Returns NULL on error. */
static object_intern * get_object(zval *zmt)
{
    object_intern *self = Z_RDKAFKA_P(object_intern, zmt);

    if (self->metadata_partition) {
        return self;
    }

    zend_throw_exception_ex(NULL, 0, "RdKafka\\Metadata\\Partition::__construct() has not been called");
    return NULL;
}
/* Debug-dump handler (var_dump): flat array of the partition's scalar
 * fields taken straight from the librdkafka struct. */
static HashTable *get_debug_info(Z_RDKAFKA_OBJ *object, int *is_temp) /* {{{ */
{
    zval ary;
    object_intern *self;

    *is_temp = 1; /* caller frees the returned table */
    array_init(&ary);

    self = rdkafka_get_debug_object(object_intern, object);
    if (self == NULL) {
        /* uninitialized object: dump as an empty array */
        return Z_ARRVAL(ary);
    }

    add_assoc_long(&ary, "id", self->metadata_partition->id);
    add_assoc_long(&ary, "err", self->metadata_partition->err);
    add_assoc_long(&ary, "leader", self->metadata_partition->leader);
    add_assoc_long(&ary, "replica_cnt", self->metadata_partition->replica_cnt);
    add_assoc_long(&ary, "isr_cnt", self->metadata_partition->isr_cnt);

    return Z_ARRVAL(ary);
}
/* }}} */
/* {{{ proto int RdKafka\Metadata\Partition::getId()
Partition id */
PHP_METHOD(RdKafka_Metadata_Partition, getId)
{
object_intern *intern;
if (zend_parse_parameters_none() == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
RETURN_LONG(intern->metadata_partition->id);
}
/* }}} */
/* {{{ proto int RdKafka\Metadata\Partition::getErr()
Partition error reported by broker */
PHP_METHOD(RdKafka_Metadata_Partition, getErr)
{
object_intern *intern;
if (zend_parse_parameters_none() == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
RETURN_LONG(intern->metadata_partition->err);
}
/* }}} */
/* {{{ proto int RdKafka\Metadata\Partition::getLeader()
Leader broker */
PHP_METHOD(RdKafka_Metadata_Partition, getLeader)
{
object_intern *intern;
if (zend_parse_parameters_none() == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
RETURN_LONG(intern->metadata_partition->leader);
}
/* }}} */
/* Collection item ctor for raw int32_t arrays (replica / ISR broker ids):
 * exposes the 32-bit value as a PHP int.  zmetadata is unused here.
 * NOTE(review): not declared static — confirm no other translation unit
 * links against this symbol; if none does, internal linkage would be safer. */
void int32_ctor(zval *return_value, zval *zmetadata, const void *data) {
    ZVAL_LONG(return_value, *(int32_t*)data);
}
/* {{{ proto array RdKafka\Metadata\Partition::getReplicas()
Replica broker ids */
PHP_METHOD(RdKafka_Metadata_Partition, getReplicas)
{
object_intern *intern;
if (zend_parse_parameters_none() == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
kafka_metadata_collection_init(return_value, Z_RDKAFKA_PROP_OBJ(getThis()), intern->metadata_partition->replicas, intern->metadata_partition->replica_cnt, sizeof(*intern->metadata_partition->replicas), int32_ctor);
}
/* }}} */
/* {{{ proto array RdKafka\Metadata\Partition::getIsrs()
In-Sync-Replica broker ids */
PHP_METHOD(RdKafka_Metadata_Partition, getIsrs)
{
object_intern *intern;
if (zend_parse_parameters_none() == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
kafka_metadata_collection_init(return_value, Z_RDKAFKA_PROP_OBJ(getThis()), intern->metadata_partition->isrs, intern->metadata_partition->isr_cnt, sizeof(*intern->metadata_partition->isrs), int32_ctor);
}
/* }}} */
/* MINIT: registers RdKafka\Metadata\Partition and installs the custom
 * object handlers (debug info, destructor, embedded-std offset). */
void kafka_metadata_partition_minit(INIT_FUNC_ARGS)
{
    ce = register_class_RdKafka_Metadata_Partition();
    ce->create_object = create_object;

    handlers = kafka_default_object_handlers;
    handlers.get_debug_info = get_debug_info;
    handlers.free_obj = free_object;
    handlers.offset = XtOffsetOf(object_intern, std);
}
/* Collection-ctor callback: wraps one raw rd_kafka_metadata_partition_t
 * into a Partition object.  The struct pointer is borrowed; zmetadata is
 * copied (with addref) to keep its owner alive. */
void kafka_metadata_partition_ctor(zval *return_value, zval *zmetadata, const void *data)
{
    rd_kafka_metadata_partition_t *metadata_partition = (rd_kafka_metadata_partition_t*)data;
    object_intern *intern;

    if (object_init_ex(return_value, ce) != SUCCESS) {
        return;
    }
    intern = Z_RDKAFKA_P(object_intern, return_value);
    if (!intern) {
        return;
    }

    /* copy + addref: keeps the metadata zval alive for this object */
    ZVAL_ZVAL(&intern->zmetadata, zmetadata, 1, 0);
    intern->metadata_partition = metadata_partition;
}
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
/* MINIT registration for RdKafka\Metadata\Partition. */
void kafka_metadata_partition_minit(INIT_FUNC_ARGS);
/* Collection-ctor: wraps one rd_kafka_metadata_partition_t into a Partition object. */
void kafka_metadata_partition_ctor(zval *return_value, zval *zmetadata, const void *metadata_partition);
<?php

/**
 * Stub for RdKafka\Metadata\Partition — the source the *arginfo.h headers
 * are generated from (see the @generate-* directives).
 *
 * @generate-class-entries
 * @generate-function-entries
 * @generate-legacy-arginfo
 */

namespace RdKafka\Metadata;

class Partition
{
    /** @tentative-return-type */
    public function getId(): int {}

    /** @tentative-return-type */
    public function getErr(): int {}

    /** @tentative-return-type */
    public function getLeader(): int {}

    /** @tentative-return-type */
    public function getReplicas(): \RdKafka\Metadata\Collection {}

    /** @tentative-return-type */
    public function getIsrs(): \RdKafka\Metadata\Collection {}
}
/* PHP >= 8.0 arginfo variant for RdKafka\Metadata\Partition; only one of
 * the two generated variants is compiled (PHP_VERSION_ID include guard). */
/* This is a generated file, edit the .stub.php file instead.
 * Stub hash: 4c02c5ac3a6240ab8cbc90451bdc54a3de2c2d2f */
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_Metadata_Partition_getId, 0, 0, IS_LONG, 0)
ZEND_END_ARG_INFO()
/* getErr/getLeader also return int with no args, so they reuse getId's arginfo */
#define arginfo_class_RdKafka_Metadata_Partition_getErr arginfo_class_RdKafka_Metadata_Partition_getId
#define arginfo_class_RdKafka_Metadata_Partition_getLeader arginfo_class_RdKafka_Metadata_Partition_getId
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_OBJ_INFO_EX(arginfo_class_RdKafka_Metadata_Partition_getReplicas, 0, 0, RdKafka\\Metadata\\Collection, 0)
ZEND_END_ARG_INFO()
#define arginfo_class_RdKafka_Metadata_Partition_getIsrs arginfo_class_RdKafka_Metadata_Partition_getReplicas
ZEND_METHOD(RdKafka_Metadata_Partition, getId);
ZEND_METHOD(RdKafka_Metadata_Partition, getErr);
ZEND_METHOD(RdKafka_Metadata_Partition, getLeader);
ZEND_METHOD(RdKafka_Metadata_Partition, getReplicas);
ZEND_METHOD(RdKafka_Metadata_Partition, getIsrs);
static const zend_function_entry class_RdKafka_Metadata_Partition_methods[] = {
    ZEND_ME(RdKafka_Metadata_Partition, getId, arginfo_class_RdKafka_Metadata_Partition_getId, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata_Partition, getErr, arginfo_class_RdKafka_Metadata_Partition_getErr, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata_Partition, getLeader, arginfo_class_RdKafka_Metadata_Partition_getLeader, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata_Partition, getReplicas, arginfo_class_RdKafka_Metadata_Partition_getReplicas, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata_Partition, getIsrs, arginfo_class_RdKafka_Metadata_Partition_getIsrs, ZEND_ACC_PUBLIC)
    ZEND_FE_END
};
/* Registers the RdKafka\Metadata\Partition internal class; called from MINIT. */
static zend_class_entry *register_class_RdKafka_Metadata_Partition(void)
{
    zend_class_entry ce, *class_entry;
    INIT_NS_CLASS_ENTRY(ce, "RdKafka\\Metadata", "Partition", class_RdKafka_Metadata_Partition_methods);
    class_entry = zend_register_internal_class_ex(&ce, NULL);
    return class_entry;
}
/* Legacy (PHP < 8.0) arginfo variant: no return-type info, all five no-arg
 * getters share one arginfo.  Mutually exclusive with the variant above. */
/* This is a generated file, edit the .stub.php file instead.
 * Stub hash: 4c02c5ac3a6240ab8cbc90451bdc54a3de2c2d2f */
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_Metadata_Partition_getId, 0, 0, 0)
ZEND_END_ARG_INFO()
#define arginfo_class_RdKafka_Metadata_Partition_getErr arginfo_class_RdKafka_Metadata_Partition_getId
#define arginfo_class_RdKafka_Metadata_Partition_getLeader arginfo_class_RdKafka_Metadata_Partition_getId
#define arginfo_class_RdKafka_Metadata_Partition_getReplicas arginfo_class_RdKafka_Metadata_Partition_getId
#define arginfo_class_RdKafka_Metadata_Partition_getIsrs arginfo_class_RdKafka_Metadata_Partition_getId
ZEND_METHOD(RdKafka_Metadata_Partition, getId);
ZEND_METHOD(RdKafka_Metadata_Partition, getErr);
ZEND_METHOD(RdKafka_Metadata_Partition, getLeader);
ZEND_METHOD(RdKafka_Metadata_Partition, getReplicas);
ZEND_METHOD(RdKafka_Metadata_Partition, getIsrs);
static const zend_function_entry class_RdKafka_Metadata_Partition_methods[] = {
    ZEND_ME(RdKafka_Metadata_Partition, getId, arginfo_class_RdKafka_Metadata_Partition_getId, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata_Partition, getErr, arginfo_class_RdKafka_Metadata_Partition_getErr, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata_Partition, getLeader, arginfo_class_RdKafka_Metadata_Partition_getLeader, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata_Partition, getReplicas, arginfo_class_RdKafka_Metadata_Partition_getReplicas, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata_Partition, getIsrs, arginfo_class_RdKafka_Metadata_Partition_getIsrs, ZEND_ACC_PUBLIC)
    ZEND_FE_END
};
static zend_class_entry *register_class_RdKafka_Metadata_Partition(void)
{
    zend_class_entry ce, *class_entry;
    INIT_NS_CLASS_ENTRY(ce, "RdKafka\\Metadata", "Partition", class_RdKafka_Metadata_Partition_methods);
    class_entry = zend_register_internal_class_ex(&ce, NULL);
    return class_entry;
}
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "php.h"
#include "php_rdkafka.h"
#include "php_rdkafka_priv.h"
#include "librdkafka/rdkafka.h"
#include "ext/spl/spl_iterators.h"
#include "Zend/zend_interfaces.h"
#include "metadata_partition.h"
#include "metadata_collection.h"
#include "Zend/zend_exceptions.h"
#if PHP_VERSION_ID < 80000
#include "metadata_topic_legacy_arginfo.h"
#else
#include "metadata_topic_arginfo.h"
#endif
/* Internal state of an RdKafka\Metadata\Topic object (one per translation
 * unit; this SOURCE concatenates several files). */
typedef struct _object_intern {
    zval zmetadata;                                   /* ref to the owning metadata zval; released in free_object() */
    const rd_kafka_metadata_topic_t *metadata_topic;  /* borrowed librdkafka struct (NULL until ctor runs) */
    zend_object std;                                  /* embedded zend object; must stay last */
} object_intern;

static HashTable *get_debug_info(Z_RDKAFKA_OBJ *object, int *is_temp);

static zend_class_entry * ce;
static zend_object_handlers handlers;
/* Builds a Collection over this topic's partition array; each raw
 * rd_kafka_metadata_partition_t is converted on demand by
 * kafka_metadata_partition_ctor. */
static void partitions_collection(zval *return_value, Z_RDKAFKA_OBJ *parent, object_intern *intern) { /* {{{ */
    kafka_metadata_collection_init(return_value, parent, intern->metadata_topic->partitions, intern->metadata_topic->partition_cnt, sizeof(*intern->metadata_topic->partitions), kafka_metadata_partition_ctor);
}
/* }}} */
/* Destructor handler: release the metadata ref (held only once the ctor
 * populated metadata_topic) and tear down the std object. */
static void free_object(zend_object *object) /* {{{ */
{
    object_intern *self = php_kafka_from_obj(object_intern, object);

    if (self->metadata_topic != NULL) {
        zval_dtor(&self->zmetadata);
    }

    zend_object_std_dtor(&self->std);
}
/* }}} */
/* create_object handler: allocates the intern struct with the zend object
 * embedded at its tail and installs this file's custom handlers. */
static zend_object *create_object(zend_class_entry *class_type) /* {{{ */
{
    object_intern *self = zend_object_alloc(sizeof(object_intern), class_type);

    zend_object_std_init(&self->std, class_type);
    object_properties_init(&self->std, class_type);
    self->std.handlers = &handlers;

    return &self->std;
}
/* }}} */
/* Fetches the intern struct from a zval, throwing if the object was never
 * initialized (metadata_topic still NULL).  Returns NULL on error. */
static object_intern * get_object(zval *zmt)
{
    object_intern *self = Z_RDKAFKA_P(object_intern, zmt);

    if (self->metadata_topic) {
        return self;
    }

    zend_throw_exception_ex(NULL, 0, "RdKafka\\Metadata\\Topic::__construct() has not been called");
    return NULL;
}
/* Debug-dump handler (var_dump): exposes the topic name, the partitions
 * collection, and the topic-level error code. */
static HashTable *get_debug_info(Z_RDKAFKA_OBJ *object, int *is_temp) /* {{{ */
{
    zval ary;
    object_intern *intern;
    zval partitions;

    *is_temp = 1;  /* caller frees the returned table */
    array_init(&ary);

    intern = rdkafka_get_debug_object(object_intern, object);
    if (!intern) {
        /* uninitialized object: dump as an empty array */
        return Z_ARRVAL(ary);
    }

    add_assoc_string(&ary, "topic", intern->metadata_topic->topic);

    /* partitions zval ownership transfers into the array */
    ZVAL_NULL(&partitions);
    partitions_collection(&partitions, object, intern);
    add_assoc_zval(&ary, "partitions", &partitions);

    add_assoc_long(&ary, "err", intern->metadata_topic->err);

    return Z_ARRVAL(ary);
}
/* }}} */
/* {{{ proto string RdKafka\MetadataTopic::getTopic()
Topic name */
PHP_METHOD(RdKafka_Metadata_Topic, getTopic)
{
object_intern *intern;
if (zend_parse_parameters_none() == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
RETURN_STRING(intern->metadata_topic->topic);
}
/* }}} */
/* {{{ proto int RdKafka\MetadataTopic::getErr()
   Error */
PHP_METHOD(RdKafka_Metadata_Topic, getErr)
{
    object_intern *intern;

    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }
    intern = get_object(getThis());
    if (!intern) {
        /* get_object already threw */
        return;
    }

    /* topic-level error code straight from librdkafka */
    RETURN_LONG(intern->metadata_topic->err);
}
/* }}} */
/* {{{ proto RdKafka\Metadata\Collection RdKafka\Metadata\Topic::getPartitions()
   Partitions */
PHP_METHOD(RdKafka_Metadata_Topic, getPartitions)
{
    object_intern *intern;

    if (zend_parse_parameters_none() == FAILURE) {
        return;
    }
    intern = get_object(getThis());
    if (!intern) {
        /* get_object already threw */
        return;
    }

    /* wraps the raw partition array into a lazily-converting Collection */
    partitions_collection(return_value, Z_RDKAFKA_PROP_OBJ(getThis()), intern);
}
/* }}} */
/* MINIT: registers RdKafka\Metadata\Topic and installs the custom object
 * handlers (debug info, destructor, embedded-std offset). */
void kafka_metadata_topic_minit(INIT_FUNC_ARGS)
{
    ce = register_class_RdKafka_Metadata_Topic();
    ce->create_object = create_object;

    handlers = kafka_default_object_handlers;
    handlers.get_debug_info = get_debug_info;
    handlers.free_obj = free_object;
    handlers.offset = XtOffsetOf(object_intern, std);
}
/* Collection-ctor callback: wraps one raw rd_kafka_metadata_topic_t into a
 * Topic object.  The struct pointer is borrowed; zmetadata is copied
 * (with addref) to keep its owner alive. */
void kafka_metadata_topic_ctor(zval *return_value, zval *zmetadata, const void *data)
{
    rd_kafka_metadata_topic_t *metadata_topic = (rd_kafka_metadata_topic_t*)data;
    object_intern *intern;

    if (object_init_ex(return_value, ce) != SUCCESS) {
        return;
    }
    intern = Z_RDKAFKA_P(object_intern, return_value);
    if (!intern) {
        return;
    }

    /* copy + addref: keeps the metadata zval alive for this object */
    ZVAL_ZVAL(&intern->zmetadata, zmetadata, 1, 0);
    intern->metadata_topic = metadata_topic;
}
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
/* MINIT registration for RdKafka\Metadata\Topic. */
void kafka_metadata_topic_minit(INIT_FUNC_ARGS);
/* Collection-ctor: wraps one rd_kafka_metadata_topic_t into a Topic object. */
void kafka_metadata_topic_ctor(zval *return_value, zval *zmetadata, const void *metadata_topic);
<?php

/**
 * Stub for RdKafka\Metadata\Topic — the source the *arginfo.h headers are
 * generated from (see the @generate-* directives).
 *
 * @generate-class-entries
 * @generate-function-entries
 * @generate-legacy-arginfo
 */

namespace RdKafka\Metadata;

class Topic
{
    /** @tentative-return-type */
    public function getTopic(): string {}

    /** @tentative-return-type */
    public function getErr(): int {}

    /** @tentative-return-type */
    public function getPartitions(): \RdKafka\Metadata\Collection {}
}
/* PHP >= 8.0 arginfo variant for RdKafka\Metadata\Topic; only one of the
 * two generated variants is compiled (PHP_VERSION_ID include guard). */
/* This is a generated file, edit the .stub.php file instead.
 * Stub hash: 90ddb49b26a651772f9b71a1233435f2a502af6d */
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_Metadata_Topic_getTopic, 0, 0, IS_STRING, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_Metadata_Topic_getErr, 0, 0, IS_LONG, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_OBJ_INFO_EX(arginfo_class_RdKafka_Metadata_Topic_getPartitions, 0, 0, RdKafka\\Metadata\\Collection, 0)
ZEND_END_ARG_INFO()
ZEND_METHOD(RdKafka_Metadata_Topic, getTopic);
ZEND_METHOD(RdKafka_Metadata_Topic, getErr);
ZEND_METHOD(RdKafka_Metadata_Topic, getPartitions);
static const zend_function_entry class_RdKafka_Metadata_Topic_methods[] = {
    ZEND_ME(RdKafka_Metadata_Topic, getTopic, arginfo_class_RdKafka_Metadata_Topic_getTopic, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata_Topic, getErr, arginfo_class_RdKafka_Metadata_Topic_getErr, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_Metadata_Topic, getPartitions, arginfo_class_RdKafka_Metadata_Topic_getPartitions, ZEND_ACC_PUBLIC)
    ZEND_FE_END
};
/* Registers the RdKafka\Metadata\Topic internal class; called from MINIT. */
static zend_class_entry *register_class_RdKafka_Metadata_Topic(void)
{
    zend_class_entry ce, *class_entry;
    INIT_NS_CLASS_ENTRY(ce, "RdKafka\\Metadata", "Topic", class_RdKafka_Metadata_Topic_methods);
    class_entry = zend_register_internal_class_ex(&ce, NULL);
    return class_entry;
}
/* This is a generated file, edit the .stub.php file instead.
 * Stub hash: 90ddb49b26a651772f9b71a1233435f2a502af6d */
/* Legacy (PHP 7) variant: same method table as above but with untyped
 * arginfo, since tentative return types do not exist before PHP 8. */
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_Metadata_Topic_getTopic, 0, 0, 0)
ZEND_END_ARG_INFO()
#define arginfo_class_RdKafka_Metadata_Topic_getErr arginfo_class_RdKafka_Metadata_Topic_getTopic
#define arginfo_class_RdKafka_Metadata_Topic_getPartitions arginfo_class_RdKafka_Metadata_Topic_getTopic
ZEND_METHOD(RdKafka_Metadata_Topic, getTopic);
ZEND_METHOD(RdKafka_Metadata_Topic, getErr);
ZEND_METHOD(RdKafka_Metadata_Topic, getPartitions);
static const zend_function_entry class_RdKafka_Metadata_Topic_methods[] = {
ZEND_ME(RdKafka_Metadata_Topic, getTopic, arginfo_class_RdKafka_Metadata_Topic_getTopic, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_Metadata_Topic, getErr, arginfo_class_RdKafka_Metadata_Topic_getErr, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_Metadata_Topic, getPartitions, arginfo_class_RdKafka_Metadata_Topic_getPartitions, ZEND_ACC_PUBLIC)
ZEND_FE_END
};
static zend_class_entry *register_class_RdKafka_Metadata_Topic(void)
{
zend_class_entry ce, *class_entry;
INIT_NS_CLASS_ENTRY(ce, "RdKafka\\Metadata", "Topic", class_RdKafka_Metadata_Topic_methods);
class_entry = zend_register_internal_class_ex(&ce, NULL);
return class_entry;
}
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
/* $Id$ */
#ifndef PHP_RDKAFKA_H
#define PHP_RDKAFKA_H
#include "librdkafka/rdkafka.h"
#include "conf.h"
/* Compatibility shim for very old PHP that lacks PHP_FE_END. */
#ifndef PHP_FE_END
#define PHP_FE_END { NULL, NULL, NULL, 0, 0 }
#endif
/* Internal state behind every RdKafka\Consumer / RdKafka\Producer instance. */
typedef struct _kafka_object {
/* RD_KAFKA_CONSUMER or RD_KAFKA_PRODUCER */
rd_kafka_type_t type;
/* librdkafka handle; NULL until the constructor ran (see kafka_init) */
rd_kafka_t *rk;
/* PHP callbacks copied from the Conf object */
kafka_conf_callbacks cbs;
/* toppar entries keyed "topic:partition" while consume_start is active */
HashTable consuming;
/* child Topic objects, keyed by object pointer value */
HashTable topics;
/* child Queue objects, keyed by object pointer value */
HashTable queues;
/* must be last: php_kafka_from_obj() recovers the struct via XtOffsetOf */
zend_object std;
} kafka_object;
PHP_METHOD(RdKafka, __construct);
extern zend_module_entry rdkafka_module_entry;
#define phpext_rdkafka_ptr &rdkafka_module_entry
#define PHP_RDKAFKA_VERSION "6.0.3"
extern zend_object_handlers kafka_default_object_handlers;
extern zend_class_entry * ce_kafka_exception;
/* Symbol export attribute, per-platform. */
#ifdef PHP_WIN32
# define PHP_RDKAFKA_API __declspec(dllexport)
#elif defined(__GNUC__) && __GNUC__ >= 4
# define PHP_RDKAFKA_API __attribute__ ((visibility("default")))
#else
# define PHP_RDKAFKA_API
#endif
#endif /* PHP_RDKAFKA_H */
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
#ifndef PHP_RDKAFKA_PRIV_H
#define PHP_RDKAFKA_PRIV_H
/* Back-fill the *_OR_NULL fast-ZPP macros for PHP versions that predate them;
 * each expands to the _EX form with check_null=1. */
#ifndef Z_PARAM_STRING_OR_NULL
#define Z_PARAM_STRING_OR_NULL(dest, dest_len) \
Z_PARAM_STRING_EX(dest, dest_len, 1, 0)
#endif
#ifndef Z_PARAM_STR_OR_NULL
#define Z_PARAM_STR_OR_NULL(dest) \
Z_PARAM_STR_EX(dest, 1, 0)
#endif
#ifndef Z_PARAM_ARRAY_HT_OR_NULL
#define Z_PARAM_ARRAY_HT_OR_NULL(dest) \
Z_PARAM_ARRAY_HT_EX(dest, 1, 0)
#endif
#ifndef Z_PARAM_LONG_OR_NULL
#define Z_PARAM_LONG_OR_NULL(dest, is_null) \
Z_PARAM_LONG_EX(dest, is_null, 1, 0)
#endif
/* On PHP < 8.1 tentative-return-type arginfo macros don't exist; degrade to
 * plain arginfo (the type/allow_null arguments are dropped). */
#ifndef ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_OBJ_INFO_EX
#define ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_OBJ_INFO_EX(name, return_reference, required_num_args, type, allow_null) \
ZEND_BEGIN_ARG_INFO_EX(name, 0, return_reference, required_num_args)
#endif
#ifndef ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX
#define ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(name, return_reference, required_num_args, type, allow_null) \
ZEND_BEGIN_ARG_INFO_EX(name, 0, return_reference, required_num_args)
#endif
/* PHP 7/8 portability: property-access helpers take zend_object* on PHP 8
 * but zval* on PHP 7; Z_RDKAFKA_* papers over the difference. */
#if PHP_MAJOR_VERSION >= 8
#define Z_RDKAFKA_OBJ zend_object
#define Z_RDKAFKA_OBJ_COPY(z, o) ZVAL_OBJ_COPY(z, o)
#define Z_RDKAFKA_PROP_OBJ(object) Z_OBJ_P(object)
#define rdkafka_get_debug_object(type, object) php_kafka_from_obj(type, object)
#else // PHP 7
#define Z_RDKAFKA_OBJ zval
#define Z_RDKAFKA_OBJ_COPY(z, o) ZVAL_ZVAL(z, o, 1, 0)
#define Z_RDKAFKA_PROP_OBJ(object) object
#define rdkafka_get_debug_object(type, object) get_object(object)
#if PHP_MINOR_VERSION < 3
/* Allocates object type and zeros it, but not the properties.
 * Properties MUST be initialized using object_properties_init().
 * (Backport of the helper added to zend in PHP 7.3.) */
static zend_always_inline void *zend_object_alloc(size_t obj_size, zend_class_entry *ce) {
void *obj = emalloc(obj_size + zend_object_properties_size(ce));
/* Subtraction of sizeof(zval) is necessary, because zend_object_properties_size() may be
 * -sizeof(zval), if the object has no properties. */
memset(obj, 0, obj_size - sizeof(zval));
return obj;
}
#endif
#endif // PHP 7
/* Recover the wrapping C struct from a zval / zend_object; relies on the
 * `std` member being last in each *_object struct. */
#define Z_RDKAFKA_P(php_kafka_type, zobject) php_kafka_from_obj(php_kafka_type, Z_OBJ_P(zobject))
#define php_kafka_from_obj(php_kafka_type, object) \
((php_kafka_type*)((char *)(object) - XtOffsetOf(php_kafka_type, std)))
/* Invokes a user callback through the Zend FCI machinery.
 * If the caller passes retval == NULL, a scratch zval receives the callback's
 * return value and is released here; otherwise the caller owns the result. */
static inline void rdkafka_call_function(zend_fcall_info *fci, zend_fcall_info_cache *fci_cache, zval *retval, uint32_t param_count, zval params[])
{
    zval discarded;
    int owns_retval = (retval == NULL);

    if (owns_retval) {
        retval = &discarded;
    }

    fci->retval = retval;
    fci->param_count = param_count;
    fci->params = params;

    zend_call_function(fci, fci_cache);

    if (owns_retval) {
        /* Caller did not ask for the result — drop our scratch copy. */
        zval_ptr_dtor(&discarded);
    }
}
/* Thin wrapper over zend_read_property() that supplies the scratch zval the
 * engine API requires; returns the property slot (or the engine's fallback). */
static inline zval *rdkafka_read_property(zend_class_entry *scope, Z_RDKAFKA_OBJ *object, const char *name, size_t name_length, zend_bool silent)
{
zval rv;
return zend_read_property(scope, object, name, name_length, silent, &rv);
}
/* Returns the current string key at iterator position `pos`, or NULL when the
 * current entry has an integer key (or iteration is exhausted). The returned
 * pointer aliases the hash's own zend_string — do not free it. */
static inline char *rdkafka_hash_get_current_key_ex(HashTable *ht, HashPosition *pos)
{
    zend_string *str_key;
    zend_ulong num_key;

    if (zend_hash_get_current_key_ex(ht, &str_key, &num_key, pos) != HASH_KEY_IS_STRING) {
        return NULL;
    }
    return ZSTR_VAL(str_key);
}
/* Fetches the kafka_object behind an RdKafka instance; throws and returns
 * NULL if the constructor never ran (rk == NULL). Defined in rdkafka.c. */
kafka_object * get_kafka_object(zval *zrk);
/* Bookkeeping for partitions with an active legacy consume loop; entries are
 * keyed "topic:partition" in intern->consuming (see rdkafka.c). */
void add_consuming_toppar(kafka_object * intern, rd_kafka_topic_t * rkt, int32_t partition);
void del_consuming_toppar(kafka_object * intern, rd_kafka_topic_t * rkt, int32_t partition);
int is_consuming_toppar(kafka_object * intern, rd_kafka_topic_t * rkt, int32_t partition);
#endif /* PHP_RDKAFKA_PRIV_H */
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "php.h"
#include "php_rdkafka.h"
#include "php_rdkafka_priv.h"
#include "librdkafka/rdkafka.h"
#include "ext/spl/spl_iterators.h"
#include "Zend/zend_interfaces.h"
#include "Zend/zend_exceptions.h"
#include "topic.h"
#include "queue.h"
#include "message.h"
#if PHP_VERSION_ID < 80000
#include "queue_legacy_arginfo.h"
#else
#include "queue_arginfo.h"
#endif
/* Class entry for RdKafka\Queue; shared with rdkafka.c through queue.h. */
zend_class_entry * ce_kafka_queue;
/* Object handlers for Queue instances; free_obj/offset set in kafka_queue_minit. */
static zend_object_handlers handlers;
/* free_obj handler for RdKafka\Queue.
 * If the queue is still attached, remove it from the owning kafka_object's
 * `queues` table; the table's dtor (kafka_queue_object_pre_free in rdkafka.c)
 * then destroys the rd_kafka_queue_t and releases the zrk back-reference. */
static void kafka_queue_free(zend_object *object) /* {{{ */
{
kafka_queue_object *intern = php_kafka_from_obj(kafka_queue_object, object);
if (intern->rkqu) {
kafka_object *kafka_intern = get_kafka_object(&intern->zrk);
if (kafka_intern) {
/* keyed by the object's pointer value — see newQueue */
zend_hash_index_del(&kafka_intern->queues, (zend_ulong)intern);
}
}
zend_object_std_dtor(&intern->std);
}
/* }}} */
/* create_object handler for RdKafka\Queue: allocates the zeroed object and
 * installs the custom handlers; rkqu stays NULL until newQueue() attaches one. */
static zend_object *kafka_queue_new(zend_class_entry *class_type) /* {{{ */
{
zend_object* retval;
kafka_queue_object *intern;
intern = zend_object_alloc(sizeof(*intern), class_type);
zend_object_std_init(&intern->std, class_type);
object_properties_init(&intern->std, class_type);
retval = &intern->std;
retval->handlers = &handlers;
return retval;
}
/* }}} */
/* Resolves the kafka_queue_object behind a RdKafka\Queue zval.
 * Throws and returns NULL when the queue was never attached to a consumer. */
kafka_queue_object * get_kafka_queue_object(zval *zrkqu)
{
    kafka_queue_object *queue = Z_RDKAFKA_P(kafka_queue_object, zrkqu);

    if (queue->rkqu) {
        return queue;
    }

    zend_throw_exception_ex(NULL, 0, "RdKafka\\Queue::__construct() has not been called");
    return NULL;
}
/* {{{ proto RdKafka\Message RdKafka\Queue::consume(int timeout_ms)
Consume a single message */
/* Pops one message from the queue, waiting up to timeout_ms.
 * Returns null on timeout, throws RdKafka\Exception on other errors;
 * the librdkafka message is copied into a PHP object and destroyed here. */
PHP_METHOD(RdKafka_Queue, consume)
{
kafka_queue_object *intern;
zend_long timeout_ms;
rd_kafka_message_t *message;
rd_kafka_resp_err_t err;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "l", &timeout_ms) == FAILURE) {
return;
}
intern = get_kafka_queue_object(getThis());
if (!intern) {
return;
}
message = rd_kafka_consume_queue(intern->rkqu, timeout_ms);
if (!message) {
err = rd_kafka_last_error();
/* timeout is not an error: return null so callers can poll again */
if (err == RD_KAFKA_RESP_ERR__TIMED_OUT) {
return;
}
zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
return;
}
kafka_message_new(return_value, message, NULL);
rd_kafka_message_destroy(message);
}
/* MINIT: wires the custom free handler / struct offset into the handlers,
 * registers the RdKafka\Queue class and attaches the create_object hook. */
void kafka_queue_minit(INIT_FUNC_ARGS) { /* {{{ */
handlers = kafka_default_object_handlers;
handlers.free_obj = kafka_queue_free;
handlers.offset = XtOffsetOf(kafka_queue_object, std);
ce_kafka_queue = register_class_RdKafka_Queue();
ce_kafka_queue->create_object = kafka_queue_new;
} /* }}} */
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
/* Internal state behind an RdKafka\Queue instance. */
typedef struct _kafka_queue_object {
/* librdkafka queue handle; NULL until attached via newQueue() */
rd_kafka_queue_t *rkqu;
/* back-reference to the owning RdKafka object (addref'd in newQueue) */
zval zrk;
/* must be last: recovered via php_kafka_from_obj/XtOffsetOf */
zend_object std;
} kafka_queue_object;
void kafka_queue_minit(INIT_FUNC_ARGS);
kafka_queue_object * get_kafka_queue_object(zval *zrkqu);
extern zend_class_entry * ce_kafka_queue;
<?php
// Stub declaration for RdKafka\Queue; the *_arginfo.h headers are generated
// from this file. The constructor is aliased to RdKafka::__construct, which
// always throws ("Private constructor") — queues come from Consumer::newQueue().
/**
* @generate-class-entries
* @generate-function-entries
* @generate-legacy-arginfo
*/
namespace RdKafka;
class Queue
{
/** @implementation-alias RdKafka::__construct */
private function __construct() {}
/** @tentative-return-type */
public function consume(int $timeout_ms): ?Message {}
}
/* This is a generated file, edit the .stub.php file instead.
 * Stub hash: 9e80d48bb60ede4003fffcfe0da09ac0e5c2f4d1 */
/* PHP >= 8 arginfo, method table and registration for RdKafka\Queue.
 * Do not hand-edit — regenerate from the stub. */
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_Queue___construct, 0, 0, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_OBJ_INFO_EX(arginfo_class_RdKafka_Queue_consume, 0, 1, RdKafka\\Message, 1)
ZEND_ARG_TYPE_INFO(0, timeout_ms, IS_LONG, 0)
ZEND_END_ARG_INFO()
ZEND_METHOD(RdKafka, __construct);
ZEND_METHOD(RdKafka_Queue, consume);
static const zend_function_entry class_RdKafka_Queue_methods[] = {
ZEND_MALIAS(RdKafka, __construct, __construct, arginfo_class_RdKafka_Queue___construct, ZEND_ACC_PRIVATE)
ZEND_ME(RdKafka_Queue, consume, arginfo_class_RdKafka_Queue_consume, ZEND_ACC_PUBLIC)
ZEND_FE_END
};
static zend_class_entry *register_class_RdKafka_Queue(void)
{
zend_class_entry ce, *class_entry;
INIT_NS_CLASS_ENTRY(ce, "RdKafka", "Queue", class_RdKafka_Queue_methods);
class_entry = zend_register_internal_class_ex(&ce, NULL);
return class_entry;
}
/* This is a generated file, edit the .stub.php file instead.
 * Stub hash: 9e80d48bb60ede4003fffcfe0da09ac0e5c2f4d1 */
/* Legacy (PHP 7) variant: identical method table with untyped arginfo. */
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_Queue___construct, 0, 0, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_Queue_consume, 0, 0, 1)
ZEND_ARG_INFO(0, timeout_ms)
ZEND_END_ARG_INFO()
ZEND_METHOD(RdKafka, __construct);
ZEND_METHOD(RdKafka_Queue, consume);
static const zend_function_entry class_RdKafka_Queue_methods[] = {
ZEND_MALIAS(RdKafka, __construct, __construct, arginfo_class_RdKafka_Queue___construct, ZEND_ACC_PRIVATE)
ZEND_ME(RdKafka_Queue, consume, arginfo_class_RdKafka_Queue_consume, ZEND_ACC_PUBLIC)
ZEND_FE_END
};
static zend_class_entry *register_class_RdKafka_Queue(void)
{
zend_class_entry ce, *class_entry;
INIT_NS_CLASS_ENTRY(ce, "RdKafka", "Queue", class_RdKafka_Queue_methods);
class_entry = zend_register_internal_class_ex(&ce, NULL);
return class_entry;
}
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
/* $Id$ */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef HAS_RD_KAFKA_TRANSACTIONS
#include "kafka_error_exception.h"
#endif
#include "php.h"
#include "php_ini.h"
#include "ext/standard/info.h"
#include "php_rdkafka.h"
#include "php_rdkafka_priv.h"
#include "librdkafka/rdkafka.h"
#include "Zend/zend_exceptions.h"
#include "ext/spl/spl_exceptions.h"
#include "metadata.h"
#include "conf.h"
#include "topic.h"
#include "queue.h"
#include "message.h"
#include "kafka_consumer.h"
#include "topic_partition.h"
#if PHP_VERSION_ID < 80000
#include "rdkafka_legacy_arginfo.h"
#include "fun_legacy_arginfo.h"
#else
#include "rdkafka_arginfo.h"
#include "fun_arginfo.h"
#endif
#if RD_KAFKA_VERSION < 0x000b0000
# error librdkafka version 0.11.0 or greater required
#endif
/* Logger selector constants accepted by RdKafka::setLogger(). */
enum {
RD_KAFKA_LOG_PRINT = 100
, RD_KAFKA_LOG_SYSLOG = 101
, RD_KAFKA_LOG_SYSLOG_PRINT = 102
};
/* A (topic, partition) pair tracked while a legacy consume loop is active;
 * stored in kafka_object.consuming keyed "topic:partition". */
typedef struct _toppar {
rd_kafka_topic_t *rkt;
int32_t partition;
} toppar;
static zend_object_handlers kafka_object_handlers;
zend_object_handlers kafka_default_object_handlers;
/* Class entries; ce_kafka_exception is shared with other translation units. */
static zend_class_entry * ce_kafka;
static zend_class_entry * ce_kafka_consumer;
zend_class_entry * ce_kafka_exception;
static zend_class_entry * ce_kafka_producer;
/* zend_hash_apply callback: stops the consume loop for one tracked toppar. */
static void stop_consuming_toppar_pp(toppar ** tp) {
rd_kafka_consume_stop((*tp)->rkt, (*tp)->partition);
}
/* Stops every partition still consuming; called from kafka_free before the
 * consuming table is destroyed. */
static void stop_consuming(kafka_object * intern) {
zend_hash_apply(&intern->consuming, (apply_func_t)stop_consuming_toppar_pp);
}
/* free_obj handler for Consumer/Producer objects. Teardown order matters:
 * stop consume loops, destroy child queue/topic tables (their dtors destroy
 * the librdkafka handles), then rd_kafka_destroy() the client itself. */
static void kafka_free(zend_object *object) /* {{{ */
{
kafka_object *intern = php_kafka_from_obj(kafka_object, object);
kafka_conf_callbacks_dtor(&intern->cbs);
if (intern->rk) {
if (intern->type == RD_KAFKA_CONSUMER) {
stop_consuming(intern);
zend_hash_destroy(&intern->consuming);
zend_hash_destroy(&intern->queues);
} else if (intern->type == RD_KAFKA_PRODUCER) {
#ifdef HAS_RD_KAFKA_PURGE
// Force internal delivery callbacks for queued messages, as we rely
// on these to free msg_opaques
rd_kafka_purge(intern->rk, RD_KAFKA_PURGE_F_QUEUE | RD_KAFKA_PURGE_F_INFLIGHT);
rd_kafka_flush(intern->rk, 0);
#endif
}
zend_hash_destroy(&intern->topics);
rd_kafka_destroy(intern->rk);
intern->rk = NULL;
}
zend_object_std_dtor(&intern->std);
}
/* }}} */
/* Hash dtor for kafka_object.consuming entries: frees the toppar record. */
static void toppar_pp_dtor(toppar ** tp) {
efree(*tp);
}
/* Hash dtor for kafka_object.queues: tears down the librdkafka queue and
 * drops the queue's back-reference to the parent RdKafka object. The PHP
 * object itself is freed later by the engine via kafka_queue_free. */
static void kafka_queue_object_pre_free(kafka_queue_object ** pp) {
kafka_queue_object *intern = *pp;
rd_kafka_queue_destroy(intern->rkqu);
intern->rkqu = NULL;
zval_ptr_dtor(&intern->zrk);
}
/* Hash dtor for kafka_object.topics: same pattern for topic handles. */
static void kafka_topic_object_pre_free(kafka_topic_object ** pp) {
kafka_topic_object *intern = *pp;
rd_kafka_topic_destroy(intern->rkt);
intern->rkt = NULL;
zval_ptr_dtor(&intern->zrk);
}
/* Shared constructor body for Consumer/Producer.
 * Duplicates the user's Conf (rd_kafka_new takes ownership of the duplicate),
 * copies PHP callbacks onto the object, creates the librdkafka handle, and
 * initializes the child-object bookkeeping tables. Throws on rd_kafka_new
 * failure, leaving intern->rk NULL (later calls then throw "not constructed"). */
static void kafka_init(zval *this_ptr, rd_kafka_type_t type, zval *zconf) /* {{{ */
{
char errstr[512];
rd_kafka_t *rk;
kafka_object *intern;
kafka_conf_object *conf_intern;
rd_kafka_conf_t *conf = NULL;
intern = Z_RDKAFKA_P(kafka_object, this_ptr);
intern->type = type;
if (zconf) {
conf_intern = get_kafka_conf_object(zconf);
if (conf_intern) {
/* duplicate: the original Conf object stays usable by the caller */
conf = rd_kafka_conf_dup(conf_intern->u.conf);
kafka_conf_callbacks_copy(&intern->cbs, &conf_intern->cbs);
}
}
if (conf == NULL) {
conf = rd_kafka_conf_new();
}
/* callbacks receive &intern->cbs as their opaque pointer */
intern->cbs.zrk = *this_ptr;
rd_kafka_conf_set_opaque(conf, &intern->cbs);
if (type == RD_KAFKA_PRODUCER) {
rd_kafka_conf_set_dr_msg_cb(conf, kafka_conf_dr_msg_cb);
}
rk = rd_kafka_new(type, conf, errstr, sizeof(errstr));
if (rk == NULL) {
zend_throw_exception(ce_kafka_exception, errstr, 0);
return;
}
if (intern->cbs.log) {
/* route log callbacks through the main queue so they fire during poll() */
rd_kafka_set_log_queue(rk, NULL);
}
intern->rk = rk;
if (type == RD_KAFKA_CONSUMER) {
zend_hash_init(&intern->consuming, 0, NULL, (dtor_func_t)toppar_pp_dtor, 0);
zend_hash_init(&intern->queues, 0, NULL, (dtor_func_t)kafka_queue_object_pre_free, 0);
}
zend_hash_init(&intern->topics, 0, NULL, (dtor_func_t)kafka_topic_object_pre_free, 0);
}
/* }}} */
/* create_object handler for RdKafka subclasses: zeroed allocation plus the
 * custom handlers (free_obj = kafka_free). rk stays NULL until kafka_init. */
static zend_object *kafka_new(zend_class_entry *class_type) /* {{{ */
{
zend_object* retval;
kafka_object *intern;
intern = zend_object_alloc(sizeof(*intern), class_type);
zend_object_std_init(&intern->std, class_type);
object_properties_init(&intern->std, class_type);
retval = &intern->std;
retval->handlers = &kafka_object_handlers;
return retval;
}
/* }}} */
/* Resolves the kafka_object behind an RdKafka zval.
 * Throws and returns NULL if the constructor never ran (rk == NULL). */
kafka_object * get_kafka_object(zval *zrk)
{
    kafka_object *handle = Z_RDKAFKA_P(kafka_object, zrk);

    if (handle->rk) {
        return handle;
    }

    zend_throw_exception_ex(NULL, 0, "RdKafka\\Kafka::__construct() has not been called");
    return NULL;
}
/* Combined logger for RD_KAFKA_LOG_SYSLOG_PRINT: writes to stderr via
 * rd_kafka_log_print and, where syslog exists (non-MSVC), to syslog too. */
static void kafka_log_syslog_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf) {
rd_kafka_log_print(rk, level, fac, buf);
#ifndef _MSC_VER
rd_kafka_log_syslog(rk, level, fac, buf);
#endif
}
/* Records that consumption started on (topic, partition) so kafka_free can
 * call rd_kafka_consume_stop before tearing the client down.
 * The table key is the string "topic:partition" (including its NUL byte). */
void add_consuming_toppar(kafka_object * intern, rd_kafka_topic_t * rkt, int32_t partition) {
    char *key = NULL;
    int key_len;
    toppar *entry;

    entry = emalloc(sizeof(*entry));
    entry->rkt = rkt;
    entry->partition = partition;

    key_len = spprintf(&key, 0, "%s:%d", rd_kafka_topic_name(rkt), partition);
    zend_hash_str_add_ptr(&intern->consuming, key, key_len + 1, entry);
    efree(key);
}
/* Forgets a (topic, partition) pair previously registered with
 * add_consuming_toppar; the table dtor frees the toppar record. */
void del_consuming_toppar(kafka_object * intern, rd_kafka_topic_t * rkt, int32_t partition) {
    char *key = NULL;
    int key_len;

    key_len = spprintf(&key, 0, "%s:%d", rd_kafka_topic_name(rkt), partition);
    zend_hash_str_del(&intern->consuming, key, key_len + 1);
    efree(key);
}
/* Returns nonzero when (topic, partition) has an active consume loop
 * registered via add_consuming_toppar. */
int is_consuming_toppar(kafka_object * intern, rd_kafka_topic_t * rkt, int32_t partition) {
    char *key = NULL;
    int key_len;
    int found;

    key_len = spprintf(&key, 0, "%s:%d", rd_kafka_topic_name(rkt), partition);
    found = zend_hash_str_exists(&intern->consuming, key, key_len + 1);
    efree(key);

    return found;
}
/* {{{ private constructor */
/* The abstract base cannot be constructed directly; instances come from the
 * Consumer/Producer subclasses (this body is also aliased by Queue's stub). */
PHP_METHOD(RdKafka, __construct)
{
zend_throw_exception(NULL, "Private constructor", 0);
return;
}
/* {{{ proto RdKafka\Consumer::__construct([RdKafka\Conf $conf]) */
/* Consumer constructor: optional Conf argument; parameter errors are raised
 * as InvalidArgumentException via the temporary EH_THROW error handler. */
PHP_METHOD(RdKafka_Consumer, __construct)
{
zval *zconf = NULL;
zend_error_handling error_handling;
zend_replace_error_handling(EH_THROW, spl_ce_InvalidArgumentException, &error_handling);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "|O!", &zconf, ce_kafka_conf) == FAILURE) {
zend_restore_error_handling(&error_handling);
return;
}
kafka_init(getThis(), RD_KAFKA_CONSUMER, zconf);
zend_restore_error_handling(&error_handling);
}
/* {{{ proto RdKafka\Queue RdKafka\Consumer::newQueue()
Returns a RdKafka\Queue object */
/* Creates a librdkafka queue, wraps it in an RdKafka\Queue object, and
 * registers the wrapper in intern->queues so teardown can find it. */
PHP_METHOD(RdKafka_Consumer, newQueue)
{
rd_kafka_queue_t *rkqu;
kafka_object *intern;
kafka_queue_object *queue_intern;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "") == FAILURE) {
return;
}
intern = get_kafka_object(getThis());
if (!intern) {
return;
}
rkqu = rd_kafka_queue_new(intern->rk);
if (!rkqu) {
return;
}
if (object_init_ex(return_value, ce_kafka_queue) != SUCCESS) {
return;
}
queue_intern = Z_RDKAFKA_P(kafka_queue_object, return_value);
if (!queue_intern) {
return;
}
queue_intern->rkqu = rkqu;
// Keep a reference to the parent Kafka object, attempts to ensure that
// the Queue object is destroyed before the Kafka object.
// This avoids rd_kafka_destroy() hanging.
queue_intern->zrk = *getThis();
Z_ADDREF_P(&queue_intern->zrk);
zend_hash_index_add_ptr(&intern->queues, (zend_ulong)queue_intern, queue_intern);
}
/* {{{ proto int RdKafka::addBrokers(string $brokerList)
Returns the number of brokers successfully added */
/* Adds a comma-separated broker list to the client; returns the number of
 * brokers librdkafka successfully added. */
PHP_METHOD(RdKafka, addBrokers)
{
char *broker_list;
size_t broker_list_len;
kafka_object *intern;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "s", &broker_list, &broker_list_len) == FAILURE) {
return;
}
intern = get_kafka_object(getThis());
if (!intern) {
return;
}
RETURN_LONG(rd_kafka_brokers_add(intern->rk, broker_list));
}
/* {{{ proto RdKafka\Metadata RdKafka::getMetadata(bool $all_topics, RdKafka\Topic $only_topic, int $timeout_ms)
Request metadata from the broker */
/* Fetches cluster metadata (all topics, or only $only_topic when given) and
 * wraps it in an RdKafka\Metadata object. Throws RdKafka\Exception on error.
 * NOTE(review): kafka_metadata_init presumably takes ownership of `metadata`
 * — confirm it calls rd_kafka_metadata_destroy (see metadata.c). */
PHP_METHOD(RdKafka, getMetadata)
{
zend_bool all_topics;
zval *only_zrkt;
zend_long timeout_ms;
rd_kafka_resp_err_t err;
kafka_object *intern;
const rd_kafka_metadata_t *metadata;
kafka_topic_object *only_orkt = NULL;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "bO!l", &all_topics, &only_zrkt, ce_kafka_topic, &timeout_ms) == FAILURE) {
return;
}
intern = get_kafka_object(getThis());
if (!intern) {
return;
}
if (only_zrkt) {
only_orkt = get_kafka_topic_object(only_zrkt);
if (!only_orkt) {
return;
}
}
err = rd_kafka_metadata(intern->rk, all_topics, only_orkt ? only_orkt->rkt : NULL, &metadata, timeout_ms);
if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
return;
}
kafka_metadata_init(return_value, metadata);
}
/* }}} */
/* {{{ proto void RdKafka::setLogLevel(int $level)
Specifies the maximum logging level produced by internal kafka logging and debugging */
/* Sets the maximum syslog-style level librdkafka will emit for this client. */
PHP_METHOD(RdKafka, setLogLevel)
{
kafka_object *intern;
zend_long level;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "l", &level) == FAILURE) {
return;
}
intern = get_kafka_object(getThis());
if (!intern) {
return;
}
rd_kafka_set_log_level(intern->rk, level);
}
/* {{{ proto RdKafka\Topic RdKafka::newTopic(string $topic)
Returns an RdKafka\Topic object */
/* Creates (or reuses, inside librdkafka) a topic handle and wraps it in a
 * ConsumerTopic or ProducerTopic object matching this client's type.
 * NOTE(review): if object_init_ex or the switch default path bails out after
 * rd_kafka_topic_new succeeded, `rkt` is never destroyed — possible leak on
 * these (rare) failure paths. */
PHP_METHOD(RdKafka, newTopic)
{
char *topic;
size_t topic_len;
rd_kafka_topic_t *rkt;
kafka_object *intern;
kafka_topic_object *topic_intern;
zend_class_entry *topic_type;
zval *zconf = NULL;
rd_kafka_topic_conf_t *conf = NULL;
kafka_conf_object *conf_intern;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "s|O!", &topic, &topic_len, &zconf, ce_kafka_topic_conf) == FAILURE) {
return;
}
intern = get_kafka_object(getThis());
if (!intern) {
return;
}
if (zconf) {
conf_intern = get_kafka_conf_object(zconf);
if (conf_intern) {
/* duplicate: rd_kafka_topic_new takes ownership of the copy */
conf = rd_kafka_topic_conf_dup(conf_intern->u.topic_conf);
}
}
rkt = rd_kafka_topic_new(intern->rk, topic, conf);
if (!rkt) {
return;
}
switch (intern->type) {
case RD_KAFKA_CONSUMER:
topic_type = ce_kafka_consumer_topic;
break;
case RD_KAFKA_PRODUCER:
topic_type = ce_kafka_producer_topic;
break;
default:
return;
}
if (object_init_ex(return_value, topic_type) != SUCCESS) {
return;
}
topic_intern = Z_RDKAFKA_P(kafka_topic_object, return_value);
if (!topic_intern) {
return;
}
topic_intern->rkt = rkt;
/* back-reference keeps the client alive while the topic object exists */
topic_intern->zrk = *getThis();
Z_ADDREF_P(&topic_intern->zrk);
zend_hash_index_add_ptr(&intern->topics, (zend_ulong)topic_intern, topic_intern);
}
/* {{{ proto int RdKafka::getOutQLen()
Returns the current out queue length */
/* Returns the number of in-flight/queued operations (rd_kafka_outq_len). */
PHP_METHOD(RdKafka, getOutQLen)
{
kafka_object *intern;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "") == FAILURE) {
return;
}
intern = get_kafka_object(getThis());
if (!intern) {
return;
}
RETURN_LONG(rd_kafka_outq_len(intern->rk));
}
/* {{{ proto int RdKafka::poll(int $timeout_ms)
Polls the provided kafka handle for events */
/* Serves queued callbacks (delivery reports, logs, …) for up to timeout ms;
 * returns the number of events served. */
PHP_METHOD(RdKafka, poll)
{
kafka_object *intern;
zend_long timeout;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "l", &timeout) == FAILURE) {
return;
}
intern = get_kafka_object(getThis());
if (!intern) {
return;
}
RETURN_LONG(rd_kafka_poll(intern->rk, timeout));
}
/* {{{ proto int RdKafka::flush(int $timeout_ms)
Wait until all outstanding produce requests, et.al, are completed. */
/* Waits up to timeout ms for outstanding produce requests to complete;
 * returns the librdkafka error code as an int. */
PHP_METHOD(RdKafka, flush)
{
kafka_object *intern;
zend_long timeout;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "l", &timeout) == FAILURE) {
return;
}
intern = get_kafka_object(getThis());
if (!intern) {
return;
}
RETURN_LONG(rd_kafka_flush(intern->rk, timeout));
}
#ifdef HAS_RD_KAFKA_PURGE
/* {{{ proto int RdKafka::purge(int $purge_flags)
Purge messages that are in queue or in flight */
/* Purges queued and/or in-flight messages per $purge_flags; returns the
 * librdkafka error code as an int. Compiled only with HAS_RD_KAFKA_PURGE. */
PHP_METHOD(RdKafka, purge)
{
kafka_object *intern;
zend_long purge_flags;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "l", &purge_flags) == FAILURE) {
return;
}
intern = get_kafka_object(getThis());
if (!intern) {
return;
}
RETURN_LONG(rd_kafka_purge(intern->rk, purge_flags));
}
/* }}} */
#endif
/* {{{ proto void RdKafka::queryWatermarkOffsets(string $topic, int $partition, int &$low, int &$high, int $timeout_ms)
Query broker for low (oldest/beginning) or high (newest/end) offsets for partition */
/* Queries the broker for a partition's low/high watermark offsets and stores
 * them into the by-reference $low/$high arguments.
 * Throws RdKafka\Exception on any librdkafka error.
 *
 * FIX: `low`/`high` were declared `long`, but rd_kafka_query_watermark_offsets()
 * writes through int64_t* — on LLP64 platforms (e.g. Windows) `long` is only
 * 32 bits, so the call corrupted the stack. Declare them as int64_t. */
PHP_METHOD(RdKafka, queryWatermarkOffsets)
{
    kafka_object *intern;
    char *topic;
    size_t topic_length;
    int64_t low, high;
    zend_long partition, timeout;
    zval *lowResult, *highResult;
    rd_kafka_resp_err_t err;

    if (zend_parse_parameters(ZEND_NUM_ARGS(), "slzzl", &topic, &topic_length, &partition, &lowResult, &highResult, &timeout) == FAILURE) {
        return;
    }

    /* $low/$high are out-parameters passed by reference; unwrap them. */
    ZVAL_DEREF(lowResult);
    ZVAL_DEREF(highResult);

    intern = get_kafka_object(getThis());
    if (!intern) {
        return;
    }

    err = rd_kafka_query_watermark_offsets(intern->rk, topic, partition, &low, &high, timeout);
    if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
        zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
        return;
    }

    ZVAL_LONG(lowResult, low);
    ZVAL_LONG(highResult, high);
}
/* }}} */
/* {{{ proto void RdKafka::offsetsForTimes(array $topicPartitions, int $timeout_ms)
Look up the offsets for the given partitions by timestamp. */
/* Looks up, per partition, the earliest offset whose timestamp >= the given
 * timestamp; returns an array of TopicPartition results.
 * Throws RdKafka\Exception on error; the temporary C list is always freed. */
PHP_METHOD(RdKafka, offsetsForTimes)
{
HashTable *htopars = NULL;
kafka_object *intern;
rd_kafka_topic_partition_list_t *topicPartitions;
zend_long timeout_ms;
rd_kafka_resp_err_t err;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "hl", &htopars, &timeout_ms) == FAILURE) {
return;
}
intern = get_kafka_object(getThis());
if (!intern) {
return;
}
topicPartitions = array_arg_to_kafka_topic_partition_list(1, htopars);
if (!topicPartitions) {
return;
}
err = rd_kafka_offsets_for_times(intern->rk, topicPartitions, timeout_ms);
if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
rd_kafka_topic_partition_list_destroy(topicPartitions);
zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
return;
}
kafka_topic_partition_list_to_array(return_value, topicPartitions);
rd_kafka_topic_partition_list_destroy(topicPartitions);
}
/* }}} */
/* {{{ proto void RdKafka::setLogger(mixed $logger)
Sets the log callback */
/* Selects one of the built-in C loggers by id (RD_KAFKA_LOG_* constants).
 * Throws on an unknown id; RD_KAFKA_LOG_SYSLOG is unavailable under MSVC. */
PHP_METHOD(RdKafka, setLogger)
{
kafka_object *intern;
zend_long id;
void (*logger) (const rd_kafka_t * rk, int level, const char *fac, const char *buf);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "l", &id) == FAILURE) {
return;
}
intern = get_kafka_object(getThis());
if (!intern) {
return;
}
switch (id) {
case RD_KAFKA_LOG_PRINT:
logger = rd_kafka_log_print;
break;
#ifndef _MSC_VER
case RD_KAFKA_LOG_SYSLOG:
logger = rd_kafka_log_syslog;
break;
#endif
case RD_KAFKA_LOG_SYSLOG_PRINT:
logger = kafka_log_syslog_print;
break;
default:
zend_throw_exception_ex(NULL, 0, "Invalid logger");
return;
}
rd_kafka_set_logger(intern->rk, logger);
}
/* }}} */
/* {{{ proto RdKafka\TopicPartition[] RdKafka::pausePartitions(RdKafka\TopicPartition[] $topicPartitions)
Pause producing or consumption for the provided list of partitions. */
/* Pauses the given partitions; returns them as TopicPartition objects with
 * per-partition error codes. Throws RdKafka\Exception on a global error. */
PHP_METHOD(RdKafka, pausePartitions)
{
HashTable *htopars;
rd_kafka_topic_partition_list_t *topars;
rd_kafka_resp_err_t err;
kafka_object *intern;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "h", &htopars) == FAILURE) {
return;
}
intern = get_kafka_object(getThis());
if (!intern) {
return;
}
topars = array_arg_to_kafka_topic_partition_list(1, htopars);
if (!topars) {
return;
}
err = rd_kafka_pause_partitions(intern->rk, topars);
if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
rd_kafka_topic_partition_list_destroy(topars);
zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
return;
}
kafka_topic_partition_list_to_array(return_value, topars);
rd_kafka_topic_partition_list_destroy(topars);
}
/* }}} */
/* {{{ proto RdKafka\TopicPartition[] RdKafka::resumePartitions(RdKafka\TopicPartition[] $topicPartitions)
Resume producing or consumption for the provided list of partitions. */
/* Resumes the given partitions; returns them as TopicPartition objects with
 * per-partition error codes. Throws RdKafka\Exception on a global error.
 *
 * FIX: this method previously called rd_kafka_pause_partitions() (copy-paste
 * from pausePartitions), so resumePartitions() actually paused partitions.
 * It must call rd_kafka_resume_partitions(). */
PHP_METHOD(RdKafka, resumePartitions)
{
    HashTable *htopars;
    rd_kafka_topic_partition_list_t *topars;
    rd_kafka_resp_err_t err;
    kafka_object *intern;

    if (zend_parse_parameters(ZEND_NUM_ARGS(), "h", &htopars) == FAILURE) {
        return;
    }
    intern = get_kafka_object(getThis());
    if (!intern) {
        return;
    }
    topars = array_arg_to_kafka_topic_partition_list(1, htopars);
    if (!topars) {
        return;
    }
    err = rd_kafka_resume_partitions(intern->rk, topars);
    if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
        rd_kafka_topic_partition_list_destroy(topars);
        zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
        return;
    }
    kafka_topic_partition_list_to_array(return_value, topars);
    rd_kafka_topic_partition_list_destroy(topars);
}
/* }}} */
/* {{{ proto RdKafka\Producer::__construct([RdKafka\Conf $conf]) */
/* Producer constructor: optional Conf argument; parameter errors are raised
 * as InvalidArgumentException via the temporary EH_THROW error handler. */
PHP_METHOD(RdKafka_Producer, __construct)
{
zval *zconf = NULL;
zend_error_handling error_handling;
zend_replace_error_handling(EH_THROW, spl_ce_InvalidArgumentException, &error_handling);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "|O!", &zconf, ce_kafka_conf) == FAILURE) {
zend_restore_error_handling(&error_handling);
return;
}
kafka_init(getThis(), RD_KAFKA_PRODUCER, zconf);
zend_restore_error_handling(&error_handling);
}
/* }}} */
#ifdef HAS_RD_KAFKA_TRANSACTIONS
/* {{{ proto int RdKafka\Producer::initTransactions(int timeout_ms)
   Initializes transactions; must be called before producing and starting a transaction */
PHP_METHOD(RdKafka_Producer, initTransactions)
{
    kafka_object *obj;
    zend_long timeout;
    const rd_kafka_error_t *txn_error;

    /* Single required argument: timeout in milliseconds. */
    if (FAILURE == zend_parse_parameters(ZEND_NUM_ARGS(), "l", &timeout)) {
        return;
    }

    obj = get_kafka_object(getThis());
    if (obj == NULL) {
        return;
    }

    txn_error = rd_kafka_init_transactions(obj->rk, timeout);
    if (txn_error != NULL) {
        /* Failure: wrap the librdkafka error object in an exception and throw it. */
        create_kafka_error(return_value, txn_error);
        zend_throw_exception_object(return_value);
    }
}
/* }}} */
/* {{{ proto int RdKafka\Producer::beginTransaction()
   Start a transaction */
PHP_METHOD(RdKafka_Producer, beginTransaction)
{
    const rd_kafka_error_t *txn_error;
    kafka_object *obj;

    obj = get_kafka_object(getThis());
    if (obj == NULL) {
        return;
    }

    txn_error = rd_kafka_begin_transaction(obj->rk);
    if (txn_error != NULL) {
        /* Failure: wrap the librdkafka error object in an exception and throw it. */
        create_kafka_error(return_value, txn_error);
        zend_throw_exception_object(return_value);
    }
}
/* }}} */
/* {{{ proto int RdKafka\Producer::commitTransaction(int timeout_ms)
   Commit a transaction */
PHP_METHOD(RdKafka_Producer, commitTransaction)
{
    kafka_object *obj;
    zend_long timeout;
    const rd_kafka_error_t *txn_error;

    /* Single required argument: timeout in milliseconds. */
    if (FAILURE == zend_parse_parameters(ZEND_NUM_ARGS(), "l", &timeout)) {
        return;
    }

    obj = get_kafka_object(getThis());
    if (obj == NULL) {
        return;
    }

    txn_error = rd_kafka_commit_transaction(obj->rk, timeout);
    if (txn_error != NULL) {
        /* Failure: wrap the librdkafka error object in an exception and throw it. */
        create_kafka_error(return_value, txn_error);
        zend_throw_exception_object(return_value);
    }
}
/* }}} */
/* {{{ proto int RdKafka\Producer::abortTransaction(int timeout_ms)
   Abort the current transaction (original comment incorrectly said "Commit") */
PHP_METHOD(RdKafka_Producer, abortTransaction)
{
    kafka_object *intern;
    zend_long timeout_ms;
    const rd_kafka_error_t *error;

    /* Single required argument: timeout in milliseconds. */
    if (zend_parse_parameters(ZEND_NUM_ARGS(), "l", &timeout_ms) == FAILURE) {
        return;
    }

    intern = get_kafka_object(getThis());
    if (!intern) {
        return;
    }

    error = rd_kafka_abort_transaction(intern->rk, timeout_ms);
    if (NULL == error) {
        /* NULL error means success; nothing to report. */
        return;
    }

    /* On failure, wrap the librdkafka error into an exception object and throw it. */
    create_kafka_error(return_value, error);
    zend_throw_exception_object(return_value);
}
/* }}} */
#endif
/* Register a librdkafka enum/#define value as a PHP constant of the same name. */
#define COPY_CONSTANT(name) \
    REGISTER_LONG_CONSTANT(#name, name, CONST_CS | CONST_PERSISTENT)
void register_err_constants(INIT_FUNC_ARGS) /* {{{ */
{
const struct rd_kafka_err_desc *errdescs;
size_t cnt;
size_t i;
char buf[128];
rd_kafka_get_err_descs(&errdescs, &cnt);
for (i = 0; i < cnt; i++) {
const struct rd_kafka_err_desc *desc = &errdescs[i];
int len;
if (!desc->name) {
continue;
}
len = snprintf(buf, sizeof(buf), "RD_KAFKA_RESP_ERR_%s", desc->name);
if ((size_t)len >= sizeof(buf)) {
len = sizeof(buf)-1;
}
zend_register_long_constant(buf, len, desc->code, CONST_CS | CONST_PERSISTENT, module_number);
}
} /* }}} */
/* {{{ PHP_MINIT_FUNCTION
   Module init: registers constants, shared object handlers, classes, and
   delegates to the per-component *_minit helpers. Order matters: the RdKafka
   base class must exist before its subclasses are registered against it.
 */
PHP_MINIT_FUNCTION(rdkafka)
{
    /* Mirror core librdkafka constants into PHP. */
    COPY_CONSTANT(RD_KAFKA_CONSUMER);
    COPY_CONSTANT(RD_KAFKA_OFFSET_BEGINNING);
    COPY_CONSTANT(RD_KAFKA_OFFSET_END);
    COPY_CONSTANT(RD_KAFKA_OFFSET_STORED);
    COPY_CONSTANT(RD_KAFKA_PARTITION_UA);
    COPY_CONSTANT(RD_KAFKA_PRODUCER);
    COPY_CONSTANT(RD_KAFKA_MSG_F_BLOCK);
#ifdef HAS_RD_KAFKA_PURGE
    COPY_CONSTANT(RD_KAFKA_PURGE_F_QUEUE);
    COPY_CONSTANT(RD_KAFKA_PURGE_F_INFLIGHT);
    COPY_CONSTANT(RD_KAFKA_PURGE_F_NON_BLOCKING);
#endif
    /* RD_KAFKA_VERSION = runtime library version; RD_KAFKA_BUILD_VERSION = compile-time header version. */
    REGISTER_LONG_CONSTANT("RD_KAFKA_VERSION", rd_kafka_version(), CONST_CS | CONST_PERSISTENT);
    REGISTER_LONG_CONSTANT("RD_KAFKA_BUILD_VERSION", RD_KAFKA_VERSION, CONST_CS | CONST_PERSISTENT);
    register_err_constants(INIT_FUNC_ARGS_PASSTHRU);
    COPY_CONSTANT(RD_KAFKA_CONF_UNKNOWN);
    COPY_CONSTANT(RD_KAFKA_CONF_INVALID);
    COPY_CONSTANT(RD_KAFKA_CONF_OK);
    /* Partitioner selection constants (extension-local values). */
    REGISTER_LONG_CONSTANT("RD_KAFKA_MSG_PARTITIONER_RANDOM", MSG_PARTITIONER_RANDOM, CONST_CS | CONST_PERSISTENT);
    REGISTER_LONG_CONSTANT("RD_KAFKA_MSG_PARTITIONER_CONSISTENT", MSG_PARTITIONER_CONSISTENT, CONST_CS | CONST_PERSISTENT);
    REGISTER_LONG_CONSTANT("RD_KAFKA_MSG_PARTITIONER_CONSISTENT_RANDOM", MSG_PARTITIONER_CONSISTENT_RANDOM, CONST_CS | CONST_PERSISTENT);
#ifdef HAS_RD_KAFKA_PARTITIONER_MURMUR2
    REGISTER_LONG_CONSTANT("RD_KAFKA_MSG_PARTITIONER_MURMUR2", MSG_PARTITIONER_MURMUR2, CONST_CS | CONST_PERSISTENT);
    REGISTER_LONG_CONSTANT("RD_KAFKA_MSG_PARTITIONER_MURMUR2_RANDOM", MSG_PARTITIONER_MURMUR2_RANDOM, CONST_CS | CONST_PERSISTENT);
#endif
    REGISTER_LONG_CONSTANT("RD_KAFKA_LOG_PRINT", RD_KAFKA_LOG_PRINT, CONST_CS | CONST_PERSISTENT);
    REGISTER_LONG_CONSTANT("RD_KAFKA_LOG_SYSLOG", RD_KAFKA_LOG_SYSLOG, CONST_CS | CONST_PERSISTENT);
    REGISTER_LONG_CONSTANT("RD_KAFKA_LOG_SYSLOG_PRINT", RD_KAFKA_LOG_SYSLOG_PRINT, CONST_CS | CONST_PERSISTENT);
    /* Shared object handlers: cloning disabled; kafka objects use a custom free_obj. */
    memcpy(&kafka_default_object_handlers, zend_get_std_object_handlers(), sizeof(zend_object_handlers));
    kafka_default_object_handlers.clone_obj = NULL;
    kafka_object_handlers = kafka_default_object_handlers;
    kafka_object_handlers.free_obj = kafka_free;
    kafka_object_handlers.offset = XtOffsetOf(kafka_object, std);
    /* Register the RdKafka base class, then subclasses that inherit from it. */
    ce_kafka = register_class_RdKafka();
    ce_kafka->create_object = kafka_new;
    ce_kafka_consumer = register_class_RdKafka_Consumer(ce_kafka);
    ce_kafka_producer = register_class_RdKafka_Producer(ce_kafka);
    ce_kafka_exception = register_class_RdKafka_Exception(zend_ce_exception);
    /* Delegate to per-component module-init routines. */
    kafka_conf_minit(INIT_FUNC_ARGS_PASSTHRU);
#ifdef HAS_RD_KAFKA_TRANSACTIONS
    kafka_error_minit();
#endif
    kafka_kafka_consumer_minit(INIT_FUNC_ARGS_PASSTHRU);
    kafka_message_minit(INIT_FUNC_ARGS_PASSTHRU);
    kafka_metadata_minit(INIT_FUNC_ARGS_PASSTHRU);
    kafka_metadata_topic_partition_minit(INIT_FUNC_ARGS_PASSTHRU);
    kafka_queue_minit(INIT_FUNC_ARGS_PASSTHRU);
    kafka_topic_minit(INIT_FUNC_ARGS_PASSTHRU);
    return SUCCESS;
}
/* }}} */
/* {{{ PHP_MINFO_FUNCTION
   phpinfo() table: extension version, build date, and both the runtime and
   compile-time librdkafka versions.
 */
PHP_MINFO_FUNCTION(rdkafka)
{
    /* FIX: renamed from `rd_kafka_version`, which shadowed librdkafka's
     * rd_kafka_version() function symbol inside this scope. */
    char *build_version;

    php_info_print_table_start();
    php_info_print_table_row(2, "rdkafka support", "enabled");
    php_info_print_table_row(2, "version", PHP_RDKAFKA_VERSION);
    php_info_print_table_row(2, "build date", __DATE__ " " __TIME__);

    /* Split the RD_KAFKA_VERSION integer into its four byte components. */
    spprintf(
        &build_version,
        0,
        "%u.%u.%u.%u",
        (RD_KAFKA_VERSION & 0xFF000000) >> 24,
        (RD_KAFKA_VERSION & 0x00FF0000) >> 16,
        (RD_KAFKA_VERSION & 0x0000FF00) >> 8,
        (RD_KAFKA_VERSION & 0x000000FF)
    );
    php_info_print_table_row(2, "librdkafka version (runtime)", rd_kafka_version_str());
    php_info_print_table_row(2, "librdkafka version (build)", build_version);
    efree(build_version);

    php_info_print_table_end();
}
/* }}} */
/* {{{ rdkafka_module_entry
 */
zend_module_entry rdkafka_module_entry = {
    STANDARD_MODULE_HEADER,
    "rdkafka",            /* extension name */
    ext_functions,        /* function table (from generated arginfo) */
    PHP_MINIT(rdkafka),   /* module startup */
    NULL,                 /* module shutdown: none */
    NULL,                 /* request startup: none */
    NULL,                 /* request shutdown: none */
    PHP_MINFO(rdkafka),   /* phpinfo() handler */
    PHP_RDKAFKA_VERSION,
    STANDARD_MODULE_PROPERTIES
};
/* }}} */
/* Entry point when the extension is built as a shared object. */
#ifdef COMPILE_DL_RDKAFKA
ZEND_GET_MODULE(rdkafka)
#endif
<?php
/**
* @generate-class-entries
* @generate-function-entries
* @generate-legacy-arginfo
*/
namespace {
// Abstract base class shared by RdKafka\Consumer and RdKafka\Producer.
// NOTE(review): `?callable` property types are only valid in internal-class
// stubs, not in userland PHP — this file is a gen_stub.php input.
abstract class RdKafka {
// Internal callback slots managed by the extension (not readable from userland).
private ?callable $error_cb;
private ?callable $dr_cb;
// Instances are only created through the Consumer/Producer subclasses.
private function __construct() {}
/** @tentative-return-type */
public function addBrokers(string $broker_list): int {}
/** @tentative-return-type */
public function getMetadata(bool $all_topics, ?RdKafka\Topic $only_topic, int $timeout_ms): RdKafka\Metadata {}
/** @tentative-return-type */
public function getOutQLen(): int {}
/**
 * @alias RdKafka::getMetadata
 * @deprecated
 * @tentative-return-type
 */
public function metadata(bool $all_topics, ?RdKafka\Topic $only_topic, int $timeout_ms): RdKafka\Metadata {}
/**
 * @deprecated
 * @tentative-return-type
 */
public function setLogLevel(int $level): void {}
/** @tentative-return-type */
public function newTopic(string $topic_name, ?RdKafka\TopicConf $topic_conf = null): RdKafka\Topic {}
/**
 * @alias RdKafka::getOutQLen
 * @deprecated
 * @tentative-return-type
 */
public function outqLen(): int {}
/** @tentative-return-type */
public function poll(int $timeout_ms): int {}
/** @tentative-return-type */
public function flush(int $timeout_ms): int {}
#ifdef HAS_RD_KAFKA_PURGE
/** @tentative-return-type */
public function purge(int $purge_flags): int {}
#endif
/**
 * @deprecated
 * @tentative-return-type
 */
public function setLogger(int $logger): void {}
/** @tentative-return-type */
public function queryWatermarkOffsets(string $topic, int $partition, int &$low, int &$high, int $timeout_ms): void {}
/** @tentative-return-type */
public function offsetsForTimes(array $topic_partitions, int $timeout_ms): array {}
/** @tentative-return-type */
public function pausePartitions(array $topic_partitions): array {}
/** @tentative-return-type */
public function resumePartitions(array $topic_partitions): array {}
}
}
namespace RdKafka {
// Exception type thrown by the extension's methods on librdkafka errors.
class Exception extends \Exception {
}
// Low-level (non-group) consumer handle.
class Consumer extends \RdKafka {
public function __construct(?Conf $conf = null) {}
/** @tentative-return-type */
public function newQueue(): Queue {}
}
// Producer handle; transaction methods exist only when librdkafka has the transaction API.
class Producer extends \RdKafka {
public function __construct(?Conf $conf = null) {}
#ifdef HAS_RD_KAFKA_TRANSACTIONS
/** @tentative-return-type */
public function initTransactions(int $timeout_ms): void {}
/** @tentative-return-type */
public function beginTransaction(): void {}
/** @tentative-return-type */
public function commitTransaction(int $timeout_ms): void {}
/** @tentative-return-type */
public function abortTransaction(int $timeout_ms): void {}
#endif
}
}
/* This is a generated file, edit the .stub.php file instead.
* Stub hash: fbfdb28740208d5f909e9db261bea0aa26bfd471 */
/* Arginfo structures generated from the stub (PHP >= 8 variant, with type info).
 * Do not edit by hand — regenerate from the .stub.php file. */
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka___construct, 0, 0, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_addBrokers, 0, 1, IS_LONG, 0)
ZEND_ARG_TYPE_INFO(0, broker_list, IS_STRING, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_OBJ_INFO_EX(arginfo_class_RdKafka_getMetadata, 0, 3, RdKafka\\Metadata, 0)
ZEND_ARG_TYPE_INFO(0, all_topics, _IS_BOOL, 0)
ZEND_ARG_OBJ_INFO(0, only_topic, RdKafka\\Topic, 1)
ZEND_ARG_TYPE_INFO(0, timeout_ms, IS_LONG, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_getOutQLen, 0, 0, IS_LONG, 0)
ZEND_END_ARG_INFO()
/* Deprecated aliases share the arginfo of the methods they forward to. */
#define arginfo_class_RdKafka_metadata arginfo_class_RdKafka_getMetadata
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_setLogLevel, 0, 1, IS_VOID, 0)
ZEND_ARG_TYPE_INFO(0, level, IS_LONG, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_OBJ_INFO_EX(arginfo_class_RdKafka_newTopic, 0, 1, RdKafka\\Topic, 0)
ZEND_ARG_TYPE_INFO(0, topic_name, IS_STRING, 0)
ZEND_ARG_OBJ_INFO_WITH_DEFAULT_VALUE(0, topic_conf, RdKafka\\TopicConf, 1, "null")
ZEND_END_ARG_INFO()
#define arginfo_class_RdKafka_outqLen arginfo_class_RdKafka_getOutQLen
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_poll, 0, 1, IS_LONG, 0)
ZEND_ARG_TYPE_INFO(0, timeout_ms, IS_LONG, 0)
ZEND_END_ARG_INFO()
#define arginfo_class_RdKafka_flush arginfo_class_RdKafka_poll
#if defined(HAS_RD_KAFKA_PURGE)
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_purge, 0, 1, IS_LONG, 0)
ZEND_ARG_TYPE_INFO(0, purge_flags, IS_LONG, 0)
ZEND_END_ARG_INFO()
#endif
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_setLogger, 0, 1, IS_VOID, 0)
ZEND_ARG_TYPE_INFO(0, logger, IS_LONG, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_queryWatermarkOffsets, 0, 5, IS_VOID, 0)
ZEND_ARG_TYPE_INFO(0, topic, IS_STRING, 0)
ZEND_ARG_TYPE_INFO(0, partition, IS_LONG, 0)
ZEND_ARG_TYPE_INFO(1, low, IS_LONG, 0)
ZEND_ARG_TYPE_INFO(1, high, IS_LONG, 0)
ZEND_ARG_TYPE_INFO(0, timeout_ms, IS_LONG, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_offsetsForTimes, 0, 2, IS_ARRAY, 0)
ZEND_ARG_TYPE_INFO(0, topic_partitions, IS_ARRAY, 0)
ZEND_ARG_TYPE_INFO(0, timeout_ms, IS_LONG, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_pausePartitions, 0, 1, IS_ARRAY, 0)
ZEND_ARG_TYPE_INFO(0, topic_partitions, IS_ARRAY, 0)
ZEND_END_ARG_INFO()
#define arginfo_class_RdKafka_resumePartitions arginfo_class_RdKafka_pausePartitions
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_Consumer___construct, 0, 0, 0)
ZEND_ARG_OBJ_INFO_WITH_DEFAULT_VALUE(0, conf, RdKafka\\Conf, 1, "null")
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_OBJ_INFO_EX(arginfo_class_RdKafka_Consumer_newQueue, 0, 0, RdKafka\\Queue, 0)
ZEND_END_ARG_INFO()
#define arginfo_class_RdKafka_Producer___construct arginfo_class_RdKafka_Consumer___construct
/* Transaction-related arginfo exists only when librdkafka provides the transaction API. */
#if defined(HAS_RD_KAFKA_TRANSACTIONS)
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_Producer_initTransactions, 0, 1, IS_VOID, 0)
ZEND_ARG_TYPE_INFO(0, timeout_ms, IS_LONG, 0)
ZEND_END_ARG_INFO()
#endif
#if defined(HAS_RD_KAFKA_TRANSACTIONS)
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_Producer_beginTransaction, 0, 0, IS_VOID, 0)
ZEND_END_ARG_INFO()
#endif
#if defined(HAS_RD_KAFKA_TRANSACTIONS)
#define arginfo_class_RdKafka_Producer_commitTransaction arginfo_class_RdKafka_Producer_initTransactions
#endif
#if defined(HAS_RD_KAFKA_TRANSACTIONS)
#define arginfo_class_RdKafka_Producer_abortTransaction arginfo_class_RdKafka_Producer_initTransactions
#endif
/* Forward declarations of the method implementations (generated). */
ZEND_METHOD(RdKafka, __construct);
ZEND_METHOD(RdKafka, addBrokers);
ZEND_METHOD(RdKafka, getMetadata);
ZEND_METHOD(RdKafka, getOutQLen);
ZEND_METHOD(RdKafka, setLogLevel);
ZEND_METHOD(RdKafka, newTopic);
ZEND_METHOD(RdKafka, poll);
ZEND_METHOD(RdKafka, flush);
#if defined(HAS_RD_KAFKA_PURGE)
ZEND_METHOD(RdKafka, purge);
#endif
ZEND_METHOD(RdKafka, setLogger);
ZEND_METHOD(RdKafka, queryWatermarkOffsets);
ZEND_METHOD(RdKafka, offsetsForTimes);
ZEND_METHOD(RdKafka, pausePartitions);
ZEND_METHOD(RdKafka, resumePartitions);
ZEND_METHOD(RdKafka_Consumer, __construct);
ZEND_METHOD(RdKafka_Consumer, newQueue);
ZEND_METHOD(RdKafka_Producer, __construct);
#if defined(HAS_RD_KAFKA_TRANSACTIONS)
ZEND_METHOD(RdKafka_Producer, initTransactions);
#endif
#if defined(HAS_RD_KAFKA_TRANSACTIONS)
ZEND_METHOD(RdKafka_Producer, beginTransaction);
#endif
#if defined(HAS_RD_KAFKA_TRANSACTIONS)
ZEND_METHOD(RdKafka_Producer, commitTransaction);
#endif
#if defined(HAS_RD_KAFKA_TRANSACTIONS)
ZEND_METHOD(RdKafka_Producer, abortTransaction);
#endif
/* Method table for the abstract RdKafka base class. */
static const zend_function_entry class_RdKafka_methods[] = {
ZEND_ME(RdKafka, __construct, arginfo_class_RdKafka___construct, ZEND_ACC_PRIVATE)
ZEND_ME(RdKafka, addBrokers, arginfo_class_RdKafka_addBrokers, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka, getMetadata, arginfo_class_RdKafka_getMetadata, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka, getOutQLen, arginfo_class_RdKafka_getOutQLen, ZEND_ACC_PUBLIC)
ZEND_MALIAS(RdKafka, metadata, getMetadata, arginfo_class_RdKafka_metadata, ZEND_ACC_PUBLIC|ZEND_ACC_DEPRECATED)
ZEND_ME(RdKafka, setLogLevel, arginfo_class_RdKafka_setLogLevel, ZEND_ACC_PUBLIC|ZEND_ACC_DEPRECATED)
ZEND_ME(RdKafka, newTopic, arginfo_class_RdKafka_newTopic, ZEND_ACC_PUBLIC)
ZEND_MALIAS(RdKafka, outqLen, getOutQLen, arginfo_class_RdKafka_outqLen, ZEND_ACC_PUBLIC|ZEND_ACC_DEPRECATED)
ZEND_ME(RdKafka, poll, arginfo_class_RdKafka_poll, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka, flush, arginfo_class_RdKafka_flush, ZEND_ACC_PUBLIC)
#if defined(HAS_RD_KAFKA_PURGE)
ZEND_ME(RdKafka, purge, arginfo_class_RdKafka_purge, ZEND_ACC_PUBLIC)
#endif
ZEND_ME(RdKafka, setLogger, arginfo_class_RdKafka_setLogger, ZEND_ACC_PUBLIC|ZEND_ACC_DEPRECATED)
ZEND_ME(RdKafka, queryWatermarkOffsets, arginfo_class_RdKafka_queryWatermarkOffsets, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka, offsetsForTimes, arginfo_class_RdKafka_offsetsForTimes, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka, pausePartitions, arginfo_class_RdKafka_pausePartitions, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka, resumePartitions, arginfo_class_RdKafka_resumePartitions, ZEND_ACC_PUBLIC)
ZEND_FE_END
};
/* RdKafka\Exception adds no methods of its own. */
static const zend_function_entry class_RdKafka_Exception_methods[] = {
ZEND_FE_END
};
static const zend_function_entry class_RdKafka_Consumer_methods[] = {
ZEND_ME(RdKafka_Consumer, __construct, arginfo_class_RdKafka_Consumer___construct, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_Consumer, newQueue, arginfo_class_RdKafka_Consumer_newQueue, ZEND_ACC_PUBLIC)
ZEND_FE_END
};
static const zend_function_entry class_RdKafka_Producer_methods[] = {
ZEND_ME(RdKafka_Producer, __construct, arginfo_class_RdKafka_Producer___construct, ZEND_ACC_PUBLIC)
#if defined(HAS_RD_KAFKA_TRANSACTIONS)
ZEND_ME(RdKafka_Producer, initTransactions, arginfo_class_RdKafka_Producer_initTransactions, ZEND_ACC_PUBLIC)
#endif
#if defined(HAS_RD_KAFKA_TRANSACTIONS)
ZEND_ME(RdKafka_Producer, beginTransaction, arginfo_class_RdKafka_Producer_beginTransaction, ZEND_ACC_PUBLIC)
#endif
#if defined(HAS_RD_KAFKA_TRANSACTIONS)
ZEND_ME(RdKafka_Producer, commitTransaction, arginfo_class_RdKafka_Producer_commitTransaction, ZEND_ACC_PUBLIC)
#endif
#if defined(HAS_RD_KAFKA_TRANSACTIONS)
ZEND_ME(RdKafka_Producer, abortTransaction, arginfo_class_RdKafka_Producer_abortTransaction, ZEND_ACC_PUBLIC)
#endif
ZEND_FE_END
};
/* Register the abstract RdKafka base class plus its two private (typed,
 * PHP >= 8) callback-holder properties. */
static zend_class_entry *register_class_RdKafka(void)
{
    zend_class_entry ce, *class_entry;

    INIT_CLASS_ENTRY(ce, "RdKafka", class_RdKafka_methods);
    class_entry = zend_register_internal_class_ex(&ce, NULL);
    class_entry->ce_flags |= ZEND_ACC_ABSTRACT;

    /* private ?callable $error_cb — default is UNDEF (typed property). */
    zval property_error_cb_default_value;
    ZVAL_UNDEF(&property_error_cb_default_value);
    zend_string *property_error_cb_name = zend_string_init("error_cb", sizeof("error_cb") - 1, 1);
    zend_declare_typed_property(class_entry, property_error_cb_name, &property_error_cb_default_value, ZEND_ACC_PRIVATE, NULL, (zend_type) ZEND_TYPE_INIT_MASK(MAY_BE_CALLABLE|MAY_BE_NULL));
    zend_string_release(property_error_cb_name);

    /* private ?callable $dr_cb */
    zval property_dr_cb_default_value;
    ZVAL_UNDEF(&property_dr_cb_default_value);
    zend_string *property_dr_cb_name = zend_string_init("dr_cb", sizeof("dr_cb") - 1, 1);
    zend_declare_typed_property(class_entry, property_dr_cb_name, &property_dr_cb_default_value, ZEND_ACC_PRIVATE, NULL, (zend_type) ZEND_TYPE_INIT_MASK(MAY_BE_CALLABLE|MAY_BE_NULL));
    zend_string_release(property_dr_cb_name);

    return class_entry;
}
/* RdKafka\Exception extends the given base exception class. */
static zend_class_entry *register_class_RdKafka_Exception(zend_class_entry *class_entry_Exception)
{
    zend_class_entry ce, *class_entry;
    INIT_NS_CLASS_ENTRY(ce, "RdKafka", "Exception", class_RdKafka_Exception_methods);
    class_entry = zend_register_internal_class_ex(&ce, class_entry_Exception);
    return class_entry;
}
/* RdKafka\Consumer extends the RdKafka base class. */
static zend_class_entry *register_class_RdKafka_Consumer(zend_class_entry *class_entry_RdKafka)
{
    zend_class_entry ce, *class_entry;
    INIT_NS_CLASS_ENTRY(ce, "RdKafka", "Consumer", class_RdKafka_Consumer_methods);
    class_entry = zend_register_internal_class_ex(&ce, class_entry_RdKafka);
    return class_entry;
}
/* RdKafka\Producer extends the RdKafka base class. */
static zend_class_entry *register_class_RdKafka_Producer(zend_class_entry *class_entry_RdKafka)
{
    zend_class_entry ce, *class_entry;
    INIT_NS_CLASS_ENTRY(ce, "RdKafka", "Producer", class_RdKafka_Producer_methods);
    class_entry = zend_register_internal_class_ex(&ce, class_entry_RdKafka);
    return class_entry;
}
/* This is a generated file, edit the .stub.php file instead.
* Stub hash: fbfdb28740208d5f909e9db261bea0aa26bfd471 */
/* Legacy arginfo generated from the stub (PHP 7 variant, no type info).
 * Do not edit by hand — regenerate from the .stub.php file. */
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka___construct, 0, 0, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_addBrokers, 0, 0, 1)
ZEND_ARG_INFO(0, broker_list)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_getMetadata, 0, 0, 3)
ZEND_ARG_INFO(0, all_topics)
ZEND_ARG_INFO(0, only_topic)
ZEND_ARG_INFO(0, timeout_ms)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_getOutQLen, 0, 0, 0)
ZEND_END_ARG_INFO()
/* Deprecated aliases share the arginfo of the methods they forward to. */
#define arginfo_class_RdKafka_metadata arginfo_class_RdKafka_getMetadata
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_setLogLevel, 0, 0, 1)
ZEND_ARG_INFO(0, level)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_newTopic, 0, 0, 1)
ZEND_ARG_INFO(0, topic_name)
ZEND_ARG_INFO(0, topic_conf)
ZEND_END_ARG_INFO()
#define arginfo_class_RdKafka_outqLen arginfo_class_RdKafka_getOutQLen
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_poll, 0, 0, 1)
ZEND_ARG_INFO(0, timeout_ms)
ZEND_END_ARG_INFO()
#define arginfo_class_RdKafka_flush arginfo_class_RdKafka_poll
#if defined(HAS_RD_KAFKA_PURGE)
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_purge, 0, 0, 1)
ZEND_ARG_INFO(0, purge_flags)
ZEND_END_ARG_INFO()
#endif
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_setLogger, 0, 0, 1)
ZEND_ARG_INFO(0, logger)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_queryWatermarkOffsets, 0, 0, 5)
ZEND_ARG_INFO(0, topic)
ZEND_ARG_INFO(0, partition)
ZEND_ARG_INFO(1, low)
ZEND_ARG_INFO(1, high)
ZEND_ARG_INFO(0, timeout_ms)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_offsetsForTimes, 0, 0, 2)
ZEND_ARG_INFO(0, topic_partitions)
ZEND_ARG_INFO(0, timeout_ms)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_pausePartitions, 0, 0, 1)
ZEND_ARG_INFO(0, topic_partitions)
ZEND_END_ARG_INFO()
#define arginfo_class_RdKafka_resumePartitions arginfo_class_RdKafka_pausePartitions
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_Consumer___construct, 0, 0, 0)
ZEND_ARG_INFO(0, conf)
ZEND_END_ARG_INFO()
#define arginfo_class_RdKafka_Consumer_newQueue arginfo_class_RdKafka_getOutQLen
#define arginfo_class_RdKafka_Producer___construct arginfo_class_RdKafka_Consumer___construct
/* Transaction-related arginfo exists only when librdkafka provides the transaction API. */
#if defined(HAS_RD_KAFKA_TRANSACTIONS)
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_Producer_initTransactions, 0, 0, 1)
ZEND_ARG_INFO(0, timeout_ms)
ZEND_END_ARG_INFO()
#endif
#if defined(HAS_RD_KAFKA_TRANSACTIONS)
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_Producer_beginTransaction, 0, 0, 0)
ZEND_END_ARG_INFO()
#endif
#if defined(HAS_RD_KAFKA_TRANSACTIONS)
#define arginfo_class_RdKafka_Producer_commitTransaction arginfo_class_RdKafka_Producer_initTransactions
#endif
#if defined(HAS_RD_KAFKA_TRANSACTIONS)
#define arginfo_class_RdKafka_Producer_abortTransaction arginfo_class_RdKafka_Producer_initTransactions
#endif
/* Forward declarations of the method implementations (generated, legacy variant). */
ZEND_METHOD(RdKafka, __construct);
ZEND_METHOD(RdKafka, addBrokers);
ZEND_METHOD(RdKafka, getMetadata);
ZEND_METHOD(RdKafka, getOutQLen);
ZEND_METHOD(RdKafka, setLogLevel);
ZEND_METHOD(RdKafka, newTopic);
ZEND_METHOD(RdKafka, poll);
ZEND_METHOD(RdKafka, flush);
#if defined(HAS_RD_KAFKA_PURGE)
ZEND_METHOD(RdKafka, purge);
#endif
ZEND_METHOD(RdKafka, setLogger);
ZEND_METHOD(RdKafka, queryWatermarkOffsets);
ZEND_METHOD(RdKafka, offsetsForTimes);
ZEND_METHOD(RdKafka, pausePartitions);
ZEND_METHOD(RdKafka, resumePartitions);
ZEND_METHOD(RdKafka_Consumer, __construct);
ZEND_METHOD(RdKafka_Consumer, newQueue);
ZEND_METHOD(RdKafka_Producer, __construct);
#if defined(HAS_RD_KAFKA_TRANSACTIONS)
ZEND_METHOD(RdKafka_Producer, initTransactions);
#endif
#if defined(HAS_RD_KAFKA_TRANSACTIONS)
ZEND_METHOD(RdKafka_Producer, beginTransaction);
#endif
#if defined(HAS_RD_KAFKA_TRANSACTIONS)
ZEND_METHOD(RdKafka_Producer, commitTransaction);
#endif
#if defined(HAS_RD_KAFKA_TRANSACTIONS)
ZEND_METHOD(RdKafka_Producer, abortTransaction);
#endif
/* Method table for the abstract RdKafka base class (legacy variant). */
static const zend_function_entry class_RdKafka_methods[] = {
ZEND_ME(RdKafka, __construct, arginfo_class_RdKafka___construct, ZEND_ACC_PRIVATE)
ZEND_ME(RdKafka, addBrokers, arginfo_class_RdKafka_addBrokers, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka, getMetadata, arginfo_class_RdKafka_getMetadata, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka, getOutQLen, arginfo_class_RdKafka_getOutQLen, ZEND_ACC_PUBLIC)
ZEND_MALIAS(RdKafka, metadata, getMetadata, arginfo_class_RdKafka_metadata, ZEND_ACC_PUBLIC|ZEND_ACC_DEPRECATED)
ZEND_ME(RdKafka, setLogLevel, arginfo_class_RdKafka_setLogLevel, ZEND_ACC_PUBLIC|ZEND_ACC_DEPRECATED)
ZEND_ME(RdKafka, newTopic, arginfo_class_RdKafka_newTopic, ZEND_ACC_PUBLIC)
ZEND_MALIAS(RdKafka, outqLen, getOutQLen, arginfo_class_RdKafka_outqLen, ZEND_ACC_PUBLIC|ZEND_ACC_DEPRECATED)
ZEND_ME(RdKafka, poll, arginfo_class_RdKafka_poll, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka, flush, arginfo_class_RdKafka_flush, ZEND_ACC_PUBLIC)
#if defined(HAS_RD_KAFKA_PURGE)
ZEND_ME(RdKafka, purge, arginfo_class_RdKafka_purge, ZEND_ACC_PUBLIC)
#endif
ZEND_ME(RdKafka, setLogger, arginfo_class_RdKafka_setLogger, ZEND_ACC_PUBLIC|ZEND_ACC_DEPRECATED)
ZEND_ME(RdKafka, queryWatermarkOffsets, arginfo_class_RdKafka_queryWatermarkOffsets, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka, offsetsForTimes, arginfo_class_RdKafka_offsetsForTimes, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka, pausePartitions, arginfo_class_RdKafka_pausePartitions, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka, resumePartitions, arginfo_class_RdKafka_resumePartitions, ZEND_ACC_PUBLIC)
ZEND_FE_END
};
/* RdKafka\Exception adds no methods of its own. */
static const zend_function_entry class_RdKafka_Exception_methods[] = {
ZEND_FE_END
};
static const zend_function_entry class_RdKafka_Consumer_methods[] = {
ZEND_ME(RdKafka_Consumer, __construct, arginfo_class_RdKafka_Consumer___construct, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_Consumer, newQueue, arginfo_class_RdKafka_Consumer_newQueue, ZEND_ACC_PUBLIC)
ZEND_FE_END
};
static const zend_function_entry class_RdKafka_Producer_methods[] = {
ZEND_ME(RdKafka_Producer, __construct, arginfo_class_RdKafka_Producer___construct, ZEND_ACC_PUBLIC)
#if defined(HAS_RD_KAFKA_TRANSACTIONS)
ZEND_ME(RdKafka_Producer, initTransactions, arginfo_class_RdKafka_Producer_initTransactions, ZEND_ACC_PUBLIC)
#endif
#if defined(HAS_RD_KAFKA_TRANSACTIONS)
ZEND_ME(RdKafka_Producer, beginTransaction, arginfo_class_RdKafka_Producer_beginTransaction, ZEND_ACC_PUBLIC)
#endif
#if defined(HAS_RD_KAFKA_TRANSACTIONS)
ZEND_ME(RdKafka_Producer, commitTransaction, arginfo_class_RdKafka_Producer_commitTransaction, ZEND_ACC_PUBLIC)
#endif
#if defined(HAS_RD_KAFKA_TRANSACTIONS)
ZEND_ME(RdKafka_Producer, abortTransaction, arginfo_class_RdKafka_Producer_abortTransaction, ZEND_ACC_PUBLIC)
#endif
ZEND_FE_END
};
/* Register the abstract RdKafka base class (legacy PHP 7 variant: untyped
 * properties with NULL defaults instead of typed UNDEF properties). */
static zend_class_entry *register_class_RdKafka(void)
{
    zend_class_entry ce, *class_entry;

    INIT_CLASS_ENTRY(ce, "RdKafka", class_RdKafka_methods);
    class_entry = zend_register_internal_class_ex(&ce, NULL);
    class_entry->ce_flags |= ZEND_ACC_ABSTRACT;

    /* private $error_cb = null */
    zval property_error_cb_default_value;
    ZVAL_NULL(&property_error_cb_default_value);
    zend_string *property_error_cb_name = zend_string_init("error_cb", sizeof("error_cb") - 1, 1);
    zend_declare_property_ex(class_entry, property_error_cb_name, &property_error_cb_default_value, ZEND_ACC_PRIVATE, NULL);
    zend_string_release(property_error_cb_name);

    /* private $dr_cb = null */
    zval property_dr_cb_default_value;
    ZVAL_NULL(&property_dr_cb_default_value);
    zend_string *property_dr_cb_name = zend_string_init("dr_cb", sizeof("dr_cb") - 1, 1);
    zend_declare_property_ex(class_entry, property_dr_cb_name, &property_dr_cb_default_value, ZEND_ACC_PRIVATE, NULL);
    zend_string_release(property_dr_cb_name);

    return class_entry;
}
/* RdKafka\Exception extends the given base exception class. */
static zend_class_entry *register_class_RdKafka_Exception(zend_class_entry *class_entry_Exception)
{
    zend_class_entry ce, *class_entry;
    INIT_NS_CLASS_ENTRY(ce, "RdKafka", "Exception", class_RdKafka_Exception_methods);
    class_entry = zend_register_internal_class_ex(&ce, class_entry_Exception);
    return class_entry;
}
/* RdKafka\Consumer extends the RdKafka base class. */
static zend_class_entry *register_class_RdKafka_Consumer(zend_class_entry *class_entry_RdKafka)
{
    zend_class_entry ce, *class_entry;
    INIT_NS_CLASS_ENTRY(ce, "RdKafka", "Consumer", class_RdKafka_Consumer_methods);
    class_entry = zend_register_internal_class_ex(&ce, class_entry_RdKafka);
    return class_entry;
}
/* RdKafka\Producer extends the RdKafka base class. */
static zend_class_entry *register_class_RdKafka_Producer(zend_class_entry *class_entry_RdKafka)
{
    zend_class_entry ce, *class_entry;
    INIT_NS_CLASS_ENTRY(ce, "RdKafka", "Producer", class_RdKafka_Producer_methods);
    class_entry = zend_register_internal_class_ex(&ce, class_entry_RdKafka);
    return class_entry;
}
--TEST--
Allow null payload
--SKIPIF--
<?php
require __DIR__ . '/integration-tests-check.php';
--FILE--
<?php
require __DIR__ . '/integration-tests-check.php';
// Produce a message with a NULL payload but a non-null key, then consume it
// back and verify the payload round-trips as NULL while the key is preserved.
$conf = new RdKafka\Conf();
$conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS'));
$topicName = sprintf('test_rdkafka_%s', uniqid());
$producer = new RdKafka\Producer($conf);
$topic = $producer->newTopic($topicName);
$topic->produce(0, 0, NULL, 'message_key_1');
// Drain the producer queue before consuming.
while ($producer->getOutQLen() > 0) {
$producer->poll(50);
}
$consumer = new RdKafka\Consumer($conf);
$topic = $consumer->newTopic($topicName);
$topic->consumeStart(0, RD_KAFKA_OFFSET_BEGINNING);
while (true) {
$message = $topic->consume(0, 1000);
if ($message === null) {
continue;
}
if (RD_KAFKA_RESP_ERR_NO_ERROR === $message->err) {
var_dump($message->payload);
var_dump($message->key);
break;
}
}
$topic->consumeStop(0);
--EXPECTF--
NULL
string(13) "message_key_1"
--TEST--
Allow null payload
--SKIPIF--
<?php
require __DIR__ . '/integration-tests-check.php';
--FILE--
<?php
require __DIR__ . '/integration-tests-check.php';
// Variant of the null-payload test: produce with neither payload nor key
// (only partition and flags), and verify both come back as NULL.
$conf = new RdKafka\Conf();
$conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS'));
$topicName = sprintf('test_rdkafka_%s', uniqid());
$producer = new RdKafka\Producer($conf);
$topic = $producer->newTopic($topicName);
$topic->produce(0, 0);
// Drain the producer queue before consuming.
while ($producer->getOutQLen() > 0) {
$producer->poll(50);
}
$consumer = new RdKafka\Consumer($conf);
$topic = $consumer->newTopic($topicName);
$topic->consumeStart(0, RD_KAFKA_OFFSET_BEGINNING);
while (true) {
$message = $topic->consume(0, 1000);
if ($message === null) {
continue;
}
if (RD_KAFKA_RESP_ERR_NO_ERROR === $message->err) {
var_dump($message->payload);
var_dump($message->key);
break;
}
}
$topic->consumeStop(0);
--EXPECTF--
NULL
NULL
--TEST--
Bug 115
--SKIPIF--
<?php
require __DIR__ . '/integration-tests-check.php';
--FILE--
<?php
require __DIR__ . '/integration-tests-check.php';
// Regression test: consuming an empty topic to its end must complete without
// output (any broker error aborts the run via the error callback below).
// NOTE(review): $delivered is never used in this test — presumably leftover; confirm.
$delivered = 0;
$conf = new RdKafka\Conf();
if (RD_KAFKA_VERSION >= 0x090000 && false !== getenv('TEST_KAFKA_BROKER_VERSION')) {
$conf->set('broker.version.fallback', getenv('TEST_KAFKA_BROKER_VERSION'));
}
$conf->setErrorCb(function ($producer, $err, $errstr) {
printf("%s: %s\n", rd_kafka_err2str($err), $errstr);
exit;
});
$conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS'));
$topicName = sprintf("test_rdkafka_%s", uniqid());
$consumer = new RdKafka\Consumer($conf);
$topic = $consumer->newTopic($topicName);
$topic->consumeStart(0, RD_KAFKA_OFFSET_BEGINNING);
while (true) {
$msg = $topic->consume(0, 1000);
// librdkafka before 1.0 returns message with RD_KAFKA_RESP_ERR__PARTITION_EOF when reaching topic end.
if (!$msg || RD_KAFKA_RESP_ERR__PARTITION_EOF === $msg->err) {
break;
}
}
$topic->consumeStop(0);
--EXPECT--
--TEST--
TopicPartition destruct for high level consumer
--FILE--
<?php
// Regression test: destroying a topic object created by the high-level
// KafkaConsumer must not crash; no broker connection is required here.
use RdKafka\Conf;
use RdKafka\KafkaConsumer;
$conf = new Conf();
$conf->set('group.id','test');
$consumer = new KafkaConsumer($conf);
$topic = $consumer->newTopic('test');
unset($topic);
var_dump(isset($topic));
--EXPECT--
bool(false)
--TEST--
Bug #465
--SKIPIF--
<?php
require __DIR__ . '/integration-tests-check.php';
--FILE--
<?php
require __DIR__ . '/integration-tests-check.php';
// Regression test: calling getTopic() on each topic metadata object must work
// after the metadata owner has gone out of scope.
// FIX: call site used `getTopicS()` (miscased); it only worked because PHP
// function names are case-insensitive. Normalized to match the definition.
$n = 0;
foreach (getTopics() as $topicMetadata) {
$n++;
$topicMetadata->getTopic();
}
var_dump($n > 0);
function getTopics() {
$conf = new RdKafka\Conf();
if (RD_KAFKA_VERSION >= 0x090000 && false !== getenv('TEST_KAFKA_BROKER_VERSION')) {
$conf->set('broker.version.fallback', getenv('TEST_KAFKA_BROKER_VERSION'));
}
$conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS'));
$consumer = new RdKafka\Consumer($conf);
$topicName = sprintf("test_rdkafka_%s", uniqid());
$consumer->newTopic($topicName);
return $consumer->getMetadata(true, null, 2*1000)->getTopics();
}
--EXPECT--
bool(true)
--TEST--
Bug 508
--SKIPIF--
<?php
require __DIR__ . '/integration-tests-check.php';
--FILE--
<?php
require __DIR__ . '/integration-tests-check.php';
// Regression test: every property of a consumed Message must be initialized
// and readable, both for a normal message and for the PARTITION_EOF pseudo-message.
// NOTE(review): the EXPECTF section's indentation appears stripped in this copy;
// verify it matches var_dump's two-space-indented output before running.
$topicName = sprintf("test_rdkafka_%s", uniqid());
$conf = new RdKafka\Conf();
$conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS'));
$conf->setDrMsgCb(function ($producer, $msg) use (&$delivered) {
if ($msg->err) {
throw new Exception("Message delivery failed: " . $msg->errstr());
}
$delivered++;
});
$producer = new RdKafka\Producer($conf);
$topic = $producer->newTopic($topicName);
if (!$producer->getMetadata(false, $topic, 10*1000)) {
echo "Failed to get metadata, is broker down?\n";
}
$topic->produce(0, 0, "message");
// Drain the producer queue so the delivery callback fires.
while ($producer->getOutQLen()) {
$producer->poll(50);
}
printf("%d messages delivered\n", $delivered);
$conf = new RdKafka\Conf();
$conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS'));
$conf->set('enable.partition.eof', 'true');
$consumer = new RdKafka\Consumer($conf);
$topic = $consumer->newTopic($topicName);
$topic->consumeStart(0, RD_KAFKA_OFFSET_BEGINNING);
while (true) {
$msg = $topic->consume(0, 1000);
if (!$msg) {
continue;
}
// All props are initialized and readable in all cases
var_dump([
'err' => $msg->err,
'topic_name' => $msg->topic_name,
'timestamp' => $msg->timestamp,
'partition' => $msg->partition,
'payload' => $msg->payload,
'len' => $msg->len,
'key' => $msg->key,
'offset' => $msg->offset,
'headers' => $msg->headers,
'opaque' => $msg->opaque,
]);
echo "--------------\n";
if ($msg->err === RD_KAFKA_RESP_ERR__PARTITION_EOF) {
echo "EOF\n";
break;
}
}
--EXPECTF--
1 messages delivered
array(10) {
["err"]=>
int(0)
["topic_name"]=>
string(%d) "test_rdkafka_%s"
["timestamp"]=>
int(%d)
["partition"]=>
int(0)
["payload"]=>
string(7) "message"
["len"]=>
int(7)
["key"]=>
NULL
["offset"]=>
int(0)
["headers"]=>
array(0) {
}
["opaque"]=>
NULL
}
--------------
array(10) {
["err"]=>
int(-%d)
["topic_name"]=>
string(%d) "test_rdkafka_%s"
["timestamp"]=>
int(-1)
["partition"]=>
int(0)
["payload"]=>
string(%d) "%s"
["len"]=>
int(%d)
["key"]=>
NULL
["offset"]=>
int(1)
["headers"]=>
array(0) {
}
["opaque"]=>
NULL
}
--------------
EOF
--TEST--
Bug #521
--SKIPIF--
<?php version_compare(PHP_VERSION, "8.1") < 0 && die("skip PHP < 8.1"); ?>
--FILE--
<?php
// Regression test for bug #521: on PHP 8.1+, KafkaConsumer::getMetadata()
// must expose typed, named parameters via reflection.
$reflection = new ReflectionMethod(RdKafka\KafkaConsumer::class, 'getMetadata');
foreach ($reflection->getParameters() as $reflectionParam) {
printf(
"%s%s%s\n",
(string) $reflectionParam->getType(),
$reflectionParam->getType() !== null ? ' ' : '',
$reflectionParam->getName(),
);
}
--EXPECT--
bool all_topics
?RdKafka\Topic only_topic
int timeout_ms
--TEST--
Bug 74
--SKIPIF--
<?php
require __DIR__ . '/integration-tests-check.php';
--FILE--
<?php
// Regression test for bug 74: newTopic() must accept an explicit null topic
// conf, and constructing a TopicPartition must not crash. No output expected.
require __DIR__ . '/integration-tests-check.php';
$conf = new RdKafka\Conf();
$conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS'));
$consumer = new RdKafka\Consumer($conf);
$topic = $consumer->newTopic("batman", null);
$producer = new RdKafka\Producer($conf);
if (class_exists('RdKafka\TopicPartition')) {
$tp = new RdKafka\TopicPartition("batman", 0);
}
--EXPECT--
--TEST--
Bug 88
--SKIPIF--
<?php
if (!class_exists("RdKafka\\KafkaConsumer")) {
echo "skip";
}
--FILE--
<?php
// Regression test for bug 88: constructing a KafkaConsumer without a
// group.id must throw RdKafka\Exception (the "ok" line is never reached).
$conf = new RdKafka\Conf();
$conf->set('metadata.broker.list', '127.0.0.1:9092');
$consumer = new RdKafka\KafkaConsumer($conf);
echo "ok\n";
--EXPECTF--
Fatal error: Uncaught %SRdKafka\Exception%S"group.id" must be configured%s
Stack trace:
%a
--TEST--
RdKafka\Conf
--SKIPIF--
<?php
version_compare(PHP_VERSION, "7.1") < 0 && die("skip PHP < 7.1");
--FILE--
<?php
// A userland subclass must be able to override Conf::set() with an added
// ": void" return type (requires a compatible parent signature; PHP 7.1+).
class TestBug extends RdKafka\Conf
{
public function set($name, $value): void
{
parent::set($name, $value);
}
}
$conf = new TestBug();
$conf->set('metadata.broker.list', '127.0.0.1');
echo "done" . PHP_EOL;
--EXPECT--
done
--TEST--
RdKafka\Conf
--FILE--
<?php
// Exercises Conf::set() for string/integer/boolean properties, error
// reporting for invalid values and unknown properties, and that registering
// callbacks makes the corresponding *_cb entries appear in dump().
$conf = new RdKafka\Conf();
echo "Setting a string property\n";
$conf->set("client.id", "acme");
echo "Setting an integer property\n";
$conf->set("message.max.bytes", 1 << 20);
echo "Setting a boolean property\n";
$conf->set("topic.metadata.refresh.sparse", "true");
echo "Setting a boolean property to an invalid value\n";
try {
$conf->set("topic.metadata.refresh.sparse", "xx");
} catch(Exception $e) {
printf("Caught a %s: %s\n", get_class($e), $e->getMessage());
}
echo "Setting an invalid property\n";
try {
$conf->set("invalid", "xx");
} catch(Exception $e) {
printf("Caught a %s: %s\n", get_class($e), $e->getMessage());
}
echo "Setting error callback\n";
$conf->setErrorCb(function () { });
$dump = $conf->dump();
var_dump(isset($dump["error_cb"]));
echo "Setting dr_msg callback\n";
$conf->setDrMsgCb(function () { });
$dump = $conf->dump();
var_dump(isset($dump["dr_msg_cb"]));
echo "Setting stats callback\n";
$conf->setStatsCb(function () { });
$dump = $conf->dump();
var_dump(isset($dump["stats_cb"]));
echo "Dumping conf\n";
// Only compare the three properties set above; the full dump varies with
// the librdkafka version.
var_dump(array_intersect_key($conf->dump(), array(
"client.id" => true,
"message.max.bytes" => true,
"topic.metadata.refresh.sparse" => true,
)));
--EXPECT--
Setting a string property
Setting an integer property
Setting a boolean property
Setting a boolean property to an invalid value
Caught a RdKafka\Exception: Expected bool value for "topic.metadata.refresh.sparse": true or false
Setting an invalid property
Caught a RdKafka\Exception: No such configuration property: "invalid"
Setting error callback
bool(true)
Setting dr_msg callback
bool(true)
Setting stats callback
bool(true)
Dumping conf
array(3) {
["client.id"]=>
string(4) "acme"
["message.max.bytes"]=>
string(7) "1048576"
["topic.metadata.refresh.sparse"]=>
string(4) "true"
}
--TEST--
RdKafka\Conf
--SKIPIF--
<?php
RD_KAFKA_VERSION >= 0x090000 || die("skip librdkafka too old");
--FILE--
<?php
// Registering the consume / offset_commit / rebalance callbacks (available
// with librdkafka >= 0.9) must make the matching *_cb entries show up in
// Conf::dump().
$conf = new RdKafka\Conf();
echo "Setting consume callback\n";
$conf->setConsumeCb(function () { });
$dump = $conf->dump();
var_dump(isset($dump["consume_cb"]));
echo "Setting offset_commit callback\n";
$conf->setOffsetCommitCb(function () { });
$dump = $conf->dump();
var_dump(isset($dump["offset_commit_cb"]));
echo "Setting rebalance callback\n";
$conf->setRebalanceCb(function () { });
$dump = $conf->dump();
var_dump(isset($dump["rebalance_cb"]));
--EXPECT--
Setting consume callback
bool(true)
Setting offset_commit callback
bool(true)
Setting rebalance callback
bool(true)
--TEST--
RdKafka\Conf
--SKIPIF--
<?php
RD_KAFKA_VERSION >= 0x090000 || die("skip librdkafka too old");
(!isset($_ENV['TESTS_DONT_SKIP_RISKY']) || $_ENV['TESTS_DONT_SKIP_RISKY']) && die("skip Risky/broken test");
require __DIR__ . '/integration-tests-check.php';
--FILE--
<?php
// Produces 10 messages, then consumes them with a KafkaConsumer whose
// offset-commit and statistics callbacks are registered: each commit must
// invoke the offset-commit callback, and the stats callback must fire at
// least once (statistics.interval.ms = 10).
require __DIR__ . '/integration-tests-check.php';
$conf = new RdKafka\Conf();
$conf->set('auto.offset.reset', 'earliest');
$conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS'));
$conf->set('group.id', sprintf("test_rdkafka_group_%s", uniqid()));
$producer = new RdKafka\Producer($conf);
$topicName = sprintf("test_rdkafka_%s", uniqid());
$topic = $producer->newTopic($topicName);
for ($i = 0; $i < 10; $i++) {
$topic->produce(0, 0, "message $i");
$producer->poll(0);
}
while ($producer->getOutQLen()) {
$producer->poll(50);
}
// Make sure there is enough time for the stats_cb to pick up the consumer lag
sleep(1);
$conf = new RdKafka\Conf();
$conf->set('auto.offset.reset', 'earliest');
$conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS'));
$conf->set('group.id', sprintf("test_rdkafka_group_%s", uniqid()));
$conf->set('statistics.interval.ms', 10);
$conf->setOffsetCommitCb(function ($consumer, $error, $topicPartitions) {
echo "Offset " . $topicPartitions[0]->getOffset() . " committed.\n";
});
$statsCbCalled = false;
$conf->setStatsCb(function ($consumer, $json) use (&$statsCbCalled) {
if ($statsCbCalled) {
return;
}
$statsCbCalled = true;
});
$consumer = new RdKafka\KafkaConsumer($conf);
$consumer->subscribe([$topicName]);
while (true) {
$msg = $consumer->consume(15000);
if (!$msg || RD_KAFKA_RESP_ERR__PARTITION_EOF === $msg->err) {
break;
}
if (RD_KAFKA_RESP_ERR_NO_ERROR !== $msg->err) {
throw new Exception($msg->errstr(), $msg->err);
}
$consumer->commit($msg);
}
var_dump($statsCbCalled);
--EXPECT--
Offset 1 committed.
Offset 2 committed.
Offset 3 committed.
Offset 4 committed.
Offset 5 committed.
Offset 6 committed.
Offset 7 committed.
Offset 8 committed.
Offset 9 committed.
Offset 10 committed.
bool(true)
--TEST--
RdKafka\Conf::setDefaultTopicConf()
--SKIPIF--
<?php
if (!method_exists('RdKafka\Conf', 'setDefaultTopicConf') || 7 < PHP_MAJOR_VERSION) { // PHP 7 variant of this test
echo "skip";
}
?>
--FILE--
<?php
$conf = new RdKafka\Conf(); // NOTE: EXPECTF asserts line numbers; do not add lines to this section
echo "Setting valid topic conf\n";
$conf->setDefaultTopicConf(new RdKafka\TopicConf()); // deprecated but accepted
echo "Setting invalid topic conf\n";
$conf->setDefaultTopicConf($conf); // PHP 7: recoverable warning, not a TypeError
--EXPECTF--
Setting valid topic conf
Deprecated: Function RdKafka\Conf::setDefaultTopicConf() is deprecated in %s%econf_setDefaultTopicConf.php on line 6
Setting invalid topic conf
Deprecated: Function RdKafka\Conf::setDefaultTopicConf() is deprecated in %s%econf_setDefaultTopicConf.php on line 9
Warning: RdKafka\Conf::setDefaultTopicConf() expects parameter 1 to be RdKafka\TopicConf, object given in %s%econf_setDefaultTopicConf.php on line 9
--TEST--
RdKafka\Conf::setDefaultTopicConf()
--SKIPIF--
<?php
if (!method_exists('RdKafka\Conf', 'setDefaultTopicConf') || 8 > PHP_MAJOR_VERSION) { // PHP 8 variant of this test
echo "skip";
}
?>
--FILE--
<?php
$conf = new RdKafka\Conf(); // NOTE: EXPECTF asserts line numbers; do not add lines to this section
echo "Setting valid topic conf\n";
$conf->setDefaultTopicConf(new RdKafka\TopicConf()); // deprecated but accepted
echo "Setting invalid topic conf\n";
try {
$conf->setDefaultTopicConf($conf); // PHP 8: wrong argument type throws TypeError
} catch(TypeError $error) {
echo $error->getMessage() . PHP_EOL;
echo $error->getFile() . PHP_EOL;
echo $error->getLine() . PHP_EOL;
echo $error->getCode();
}
--EXPECTF--
Setting valid topic conf
Deprecated: Method RdKafka\Conf::setDefaultTopicConf() is deprecated in %s%econf_setDefaultTopicConf8.php on line 6
Setting invalid topic conf
Deprecated: Method RdKafka\Conf::setDefaultTopicConf() is deprecated in %s%econf_setDefaultTopicConf8.php on line 10
RdKafka\Conf::setDefaultTopicConf(): Argument #1 ($topic_conf) must be of type RdKafka\TopicConf, RdKafka\Conf given
%s%econf_setDefaultTopicConf8.php
10
0
--TEST--
constants
--FILE--
<?php
// The librdkafka error constants must be exported with their exact values;
// _UNKNOWN_PARTITION is a "local" (negative) error code.
var_dump(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION);
--EXPECT--
int(-190)
--TEST--
err2name
--FILE--
<?php
// rd_kafka_err2name() maps an error constant to its symbolic librdkafka name.
var_dump(rd_kafka_err2name(RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE));
--EXPECT--
string(19) "OFFSET_OUT_OF_RANGE"
--TEST--
initTransaction() not configured
--SKIPIF--
<?php
require __DIR__ . '/integration-tests-check.php';
if (!class_exists("RdKafka\\KafkaErrorException")) {
echo "skip";
}
--FILE--
<?php
require __DIR__ . '/integration-tests-check.php';
$conf = new RdKafka\Conf(); // NOTE: EXPECTF asserts the throwing line number; do not add lines here
if (RD_KAFKA_VERSION >= 0x090000 && false !== getenv('TEST_KAFKA_BROKER_VERSION')) {
$conf->set('broker.version.fallback', getenv('TEST_KAFKA_BROKER_VERSION'));
}
$conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS'));
$producer = new RdKafka\Producer($conf); // transactional.id deliberately NOT configured
try {
$producer->initTransactions(10000); // must throw _NOT_CONFIGURED (-145)
} catch (RdKafka\KafkaErrorException $e) {
echo $e->getMessage() . PHP_EOL;
echo $e->getCode() . PHP_EOL;
echo $e->getFile() . PHP_EOL;
echo $e->getLine() . PHP_EOL;
}
--EXPECTF--
_NOT_CONFIGURED
-145
%s/tests/init_transaction_not_configured.php
14
<?php
// Shared SKIPIF helper for the integration tests: loads local overrides from
// tests/test_env.php when present, then skips the test unless a Kafka broker
// is configured via TEST_KAFKA_BROKERS (truthy check: an empty value also skips).
if (file_exists(__DIR__ . "/test_env.php")) {
include __DIR__ . '/test_env.php';
}
if (getenv('TEST_KAFKA_BROKERS')) {
return;
}
die('skip due to missing TEST_KAFKA_BROKERS environment & no test_env.php');
--TEST--
KafkaErrorException
--SKIPIF--
<?php
if (!class_exists("RdKafka\\KafkaErrorException")) {
echo "skip";
}
--FILE--
<?php
// Constructor argument order: message, code, error string, isFatal,
// isRetriable, transactionRequiresAbort. Each getter must return exactly
// what was passed in.
$e = new RdKafka\KafkaErrorException('exception message', -100, 'exception description', true, false, true);
echo sprintf('Exception message: %s', $e->getMessage()) . PHP_EOL;
echo sprintf('Exception code: %d', $e->getCode()) . PHP_EOL;
echo sprintf('Exception description: %s', $e->getErrorString()) . PHP_EOL;
echo sprintf('Exception is fatal: %b', $e->isFatal()) . PHP_EOL;
echo sprintf('Exception is retriable: %b', $e->isRetriable()) . PHP_EOL;
echo sprintf('Exception requires transaction abort: %b', $e->transactionRequiresAbort()) . PHP_EOL;
--EXPECT--
Exception message: exception message
Exception code: -100
Exception description: exception description
Exception is fatal: 1
Exception is retriable: 0
Exception requires transaction abort: 1
--TEST--
Message headers
--SKIPIF--
<?php
RD_KAFKA_VERSION >= 0x000b04ff || die("skip librdkafka too old");
require __DIR__ . '/integration-tests-check.php';
--FILE--
<?php
// Produces messages with various header payloads (single header, multiple
// headers, binary gz data, empty array, null, and a list entry without a
// string key) and verifies the headers round-trip through consumption.
require __DIR__ . '/integration-tests-check.php';
$delivered = 0;
$conf = new RdKafka\Conf();
$conf->setErrorCb(function ($producer, $err, $errstr) {
printf("%s: %s\n", rd_kafka_err2str($err), $errstr);
exit;
});
$conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS'));
$consumer = new RdKafka\Consumer($conf);
$conf->setDrMsgCb(function ($producer, $msg) use (&$delivered) {
if ($msg->err) {
throw new Exception("Message delivery failed: " . $msg->errstr());
}
$delivered++;
});
$producer = new RdKafka\Producer($conf);
$topicName = sprintf("test_rdkafka_%s", uniqid());
$topic = $producer->newTopic($topicName);
if (!$producer->getMetadata(false, $topic, 10*1000)) {
echo "Failed to get metadata, is broker down?\n";
}
$headers = [
['key' => 'value'],
[
'key1' => 'value1',
'key2' => 'value2',
'key3' => 'value3',
],
['gzencoded' => gzencode('gzdata')],
[],
null,
['key'],
];
foreach ($headers as $index => $header) {
$topic->producev(0, 0, "message $index", null, $header);
$producer->poll(0);
}
while ($producer->getOutQLen()) {
$producer->poll(50);
}
printf("%d messages delivered\n", $delivered);
$topic = $consumer->newTopic($topicName);
$topic->consumeStart(0, RD_KAFKA_OFFSET_BEGINNING);
$messages = [];
while (true) {
$msg = $topic->consume(0, 1000);
if (!$msg || $msg->err === RD_KAFKA_RESP_ERR__PARTITION_EOF) {
break;
}
if (RD_KAFKA_RESP_ERR_NO_ERROR !== $msg->err) {
throw new Exception($msg->errstr(), $msg->err);
}
// Render headers as "key: value" pairs; gz data is decoded so the
// expected output stays printable.
$headersString = isset($msg->headers) ? $msg->headers : [];
array_walk($headersString, function(&$value, $key) {
if ('gzencoded' === $key) {
$value = gzdecode($value);
}
$value = "{$key}: {$value}";
});
if (empty($headersString)) {
$headersString = "none";
} else {
$headersString = implode(", ", $headersString);
}
printf("Got message: %s | Headers: %s\n", $msg->payload, $headersString);
}
--EXPECT--
6 messages delivered
Got message: message 0 | Headers: key: value
Got message: message 1 | Headers: key1: value1, key2: value2, key3: value3
Got message: message 2 | Headers: gzencoded: gzdata
Got message: message 3 | Headers: none
Got message: message 4 | Headers: none
Got message: message 5 | Headers: none
--TEST--
newTopic with topic conf
--SKIPIF--
<?php
require __DIR__ . '/integration-tests-check.php';
--FILE--
<?php
// Verifies that newTopic() with an explicit RdKafka\TopicConf returns the
// class-specific topic type for Producer, Consumer and KafkaConsumer.
require __DIR__ . '/integration-tests-check.php';
$conf = new RdKafka\Conf();
$conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS'));
$topicName = "test";
$topicConf = new RdKafka\TopicConf();
$producer = new RdKafka\Producer($conf);
// Fix: use the documented newTopic() casing, as in the rest of the suite.
// The lowercase newtopic() spelling only resolved because PHP method names
// are case-insensitive.
var_dump(get_class($producer->newTopic($topicName, $topicConf)));
$conf = new RdKafka\Conf();
$conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS'));
$conf->set('group.id', sprintf("test_rdkafka_group_%s", uniqid()));
$consumer = new RdKafka\Consumer($conf);
var_dump(get_class($consumer->newTopic($topicName, $topicConf)));
$kafkaConsumer = new RdKafka\KafkaConsumer($conf);
var_dump(get_class($kafkaConsumer->newTopic($topicName, $topicConf)));
--EXPECT--
string(21) "RdKafka\ProducerTopic"
string(21) "RdKafka\ConsumerTopic"
string(26) "RdKafka\KafkaConsumerTopic"
--TEST--
Pause and resume partitions
--SKIPIF--
<?php
require __DIR__ . '/integration-tests-check.php';
--FILE--
<?php
// pausePartitions()/resumePartitions() must echo back the TopicPartition
// list with per-partition error codes: 0 on success, and _UNKNOWN_PARTITION
// (-190) when resuming a partition that does not exist.
// NOTE: the EXPECTF below also pins object handle ids (#4/#5/#6); keep the
// object allocation order in this file unchanged.
require __DIR__ . '/integration-tests-check.php';
$conf = new RdKafka\Conf();
$conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS'));
$producer = new RdKafka\Producer($conf);
$topicName = sprintf("test_rdkafka_%s", uniqid());
$topic = $producer->newTopic($topicName);
var_dump($producer->pausePartitions([
new RdKafka\TopicPartition($topicName, 0),
]));
var_dump($producer->resumePartitions([
new RdKafka\TopicPartition($topicName, 0),
]));
$conf = new RdKafka\Conf();
$conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS'));
$conf->set('group.id', sprintf("test_rdkafka_group_%s", uniqid()));
$consumer = new RdKafka\KafkaConsumer($conf);
$consumer->assign([
new RdKafka\TopicPartition($topicName, 0),
]);
var_dump($consumer->pausePartitions([
new RdKafka\TopicPartition($topicName, 0),
]));
var_dump($consumer->resumePartitions([
new RdKafka\TopicPartition($topicName, 0),
]));
var_dump($consumer->resumePartitions([
new RdKafka\TopicPartition("", -1),
]));
--EXPECTF--
array(1) {
[0]=>
object(RdKafka\TopicPartition)#5 (4) {
["topic"]=>
string(26) "test_rdkafka_%s"
["partition"]=>
int(0)
["offset"]=>
int(0)
["err"]=>
int(0)
}
}
array(1) {
[0]=>
object(RdKafka\TopicPartition)#4 (4) {
["topic"]=>
string(26) "test_rdkafka_%s"
["partition"]=>
int(0)
["offset"]=>
int(0)
["err"]=>
int(0)
}
}
array(1) {
[0]=>
object(RdKafka\TopicPartition)#6 (4) {
["topic"]=>
string(26) "test_rdkafka_%s"
["partition"]=>
int(0)
["offset"]=>
int(0)
["err"]=>
int(0)
}
}
array(1) {
[0]=>
object(RdKafka\TopicPartition)#5 (4) {
["topic"]=>
string(26) "test_rdkafka_%s"
["partition"]=>
int(0)
["offset"]=>
int(0)
["err"]=>
int(0)
}
}
array(1) {
[0]=>
object(RdKafka\TopicPartition)#6 (4) {
["topic"]=>
string(0) ""
["partition"]=>
int(-1)
["offset"]=>
int(0)
["err"]=>
int(-190)
}
}
--TEST--
Produce, consume
--SKIPIF--
<?php
require __DIR__ . '/integration-tests-check.php';
--FILE--
<?php
// End-to-end smoke test: produce 10 messages to a fresh topic, then consume
// them back in order with the low-level Consumer API.
require __DIR__ . '/integration-tests-check.php';
$delivered = 0;
$conf = new RdKafka\Conf();
if (RD_KAFKA_VERSION >= 0x090000 && false !== getenv('TEST_KAFKA_BROKER_VERSION')) {
$conf->set('broker.version.fallback', getenv('TEST_KAFKA_BROKER_VERSION'));
}
$conf->setErrorCb(function ($producer, $err, $errstr) {
printf("%s: %s\n", rd_kafka_err2str($err), $errstr);
exit;
});
$conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS'));
$consumer = new RdKafka\Consumer($conf);
$conf->setDrMsgCb(function ($producer, $msg) use (&$delivered) {
if ($msg->err) {
throw new Exception("Message delivery failed: " . $msg->errstr());
}
$delivered++;
});
$producer = new RdKafka\Producer($conf);
$topicName = sprintf("test_rdkafka_%s", uniqid());
$topic = $producer->newTopic($topicName);
if (!$producer->getMetadata(false, $topic, 10*1000)) {
echo "Failed to get metadata, is broker down?\n";
}
for ($i = 0; $i < 10; $i++) {
$topic->produce(0, 0, "message $i");
$producer->poll(0);
}
while ($producer->getOutQLen()) {
$producer->poll(50);
}
printf("%d messages delivered\n", $delivered);
$topic = $consumer->newTopic($topicName);
$topic->consumeStart(0, RD_KAFKA_OFFSET_BEGINNING);
$messages = [];
while (true) {
$msg = $topic->consume(0, 1000);
// librdkafka before 1.0 returns message with RD_KAFKA_RESP_ERR__PARTITION_EOF when reaching topic end.
if (!$msg || $msg->err === RD_KAFKA_RESP_ERR__PARTITION_EOF) {
break;
}
if (RD_KAFKA_RESP_ERR_NO_ERROR !== $msg->err) {
throw new Exception($msg->errstr(), $msg->err);
}
printf("Got message: %s\n", $msg->payload);
}
--EXPECT--
10 messages delivered
Got message: message 0
Got message: message 1
Got message: message 2
Got message: message 3
Got message: message 4
Got message: message 5
Got message: message 6
Got message: message 7
Got message: message 8
Got message: message 9
--TEST--
Produce, consume queue
--SKIPIF--
<?php
require __DIR__ . '/integration-tests-check.php';
--FILE--
<?php
// Produces 10 messages alternating across two topics, then consumes both
// through a single shared queue until an EOF has been seen on each topic.
// Output is sorted because queue interleaving order is not deterministic.
require __DIR__ . '/integration-tests-check.php';
$delivered = 0;
$conf = new RdKafka\Conf();
if (RD_KAFKA_VERSION >= 0x090000 && false !== getenv('TEST_KAFKA_BROKER_VERSION')) {
$conf->set('broker.version.fallback', getenv('TEST_KAFKA_BROKER_VERSION'));
}
$conf->setErrorCb(function ($producer, $err, $errstr) {
printf("%s: %s\n", rd_kafka_err2str($err), $errstr);
exit;
});
$conf->setDrMsgCb(function ($producer, $msg) use (&$delivered) {
if ($msg->err) {
throw new Exception("Message delivery failed: " . $msg->errstr());
}
$delivered++;
});
$conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS'));
$producer = new RdKafka\Producer($conf);
$topicNames = [
sprintf("test_rdkafka_0_%s", uniqid()),
sprintf("test_rdkafka_1_%s", uniqid()),
];
$topics = array_map(function ($topicName) use ($producer) {
return $producer->newTopic($topicName);
}, $topicNames);
if (!$producer->getMetadata(false, reset($topics), 10*1000)) {
echo "Failed to get metadata, is broker down?\n";
}
for ($i = 0; $i < 10; $i++) {
$topics[$i%2]->produce(0, 0, "message $i");
$producer->poll(0);
}
while ($producer->getOutQLen()) {
$producer->poll(50);
}
printf("%d messages delivered\n", $delivered);
$conf = new RdKafka\Conf();
// Required to detect actual reaching of partition EOF for both topics
$conf->set('enable.partition.eof', 'true');
if (RD_KAFKA_VERSION >= 0x090000 && false !== getenv('TEST_KAFKA_BROKER_VERSION')) {
$conf->set('broker.version.fallback', getenv('TEST_KAFKA_BROKER_VERSION'));
}
$conf->setErrorCb(function ($producer, $err, $errstr) {
printf("%s: %s\n", rd_kafka_err2str($err), $errstr);
exit;
});
$conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS'));
$consumer = new RdKafka\Consumer($conf);
$queue = $consumer->newQueue();
array_walk($topicNames, function ($topicName) use ($consumer, $queue) {
$topic = $consumer->newTopic($topicName);
$topic->consumeQueueStart(0, RD_KAFKA_OFFSET_BEGINNING, $queue);
});
$messages = [];
$receivedTopicEofs = [];
while (count($receivedTopicEofs) < 2) {
$msg = $queue->consume(15000);
if (!$msg) {
// Still waiting for messages
continue;
}
if (RD_KAFKA_RESP_ERR__PARTITION_EOF === $msg->err) {
// Reached actual EOF
$receivedTopicEofs[$msg->topic_name] = true;
continue;
}
if (RD_KAFKA_RESP_ERR_NO_ERROR !== $msg->err) {
throw new Exception($msg->errstr(), $msg->err);
}
$messages[] = sprintf("Got message: %s from %s", $msg->payload, $msg->topic_name);
}
sort($messages);
echo implode("\n", $messages), "\n";
--EXPECTF--
10 messages delivered
Got message: message 0 from test_rdkafka_0_%s
Got message: message 1 from test_rdkafka_1_%s
Got message: message 2 from test_rdkafka_0_%s
Got message: message 3 from test_rdkafka_1_%s
Got message: message 4 from test_rdkafka_0_%s
Got message: message 5 from test_rdkafka_1_%s
Got message: message 6 from test_rdkafka_0_%s
Got message: message 7 from test_rdkafka_1_%s
Got message: message 8 from test_rdkafka_0_%s
Got message: message 9 from test_rdkafka_1_%s
--TEST--
Produce, consume
--SKIPIF--
<?php
require __DIR__ . '/integration-tests-check.php';
if (!class_exists("RdKafka\\KafkaErrorException")) {
echo "skip";
}
--FILE--
<?php
// Transactional producer flow: initTransactions -> beginTransaction ->
// produce 10 messages -> commitTransaction, then consume them back.
require __DIR__ . '/integration-tests-check.php';
$delivered = 0;
$conf = new RdKafka\Conf();
$conf->set('transactional.id', 'transactional-producer');
if (RD_KAFKA_VERSION >= 0x090000 && false !== getenv('TEST_KAFKA_BROKER_VERSION')) {
$conf->set('broker.version.fallback', getenv('TEST_KAFKA_BROKER_VERSION'));
}
$conf->setLogCb(function ($kafka, $level, $facility, $message) {});
$conf->setErrorCb(function ($producer, $err, $errstr) {
printf("%s: %s\n", rd_kafka_err2str($err), $errstr);
exit;
});
$conf->setDrMsgCb(function ($producer, $msg) use (&$delivered) {
if ($msg->err) {
throw new Exception("Message delivery failed: " . $msg->errstr());
}
$delivered++;
});
$conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS'));
$producer = new RdKafka\Producer($conf);
$producer->initTransactions(10000);
$producer->beginTransaction();
$topicName = sprintf("test_rdkafka_%s", uniqid());
$topic = $producer->newTopic($topicName);
if (!$producer->getMetadata(false, $topic, 10*1000)) {
echo "Failed to get metadata, is broker down?\n";
}
for ($i = 0; $i < 10; $i++) {
$topic->produce(0, 0, "message $i");
$producer->poll(0);
}
while ($producer->getOutQLen()) {
$producer->poll(50);
}
$producer->commitTransaction(10000);
printf("%d messages delivered\n", $delivered);
$conf = new RdKafka\Conf();
$conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS'));
$conf->setErrorCb(function ($producer, $err, $errstr) {
printf("%s: %s\n", rd_kafka_err2str($err), $errstr);
exit;
});
$consumer = new RdKafka\Consumer($conf);
$topic = $consumer->newTopic($topicName);
$topic->consumeStart(0, RD_KAFKA_OFFSET_BEGINNING);
$messages = [];
while (true) {
$msg = $topic->consume(0, 1000);
// librdkafka before 1.0 returns message with RD_KAFKA_RESP_ERR__PARTITION_EOF when reaching topic end.
if (!$msg || $msg->err === RD_KAFKA_RESP_ERR__PARTITION_EOF) {
break;
}
if (RD_KAFKA_RESP_ERR_NO_ERROR !== $msg->err) {
throw new Exception($msg->errstr(), $msg->err);
}
printf("Got message: %s\n", $msg->payload);
}
--EXPECT--
10 messages delivered
Got message: message 0
Got message: message 1
Got message: message 2
Got message: message 3
Got message: message 4
Got message: message 5
Got message: message 6
Got message: message 7
Got message: message 8
Got message: message 9
--TEST--
Produce with opaque
--SKIPIF--
<?php
require __DIR__ . '/integration-tests-check.php';
RD_KAFKA_BUILD_VERSION < 0x1000000 && die("skip librdkafka < 1.0.0");
--FILE--
<?php
// The per-message opaque passed to produce() must be handed back unchanged
// to the delivery-report callback, in delivery order.
require __DIR__ . '/integration-tests-check.php';
$conf = new RdKafka\Conf();
if (RD_KAFKA_VERSION >= 0x090000 && false !== getenv('TEST_KAFKA_BROKER_VERSION')) {
$conf->set('broker.version.fallback', getenv('TEST_KAFKA_BROKER_VERSION'));
}
$conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS'));
$opaques = [];
$conf->setDrMsgCb(function ($producer, $msg) use (&$opaques) {
$opaques[] = $msg->opaque;
});
$producer = new RdKafka\Producer($conf);
$topicName = sprintf("test_rdkafka_%s", uniqid());
$topic = $producer->newTopic($topicName);
if (!$producer->getMetadata(false, $topic, 10*1000)) {
echo "Failed to get metadata, is broker down?\n";
}
for ($i = 0; $i < 10; $i++) {
$topic->produce(0, 0, "message $i", null, "opaque $i");
}
$producer->flush(10*1000);
var_dump($opaques);
--EXPECT--
array(10) {
[0]=>
string(8) "opaque 0"
[1]=>
string(8) "opaque 1"
[2]=>
string(8) "opaque 2"
[3]=>
string(8) "opaque 3"
[4]=>
string(8) "opaque 4"
[5]=>
string(8) "opaque 5"
[6]=>
string(8) "opaque 6"
[7]=>
string(8) "opaque 7"
[8]=>
string(8) "opaque 8"
[9]=>
string(8) "opaque 9"
}
--TEST--
Produce with opaque, no conf
--SKIPIF--
<?php
require __DIR__ . '/integration-tests-check.php';
RD_KAFKA_BUILD_VERSION < 0x1000000 && die("skip librdkafka < 1.0.0");
RD_KAFKA_BUILD_VERSION >= 0x1050000 && die("skip librdkafka >= 1.5.0");
--FILE--
<?php
// Memory-leak check: producing with opaques on a conf-less producer (no
// delivery callback registered) must not leak the opaque zvals on shutdown.
require __DIR__ . '/integration-tests-check.php';
$producer = new RdKafka\Producer();
var_dump($producer->addBrokers(getenv('TEST_KAFKA_BROKERS')));
$topicName = sprintf("test_rdkafka_%s", uniqid());
$topic = $producer->newTopic($topicName);
if (!$producer->getMetadata(false, $topic, 10*1000)) {
echo "Failed to get metadata, is broker down?\n";
}
for ($i = 0; $i < 10; $i++) {
$topic->produce(0, 0, "message $i", null, "opaque $i");
}
echo "Expect no leaks\n";
--EXPECT--
int(1)
Expect no leaks
--TEST--
Produce with opaque, no flush
--SKIPIF--
<?php
require __DIR__ . '/integration-tests-check.php';
RD_KAFKA_BUILD_VERSION < 0x1000000 && die("skip librdkafka < 1.0.0");
--FILE--
<?php
// Memory-leak check: opaques attached to messages that are never flushed
// must still be released when the producer is destroyed.
require __DIR__ . '/integration-tests-check.php';
$conf = new RdKafka\Conf();
if (RD_KAFKA_VERSION >= 0x090000 && false !== getenv('TEST_KAFKA_BROKER_VERSION')) {
$conf->set('broker.version.fallback', getenv('TEST_KAFKA_BROKER_VERSION'));
}
$conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS'));
$producer = new RdKafka\Producer($conf);
$topicName = sprintf("test_rdkafka_%s", uniqid());
$topic = $producer->newTopic($topicName);
if (!$producer->getMetadata(false, $topic, 10*1000)) {
echo "Failed to get metadata, is broker down?\n";
}
for ($i = 0; $i < 10; $i++) {
$topic->produce(0, 0, "message $i", null, "opaque $i");
}
echo "Expect no leaks\n";
--EXPECT--
Expect no leaks
--TEST--
Produce with opaque, no flush, with delivery callback
--SKIPIF--
<?php
require __DIR__ . '/integration-tests-check.php';
RD_KAFKA_BUILD_VERSION < 0x1000000 && die("skip librdkafka < 1.0.0");
--FILE--
<?php
// Memory-leak check: same as the no-flush test, but with a delivery-report
// callback registered; the callback is never invoked (no poll/flush), so
// nothing may be printed and nothing may leak.
require __DIR__ . '/integration-tests-check.php';
$conf = new RdKafka\Conf();
if (RD_KAFKA_VERSION >= 0x090000 && false !== getenv('TEST_KAFKA_BROKER_VERSION')) {
$conf->set('broker.version.fallback', getenv('TEST_KAFKA_BROKER_VERSION'));
}
$conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS'));
$conf->setDrMsgCb(function ($rdkafka, $msg) {
var_dump($rdkafka, $msg);
});
$producer = new RdKafka\Producer($conf);
$topicName = sprintf("test_rdkafka_%s", uniqid());
$topic = $producer->newTopic($topicName);
if (!$producer->getMetadata(false, $topic, 10*1000)) {
echo "Failed to get metadata, is broker down?\n";
}
for ($i = 0; $i < 10; $i++) {
$topic->produce(0, 0, "message $i", null, "opaque $i");
}
echo "Expect no leaks\n";
--EXPECT--
Expect no leaks
--TEST--
Produce with opaque, purge queued/inflight messages
--SKIPIF--
<?php
require __DIR__ . '/integration-tests-check.php';
RD_KAFKA_BUILD_VERSION < 0x1000000 && die("skip librdkafka < 1.0.0");
--FILE--
<?php
// Memory-leak check: purging queued/in-flight messages must release their
// attached opaques without a delivery callback being involved.
require __DIR__ . '/integration-tests-check.php';
$conf = new RdKafka\Conf();
if (RD_KAFKA_VERSION >= 0x090000 && false !== getenv('TEST_KAFKA_BROKER_VERSION')) {
$conf->set('broker.version.fallback', getenv('TEST_KAFKA_BROKER_VERSION'));
}
$conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS'));
$producer = new RdKafka\Producer($conf);
$topicName = sprintf("test_rdkafka_%s", uniqid());
$topic = $producer->newTopic($topicName);
if (!$producer->getMetadata(false, $topic, 10*1000)) {
echo "Failed to get metadata, is broker down?\n";
}
for ($i = 0; $i < 10; $i++) {
$topic->produce(0, 0, "message $i", null, "opaque $i");
}
$producer->purge(RD_KAFKA_PURGE_F_QUEUE | RD_KAFKA_PURGE_F_INFLIGHT);
echo "Expect no leaks\n";
--EXPECT--
Expect no leaks
--TEST--
Produce with opaque, purge queued/inflight messages, with delivery callback
--SKIPIF--
<?php
require __DIR__ . '/integration-tests-check.php';
RD_KAFKA_BUILD_VERSION < 0x1000000 && die("skip librdkafka < 1.0.0");
--FILE--
<?php
// Memory-leak check: purge with a delivery callback registered; the purge
// reports are never polled, so the callback must not run and nothing leaks.
require __DIR__ . '/integration-tests-check.php';
$conf = new RdKafka\Conf();
if (RD_KAFKA_VERSION >= 0x090000 && false !== getenv('TEST_KAFKA_BROKER_VERSION')) {
$conf->set('broker.version.fallback', getenv('TEST_KAFKA_BROKER_VERSION'));
}
$conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS'));
$conf->setDrMsgCb(function ($rdkafka, $msg) {
var_dump($rdkafka, $msg);
});
$producer = new RdKafka\Producer($conf);
$topicName = sprintf("test_rdkafka_%s", uniqid());
$topic = $producer->newTopic($topicName);
if (!$producer->getMetadata(false, $topic, 10*1000)) {
echo "Failed to get metadata, is broker down?\n";
}
for ($i = 0; $i < 10; $i++) {
$topic->produce(0, 0, "message $i", null, "opaque $i");
}
$producer->purge(RD_KAFKA_PURGE_F_QUEUE | RD_KAFKA_PURGE_F_INFLIGHT);
echo "Expect no leaks\n";
--EXPECT--
Expect no leaks
--TEST--
Producev with opaque
--SKIPIF--
<?php
require __DIR__ . '/integration-tests-check.php';
RD_KAFKA_BUILD_VERSION < 0x1000000 && die("skip librdkafka < 1.0.0");
--FILE--
<?php
// Same as the produce()-with-opaque test, but through producev()
// (signature: partition, msgflags, payload, key, headers, timestamp, opaque).
require __DIR__ . '/integration-tests-check.php';
$conf = new RdKafka\Conf();
if (RD_KAFKA_VERSION >= 0x090000 && false !== getenv('TEST_KAFKA_BROKER_VERSION')) {
$conf->set('broker.version.fallback', getenv('TEST_KAFKA_BROKER_VERSION'));
}
$conf->set('metadata.broker.list', getenv('TEST_KAFKA_BROKERS'));
$opaques = [];
$conf->setDrMsgCb(function ($producer, $msg) use (&$opaques) {
$opaques[] = $msg->opaque;
});
$producer = new RdKafka\Producer($conf);
$topicName = sprintf("test_rdkafka_%s", uniqid());
$topic = $producer->newTopic($topicName);
if (!$producer->getMetadata(false, $topic, 10*1000)) {
echo "Failed to get metadata, is broker down?\n";
}
for ($i = 0; $i < 10; $i++) {
$topic->producev(0, 0, "message $i", null, [], 0, "opaque $i");
}
$producer->flush(10*1000);
var_dump($opaques);
--EXPECT--
array(10) {
[0]=>
string(8) "opaque 0"
[1]=>
string(8) "opaque 1"
[2]=>
string(8) "opaque 2"
[3]=>
string(8) "opaque 3"
[4]=>
string(8) "opaque 4"
[5]=>
string(8) "opaque 5"
[6]=>
string(8) "opaque 6"
[7]=>
string(8) "opaque 7"
[8]=>
string(8) "opaque 8"
[9]=>
string(8) "opaque 9"
}
--TEST--
rd_kafka_get_err_descs()
--SKIPIF--
<?php
if (!function_exists('rd_kafka_get_err_descs')) {
echo "skip";
}
--FILE--
<?php
// rd_kafka_get_err_descs() must return an array of code/name/desc triples;
// spot-check the _MSG_TIMED_OUT entry.
$descs = rd_kafka_get_err_descs();
var_dump(gettype($descs));
foreach ($descs as $desc) {
if ($desc['name'] == '_MSG_TIMED_OUT') {
var_dump($desc);
}
}
--EXPECT--
string(5) "array"
array(3) {
["code"]=>
int(-192)
["name"]=>
string(14) "_MSG_TIMED_OUT"
["desc"]=>
string(24) "Local: Message timed out"
}
--TEST--
test0
--FILE--
<?php
// Smoke test: the extension's Consumer class is registered.
// NOTE(review): "Rdkafka" differs from the canonical "RdKafka" casing; this
// passes because PHP namespace lookups are case-insensitive.
var_dump(class_exists("Rdkafka\Consumer"));
--EXPECT--
bool(true)
<?php
// Default broker configuration for the integration tests: each variable is
// only set when the environment does not already provide it.
$defaults = [
    'TEST_KAFKA_BROKERS' => 'TEST_KAFKA_BROKERS=localhost:9092',
    'TEST_KAFKA_BROKER_VERSION' => 'TEST_KAFKA_BROKER_VERSION=2.3',
];
foreach ($defaults as $name => $assignment) {
    if (false === getenv($name)) {
        putenv($assignment);
    }
}
--TEST--
RdKafka\TopicConf
--FILE--
<?php
// Assigning a builtin partitioner constant must succeed without output
// beyond the echoed marker.
$conf = new RdKafka\TopicConf();
echo "Setting partitioner\n";
$conf->setPartitioner(RD_KAFKA_MSG_PARTITIONER_RANDOM);
--EXPECT--
Setting partitioner
--TEST--
RdKafka\TopicPartition
--SKIPIF--
<?php
if (!class_exists('RdKafka\TopicPartition')) {
echo 'skip';
}
?>
--FILE--
<?php
// TopicPartition: constructor stores topic/partition/offset, the getters
// return them, and the fluent setters update the same instance in place.
$topar = new RdKafka\TopicPartition("test", RD_KAFKA_PARTITION_UA, 42);
var_dump($topar);
var_dump(array(
"topic" => $topar->getTopic(),
"partition" => $topar->getPartition(),
"offset" => $topar->getOffset(),
));
$topar
->setTopic("foo")
->setPartition(123)
->setOffset(43);
var_dump($topar);
--EXPECT--
object(RdKafka\TopicPartition)#1 (4) {
["topic"]=>
string(4) "test"
["partition"]=>
int(-1)
["offset"]=>
int(42)
["err"]=>
int(0)
}
array(3) {
["topic"]=>
string(4) "test"
["partition"]=>
int(-1)
["offset"]=>
int(42)
}
object(RdKafka\TopicPartition)#1 (4) {
["topic"]=>
string(3) "foo"
["partition"]=>
int(123)
["offset"]=>
int(43)
["err"]=>
int(0)
}
#!/usr/bin/env php
<?php
// Prints the release notes of the current release from ../package.xml
// (PEAR package-2.1 schema) to stdout.
const NS = 'http://pear.php.net/dtd/package-2.1';
$doc = new DOMDocument();
if (!$doc->load(__DIR__.'/../package.xml')) {
throw new \Exception('Failed loading package.xml');
}
$notesElem = findOneElement($doc, '/ns:package/ns:notes');
echo $notesElem->textContent;
/**
 * Evaluates an XPath expression (namespace prefix `ns` bound to NS) and
 * returns the single matching element; throws when the number of matches
 * is anything other than exactly one.
 */
function findOneElement(DOMDocument $doc, string $path): DOMElement
{
    $xpath = new DOMXPath($doc, false);
    $xpath->registerNamespace('ns', NS);

    $matches = $xpath->evaluate($path);
    if ($matches->length === 1) {
        return $matches->item(0);
    }

    throw new \Exception(sprintf(
        'XPath expression %s expected to find 1 element, but found %d',
        $path,
        $matches->length,
    ));
}
#!/usr/bin/env php
<?php
// Rolls package.xml over to a new release: archives the current release
// entry into <changelog>, then fills in the new version, date/time and
// GitHub-generated release notes, and writes the file back in place.
const NS = 'http://pear.php.net/dtd/package-2.1';
if (!isset($argv[1])) {
fprintf(STDERR, "Missing version parameter\n");
printUsage();
exit(1);
}
$newVersion = $argv[1];
$doc = new DOMDocument();
if (!$doc->load(__DIR__.'/../package.xml')) {
throw new \Exception('Failed loading package.xml');
}
// Fetch the notes first: if the GitHub CLI call fails we abort before
// mutating the document.
$releaseNotes = generateReleaseNotes($newVersion);
moveCurrentReleaseToChangelog($doc);
updateCurrentRelease($doc, $newVersion, $releaseNotes);
file_put_contents(__DIR__.'/../package.xml', $doc->saveXML());
// Prints CLI usage to STDERR.
function printUsage(): void
{
fprintf(STDERR, "Usage: %s <version>\n", $_SERVER['argv'][0]);
}
/**
 * Generates release notes for $newVersion via the GitHub CLI and reformats
 * them for package.xml: keeps only the "###" sections, rewrites PR links
 * to "(#123, @author)" form and turns "*" bullets into "-".
 *
 * @throws \Exception when the `gh` command fails or returns unexpected output
 */
function generateReleaseNotes(string $newVersion): string
{
    // Bug fix: escapeshellarg() is the correct function for quoting a single
    // shell *argument*; escapeshellcmd() only neutralizes metacharacters and
    // does not quote, so e.g. spaces would split the argument.
    $cmd = sprintf(
        'gh api repos/arnaud-lb/php-rdkafka/releases/generate-notes -f tag_name=%s',
        escapeshellarg($newVersion),
    );

    // Bug fix: exec()'s return value is only the LAST line of output, so a
    // multi-line JSON response could not be decoded. Collect all lines via
    // the $output parameter and check the exit code as well.
    $output = [];
    $result = exec($cmd, $output, $exitCode);
    if ($result === false || $exitCode !== 0) {
        throw new \Exception(sprintf('Command `%s` failed', $cmd));
    }

    $response = json_decode(implode("\n", $output), true);
    if (!is_array($response) || !isset($response['body'])) {
        throw new \Exception(sprintf('Command `%s` returned unexpected output', $cmd));
    }

    $lines = explode("\n", $response['body']);
    $newLines = [];
    $add = false;
    foreach ($lines as $line) {
        // "###" headings open a section we keep (demoted one level);
        // any other "##" heading closes it.
        if (str_starts_with($line, '###')) {
            $add = true;
            $line = substr($line, 1);
        } elseif (str_starts_with($line, '##')) {
            $add = false;
        }
        if ($add) {
            // "… by @user in https://github.com/…/pull/123" -> " (#123, @user)"
            $line = preg_replace(
                '# by (@[^ ]+) in https://github.com/[^ ]+/pull/([0-9]+)#',
                ' (#\2, \1)',
                $line,
            );
            // Markdown "*" bullets -> "-" bullets.
            $line = preg_replace(
                '#^\*#',
                '-',
                $line,
            );
            $newLines[] = $line;
        }
    }
    return implode("\n", $newLines);
}
// Overwrites the "current release" fields of package.xml in place:
// <date>, <time>, <version>/<release> and <notes>.
function updateCurrentRelease(DOMDocument $doc, string $newVersion, string $releaseNotes): void
{
$date = date('Y-m-d');
$time = date('H:i:s');
$dateElem = findOneElement($doc, '/ns:package/ns:date');
replaceContent($dateElem, $doc->createTextNode($date));
$timeElem = findOneElement($doc, '/ns:package/ns:time');
replaceContent($timeElem, $doc->createTextNode($time));
$versionElem = findOneElement($doc, '/ns:package/ns:version/ns:release');
replaceContent($versionElem, $doc->createTextNode($newVersion));
$notesElem = findOneElement($doc, '/ns:package/ns:notes');
// Re-indent the notes by one level (one space per line) so they line up
// with the surrounding XML, and keep a trailing space before </notes>.
$releaseNotes = rtrim("\n " . str_replace("\n", "\n ", $releaseNotes))."\n ";
replaceContent($notesElem, $doc->createTextNode($releaseNotes));
}
// Clones the current release's date/time/version/stability/license/notes
// elements into a fresh <release> node and prepends it to <changelog>,
// preserving the file's whitespace conventions.
function moveCurrentReleaseToChangelog(DOMDocument $doc): void
{
$oldRelease = $doc->createElementNS(NS, 'release');
$oldRelease->appendChild($doc->createTextNode("\n"));
$nodesToCopy = ['date', 'time', 'version', 'stability', 'license', 'notes'];
foreach ($nodesToCopy as $nodeName) {
$path = sprintf('/ns:package/ns:%s', $nodeName);
$elem = findOneElement($doc, $path);
// Deep clone: the original element stays in place and is rewritten
// later by updateCurrentRelease().
$elem = $elem->cloneNode(true);
$oldRelease->appendChild($doc->createTextNode("  "));
$oldRelease->appendChild($elem);
$oldRelease->appendChild($doc->createTextNode("\n"));
}
indent($oldRelease, '  ');
$changelogElem = findOneElement($doc, '/ns:package/ns:changelog');
// Newest entry goes first inside <changelog>.
$changelogElem->insertBefore($oldRelease, $changelogElem->firstChild);
$changelogElem->insertBefore($doc->createTextNode("\n  "), $oldRelease);
}
/**
 * Returns the single element matched by $path (prefix `ns` bound to NS);
 * throws unless exactly one node matches.
 */
function findOneElement(DOMDocument $doc, string $path): DOMElement
{
    $xpath = new DOMXPath($doc, false);
    $xpath->registerNamespace('ns', NS);

    $nodes = $xpath->evaluate($path);
    if ($nodes->length !== 1) {
        $message = sprintf(
            'XPath expression %s expected to find 1 element, but found %d',
            $path,
            $nodes->length,
        );
        throw new \Exception($message);
    }

    return $nodes->item(0);
}
/**
 * Deepens the indentation of $elem's subtree by inserting $indentString
 * after every newline found in its descendant text nodes.
 */
function indent(DOMElement $elem, string $indentString): void
{
    foreach ($elem->childNodes as $child) {
        if ($child instanceof DOMText) {
            $child->textContent = str_replace("\n", "\n".$indentString, $child->textContent);
            continue;
        }
        if ($child instanceof DOMElement) {
            indent($child, $indentString);
        }
    }
}
/**
 * Drops every existing child of $elem and appends $newContent as its
 * sole child node.
 */
function replaceContent(DOMElement $elem, DOMNode $newContent): void
{
    for ($child = $elem->firstChild; $child !== null; $child = $elem->firstChild) {
        $elem->removeChild($child);
    }
    $elem->appendChild($newContent);
}
#!/bin/bash

# Prepares a php-rdkafka release: updates package.xml and
# PHP_RDKAFKA_VERSION, then (after confirmation) commits on a release
# branch and opens a pull request against the base branch.

set -e

version="$1"
baseBranch=6.x

if [[ -z "$version" ]]; then
    # Bug fix: the error message was missing its trailing newline.
    printf "Missing version parameter\n" >&2
    printf "Usage: %s <version>\n" "$0" >&2
    exit 1
fi

echo "Updating package.xml"
./tools/new-package-release.php "$version"
pecl package-validate

echo "Updating PHP_RDKAFKA_VERSION"
sed -i 's/#define PHP_RDKAFKA_VERSION.*/#define PHP_RDKAFKA_VERSION "'"$version"'"/' php_rdkafka.h

echo "Printing diff"
git diff

# Bug fix: the prompt advertised "[n/N]" although the script only
# proceeds on an explicit y/Y answer — show "[y/N]" instead.
read -p "Commit and send pull request for version $version ? [y/N]" -n 1 -r
echo
if ! [[ $REPLY =~ ^[Yy]$ ]]; then
    echo "Ok, aborting"
    exit 1
fi

# checkout -b keeps the uncommitted package.xml/php_rdkafka.h edits in
# the working tree, so they land on the release branch.
git checkout -b "release/$version" "$baseBranch"
git commit package.xml php_rdkafka.h -m "$version"
gh pr create --fill --label release --base "$baseBranch"
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "php.h"
#include "php_rdkafka.h"
#include "php_rdkafka_priv.h"
#include "librdkafka/rdkafka.h"
#include "ext/spl/spl_iterators.h"
#include "Zend/zend_interfaces.h"
#include "Zend/zend_exceptions.h"
#include "ext/spl/spl_exceptions.h"
#include "topic.h"
#include "queue.h"
#include "message.h"
#if PHP_VERSION_ID < 80000
#include "topic_legacy_arginfo.h"
#else
#include "topic_arginfo.h"
#endif
static zend_object_handlers object_handlers;
zend_class_entry * ce_kafka_consumer_topic;
zend_class_entry * ce_kafka_kafka_consumer_topic;
zend_class_entry * ce_kafka_producer_topic;
zend_class_entry * ce_kafka_topic;
typedef struct _php_callback {
zend_fcall_info fci;
zend_fcall_info_cache fcc;
} php_callback;
/* free_obj handler for RdKafka\Topic objects. While the topic is still
 * attached to its parent RdKafka handle (zrk set and rkt live), remove
 * this object's entry from the parent's `topics` hash — the hash is keyed
 * by the object's address, as the cast below shows. NOTE(review): the
 * rd_kafka_topic_t handle itself is presumably destroyed through that
 * hash's destructor rather than here — confirm in rdkafka.c. */
static void kafka_topic_free(zend_object *object) /* {{{ */
{
kafka_topic_object *intern = php_kafka_from_obj(kafka_topic_object, object);
if (Z_TYPE(intern->zrk) != IS_UNDEF && intern->rkt) {
kafka_object *kafka_intern = get_kafka_object(&intern->zrk);
if (kafka_intern) {
zend_hash_index_del(&kafka_intern->topics, (zend_ulong)intern);
}
}
zend_object_std_dtor(&intern->std);
}
/* }}} */
/* create_object handler for RdKafka\Topic and its subclasses: allocates
 * the kafka_topic_object wrapper, runs standard object initialization and
 * installs the custom handlers (clone disabled, custom free). */
static zend_object *kafka_topic_new(zend_class_entry *class_type) /* {{{ */
{
    kafka_topic_object *topic;

    topic = zend_object_alloc(sizeof(*topic), class_type);
    zend_object_std_init(&topic->std, class_type);
    object_properties_init(&topic->std, class_type);

    topic->std.handlers = &object_handlers;

    return &topic->std;
}
/* }}} */
/* Trampoline handed to rd_kafka_consume_callback(): wraps each received
 * message in an RdKafka\Message zval and forwards it to the user-land
 * callable carried in the opaque pointer. */
static void consume_callback(rd_kafka_message_t *msg, void *opaque)
{
    php_callback *cb = (php_callback*) opaque;
    zval args[1];

    /* A single NULL check suffices: the previous code tested both `opaque`
     * and `cb`, which are the same pointer. */
    if (cb == NULL) {
        return;
    }

    ZVAL_NULL(&args[0]);
    kafka_message_new(&args[0], msg, NULL);

    rdkafka_call_function(&cb->fci, &cb->fcc, NULL, 1, args);

    zval_ptr_dtor(&args[0]);
}
/* Resolves the kafka_topic_object behind a Topic zval. Throws (and
 * returns NULL) when the underlying rd_kafka_topic_t is missing, i.e. the
 * native constructor was never run. Callers must check for NULL. */
kafka_topic_object * get_kafka_topic_object(zval *zrkt)
{
kafka_topic_object *orkt = Z_RDKAFKA_P(kafka_topic_object, zrkt);
if (!orkt->rkt) {
zend_throw_exception_ex(NULL, 0, "RdKafka\\Topic::__construct() has not been called");
return NULL;
}
return orkt;
}
/* {{{ proto RdKafka\ConsumerTopic::consumeCallback([int $partition, int timeout_ms, mixed $callback]) */
/* Polls $partition for up to $timeout_ms, invoking $callback once per
 * received message (see consume_callback above); returns the count
 * reported by rd_kafka_consume_callback(). */
PHP_METHOD(RdKafka_ConsumerTopic, consumeCallback)
{
php_callback cb;
zend_long partition;
zend_long timeout_ms;
long result;
kafka_topic_object *intern;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "llf", &partition, &timeout_ms, &cb.fci, &cb.fcc) == FAILURE) {
return;
}
/* Partitions are limited to the int32 range. */
if (partition < 0 || partition > 0x7FFFFFFF) {
zend_throw_exception_ex(spl_ce_InvalidArgumentException, 0, "Out of range value '%ld' for $partition", partition);
return;
}
intern = get_kafka_topic_object(getThis());
if (!intern) {
return;
}
/* Hold a reference to the user callback for the duration of the
 * librdkafka callback loop, released right after. */
Z_ADDREF_P(&cb.fci.function_name);
result = rd_kafka_consume_callback(intern->rkt, partition, timeout_ms, consume_callback, &cb);
zval_ptr_dtor(&cb.fci.function_name);
RETURN_LONG(result);
}
/* }}} */
/* {{{ proto void RdKafka\ConsumerTopic::consumeQueueStart(int $partition, int $offset, RdKafka\Queue $queue)
 * Same as consumeStart(), but re-routes incoming messages to the provided queue */
PHP_METHOD(RdKafka_ConsumerTopic, consumeQueueStart)
{
zval *zrkqu;
kafka_topic_object *intern;
kafka_queue_object *queue_intern;
zend_long partition;
zend_long offset;
int ret;
rd_kafka_resp_err_t err;
kafka_object *kafka_intern;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "llO", &partition, &offset, &zrkqu, ce_kafka_queue) == FAILURE) {
return;
}
/* Accept the special "unassigned" partition; otherwise require int32 range. */
if (partition != RD_KAFKA_PARTITION_UA && (partition < 0 || partition > 0x7FFFFFFF)) {
zend_throw_exception_ex(spl_ce_InvalidArgumentException, 0, "Out of range value '%ld' for $partition", partition);
return;
}
intern = get_kafka_topic_object(getThis());
if (!intern) {
return;
}
queue_intern = get_kafka_queue_object(zrkqu);
if (!queue_intern) {
return;
}
kafka_intern = get_kafka_object(&intern->zrk);
if (!kafka_intern) {
return;
}
/* Guard against starting the same topic+partition twice on the same
 * Consumer instance. */
if (is_consuming_toppar(kafka_intern, intern->rkt, partition)) {
zend_throw_exception_ex(
ce_kafka_exception,
0,
"%s:" ZEND_LONG_FMT " is already being consumed by the same Consumer instance",
rd_kafka_topic_name(intern->rkt),
partition
);
return;
}
/* Start consumption, delivering messages into $queue. */
ret = rd_kafka_consume_start_queue(intern->rkt, partition, offset, queue_intern->rkqu);
if (ret == -1) {
err = rd_kafka_last_error();
zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
return;
}
/* Record the active toppar for the duplicate-start guard above and for
 * consumeStop(). */
add_consuming_toppar(kafka_intern, intern->rkt, partition);
}
/* }}} */
/* {{{ proto void RdKafka\ConsumerTopic::consumeStart(int partition, int offset)
 Start consuming messages */
PHP_METHOD(RdKafka_ConsumerTopic, consumeStart)
{
kafka_topic_object *intern;
zend_long partition;
zend_long offset;
int ret;
rd_kafka_resp_err_t err;
kafka_object *kafka_intern;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "ll", &partition, &offset) == FAILURE) {
return;
}
/* Accept the special "unassigned" partition; otherwise require int32 range. */
if (partition != RD_KAFKA_PARTITION_UA && (partition < 0 || partition > 0x7FFFFFFF)) {
zend_throw_exception_ex(spl_ce_InvalidArgumentException, 0, "Out of range value '%ld' for $partition", partition);
return;
}
intern = get_kafka_topic_object(getThis());
if (!intern) {
return;
}
kafka_intern = get_kafka_object(&intern->zrk);
if (!kafka_intern) {
return;
}
/* Guard against starting the same topic+partition twice on the same
 * Consumer instance. */
if (is_consuming_toppar(kafka_intern, intern->rkt, partition)) {
zend_throw_exception_ex(
ce_kafka_exception,
0,
"%s:" ZEND_LONG_FMT " is already being consumed by the same Consumer instance",
rd_kafka_topic_name(intern->rkt),
partition
);
return;
}
ret = rd_kafka_consume_start(intern->rkt, partition, offset);
if (ret == -1) {
err = rd_kafka_last_error();
zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
return;
}
/* Record the active toppar for the duplicate-start guard and consumeStop(). */
add_consuming_toppar(kafka_intern, intern->rkt, partition);
}
/* }}} */
/* {{{ proto void RdKafka\ConsumerTopic::consumeStop(int partition)
 Stop consuming messages */
PHP_METHOD(RdKafka_ConsumerTopic, consumeStop)
{
kafka_topic_object *intern;
zend_long partition;
int ret;
rd_kafka_resp_err_t err;
kafka_object *kafka_intern;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "l", &partition) == FAILURE) {
return;
}
/* Accept the special "unassigned" partition; otherwise require int32 range. */
if (partition != RD_KAFKA_PARTITION_UA && (partition < 0 || partition > 0x7FFFFFFF)) {
zend_throw_exception_ex(spl_ce_InvalidArgumentException, 0, "Out of range value '%ld' for $partition", partition);
return;
}
intern = get_kafka_topic_object(getThis());
if (!intern) {
return;
}
kafka_intern = get_kafka_object(&intern->zrk);
if (!kafka_intern) {
return;
}
ret = rd_kafka_consume_stop(intern->rkt, partition);
if (ret == -1) {
err = rd_kafka_last_error();
zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
return;
}
/* Drop the toppar from the "currently consuming" set so it can be
 * started again later. */
del_consuming_toppar(kafka_intern, intern->rkt, partition);
}
/* }}} */
/* {{{ proto RdKafka\Message RdKafka\ConsumerTopic::consume(int $partition, int timeout_ms)
 Consume a single message from partition */
PHP_METHOD(RdKafka_ConsumerTopic, consume)
{
kafka_topic_object *intern;
zend_long partition;
zend_long timeout_ms;
rd_kafka_message_t *message;
rd_kafka_resp_err_t err;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "ll", &partition, &timeout_ms) == FAILURE) {
return;
}
/* Accept the special "unassigned" partition; otherwise require int32 range. */
if (partition != RD_KAFKA_PARTITION_UA && (partition < 0 || partition > 0x7FFFFFFF)) {
zend_throw_exception_ex(spl_ce_InvalidArgumentException, 0, "Out of range value '%ld' for $partition", partition);
return;
}
intern = get_kafka_topic_object(getThis());
if (!intern) {
return;
}
message = rd_kafka_consume(intern->rkt, partition, timeout_ms);
if (!message) {
err = rd_kafka_last_error();
/* Timeout is not an error: return NULL to user-land. */
if (err == RD_KAFKA_RESP_ERR__TIMED_OUT) {
return;
}
zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
return;
}
/* Wrap the message in an RdKafka\Message and release librdkafka's copy. */
kafka_message_new(return_value, message, NULL);
rd_kafka_message_destroy(message);
}
/* }}} */
/* {{{ proto RdKafka\Message RdKafka\ConsumerTopic::consumeBatch(int $partition, int $timeout_ms, int $batch_size)
Consume a batch of messages from a partition */
PHP_METHOD(RdKafka_ConsumerTopic, consumeBatch)
{
kafka_topic_object *intern;
zend_long partition, timeout_ms, batch_size;
long result, i;
rd_kafka_message_t **rkmessages;
rd_kafka_resp_err_t err;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "lll", &partition, &timeout_ms, &batch_size) == FAILURE) {
return;
}
if (0 >= batch_size) {
zend_throw_exception_ex(spl_ce_InvalidArgumentException, 0, "Out of range value '%ld' for batch_size", batch_size);
return;
}
if (partition != RD_KAFKA_PARTITION_UA && (partition < 0 || partition > 0x7FFFFFFF)) {
zend_throw_exception_ex(spl_ce_InvalidArgumentException, 0, "Out of range value '%ld' for $partition", partition);
return;
}
intern = get_kafka_topic_object(getThis());
if (!intern) {
return;
}
rkmessages = malloc(sizeof(*rkmessages) * batch_size);
result = rd_kafka_consume_batch(intern->rkt, partition, timeout_ms, rkmessages, batch_size);
if (result == -1) {
free(rkmessages);
err = rd_kafka_last_error();
zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
return;
}
if (result >= 0) {
kafka_message_list_to_array(return_value, rkmessages, result);
for (i = 0; i < result; ++i) {
rd_kafka_message_destroy(rkmessages[i]);
}
}
free(rkmessages);
}
/* }}} */
/* {{{ proto void RdKafka\ConsumerTopic::offsetStore(int partition, int offset) */
/* Stores $offset for $partition for later commit. Unlike the consume
 * methods, the "unassigned" partition sentinel is not accepted here. */
PHP_METHOD(RdKafka_ConsumerTopic, offsetStore)
{
kafka_topic_object *intern;
zend_long partition;
zend_long offset;
rd_kafka_resp_err_t err;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "ll", &partition, &offset) == FAILURE) {
return;
}
/* Partitions are limited to the int32 range. */
if (partition < 0 || partition > 0x7FFFFFFF) {
zend_throw_exception_ex(spl_ce_InvalidArgumentException, 0, "Out of range value '%ld' for $partition", partition);
return;
}
intern = get_kafka_topic_object(getThis());
if (!intern) {
return;
}
err = rd_kafka_offset_store(intern->rkt, partition, offset);
if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
return;
}
}
/* }}} */
/* {{{ proto void RdKafka\ProducerTopic::produce(int $partition, int $msgflags[, string $payload[, string $key[, string $msg_opaque]]])
   Produce and send a single message to broker. */
PHP_METHOD(RdKafka_ProducerTopic, produce)
{
    zend_long partition;
    zend_long msgflags;
    char *payload = NULL;
    size_t payload_len = 0;
    char *key = NULL;
    size_t key_len = 0;
    zend_string *opaque = NULL;
    int ret;
    rd_kafka_resp_err_t err;
    kafka_topic_object *intern;

    /* $msg_opaque is only accepted when HAS_RD_KAFKA_PURGE is available,
     * hence the differing arities. */
#ifdef HAS_RD_KAFKA_PURGE
    ZEND_PARSE_PARAMETERS_START(2, 5)
#else
    ZEND_PARSE_PARAMETERS_START(2, 4)
#endif
        Z_PARAM_LONG(partition)
        Z_PARAM_LONG(msgflags)
        Z_PARAM_OPTIONAL
        Z_PARAM_STRING_OR_NULL(payload, payload_len)
        Z_PARAM_STRING_OR_NULL(key, key_len)
#ifdef HAS_RD_KAFKA_PURGE
        Z_PARAM_STR_OR_NULL(opaque)
#endif
    ZEND_PARSE_PARAMETERS_END();

    /* Accept the special "unassigned" partition; otherwise require int32 range. */
    if (partition != RD_KAFKA_PARTITION_UA && (partition < 0 || partition > 0x7FFFFFFF)) {
        zend_throw_exception_ex(spl_ce_InvalidArgumentException, 0, "Out of range value '%ld' for $partition", partition);
        return;
    }

    if (msgflags != 0 && msgflags != RD_KAFKA_MSG_F_BLOCK) {
        zend_throw_exception_ex(spl_ce_InvalidArgumentException, 0, "Invalid value '%ld' for $msgflags", msgflags);
        return;
    }

    /* Bug fix: get_kafka_topic_object() throws and returns NULL when the
     * constructor was never called; every sibling method checks this, but
     * produce() previously dereferenced intern->rkt unconditionally. */
    intern = get_kafka_topic_object(getThis());
    if (!intern) {
        return;
    }

    /* Keep the opaque string alive while the message is in flight;
     * presumably released when the delivery report fires — the matching
     * release lives in the delivery-callback code (not in this file). */
    if (opaque != NULL) {
        zend_string_addref(opaque);
    }

    ret = rd_kafka_produce(intern->rkt, partition, msgflags | RD_KAFKA_MSG_F_COPY, payload, payload_len, key, key_len, opaque);

    if (ret == -1) {
        /* The message was not enqueued: drop the reference taken above. */
        if (opaque != NULL) {
            zend_string_release(opaque);
        }
        err = rd_kafka_last_error();
        zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
        return;
    }
}
/* }}} */
#ifdef HAVE_RD_KAFKA_MESSAGE_HEADERS
/* {{{ proto void RdKafka\ProducerTopic::producev(int $partition, int $msgflags[, string $payload[, string $key[, array $headers[, int $timestamp_ms[, string msg_opaque]]]]])
   Produce and send a single message to broker (with headers possibility and timestamp). */
PHP_METHOD(RdKafka_ProducerTopic, producev)
{
    zend_long partition;
    zend_long msgflags;
    char *payload = NULL;
    size_t payload_len = 0;
    char *key = NULL;
    size_t key_len = 0;
    rd_kafka_resp_err_t err;
    kafka_topic_object *intern;
    kafka_object *kafka_intern;
    HashTable *headersParam = NULL;
    HashPosition headersParamPos;
    char *header_key;
    zval *header_value;
    rd_kafka_headers_t *headers;
    zend_long timestamp_ms = 0;
    zend_bool timestamp_ms_is_null = 0;
    zend_string *opaque = NULL;

    /* $msg_opaque is only accepted when HAS_RD_KAFKA_PURGE is available,
     * hence the differing arities. */
#ifdef HAS_RD_KAFKA_PURGE
    ZEND_PARSE_PARAMETERS_START(2, 7)
#else
    ZEND_PARSE_PARAMETERS_START(2, 6)
#endif
        Z_PARAM_LONG(partition)
        Z_PARAM_LONG(msgflags)
        Z_PARAM_OPTIONAL
        Z_PARAM_STRING_OR_NULL(payload, payload_len)
        Z_PARAM_STRING_OR_NULL(key, key_len)
        Z_PARAM_ARRAY_HT_OR_NULL(headersParam)
        Z_PARAM_LONG_OR_NULL(timestamp_ms, timestamp_ms_is_null)
#ifdef HAS_RD_KAFKA_PURGE
        Z_PARAM_STR_OR_NULL(opaque)
#endif
    ZEND_PARSE_PARAMETERS_END();

    /* Accept the special "unassigned" partition; otherwise require int32 range. */
    if (partition != RD_KAFKA_PARTITION_UA && (partition < 0 || partition > 0x7FFFFFFF)) {
        zend_throw_exception_ex(spl_ce_InvalidArgumentException, 0, "Out of range value '%ld' for $partition", partition);
        return;
    }

    if (msgflags != 0 && msgflags != RD_KAFKA_MSG_F_BLOCK) {
        zend_throw_exception_ex(spl_ce_InvalidArgumentException, 0, "Invalid value '%ld' for $msgflags", msgflags);
        return;
    }

    /* NULL timestamp means "let the broker/librdkafka assign it" (0). */
    if (timestamp_ms_is_null == 1) {
        timestamp_ms = 0;
    }

    /* Bug fix: get_kafka_topic_object() throws and returns NULL when the
     * constructor was never called; check it before dereferencing, like the
     * other methods do. */
    intern = get_kafka_topic_object(getThis());
    if (!intern) {
        return;
    }

    /* Keep the opaque string alive while the message is in flight;
     * presumably released when the delivery report fires — the matching
     * release lives in the delivery-callback code (not in this file). */
    if (opaque != NULL) {
        zend_string_addref(opaque);
    }

    /* Build the librdkafka header list from the PHP array (values coerced
     * to string); an empty list is created when no headers were given. */
    if (headersParam != NULL && zend_hash_num_elements(headersParam) > 0) {
        headers = rd_kafka_headers_new(zend_hash_num_elements(headersParam));
        for (zend_hash_internal_pointer_reset_ex(headersParam, &headersParamPos);
                (header_value = zend_hash_get_current_data_ex(headersParam, &headersParamPos)) != NULL &&
                (header_key = rdkafka_hash_get_current_key_ex(headersParam, &headersParamPos)) != NULL;
                zend_hash_move_forward_ex(headersParam, &headersParamPos)) {
            convert_to_string_ex(header_value);
            rd_kafka_header_add(
                headers,
                header_key,
                -1, // Auto detect header title length
                Z_STRVAL_P(header_value),
                Z_STRLEN_P(header_value)
            );
        }
    } else {
        headers = rd_kafka_headers_new(0);
    }

    kafka_intern = get_kafka_object(&intern->zrk);
    if (!kafka_intern) {
        /* Bug fix: previously this early return leaked both the header list
         * and the opaque reference taken above. */
        rd_kafka_headers_destroy(headers);
        if (opaque != NULL) {
            zend_string_release(opaque);
        }
        return;
    }

    err = rd_kafka_producev(
        kafka_intern->rk,
        RD_KAFKA_V_RKT(intern->rkt),
        RD_KAFKA_V_PARTITION(partition),
        RD_KAFKA_V_MSGFLAGS(msgflags | RD_KAFKA_MSG_F_COPY),
        RD_KAFKA_V_VALUE(payload, payload_len),
        RD_KAFKA_V_KEY(key, key_len),
        RD_KAFKA_V_TIMESTAMP(timestamp_ms),
        RD_KAFKA_V_HEADERS(headers),
        RD_KAFKA_V_OPAQUE(opaque),
        RD_KAFKA_V_END
    );

    if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
        /* On failure librdkafka did not take ownership of the headers;
         * free them and drop the opaque reference. */
        rd_kafka_headers_destroy(headers);
        if (opaque != NULL) {
            zend_string_release(opaque);
        }
        zend_throw_exception(ce_kafka_exception, rd_kafka_err2str(err), err);
        return;
    }
}
/* }}} */
#endif
/* {{{ proto string RdKafka\Topic::getName() */
/* Returns the topic name as reported by librdkafka. */
PHP_METHOD(RdKafka_Topic, getName)
{
kafka_topic_object *intern;
if (zend_parse_parameters_none() == FAILURE) {
return;
}
intern = get_kafka_topic_object(getThis());
if (!intern) {
return;
}
RETURN_STRING(rd_kafka_topic_name(intern->rkt));
}
/* }}} */
/* Module init: installs the custom object handlers and registers the
 * RdKafka\Topic class hierarchy. */
void kafka_topic_minit(INIT_FUNC_ARGS) { /* {{{ */
memcpy(&object_handlers, zend_get_std_object_handlers(), sizeof(zend_object_handlers));
/* Topics wrap native librdkafka handles that cannot be duplicated:
 * disable clone and install the custom free handler. */
object_handlers.clone_obj = NULL;
object_handlers.free_obj = kafka_topic_free;
object_handlers.offset = XtOffsetOf(kafka_topic_object, std);
ce_kafka_topic = register_class_RdKafka_Topic();
ce_kafka_topic->create_object = kafka_topic_new;
/* Subclasses are registered with Topic as parent. */
ce_kafka_consumer_topic = register_class_RdKafka_ConsumerTopic(ce_kafka_topic);
ce_kafka_kafka_consumer_topic = register_class_RdKafka_KafkaConsumerTopic(ce_kafka_topic);
ce_kafka_producer_topic = register_class_RdKafka_ProducerTopic(ce_kafka_topic);
} /* }}} */
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
/* Internal state behind RdKafka\Topic and its subclasses. */
typedef struct _kafka_topic_object {
rd_kafka_topic_t *rkt; /* librdkafka topic handle; NULL until the constructor ran */
zval zrk; /* zval referencing the owning RdKafka instance */
zend_object std; /* standard zend object header; last, per XtOffsetOf handler setup */
} kafka_topic_object;
void kafka_topic_minit(INIT_FUNC_ARGS);
kafka_topic_object * get_kafka_topic_object(zval *zrkt);
/* Class entries registered in kafka_topic_minit(). */
extern zend_class_entry * ce_kafka_consumer_topic;
extern zend_class_entry * ce_kafka_kafka_consumer_topic;
extern zend_class_entry * ce_kafka_producer_topic;
extern zend_class_entry * ce_kafka_topic;
<?php
/**
 * @generate-class-entries
 * @generate-function-entries
 * @generate-legacy-arginfo
 */
namespace RdKafka;
// Stub file: gen_stub.php turns these declarations into the arginfo
// headers; method bodies are intentionally empty. The native
// implementations live in topic.c.
abstract class Topic
{
/** @tentative-return-type */
public function getName(): string {}
}
class ConsumerTopic extends Topic
{
/** @implementation-alias RdKafka::__construct */
private function __construct() {}
/** @tentative-return-type */
public function consumeQueueStart(int $partition, int $offset, Queue $queue): void {}
/** @tentative-return-type */
public function consumeCallback(int $partition, int $timeout_ms, callable $callback): int {}
/** @tentative-return-type */
public function consumeStart(int $partition, int $offset): void {}
/** @tentative-return-type */
public function consumeStop(int $partition): void {}
/** @tentative-return-type */
public function consume(int $partition, int $timeout_ms): ?Message {}
/** @tentative-return-type */
public function consumeBatch(int $partition, int $timeout_ms, int $batch_size): array {}
/** @tentative-return-type */
public function offsetStore(int $partition, int $offset): void {}
}
class KafkaConsumerTopic extends Topic
{
/** @implementation-alias RdKafka::__construct */
private function __construct() {}
/**
 * @implementation-alias RdKafka\ConsumerTopic::offsetStore
 * @tentative-return-type
 */
public function offsetStore(int $partition, int $offset): void {}
}
class ProducerTopic extends Topic
{
/** @implementation-alias RdKafka::__construct */
private function __construct() {}
/** @tentative-return-type */
public function produce(int $partition, int $msgflags, ?string $payload = null, ?string $key = null, ?string $msg_opaque = null): void {}
// producev() is only compiled in when librdkafka supports message headers.
#ifdef HAVE_RD_KAFKA_MESSAGE_HEADERS
/** @tentative-return-type */
public function producev(int $partition, int $msgflags, ?string $payload = null, ?string $key = null, ?array $headers = null, ?int $timestamp_ms = null, ?string $msg_opaque = null): void {}
#endif
}
/* This is a generated file, edit the .stub.php file instead.
* Stub hash: 7d23f208609b509bb75f8286fc5c93c127c406d6 */
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_Topic_getName, 0, 0, IS_STRING, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_ConsumerTopic___construct, 0, 0, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_ConsumerTopic_consumeQueueStart, 0, 3, IS_VOID, 0)
ZEND_ARG_TYPE_INFO(0, partition, IS_LONG, 0)
ZEND_ARG_TYPE_INFO(0, offset, IS_LONG, 0)
ZEND_ARG_OBJ_INFO(0, queue, RdKafka\\Queue, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_ConsumerTopic_consumeCallback, 0, 3, IS_LONG, 0)
ZEND_ARG_TYPE_INFO(0, partition, IS_LONG, 0)
ZEND_ARG_TYPE_INFO(0, timeout_ms, IS_LONG, 0)
ZEND_ARG_TYPE_INFO(0, callback, IS_CALLABLE, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_ConsumerTopic_consumeStart, 0, 2, IS_VOID, 0)
ZEND_ARG_TYPE_INFO(0, partition, IS_LONG, 0)
ZEND_ARG_TYPE_INFO(0, offset, IS_LONG, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_ConsumerTopic_consumeStop, 0, 1, IS_VOID, 0)
ZEND_ARG_TYPE_INFO(0, partition, IS_LONG, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_OBJ_INFO_EX(arginfo_class_RdKafka_ConsumerTopic_consume, 0, 2, RdKafka\\Message, 1)
ZEND_ARG_TYPE_INFO(0, partition, IS_LONG, 0)
ZEND_ARG_TYPE_INFO(0, timeout_ms, IS_LONG, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_ConsumerTopic_consumeBatch, 0, 3, IS_ARRAY, 0)
ZEND_ARG_TYPE_INFO(0, partition, IS_LONG, 0)
ZEND_ARG_TYPE_INFO(0, timeout_ms, IS_LONG, 0)
ZEND_ARG_TYPE_INFO(0, batch_size, IS_LONG, 0)
ZEND_END_ARG_INFO()
#define arginfo_class_RdKafka_ConsumerTopic_offsetStore arginfo_class_RdKafka_ConsumerTopic_consumeStart
#define arginfo_class_RdKafka_KafkaConsumerTopic___construct arginfo_class_RdKafka_ConsumerTopic___construct
#define arginfo_class_RdKafka_KafkaConsumerTopic_offsetStore arginfo_class_RdKafka_ConsumerTopic_consumeStart
#define arginfo_class_RdKafka_ProducerTopic___construct arginfo_class_RdKafka_ConsumerTopic___construct
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_ProducerTopic_produce, 0, 2, IS_VOID, 0)
ZEND_ARG_TYPE_INFO(0, partition, IS_LONG, 0)
ZEND_ARG_TYPE_INFO(0, msgflags, IS_LONG, 0)
ZEND_ARG_TYPE_INFO_WITH_DEFAULT_VALUE(0, payload, IS_STRING, 1, "null")
ZEND_ARG_TYPE_INFO_WITH_DEFAULT_VALUE(0, key, IS_STRING, 1, "null")
ZEND_ARG_TYPE_INFO_WITH_DEFAULT_VALUE(0, msg_opaque, IS_STRING, 1, "null")
ZEND_END_ARG_INFO()
#if defined(HAVE_RD_KAFKA_MESSAGE_HEADERS)
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_ProducerTopic_producev, 0, 2, IS_VOID, 0)
ZEND_ARG_TYPE_INFO(0, partition, IS_LONG, 0)
ZEND_ARG_TYPE_INFO(0, msgflags, IS_LONG, 0)
ZEND_ARG_TYPE_INFO_WITH_DEFAULT_VALUE(0, payload, IS_STRING, 1, "null")
ZEND_ARG_TYPE_INFO_WITH_DEFAULT_VALUE(0, key, IS_STRING, 1, "null")
ZEND_ARG_TYPE_INFO_WITH_DEFAULT_VALUE(0, headers, IS_ARRAY, 1, "null")
ZEND_ARG_TYPE_INFO_WITH_DEFAULT_VALUE(0, timestamp_ms, IS_LONG, 1, "null")
ZEND_ARG_TYPE_INFO_WITH_DEFAULT_VALUE(0, msg_opaque, IS_STRING, 1, "null")
ZEND_END_ARG_INFO()
#endif
ZEND_METHOD(RdKafka_Topic, getName);
ZEND_METHOD(RdKafka, __construct);
ZEND_METHOD(RdKafka_ConsumerTopic, consumeQueueStart);
ZEND_METHOD(RdKafka_ConsumerTopic, consumeCallback);
ZEND_METHOD(RdKafka_ConsumerTopic, consumeStart);
ZEND_METHOD(RdKafka_ConsumerTopic, consumeStop);
ZEND_METHOD(RdKafka_ConsumerTopic, consume);
ZEND_METHOD(RdKafka_ConsumerTopic, consumeBatch);
ZEND_METHOD(RdKafka_ConsumerTopic, offsetStore);
ZEND_METHOD(RdKafka_ProducerTopic, produce);
#if defined(HAVE_RD_KAFKA_MESSAGE_HEADERS)
ZEND_METHOD(RdKafka_ProducerTopic, producev);
#endif
static const zend_function_entry class_RdKafka_Topic_methods[] = {
ZEND_ME(RdKafka_Topic, getName, arginfo_class_RdKafka_Topic_getName, ZEND_ACC_PUBLIC)
ZEND_FE_END
};
static const zend_function_entry class_RdKafka_ConsumerTopic_methods[] = {
ZEND_MALIAS(RdKafka, __construct, __construct, arginfo_class_RdKafka_ConsumerTopic___construct, ZEND_ACC_PRIVATE)
ZEND_ME(RdKafka_ConsumerTopic, consumeQueueStart, arginfo_class_RdKafka_ConsumerTopic_consumeQueueStart, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_ConsumerTopic, consumeCallback, arginfo_class_RdKafka_ConsumerTopic_consumeCallback, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_ConsumerTopic, consumeStart, arginfo_class_RdKafka_ConsumerTopic_consumeStart, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_ConsumerTopic, consumeStop, arginfo_class_RdKafka_ConsumerTopic_consumeStop, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_ConsumerTopic, consume, arginfo_class_RdKafka_ConsumerTopic_consume, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_ConsumerTopic, consumeBatch, arginfo_class_RdKafka_ConsumerTopic_consumeBatch, ZEND_ACC_PUBLIC)
ZEND_ME(RdKafka_ConsumerTopic, offsetStore, arginfo_class_RdKafka_ConsumerTopic_offsetStore, ZEND_ACC_PUBLIC)
ZEND_FE_END
};
/* NOTE(review): generated arginfo code — regenerate from the .stub.php
 * source rather than editing these tables by hand. */
static const zend_function_entry class_RdKafka_KafkaConsumerTopic_methods[] = {
    /* Constructor is private: instances are created internally by the extension. */
    ZEND_MALIAS(RdKafka, __construct, __construct, arginfo_class_RdKafka_KafkaConsumerTopic___construct, ZEND_ACC_PRIVATE)
    ZEND_MALIAS(RdKafka_ConsumerTopic, offsetStore, offsetStore, arginfo_class_RdKafka_KafkaConsumerTopic_offsetStore, ZEND_ACC_PUBLIC)
    ZEND_FE_END
};
static const zend_function_entry class_RdKafka_ProducerTopic_methods[] = {
    ZEND_MALIAS(RdKafka, __construct, __construct, arginfo_class_RdKafka_ProducerTopic___construct, ZEND_ACC_PRIVATE)
    ZEND_ME(RdKafka_ProducerTopic, produce, arginfo_class_RdKafka_ProducerTopic_produce, ZEND_ACC_PUBLIC)
#if defined(HAVE_RD_KAFKA_MESSAGE_HEADERS)
    /* producev (headers/timestamp support) only when librdkafka provides message headers. */
    ZEND_ME(RdKafka_ProducerTopic, producev, arginfo_class_RdKafka_ProducerTopic_producev, ZEND_ACC_PUBLIC)
#endif
    ZEND_FE_END
};
/* Registers the abstract base class RdKafka\Topic. */
static zend_class_entry *register_class_RdKafka_Topic(void)
{
    zend_class_entry ce, *class_entry;
    INIT_NS_CLASS_ENTRY(ce, "RdKafka", "Topic", class_RdKafka_Topic_methods);
    class_entry = zend_register_internal_class_ex(&ce, NULL);
    class_entry->ce_flags |= ZEND_ACC_ABSTRACT;
    return class_entry;
}
/* Registers RdKafka\ConsumerTopic extending RdKafka\Topic. */
static zend_class_entry *register_class_RdKafka_ConsumerTopic(zend_class_entry *class_entry_RdKafka_Topic)
{
    zend_class_entry ce, *class_entry;
    INIT_NS_CLASS_ENTRY(ce, "RdKafka", "ConsumerTopic", class_RdKafka_ConsumerTopic_methods);
    class_entry = zend_register_internal_class_ex(&ce, class_entry_RdKafka_Topic);
    return class_entry;
}
/* Registers RdKafka\KafkaConsumerTopic extending RdKafka\Topic. */
static zend_class_entry *register_class_RdKafka_KafkaConsumerTopic(zend_class_entry *class_entry_RdKafka_Topic)
{
    zend_class_entry ce, *class_entry;
    INIT_NS_CLASS_ENTRY(ce, "RdKafka", "KafkaConsumerTopic", class_RdKafka_KafkaConsumerTopic_methods);
    class_entry = zend_register_internal_class_ex(&ce, class_entry_RdKafka_Topic);
    return class_entry;
}
/* Registers RdKafka\ProducerTopic extending RdKafka\Topic. */
static zend_class_entry *register_class_RdKafka_ProducerTopic(zend_class_entry *class_entry_RdKafka_Topic)
{
    zend_class_entry ce, *class_entry;
    INIT_NS_CLASS_ENTRY(ce, "RdKafka", "ProducerTopic", class_RdKafka_ProducerTopic_methods);
    class_entry = zend_register_internal_class_ex(&ce, class_entry_RdKafka_Topic);
    return class_entry;
}
/* This is a generated file, edit the .stub.php file instead.
 * Stub hash: 7d23f208609b509bb75f8286fc5c93c127c406d6 */
/* NOTE(review): legacy (untyped) arginfo variant of the tables above, used
 * for PHP versions without typed arginfo support — do not hand-edit. */
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_Topic_getName, 0, 0, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_ConsumerTopic___construct, 0, 0, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_ConsumerTopic_consumeQueueStart, 0, 0, 3)
    ZEND_ARG_INFO(0, partition)
    ZEND_ARG_INFO(0, offset)
    ZEND_ARG_INFO(0, queue)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_ConsumerTopic_consumeCallback, 0, 0, 3)
    ZEND_ARG_INFO(0, partition)
    ZEND_ARG_INFO(0, timeout_ms)
    ZEND_ARG_INFO(0, callback)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_ConsumerTopic_consumeStart, 0, 0, 2)
    ZEND_ARG_INFO(0, partition)
    ZEND_ARG_INFO(0, offset)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_ConsumerTopic_consumeStop, 0, 0, 1)
    ZEND_ARG_INFO(0, partition)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_ConsumerTopic_consume, 0, 0, 2)
    ZEND_ARG_INFO(0, partition)
    ZEND_ARG_INFO(0, timeout_ms)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_ConsumerTopic_consumeBatch, 0, 0, 3)
    ZEND_ARG_INFO(0, partition)
    ZEND_ARG_INFO(0, timeout_ms)
    ZEND_ARG_INFO(0, batch_size)
ZEND_END_ARG_INFO()
/* Shared arginfo: offsetStore has the same (partition, offset) shape as consumeStart. */
#define arginfo_class_RdKafka_ConsumerTopic_offsetStore arginfo_class_RdKafka_ConsumerTopic_consumeStart
#define arginfo_class_RdKafka_KafkaConsumerTopic___construct arginfo_class_RdKafka_ConsumerTopic___construct
#define arginfo_class_RdKafka_KafkaConsumerTopic_offsetStore arginfo_class_RdKafka_ConsumerTopic_consumeStart
#define arginfo_class_RdKafka_ProducerTopic___construct arginfo_class_RdKafka_ConsumerTopic___construct
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_ProducerTopic_produce, 0, 0, 2)
    ZEND_ARG_INFO(0, partition)
    ZEND_ARG_INFO(0, msgflags)
    ZEND_ARG_INFO(0, payload)
    ZEND_ARG_INFO(0, key)
    ZEND_ARG_INFO(0, msg_opaque)
ZEND_END_ARG_INFO()
#if defined(HAVE_RD_KAFKA_MESSAGE_HEADERS)
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_ProducerTopic_producev, 0, 0, 2)
    ZEND_ARG_INFO(0, partition)
    ZEND_ARG_INFO(0, msgflags)
    ZEND_ARG_INFO(0, payload)
    ZEND_ARG_INFO(0, key)
    ZEND_ARG_INFO(0, headers)
    ZEND_ARG_INFO(0, timestamp_ms)
    ZEND_ARG_INFO(0, msg_opaque)
ZEND_END_ARG_INFO()
#endif
/* Forward declarations of the method implementations bound below. */
ZEND_METHOD(RdKafka_Topic, getName);
ZEND_METHOD(RdKafka, __construct);
ZEND_METHOD(RdKafka_ConsumerTopic, consumeQueueStart);
ZEND_METHOD(RdKafka_ConsumerTopic, consumeCallback);
ZEND_METHOD(RdKafka_ConsumerTopic, consumeStart);
ZEND_METHOD(RdKafka_ConsumerTopic, consumeStop);
ZEND_METHOD(RdKafka_ConsumerTopic, consume);
ZEND_METHOD(RdKafka_ConsumerTopic, consumeBatch);
ZEND_METHOD(RdKafka_ConsumerTopic, offsetStore);
ZEND_METHOD(RdKafka_ProducerTopic, produce);
#if defined(HAVE_RD_KAFKA_MESSAGE_HEADERS)
ZEND_METHOD(RdKafka_ProducerTopic, producev);
#endif
static const zend_function_entry class_RdKafka_Topic_methods[] = {
    ZEND_ME(RdKafka_Topic, getName, arginfo_class_RdKafka_Topic_getName, ZEND_ACC_PUBLIC)
    ZEND_FE_END
};
static const zend_function_entry class_RdKafka_ConsumerTopic_methods[] = {
    ZEND_MALIAS(RdKafka, __construct, __construct, arginfo_class_RdKafka_ConsumerTopic___construct, ZEND_ACC_PRIVATE)
    ZEND_ME(RdKafka_ConsumerTopic, consumeQueueStart, arginfo_class_RdKafka_ConsumerTopic_consumeQueueStart, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_ConsumerTopic, consumeCallback, arginfo_class_RdKafka_ConsumerTopic_consumeCallback, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_ConsumerTopic, consumeStart, arginfo_class_RdKafka_ConsumerTopic_consumeStart, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_ConsumerTopic, consumeStop, arginfo_class_RdKafka_ConsumerTopic_consumeStop, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_ConsumerTopic, consume, arginfo_class_RdKafka_ConsumerTopic_consume, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_ConsumerTopic, consumeBatch, arginfo_class_RdKafka_ConsumerTopic_consumeBatch, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_ConsumerTopic, offsetStore, arginfo_class_RdKafka_ConsumerTopic_offsetStore, ZEND_ACC_PUBLIC)
    ZEND_FE_END
};
static const zend_function_entry class_RdKafka_KafkaConsumerTopic_methods[] = {
    ZEND_MALIAS(RdKafka, __construct, __construct, arginfo_class_RdKafka_KafkaConsumerTopic___construct, ZEND_ACC_PRIVATE)
    ZEND_MALIAS(RdKafka_ConsumerTopic, offsetStore, offsetStore, arginfo_class_RdKafka_KafkaConsumerTopic_offsetStore, ZEND_ACC_PUBLIC)
    ZEND_FE_END
};
static const zend_function_entry class_RdKafka_ProducerTopic_methods[] = {
    ZEND_MALIAS(RdKafka, __construct, __construct, arginfo_class_RdKafka_ProducerTopic___construct, ZEND_ACC_PRIVATE)
    ZEND_ME(RdKafka_ProducerTopic, produce, arginfo_class_RdKafka_ProducerTopic_produce, ZEND_ACC_PUBLIC)
#if defined(HAVE_RD_KAFKA_MESSAGE_HEADERS)
    ZEND_ME(RdKafka_ProducerTopic, producev, arginfo_class_RdKafka_ProducerTopic_producev, ZEND_ACC_PUBLIC)
#endif
    ZEND_FE_END
};
/* Registers the abstract base class RdKafka\Topic. */
static zend_class_entry *register_class_RdKafka_Topic(void)
{
    zend_class_entry ce, *class_entry;
    INIT_NS_CLASS_ENTRY(ce, "RdKafka", "Topic", class_RdKafka_Topic_methods);
    class_entry = zend_register_internal_class_ex(&ce, NULL);
    class_entry->ce_flags |= ZEND_ACC_ABSTRACT;
    return class_entry;
}
static zend_class_entry *register_class_RdKafka_ConsumerTopic(zend_class_entry *class_entry_RdKafka_Topic)
{
    zend_class_entry ce, *class_entry;
    INIT_NS_CLASS_ENTRY(ce, "RdKafka", "ConsumerTopic", class_RdKafka_ConsumerTopic_methods);
    class_entry = zend_register_internal_class_ex(&ce, class_entry_RdKafka_Topic);
    return class_entry;
}
static zend_class_entry *register_class_RdKafka_KafkaConsumerTopic(zend_class_entry *class_entry_RdKafka_Topic)
{
    zend_class_entry ce, *class_entry;
    INIT_NS_CLASS_ENTRY(ce, "RdKafka", "KafkaConsumerTopic", class_RdKafka_KafkaConsumerTopic_methods);
    class_entry = zend_register_internal_class_ex(&ce, class_entry_RdKafka_Topic);
    return class_entry;
}
static zend_class_entry *register_class_RdKafka_ProducerTopic(zend_class_entry *class_entry_RdKafka_Topic)
{
    zend_class_entry ce, *class_entry;
    INIT_NS_CLASS_ENTRY(ce, "RdKafka", "ProducerTopic", class_RdKafka_ProducerTopic_methods);
    class_entry = zend_register_internal_class_ex(&ce, class_entry_RdKafka_Topic);
    return class_entry;
}
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "php.h"
#include "php_ini.h"
#include "ext/standard/info.h"
#include "php_rdkafka.h"
#include "php_rdkafka_priv.h"
#include "librdkafka/rdkafka.h"
#include "Zend/zend_exceptions.h"
#include "ext/spl/spl_exceptions.h"
#include "topic_partition.h"
#if PHP_VERSION_ID < 80000
#include "topic_partition_legacy_arginfo.h"
#else
#include "topic_partition_arginfo.h"
#endif
/* Local alias so the generic object-handler helpers below read uniformly. */
typedef kafka_topic_partition_intern object_intern;
static HashTable *get_debug_info(Z_RDKAFKA_OBJ *object, int *is_temp);
/* Class entry for RdKafka\TopicPartition, shared with other translation units. */
zend_class_entry * ce_kafka_topic_partition;
/* Custom object handlers installed during MINIT (create/free/debug). */
static zend_object_handlers handlers;
/* Object storage destructor for RdKafka\TopicPartition: releases the
 * duplicated topic string (if any), then runs the standard zend dtor. */
static void free_object(zend_object *object) /* {{{ */
{
    object_intern *self = php_kafka_from_obj(object_intern, object);

    if (self->topic != NULL) {
        efree(self->topic);
    }

    zend_object_std_dtor(&self->std);
}
/* }}} */
/* Object storage allocator: zero-allocates the intern struct (so topic
 * starts NULL and partition/offset/err start 0), initializes the embedded
 * zend_object plus its properties, and installs our custom handlers. */
static zend_object *create_object(zend_class_entry *class_type) /* {{{ */
{
    object_intern *self = ecalloc(1, sizeof(*self));

    zend_object_std_init(&self->std, class_type);
    object_properties_init(&self->std, class_type);
    self->std.handlers = &handlers;

    return &self->std;
}
/* }}} */
/* Fetches the intern struct behind a zval. A NULL topic is used as the
 * "constructor never ran" sentinel: in that case an exception is thrown
 * and NULL is returned. */
static object_intern * get_object(zval *z) /* {{{ */
{
    object_intern *self = Z_RDKAFKA_P(object_intern, z);

    if (self->topic != NULL) {
        return self;
    }

    zend_throw_exception_ex(NULL, 0, "RdKafka\\TopicPartition::__construct() has not been called");
    return NULL;
} /* }}} */
/* Public accessor for other translation units; same contract as
 * get_object(): throws and returns NULL when the object is uninitialized. */
kafka_topic_partition_intern * get_topic_partition_object(zval *z) /* {{{ */
{
    return get_object(z);
} /* }}} */
/* Debug handler (var_dump etc.): exposes topic/partition/offset/err as a
 * temporary array. *is_temp = 1 tells the engine the returned HashTable
 * is freshly allocated and must be released after use. */
static HashTable *get_debug_info(Z_RDKAFKA_OBJ *object, int *is_temp) /* {{{ */
{
    zval ary;
    object_intern *intern;
    *is_temp = 1;
    array_init(&ary);
    intern = rdkafka_get_debug_object(object_intern, object);
    /* Uninitialized object: dump an empty array instead of failing. */
    if (!intern) {
        return Z_ARRVAL(ary);
    }
    /* topic may legitimately be NULL before __construct has run. */
    if (intern->topic) {
        add_assoc_string(&ary, "topic", intern->topic);
    } else {
        add_assoc_null(&ary, "topic");
    }
    add_assoc_long(&ary, "partition", intern->partition);
    add_assoc_long(&ary, "offset", intern->offset);
    add_assoc_long(&ary, "err", (zend_long) intern->err);
    return Z_ARRVAL(ary);
}
/* }}} */
/* (Re)initializes a TopicPartition object's fields; frees any previously
 * stored topic before duplicating the new one.
 * NOTE(review): if a caller ever passed intern->topic itself as `topic`,
 * the efree-before-estrdup order would be a use-after-free — presumably
 * all callers pass an external string; verify when adding callers. */
void kafka_topic_partition_init(zval *zobj, char * topic, int32_t partition, int64_t offset, rd_kafka_resp_err_t err) /* {{{ */
{
    object_intern *intern;
    intern = Z_RDKAFKA_P(object_intern, zobj);
    if (!intern) {
        return;
    }
    if (intern->topic) {
        efree(intern->topic);
    }
    /* Own a private copy so the caller's buffer lifetime doesn't matter. */
    intern->topic = estrdup(topic);
    intern->partition = partition;
    intern->offset = offset;
    intern->err = err;
} /* }}} */
/* Converts a librdkafka topic-partition list into a PHP array of
 * RdKafka\TopicPartition objects, appended in list order. */
void kafka_topic_partition_list_to_array(zval *return_value, rd_kafka_topic_partition_list_t *list) /* {{{ */
{
    int i;

    array_init_size(return_value, list->cnt);

    for (i = 0; i < list->cnt; i++) {
        rd_kafka_topic_partition_t *elem = &list->elems[i];
        zval zelem;

        ZVAL_NULL(&zelem);
        object_init_ex(&zelem, ce_kafka_topic_partition);
        kafka_topic_partition_init(&zelem, elem->topic, elem->partition, elem->offset, elem->err);

        /* Ownership of zelem moves into the array. */
        add_next_index_zval(return_value, &zelem);
    }
} /* }}} */
/* Converts a PHP array of RdKafka\TopicPartition objects into a newly
 * allocated rd_kafka_topic_partition_list_t. Returns NULL (after raising
 * E_ERROR or letting get_topic_partition_object throw) on invalid input,
 * destroying the partial list first. On success the caller owns the list
 * and must destroy it. */
rd_kafka_topic_partition_list_t * array_arg_to_kafka_topic_partition_list(int argnum, HashTable *ary) { /* {{{ */
    HashPosition pos;
    rd_kafka_topic_partition_list_t *list;
    zval *zv;
    list = rd_kafka_topic_partition_list_new(zend_hash_num_elements(ary));
    for (zend_hash_internal_pointer_reset_ex(ary, &pos);
            (zv = zend_hash_get_current_data_ex(ary, &pos)) != NULL;
            zend_hash_move_forward_ex(ary, &pos)) {
        kafka_topic_partition_intern *topar_intern;
        rd_kafka_topic_partition_t *topar;
        /* Every element must be a TopicPartition (or subclass) instance. */
        if (Z_TYPE_P(zv) != IS_OBJECT || !instanceof_function(Z_OBJCE_P(zv), ce_kafka_topic_partition)) {
            const char *space;
            const char *class_name = get_active_class_name(&space);
            rd_kafka_topic_partition_list_destroy(list);
            php_error(E_ERROR,
                "Argument %d passed to %s%s%s() must be an array of RdKafka\\TopicPartition, at least one element is a(n) %s",
                argnum,
                class_name, space,
                get_active_function_name(),
                zend_zval_type_name(zv));
            return NULL;
        }
        topar_intern = get_topic_partition_object(zv);
        /* NULL means the element was never __construct-ed; exception already thrown. */
        if (!topar_intern) {
            rd_kafka_topic_partition_list_destroy(list);
            return NULL;
        }
        topar = rd_kafka_topic_partition_list_add(list, topar_intern->topic, topar_intern->partition);
        topar->offset = topar_intern->offset;
    }
    return list;
} /* }}} */
/* {{{ proto void RdKafka\TopicPartition::__construct(string $topic, int $partition[, int $offset])
   Constructor. Stores topic/partition/offset; err is reset to NO_ERROR. */
PHP_METHOD(RdKafka_TopicPartition, __construct)
{
    char *topic;
    size_t topic_len;
    zend_long partition;
    zend_long offset = 0; /* default when the third argument is omitted */
    zend_error_handling error_handling;
    /* Convert argument-parsing warnings into InvalidArgumentException. */
    zend_replace_error_handling(EH_THROW, spl_ce_InvalidArgumentException, &error_handling);
    if (zend_parse_parameters(ZEND_NUM_ARGS(), "sl|l", &topic, &topic_len, &partition, &offset) == FAILURE) {
        zend_restore_error_handling(&error_handling);
        return;
    }
    kafka_topic_partition_init(getThis(), topic, partition, offset, RD_KAFKA_RESP_ERR_NO_ERROR);
    /* Always restore the previous error handling mode before returning. */
    zend_restore_error_handling(&error_handling);
}
/* }}} */
/* {{{ proto string RdKafka\TopicPartition::getTopic()
Returns topic name */
PHP_METHOD(RdKafka_TopicPartition, getTopic)
{
object_intern *intern;
if (zend_parse_parameters_none() == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
if (intern->topic) {
RETURN_STRING(intern->topic);
} else {
RETURN_NULL();
}
}
/* }}} */
/* {{{ proto TopicPartition RdKafka\TopicPartition::setTopic($topicName)
Sets topic name */
PHP_METHOD(RdKafka_TopicPartition, setTopic)
{
char * topic;
size_t topic_len;
object_intern *intern;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "s", &topic, &topic_len) == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
if (intern->topic) {
efree(intern->topic);
}
intern->topic = estrdup(topic);
RETURN_ZVAL(getThis(), 1, 0);
}
/* }}} */
/* {{{ proto int RdKafka\TopicPartition::getPartition()
Returns partition */
PHP_METHOD(RdKafka_TopicPartition, getPartition)
{
object_intern *intern;
if (zend_parse_parameters_none() == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
RETURN_LONG(intern->partition);
}
/* }}} */
/* {{{ proto TopicPartition RdKafka\TopicPartition::setPartition($partition)
Sets partition */
PHP_METHOD(RdKafka_TopicPartition, setPartition)
{
zend_long partition;
object_intern *intern;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "l", &partition) == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
intern->partition = partition;
RETURN_ZVAL(getThis(), 1, 0);
}
/* }}} */
/* {{{ proto int RdKafka\TopicPartition::getOffset()
Returns offset */
PHP_METHOD(RdKafka_TopicPartition, getOffset)
{
object_intern *intern;
if (zend_parse_parameters_none() == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
RETURN_LONG(intern->offset);
}
/* }}} */
/* {{{ proto TopicPartition RdKafka\TopicPartition::setOffset($offset)
Sets offset */
PHP_METHOD(RdKafka_TopicPartition, setOffset)
{
zend_long offset;
object_intern *intern;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "l", &offset) == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
intern->offset = offset;
RETURN_ZVAL(getThis(), 1, 0);
}
/* }}} */
/* {{{ proto int RdKafka\TopicPartition::getErr()
Returns err */
PHP_METHOD(RdKafka_TopicPartition, getErr)
{
object_intern *intern;
if (zend_parse_parameters_none() == FAILURE) {
return;
}
intern = get_object(getThis());
if (!intern) {
return;
}
RETURN_LONG((zend_long) intern->err);
}
/* }}} */
/* MINIT hook: registers RdKafka\TopicPartition (via the generated arginfo
 * register function) and wires up the custom object handlers. */
void kafka_metadata_topic_partition_minit(INIT_FUNC_ARGS) /* {{{ */
{
    ce_kafka_topic_partition = register_class_RdKafka_TopicPartition();
    ce_kafka_topic_partition->create_object = create_object;
    handlers = kafka_default_object_handlers;
    handlers.get_debug_info = get_debug_info;
    handlers.free_obj = free_object;
    /* Offset of the embedded zend_object inside the intern struct; required
     * because std is not the first member. */
    handlers.offset = XtOffsetOf(object_intern, std);
} /* }}} */
/*
+----------------------------------------------------------------------+
| php-rdkafka |
+----------------------------------------------------------------------+
| Copyright (c) 2016 Arnaud Le Blanc |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Arnaud Le Blanc <arnaud.lb@gmail.com> |
+----------------------------------------------------------------------+
*/
/* Internal representation of an RdKafka\TopicPartition object. The embedded
 * zend_object is the last member; handler code recovers the struct pointer
 * from it via an offset (see handlers.offset in the .c file). */
typedef struct _kafka_topic_partition_intern {
    char *topic;               /* owned estrdup'd copy; NULL until __construct runs */
    int32_t partition;
    int64_t offset;
    rd_kafka_resp_err_t err;   /* librdkafka error code for this partition */
    zend_object std;           /* must stay last */
} kafka_topic_partition_intern;
void kafka_metadata_topic_partition_minit(INIT_FUNC_ARGS);
/* Throws and returns NULL when the object was never constructed. */
kafka_topic_partition_intern * get_topic_partition_object(zval *z);
void kafka_topic_partition_init(zval *z, char *topic, int32_t partition, int64_t offset, rd_kafka_resp_err_t err);
void kafka_topic_partition_list_to_array(zval *return_value, rd_kafka_topic_partition_list_t *list);
/* Caller owns the returned list; NULL on invalid input. */
rd_kafka_topic_partition_list_t * array_arg_to_kafka_topic_partition_list(int argnum, HashTable *ary);
extern zend_class_entry * ce_kafka_topic_partition;
<?php
/**
 * @generate-class-entries
 * @generate-function-entries
 * @generate-legacy-arginfo
 */
namespace RdKafka;
// Stub source for RdKafka\TopicPartition: the *_arginfo.h files are
// generated from this file — change signatures here, then regenerate.
class TopicPartition
{
    public function __construct(string $topic, int $partition, int $offset = 0) {}
    /** @tentative-return-type */
    public function getTopic(): ?string {}
    /** @tentative-return-type */
    public function setTopic(string $topic_name): TopicPartition {}
    /** @tentative-return-type */
    public function getPartition(): int {}
    /** @tentative-return-type */
    public function setPartition(int $partition): TopicPartition {}
    /** @tentative-return-type */
    public function getOffset(): int {}
    /** @tentative-return-type */
    public function setOffset(int $offset): TopicPartition {}
    /** @tentative-return-type */
    public function getErr(): ?int {}
}
/* This is a generated file, edit the .stub.php file instead.
 * Stub hash: 7c722b9eb9357157d89a14431ebcfd79cc6f1116 */
/* NOTE(review): typed arginfo generated from TopicPartition.stub.php —
 * regenerate instead of hand-editing. */
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_TopicPartition___construct, 0, 0, 2)
    ZEND_ARG_TYPE_INFO(0, topic, IS_STRING, 0)
    ZEND_ARG_TYPE_INFO(0, partition, IS_LONG, 0)
    ZEND_ARG_TYPE_INFO_WITH_DEFAULT_VALUE(0, offset, IS_LONG, 0, "0")
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_TopicPartition_getTopic, 0, 0, IS_STRING, 1)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_OBJ_INFO_EX(arginfo_class_RdKafka_TopicPartition_setTopic, 0, 1, RdKafka\\TopicPartition, 0)
    ZEND_ARG_TYPE_INFO(0, topic_name, IS_STRING, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_TopicPartition_getPartition, 0, 0, IS_LONG, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_OBJ_INFO_EX(arginfo_class_RdKafka_TopicPartition_setPartition, 0, 1, RdKafka\\TopicPartition, 0)
    ZEND_ARG_TYPE_INFO(0, partition, IS_LONG, 0)
ZEND_END_ARG_INFO()
#define arginfo_class_RdKafka_TopicPartition_getOffset arginfo_class_RdKafka_TopicPartition_getPartition
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_OBJ_INFO_EX(arginfo_class_RdKafka_TopicPartition_setOffset, 0, 1, RdKafka\\TopicPartition, 0)
    ZEND_ARG_TYPE_INFO(0, offset, IS_LONG, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_WITH_TENTATIVE_RETURN_TYPE_INFO_EX(arginfo_class_RdKafka_TopicPartition_getErr, 0, 0, IS_LONG, 1)
ZEND_END_ARG_INFO()
ZEND_METHOD(RdKafka_TopicPartition, __construct);
ZEND_METHOD(RdKafka_TopicPartition, getTopic);
ZEND_METHOD(RdKafka_TopicPartition, setTopic);
ZEND_METHOD(RdKafka_TopicPartition, getPartition);
ZEND_METHOD(RdKafka_TopicPartition, setPartition);
ZEND_METHOD(RdKafka_TopicPartition, getOffset);
ZEND_METHOD(RdKafka_TopicPartition, setOffset);
ZEND_METHOD(RdKafka_TopicPartition, getErr);
static const zend_function_entry class_RdKafka_TopicPartition_methods[] = {
    ZEND_ME(RdKafka_TopicPartition, __construct, arginfo_class_RdKafka_TopicPartition___construct, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_TopicPartition, getTopic, arginfo_class_RdKafka_TopicPartition_getTopic, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_TopicPartition, setTopic, arginfo_class_RdKafka_TopicPartition_setTopic, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_TopicPartition, getPartition, arginfo_class_RdKafka_TopicPartition_getPartition, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_TopicPartition, setPartition, arginfo_class_RdKafka_TopicPartition_setPartition, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_TopicPartition, getOffset, arginfo_class_RdKafka_TopicPartition_getOffset, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_TopicPartition, setOffset, arginfo_class_RdKafka_TopicPartition_setOffset, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_TopicPartition, getErr, arginfo_class_RdKafka_TopicPartition_getErr, ZEND_ACC_PUBLIC)
    ZEND_FE_END
};
static zend_class_entry *register_class_RdKafka_TopicPartition(void)
{
    zend_class_entry ce, *class_entry;
    INIT_NS_CLASS_ENTRY(ce, "RdKafka", "TopicPartition", class_RdKafka_TopicPartition_methods);
    class_entry = zend_register_internal_class_ex(&ce, NULL);
    return class_entry;
}
/* This is a generated file, edit the .stub.php file instead.
 * Stub hash: 7c722b9eb9357157d89a14431ebcfd79cc6f1116 */
/* NOTE(review): legacy (untyped) arginfo variant generated from
 * TopicPartition.stub.php for PHP versions without typed arginfo support. */
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_TopicPartition___construct, 0, 0, 2)
    ZEND_ARG_INFO(0, topic)
    ZEND_ARG_INFO(0, partition)
    ZEND_ARG_INFO(0, offset)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_TopicPartition_getTopic, 0, 0, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_TopicPartition_setTopic, 0, 0, 1)
    ZEND_ARG_INFO(0, topic_name)
ZEND_END_ARG_INFO()
#define arginfo_class_RdKafka_TopicPartition_getPartition arginfo_class_RdKafka_TopicPartition_getTopic
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_TopicPartition_setPartition, 0, 0, 1)
    ZEND_ARG_INFO(0, partition)
ZEND_END_ARG_INFO()
#define arginfo_class_RdKafka_TopicPartition_getOffset arginfo_class_RdKafka_TopicPartition_getTopic
ZEND_BEGIN_ARG_INFO_EX(arginfo_class_RdKafka_TopicPartition_setOffset, 0, 0, 1)
    ZEND_ARG_INFO(0, offset)
ZEND_END_ARG_INFO()
#define arginfo_class_RdKafka_TopicPartition_getErr arginfo_class_RdKafka_TopicPartition_getTopic
ZEND_METHOD(RdKafka_TopicPartition, __construct);
ZEND_METHOD(RdKafka_TopicPartition, getTopic);
ZEND_METHOD(RdKafka_TopicPartition, setTopic);
ZEND_METHOD(RdKafka_TopicPartition, getPartition);
ZEND_METHOD(RdKafka_TopicPartition, setPartition);
ZEND_METHOD(RdKafka_TopicPartition, getOffset);
ZEND_METHOD(RdKafka_TopicPartition, setOffset);
ZEND_METHOD(RdKafka_TopicPartition, getErr);
static const zend_function_entry class_RdKafka_TopicPartition_methods[] = {
    ZEND_ME(RdKafka_TopicPartition, __construct, arginfo_class_RdKafka_TopicPartition___construct, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_TopicPartition, getTopic, arginfo_class_RdKafka_TopicPartition_getTopic, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_TopicPartition, setTopic, arginfo_class_RdKafka_TopicPartition_setTopic, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_TopicPartition, getPartition, arginfo_class_RdKafka_TopicPartition_getPartition, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_TopicPartition, setPartition, arginfo_class_RdKafka_TopicPartition_setPartition, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_TopicPartition, getOffset, arginfo_class_RdKafka_TopicPartition_getOffset, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_TopicPartition, setOffset, arginfo_class_RdKafka_TopicPartition_setOffset, ZEND_ACC_PUBLIC)
    ZEND_ME(RdKafka_TopicPartition, getErr, arginfo_class_RdKafka_TopicPartition_getErr, ZEND_ACC_PUBLIC)
    ZEND_FE_END
};
static zend_class_entry *register_class_RdKafka_TopicPartition(void)
{
    zend_class_entry ce, *class_entry;
    INIT_NS_CLASS_ENTRY(ce, "RdKafka", "TopicPartition", class_RdKafka_TopicPartition_methods);
    class_entry = zend_register_internal_class_ex(&ce, NULL);
    return class_entry;
}
# This is a basic workflow to help you get started with Actions
name: CI
# Controls when the action will run.
on:
  # Triggers the workflow on push or pull request events but only for the master branch
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]
env:
  DOCKER_HOST_IP: 127.0.0.1
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  # This workflow contains a single job called "build"
  build:
    # The type of runner that the job will run on
    runs-on: ubuntu-latest
    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      # NOTE(review): actions/checkout@v2 runs on a deprecated Node runtime — consider upgrading to v4.
      - uses: actions/checkout@v2
      - name: add kafka repo key
        # NOTE(review): apt-key is deprecated on current Ubuntu; consider gpg --dearmor
        # into /etc/apt/keyrings plus a signed-by entry in the source line.
        run: wget -qO - https://packages.confluent.io/deb/4.0/archive.key | sudo apt-key add -
      - name: add kafka binary repo
        run: sudo add-apt-repository "deb [arch=amd64] https://packages.confluent.io/deb/4.0 stable main"
      - name: install kafka binary
        # NOTE(review): relies on add-apt-repository refreshing the package index;
        # an explicit `sudo apt-get update` before install would be safer — TODO confirm.
        run: sudo apt-get install confluent-platform-oss-2.11
      # Runs a set of commands using the runners shell
      - uses: docker-practice/actions-setup-docker@master
      # Each test.sh run brings up one compose topology and checks the expected container count.
      - name: run test single 2
        run: ./test.sh zk-single-kafka-single.yml 2
      - name: run test single 4
        run: ./test.sh zk-multiple-kafka-single.yml 4
      - name: run test multiple 4
        run: ./test.sh zk-single-kafka-multiple.yml 4
      - name: run test multiple 6
        run: ./test.sh zk-multiple-kafka-multiple.yml 6
      - name: run test multiple-schema-registry 7
        run: ./test.sh zk-multiple-kafka-multiple-schema-registry.yml 7
      - name: run test full-stack 7
        run: ./test.sh full-stack.yml 7
zk-single-kafka-single/*
zk-single-kafka-multiple/*
zk-multiple-kafka-single/*
zk-multiple-kafka-multiple/*
zk-multiple-kafka-multiple-schema-registry/*
full-stack/*
.idea/
\ No newline at end of file
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2019 Stephane Maarek
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
[![Actions Status](https://github.com/conduktor/kafka-stack-docker-compose/workflows/CI/badge.svg)](https://github.com/conduktor/kafka-stack-docker-compose/actions)
# An open-source project by [![Conduktor.io](https://www.conduktor.io/uploads/conduktor.svg)](https://conduktor.io/)
This project is sponsored by [Conduktor.io](https://www.conduktor.io/), a graphical desktop user interface for Apache Kafka.
Once you have started your cluster, you can use Conduktor to easily manage it.
Just connect against `localhost:9092`. If you are on Mac or Windows and want to connect from another container, use `host.docker.internal:29092`
# kafka-stack-docker-compose
This replicates as well as possible real deployment configurations, where you have your zookeeper servers and kafka servers actually all distinct from each other. This solves all the networking hurdles that come with Docker and docker-compose, and is compatible cross platform.
**UPDATE**: No /etc/hosts file changes are necessary anymore. Explanations at: https://rmoff.net/2018/08/02/kafka-listeners-explained/
## Stack version
- Zookeeper version: 3.6.3 (Confluent 7.2.0)
- Kafka version: 3.2.0 (Confluent 7.2.0)
- Kafka Schema Registry: Confluent 7.2.0
- Kafka Rest Proxy: Confluent 7.2.0
- Kafka Connect: Confluent 7.2.0
- ksqlDB Server: Confluent 7.2.0
- Zoonavigator: 1.1.1
For a UI tool to access your local Kafka cluster, use the free version of [Conduktor](https://www.conduktor.io/download)
# Requirements
Kafka will be exposed on `127.0.0.1` or `DOCKER_HOST_IP` if set in the environment.
(You probably don't need to set it if you're not using Docker-Toolbox)
## Docker-Toolbox
Docker toolbox is [deprecated](https://github.com/docker-archive/toolbox) and not maintained anymore for several years.
We can't guarantee this stack will work with Docker Toolbox, but if you want to try anyway, please export your environment before starting the stack:
```
export DOCKER_HOST_IP=192.168.99.100
```
(your docker machine IP is usually `192.168.99.100`)
## Apple M1 support
Confluent platform supports Apple M1 (ARM64) since version `7.2.0`! Basically, this stack will work out of the box.
If you want to downgrade confluent platform version, there are two ways:
1. Add `platform: linux/amd64`. It will work as docker is able to emulate AMD64 instructions.
2. Previous versions have been [built](https://github.com/arm64-compat/confluent-platform) for ARM64 by the community. If you want to use it, just change the image in the corresponding yml. Since it is not an official image, use it at your own risk.
## Single Zookeeper / Single Kafka
This configuration fits most development requirements.
- Zookeeper will be available at `$DOCKER_HOST_IP:2181`
- Kafka will be available at `$DOCKER_HOST_IP:9092`
- (experimental) JMX port at `$DOCKER_HOST_IP:9999`
Run with:
```
docker-compose -f zk-single-kafka-single.yml up
docker-compose -f zk-single-kafka-single.yml down
```
## Single Zookeeper / Multiple Kafka
If you want to have three brokers and experiment with kafka replication / fault-tolerance.
- Zookeeper will be available at `$DOCKER_HOST_IP:2181`
- Kafka will be available at `$DOCKER_HOST_IP:9092,$DOCKER_HOST_IP:9093,$DOCKER_HOST_IP:9094`
Run with:
```
docker-compose -f zk-single-kafka-multiple.yml up
docker-compose -f zk-single-kafka-multiple.yml down
```
## Multiple Zookeeper / Single Kafka
If you want to have three zookeeper nodes and experiment with zookeeper fault-tolerance.
- Zookeeper will be available at `$DOCKER_HOST_IP:2181,$DOCKER_HOST_IP:2182,$DOCKER_HOST_IP:2183`
- Kafka will be available at `$DOCKER_HOST_IP:9092`
- (experimental) JMX port at `$DOCKER_HOST_IP:9999`
Run with:
```
docker-compose -f zk-multiple-kafka-single.yml up
docker-compose -f zk-multiple-kafka-single.yml down
```
## Multiple Zookeeper / Multiple Kafka
If you want to have three zookeeper nodes and three kafka brokers to experiment with production setup.
- Zookeeper will be available at `$DOCKER_HOST_IP:2181,$DOCKER_HOST_IP:2182,$DOCKER_HOST_IP:2183`
- Kafka will be available at `$DOCKER_HOST_IP:9092,$DOCKER_HOST_IP:9093,$DOCKER_HOST_IP:9094`
Run with:
```
docker-compose -f zk-multiple-kafka-multiple.yml up
docker-compose -f zk-multiple-kafka-multiple.yml down
```
## Full stack
Need a UI? We recommend using [Conduktor](https://conduktor.io) as your tool to bring a unified UI to all these components
- Single Zookeeper: `$DOCKER_HOST_IP:2181`
- Single Kafka: `$DOCKER_HOST_IP:9092`
- Kafka Schema Registry: `$DOCKER_HOST_IP:8081`
- Kafka Rest Proxy: `$DOCKER_HOST_IP:8082`
- Kafka Connect: `$DOCKER_HOST_IP:8083`
- KSQL Server: `$DOCKER_HOST_IP:8088`
- Zoonavigator Web: `$DOCKER_HOST_IP:8004`
- (experimental) JMX port at `$DOCKER_HOST_IP:9999`
Run with:
```
docker-compose -f full-stack.yml up
docker-compose -f full-stack.yml down
```
# FAQ
## Kafka
**Q: Kafka's log is too verbose, how can I reduce it?**
A: Add the following line to your docker-compose environment variables: `KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"`. Full logging control can be accessed here: https://github.com/confluentinc/cp-docker-images/blob/master/debian/kafka/include/etc/confluent/docker/log4j.properties.template
**Q: How do I delete data to start fresh?**
A: Your data is persisted from within the docker compose folder, so if you want for example to reset the data in the full-stack docker compose, do a `docker-compose -f full-stack.yml down`.
**Q: Can I change the zookeeper ports?**
A: yes. Say you want to change `zoo1` port to `12181` (only relevant lines are shown):
```
zoo1:
ports:
- "12181:12181"
environment:
ZOO_PORT: 12181
kafka1:
environment:
KAFKA_ZOOKEEPER_CONNECT: "zoo1:12181"
```
**Q: Can I change the Kafka ports?**
A: yes. Say you want to change `kafka1` port to `12345` (only relevant lines are shown). Note only `LISTENER_DOCKER_EXTERNAL` changes:
```
kafka1:
image: confluentinc/cp-kafka:7.2.0
hostname: kafka1
ports:
- "12345:12345"
environment:
KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka1:19092,EXTERNAL://${DOCKER_HOST_IP:-127.0.0.1}:12345,DOCKER://host.docker.internal:29092
```
**Q: Kafka is using a lot of disk space for testing. Can I reduce it?**
A: yes. This is for testing only!!! Reduce the KAFKA_LOG_SEGMENT_BYTES to 16MB and the KAFKA_LOG_RETENTION_BYTES to 128MB
```
kafka1:
image: confluentinc/cp-kafka:7.2.0
...
environment:
...
# For testing small segments 16MB and retention of 128MB
KAFKA_LOG_SEGMENT_BYTES: 16777216
KAFKA_LOG_RETENTION_BYTES: 134217728
```
**Q: How do I expose kafka?**
A: If you want to expose kafka outside of your local machine, you must set `KAFKA_ADVERTISED_LISTENERS` to the IP of the machine so that kafka is externally accessible. To achieve this you can set `LISTENER_DOCKER_EXTERNAL` to the IP of the machine.
For example, if the IP of your machine is `50.10.2.3`, follow the sample mapping below:
```
kafka1:
image: confluentinc/cp-kafka:7.2.0
...
environment:
...
KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka2:19093,EXTERNAL://50.10.2.3:9093,DOCKER://host.docker.internal:29093
```
**Q: How do I add connectors to kafka connect?**
Create a `connectors` directory and place your connectors there (usually in a subdirectory) `connectors/example/my.jar`
The directory is automatically mounted by the `kafka-connect` Docker container
OR edit the bash command which pulls connectors at runtime
```
confluent-hub install --no-prompt debezium/debezium-connector-mysql:latest
confluent-hub install
```
**Q: How to disable Confluent metrics?**
Add this environment variable
```
KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE=false
```
version: '2.1'
services:
zoo1:
image: confluentinc/cp-zookeeper:7.2.0
hostname: zoo1
container_name: zoo1
ports:
- "2181:2181"
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_SERVER_ID: 1
ZOOKEEPER_SERVERS: zoo1:2888:3888
kafka1:
image: confluentinc/cp-kafka:7.2.0
hostname: kafka1
container_name: kafka1
ports:
- "9092:9092"
- "29092:29092"
- "9999:9999"
environment:
KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka1:19092,EXTERNAL://${DOCKER_HOST_IP:-127.0.0.1}:9092,DOCKER://host.docker.internal:29092
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,DOCKER:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181"
KAFKA_BROKER_ID: 1
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_JMX_PORT: 9999
KAFKA_JMX_HOSTNAME: ${DOCKER_HOST_IP:-127.0.0.1}
KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.authorizer.AclAuthorizer
KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
depends_on:
- zoo1
kafka-schema-registry:
image: confluentinc/cp-schema-registry:7.2.0
hostname: kafka-schema-registry
container_name: kafka-schema-registry
ports:
- "8081:8081"
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:19092
SCHEMA_REGISTRY_HOST_NAME: kafka-schema-registry
SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081
depends_on:
- zoo1
- kafka1
kafka-rest-proxy:
image: confluentinc/cp-kafka-rest:7.2.0
hostname: kafka-rest-proxy
container_name: kafka-rest-proxy
ports:
- "8082:8082"
environment:
# KAFKA_REST_ZOOKEEPER_CONNECT: zoo1:2181
KAFKA_REST_LISTENERS: http://0.0.0.0:8082/
KAFKA_REST_SCHEMA_REGISTRY_URL: http://kafka-schema-registry:8081/
KAFKA_REST_HOST_NAME: kafka-rest-proxy
KAFKA_REST_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:19092
depends_on:
- zoo1
- kafka1
- kafka-schema-registry
kafka-connect:
image: confluentinc/cp-kafka-connect:7.2.0
hostname: kafka-connect
container_name: kafka-connect
ports:
- "8083:8083"
environment:
CONNECT_BOOTSTRAP_SERVERS: "kafka1:19092"
CONNECT_REST_PORT: 8083
CONNECT_GROUP_ID: compose-connect-group
CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs
CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets
CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status
CONNECT_KEY_CONVERTER: io.confluent.connect.avro.AvroConverter
CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: 'http://kafka-schema-registry:8081'
CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter
CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: 'http://kafka-schema-registry:8081'
CONNECT_INTERNAL_KEY_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
CONNECT_INTERNAL_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
CONNECT_REST_ADVERTISED_HOST_NAME: "kafka-connect"
CONNECT_LOG4J_ROOT_LOGLEVEL: "INFO"
CONNECT_LOG4J_LOGGERS: "org.apache.kafka.connect.runtime.rest=WARN,org.reflections=ERROR"
CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: "1"
CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: "1"
CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: "1"
CONNECT_PLUGIN_PATH: '/usr/share/java,/etc/kafka-connect/jars,/usr/share/confluent-hub-components'
volumes:
- ./connectors:/etc/kafka-connect/jars/
depends_on:
- zoo1
- kafka1
- kafka-schema-registry
- kafka-rest-proxy
command:
- bash
- -c
- |
confluent-hub install --no-prompt debezium/debezium-connector-mysql:latest
confluent-hub install --no-prompt confluentinc/kafka-connect-datagen:0.4.0
/etc/confluent/docker/run
ksqldb-server:
image: confluentinc/cp-ksqldb-server:7.2.0
hostname: ksqldb-server
container_name: ksqldb-server
ports:
- "8088:8088"
environment:
KSQL_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:19092
KSQL_LISTENERS: http://0.0.0.0:8088/
KSQL_KSQL_SERVICE_ID: ksqldb-server_
depends_on:
- zoo1
- kafka1
zoonavigator:
image: elkozmon/zoonavigator:1.1.1
container_name: zoonavigator
ports:
- "8004:8000"
environment:
HTTP_PORT: 8000
AUTO_CONNECT_CONNECTION_STRING: zoo1:2181
#!/bin/bash
# CI integration-test driver: brings up a docker-compose stack, verifies the
# expected number of services are running, smoke-tests Kafka end-to-end, and
# tears the stack down again.
#
# Usage: test.sh <compose-file> <expected-running-container-count>
set -e
# Compose file under test; exported so the ERR trap handler can tear it down.
# Quoted to survive paths containing whitespace. (A stray duplicate shebang
# that previously sat mid-file has been removed.)
export file="$1"
f () {
    # ERR trap handler: report the failing command, tear the stack down so CI
    # does not leak containers, then propagate the original exit status.
    errcode=$?  # save the exit code as the first thing done in the trap function
    # BUG FIX: this previously printed "$errorcode" (undefined variable), so the
    # actual exit code was never shown.
    echo "error $errcode"
    echo "the command executing at the time of the error was"
    echo "$BASH_COMMAND"   # $BASH_COMMAND holds the command running when the trap fired
    echo "on line ${BASH_LINENO[0]}"  # line number in the script of that command
    # cleanup: stop and remove the stack defined by the compose file under test
    docker-compose -f "$file" down
    exit $errcode  # re-raise the original failure code
}
trap f ERR
all_great(){
    # Verify the stack defined in compose file $1 has exactly $2 services in
    # the "Up" state; on mismatch, dump status and logs, then fail the build.
    # $1 - path to the docker-compose file
    # $2 - expected number of running ("Up") containers
    echo "Verifying Process"
    # Positional parameters are now quoted to survive paths with whitespace.
    running=`docker-compose -f "$1" ps | grep Up | wc -l`
    if [ "$running" != "$2" ]; then
        # log current service status for the CI output
        docker-compose -f "$1" ps
        # full service logs for debugging the failure
        docker-compose -f "$1" logs
        exit 1
    fi
}
kafka_tests(){
    # Smoke-test the Kafka brokers behind compose file $1:
    #   1. create a test topic (replication factor 3 when the compose file
    #      defines a kafka3 broker, otherwise 1)
    #   2. produce 100 messages
    #   3. consume them back and verify the count is exactly 100
    echo "Testing Kafka"
    topic="testtopic"
    if grep -q kafka3 "$1"; then replication_factor="3"; else replication_factor="1"; fi
    # topic creation can race the brokers coming up, so retry up to 5 times
    for i in 1 2 3 4 5; do echo "trying to create test topic" && kafka-topics --create --topic "$topic" --replication-factor "$replication_factor" --partitions 12 --zookeeper "$DOCKER_HOST_IP:2181" && break || sleep 5; done
    sleep 5
    for x in {1..100}; do echo $x; done | kafka-console-producer --broker-list "$DOCKER_HOST_IP:9092" --topic "$topic"
    sleep 5
    rows=`kafka-console-consumer --bootstrap-server "$DOCKER_HOST_IP:9092" --topic "$topic" --from-beginning --timeout-ms 10000 | wc -l`
    # rows=`kafkacat -C -b $DOCKER_HOST_IP:9092 -t $topic -o beginning -e | wc -l `
    if [ "$rows" != "100" ]; then
        # BUG FIX: the diagnostic re-read previously consumed the hard-coded
        # topic "test-topic" (which does not exist) instead of "$topic", so it
        # always reported 0 rows and hid the real consumed count.
        kafka-console-consumer --bootstrap-server "$DOCKER_HOST_IP:9092" --topic "$topic" --from-beginning --timeout-ms 10000 | wc -l
        exit 1
    else
        echo "Kafka Test Success"
    fi
}
# --- main flow ------------------------------------------------------------
# Bring the stack up detached, give it time to settle, then run the checks.
# All expansions are quoted so paths/args with whitespace do not word-split.
docker-compose -f "$file" up -d
sleep 10
# log current service status
docker-compose -f "$file" ps
# checks: container count, Kafka produce/consume round-trip, count again
all_great "$1" "$2"
kafka_tests "$1"
all_great "$1" "$2"
# teardown
docker-compose -f "$file" down
echo "Success!"
\ No newline at end of file
version: '2.1'
services:
zoo1:
image: confluentinc/cp-zookeeper:7.2.0
hostname: zoo1
container_name: zoo1
ports:
- "2181:2181"
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_SERVER_ID: 1
ZOOKEEPER_SERVERS: zoo1:2888:3888;zoo2:2888:3888;zoo3:2888:3888
zoo2:
image: confluentinc/cp-zookeeper:7.2.0
hostname: zoo2
container_name: zoo2
ports:
- "2182:2182"
environment:
ZOOKEEPER_CLIENT_PORT: 2182
ZOOKEEPER_SERVER_ID: 2
ZOOKEEPER_SERVERS: zoo1:2888:3888;zoo2:2888:3888;zoo3:2888:3888
zoo3:
image: confluentinc/cp-zookeeper:7.2.0
hostname: zoo3
container_name: zoo3
ports:
- "2183:2183"
environment:
ZOOKEEPER_CLIENT_PORT: 2183
ZOOKEEPER_SERVER_ID: 3
ZOOKEEPER_SERVERS: zoo1:2888:3888;zoo2:2888:3888;zoo3:2888:3888
kafka1:
image: confluentinc/cp-kafka:7.2.0
hostname: kafka1
container_name: kafka1
ports:
- "9092:9092"
- "29092:29092"
environment:
KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka1:19092,EXTERNAL://${DOCKER_HOST_IP:-127.0.0.1}:9092,DOCKER://host.docker.internal:29092
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,DOCKER:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2182,zoo3:2183"
KAFKA_BROKER_ID: 1
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.authorizer.AclAuthorizer
KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
depends_on:
- zoo1
- zoo2
- zoo3
kafka2:
image: confluentinc/cp-kafka:7.2.0
hostname: kafka2
container_name: kafka2
ports:
- "9093:9093"
- "29093:29093"
environment:
KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka2:19093,EXTERNAL://${DOCKER_HOST_IP:-127.0.0.1}:9093,DOCKER://host.docker.internal:29093
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,DOCKER:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2182,zoo3:2183"
KAFKA_BROKER_ID: 2
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.authorizer.AclAuthorizer
KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
depends_on:
- zoo1
- zoo2
- zoo3
kafka3:
image: confluentinc/cp-kafka:7.2.0
hostname: kafka3
container_name: kafka3
ports:
- "9094:9094"
- "29094:29094"
environment:
KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka3:19094,EXTERNAL://${DOCKER_HOST_IP:-127.0.0.1}:9094,DOCKER://host.docker.internal:29094
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,DOCKER:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2182,zoo3:2183"
KAFKA_BROKER_ID: 3
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.authorizer.AclAuthorizer
KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
depends_on:
- zoo1
- zoo2
- zoo3
kafka-schema-registry:
image: confluentinc/cp-schema-registry:7.2.0
hostname: kafka-schema-registry
container_name: kafka-schema-registry
depends_on:
- zoo1
- zoo2
- zoo3
- kafka1
- kafka2
- kafka3
ports:
- "8081:8081"
environment:
SCHEMA_REGISTRY_HOST_NAME: kafka-schema-registry
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: 'PLAINTEXT://kafka1:19092,PLAINTEXT://kafka2:19093,PLAINTEXT://kafka3:19094'
SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081
version: '2.1'
services:
zoo1:
image: confluentinc/cp-zookeeper:7.2.0
hostname: zoo1
container_name: zoo1
ports:
- "2181:2181"
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_SERVER_ID: 1
ZOOKEEPER_SERVERS: zoo1:2888:3888;zoo2:2888:3888;zoo3:2888:3888
zoo2:
image: confluentinc/cp-zookeeper:7.2.0
hostname: zoo2
container_name: zoo2
ports:
- "2182:2182"
environment:
ZOOKEEPER_CLIENT_PORT: 2182
ZOOKEEPER_SERVER_ID: 2
ZOOKEEPER_SERVERS: zoo1:2888:3888;zoo2:2888:3888;zoo3:2888:3888
zoo3:
image: confluentinc/cp-zookeeper:7.2.0
hostname: zoo3
container_name: zoo3
ports:
- "2183:2183"
environment:
ZOOKEEPER_CLIENT_PORT: 2183
ZOOKEEPER_SERVER_ID: 3
ZOOKEEPER_SERVERS: zoo1:2888:3888;zoo2:2888:3888;zoo3:2888:3888
kafka1:
image: confluentinc/cp-kafka:7.2.0
hostname: kafka1
container_name: kafka1
ports:
- "9092:9092"
- "29092:29092"
environment:
KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka1:19092,EXTERNAL://${DOCKER_HOST_IP:-127.0.0.1}:9092,DOCKER://host.docker.internal:29092
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,DOCKER:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2182,zoo3:2183"
KAFKA_BROKER_ID: 1
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.authorizer.AclAuthorizer
KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
depends_on:
- zoo1
- zoo2
- zoo3
kafka2:
image: confluentinc/cp-kafka:7.2.0
hostname: kafka2
container_name: kafka2
ports:
- "9093:9093"
- "29093:29093"
environment:
KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka2:19093,EXTERNAL://${DOCKER_HOST_IP:-127.0.0.1}:9093,DOCKER://host.docker.internal:29093
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,DOCKER:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2182,zoo3:2183"
KAFKA_BROKER_ID: 2
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.authorizer.AclAuthorizer
KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
depends_on:
- zoo1
- zoo2
- zoo3
kafka3:
image: confluentinc/cp-kafka:7.2.0
hostname: kafka3
container_name: kafka3
ports:
- "9094:9094"
- "29094:29094"
environment:
KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka3:19094,EXTERNAL://${DOCKER_HOST_IP:-127.0.0.1}:9094,DOCKER://host.docker.internal:29094
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,DOCKER:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2182,zoo3:2183"
KAFKA_BROKER_ID: 3
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.authorizer.AclAuthorizer
KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
depends_on:
- zoo1
- zoo2
- zoo3
version: '2.1'
services:
zoo1:
image: confluentinc/cp-zookeeper:7.2.0
hostname: zoo1
container_name: zoo1
ports:
- "2181:2181"
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_SERVER_ID: 1
ZOOKEEPER_SERVERS: zoo1:2888:3888;zoo2:2888:3888;zoo3:2888:3888
zoo2:
image: confluentinc/cp-zookeeper:7.2.0
hostname: zoo2
container_name: zoo2
ports:
- "2182:2182"
environment:
ZOOKEEPER_CLIENT_PORT: 2182
ZOOKEEPER_SERVER_ID: 2
ZOOKEEPER_SERVERS: zoo1:2888:3888;zoo2:2888:3888;zoo3:2888:3888
zoo3:
image: confluentinc/cp-zookeeper:7.2.0
hostname: zoo3
container_name: zoo3
ports:
- "2183:2183"
environment:
ZOOKEEPER_CLIENT_PORT: 2183
ZOOKEEPER_SERVER_ID: 3
ZOOKEEPER_SERVERS: zoo1:2888:3888;zoo2:2888:3888;zoo3:2888:3888
kafka1:
image: confluentinc/cp-kafka:7.2.0
hostname: kafka1
container_name: kafka1
ports:
- "9092:9092"
- "29092:29092"
- "9999:9999"
environment:
KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka1:19092,EXTERNAL://${DOCKER_HOST_IP:-127.0.0.1}:9092,DOCKER://host.docker.internal:29092
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,DOCKER:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2182,zoo3:2183"
KAFKA_BROKER_ID: 1
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_JMX_PORT: 9999
KAFKA_JMX_HOSTNAME: ${DOCKER_HOST_IP:-127.0.0.1}
KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.authorizer.AclAuthorizer
KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
depends_on:
- zoo1
- zoo2
- zoo3
version: '2.1'
services:
zoo1:
image: confluentinc/cp-zookeeper:7.2.0
hostname: zoo1
container_name: zoo1
ports:
- "2181:2181"
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_SERVER_ID: 1
ZOOKEEPER_SERVERS: zoo1:2888:3888
kafka1:
image: confluentinc/cp-kafka:7.2.0
hostname: kafka1
container_name: kafka1
ports:
- "9092:9092"
- "29092:29092"
environment:
KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka1:19092,EXTERNAL://${DOCKER_HOST_IP:-127.0.0.1}:9092,DOCKER://host.docker.internal:29092
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,DOCKER:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181"
KAFKA_BROKER_ID: 1
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.authorizer.AclAuthorizer
KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
depends_on:
- zoo1
kafka2:
image: confluentinc/cp-kafka:7.2.0
hostname: kafka2
container_name: kafka2
ports:
- "9093:9093"
- "29093:29093"
environment:
KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka2:19093,EXTERNAL://${DOCKER_HOST_IP:-127.0.0.1}:9093,DOCKER://host.docker.internal:29093
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,DOCKER:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181"
KAFKA_BROKER_ID: 2
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.authorizer.AclAuthorizer
KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
depends_on:
- zoo1
kafka3:
image: confluentinc/cp-kafka:7.2.0
hostname: kafka3
container_name: kafka3
ports:
- "9094:9094"
- "29094:29094"
environment:
KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka3:19094,EXTERNAL://${DOCKER_HOST_IP:-127.0.0.1}:9094,DOCKER://host.docker.internal:29094
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,DOCKER:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181"
KAFKA_BROKER_ID: 3
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.authorizer.AclAuthorizer
KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
depends_on:
- zoo1
version: '2.1'
services:
zoo1:
image: confluentinc/cp-zookeeper:7.2.0
hostname: zoo1
container_name: zoo1
ports:
- "2181:2181"
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_SERVER_ID: 1
ZOOKEEPER_SERVERS: zoo1:2888:3888
kafka1:
image: confluentinc/cp-kafka:7.2.0
hostname: kafka1
container_name: kafka1
ports:
- "9092:9092"
- "29092:29092"
- "9999:9999"
environment:
KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka1:19092,EXTERNAL://${DOCKER_HOST_IP:-127.0.0.1}:9092,DOCKER://host.docker.internal:29092
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,DOCKER:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181"
KAFKA_BROKER_ID: 1
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO"
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_JMX_PORT: 9999
KAFKA_JMX_HOSTNAME: ${DOCKER_HOST_IP:-127.0.0.1}
KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.authorizer.AclAuthorizer
KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
depends_on:
- zoo1
kafdrop1:
image: obsidiandynamics/kafdrop
restart: "no"
ports:
- "9000:9000"
environment:
KAFKA_BROKERCONNECT: "kafka1:19092"
depends_on:
- kafka1
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论