Re: [SPAM]Re: reducing network traffic
Alexey V. Karagodov
kav at karagodov.name
Thu Mar 19 19:09:33 MSK 2009
Forgot to mention:
after the server booted, I put both em interfaces into polling mode with ifconfig emX polling
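(For reference, a minimal sketch of what that looks like; the interface names em0/em1 and the rc.conf line are taken from the config quoted below, and polling only works on a kernel built with options DEVICE_POLLING:)

    # enable polling on both NICs at runtime
    ifconfig em0 polling
    ifconfig em1 polling
    # to make it persistent, "polling" can be appended to the ifconfig_emX
    # lines in /etc/rc.conf, e.g. ifconfig_em0="up mtu 9000 ... polling"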
On 19.03.2009, at 18:46, Alexey V. Karagodov wrote:
> # w
> 6:42PM up 3 days, 13:49, 3 users, load averages: 0.06, 0.10, 0.13
>
> Name    Mtu Network       Address              Ipkts Ierrs     Opkts Oerrs  Coll
> em0    9000 <Link#1>      00:30:48:74:51:16 153436664     0 206056891     0     0
> em1    9000 <Link#2>      00:30:48:74:51:16  54944561     0  99091199     0     0
> lagg0  9000 <Link#6>      00:30:48:74:51:16 208360580     0 305126939  3870     0
>
> errors are 0 on all interfaces
>
> settings:
>
> # cat /etc/sysctl.conf
>
> # $FreeBSD: src/etc/sysctl.conf,v 1.8 2003/03/13 18:43:50 mux Exp $
> #
> # This file is read when going to multi-user and its contents piped thru
> # ``sysctl'' to adjust kernel values.  ``man 5 sysctl.conf'' for details.
> #
>
> # Uncomment this to prevent users from seeing information about processes that
> # are being run under another UID.
> #security.bsd.see_other_uids=0
> #net.link.ether.inet.log_arp_wrong_iface=0
>
> dev.em.0.rx_processing_limit=-1
> dev.em.0.rx_int_delay=0
> dev.em.0.tx_int_delay=0
> dev.em.0.rx_abs_int_delay=0
> dev.em.0.tx_abs_int_delay=0
> dev.em.1.rx_processing_limit=-1
> dev.em.1.rx_int_delay=0
> dev.em.1.tx_int_delay=0
> dev.em.1.rx_abs_int_delay=0
> dev.em.1.tx_abs_int_delay=0
>
> net.inet.ip.intr_queue_maxlen=4096
> net.isr.direct=1
> net.inet.tcp.tso=1
>
> kern.polling.burst_max=1000
>
> kern.threads.max_threads_per_proc=8192
>
> kern.ipc.maxsockets=204800
> kern.ipc.somaxconn=4096
> #kern.maxfiles=204800
> #kern.maxfilesperproc=200000
> net.inet.ip.portrange.first=1024
> net.inet.ip.portrange.last=65535
> net.inet.ip.portrange.randomized=0
> net.inet.tcp.blackhole=1
> net.inet.tcp.fast_finwait2_recycle=1
> net.inet.tcp.maxtcptw=40960
> net.inet.tcp.msl=30000
> net.inet.tcp.nolocaltimewait=1
> net.inet.tcp.recvspace=131072
> net.inet.tcp.sendspace=131072
> net.inet.tcp.syncookies=1
> net.inet.udp.blackhole=1
> net.inet.udp.recvspace=131072
>
> #kern.ipc.nmbclusters=262144
> #kern.ipc.nmbclusters=0
> #net.inet.tcp.recvspace=8192
> #net.inet.tcp.sendspace=16384
>
> machdep.hyperthreading_allowed=1
> #vfs.nfs.access_cache_timeout=5
>
> #We did some tuning already and our current sysctl.conf looks like this:
>
> #kern.ipc.somaxconn=32768
> #net.inet.icmp.icmplim=3000
> #kern.ipc.maxsockets=300000
> #net.inet.tcp.delayed_ack=1
> #net.inet.tcp.finwait2_timeout=15000
> #net.inet.tcp.maxtcptw=196607
> #dev.em.0.rx_processing_limit=-1
> #net.isr.direct=0
>
> #hw.acpi.cpu.cx_lowest=C3
> # reduce swap paging
> #vm.defer_swapspace_pageouts=1
> # other tuning from "man tuning"
> #kern.ipc.shm_use_phys=1
>
> #
>
> # cat /boot/loader.conf
>
> verbose_loading="YES"
> loader_logo="beastie"
> #ichsmb_load="YES"
> #smb_load="YES"
> #ichwd_load="YES"
> ng_ether_load="YES"
> #linux_load="YES"
> accf_data_load="YES"
> accf_http_load="YES"
>
> net.inet.tcp.syncache.hashsize=1024
> net.inet.tcp.syncache.bucketlimit=100
> net.inet.tcp.tcbhashsize=4096
> kern.ipc.nsfbufs=10240
> kern.ipc.nmbclusters=0
> vm.kmem_size=1G
> vm.kmem_size_max=1G
>
> hw.em.tx_int_delay="0"
> hw.em.rx_int_delay="0"
> hw.em.tx_abs_int_delay="0"
> hw.em.rx_abs_int_delay="0"
> hw.em.rxd="4096"
> hw.em.txd="4096"
> #hw.em.smart_pwr_down=""
> #hw.em.sbp=""
> hw.em.enable_msi="1"
> hw.em.rx_process_limit="-1"
>
> #
>
> # cat /etc/rc.conf
> #etc
> linux_enable="YES"
> #watchdogd="YES"
> blanktime="60"
> check_quotas="NO"
> keyrate="fast"
> moused_enable="YES"
> named_enable="YES"
> named_flags="-c /etc/namedb/include/named.conf"
> dhcpd_enable="YES"
> dhcpd_flags="-q"
> dhcpd_conf="/usr/local/etc/dhcpd.conf"
> dhcpd_ifaces=""
> dhcpd_withumask="022"
> nfs_reserved_port_only="YES"
> rpcbind_enable="YES"
> saver="green"
> sshd_enable="YES"
> usbd_enable="YES"
> ntpd_enable="YES"
> ntpd_sync_on_start="YES"
> gateway_enable="YES"
> #router_enable="YES"
> router_flags="-v -s"
> defaultrouter="x.x.x.x"
> hostname="frontend1.x.x.x"
> #ipcad_enable="YES"
> #apcupsd_enable="YES"
> nginx_enable="YES"
> #squid_enable="YES"
> sendmail_enable="NO"
> sendmail_submit_enable="NO"
> sendmail_outbound_enable="NO"
> sendmail_msp_queue_enable="NO"
> #inetd_enable="YES"
> zabbix_agentd_enable="YES"
>
> #locale
> mousechar_start="3"
> #keymap="ru.cp1251"
> #scrnmap="win2cpp866"
> #font8x16="cp866-8x16"
> #font8x14="cp866-8x14"
> #font8x8="cp866-8x8"
>
> ifconfig_em0="up mtu 9000 vlanmtu vlanhwtag txcsum rxcsum tso lro wol"
> ifconfig_em1="up mtu 9000 vlanmtu vlanhwtag txcsum rxcsum tso lro wol"
>
> cloned_interfaces="lagg0 ..."
>
> ifconfig_lagg0="laggproto lacp laggport em0 laggport em1"
> ifconfig_lagg0_alias0=...
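(As an aside, not from the original mail: with this rc.conf the aggregate can be checked after boot; a minimal sketch, assuming the switch side is also configured for LACP:)

    # show the lagg state; both em0 and em1 should appear as laggports
    # with <ACTIVE,COLLECTING,DISTRIBUTING> flags
    ifconfig lagg0
    # link-level traffic counters for the aggregate
    netstat -i -I lagg0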
>
>
> On 19.03.2009, at 16:57, Монашёв Михаил wrote:
>
>> Hello, Дмитрий.
>>
>>>> Are there any sysctls that can be tweaked there?
>>
>>> /boot/loader.conf:
>>
>>> hw.em.rxd="4096"
>>> hw.em.txd="4096"
>>> hw.em.rx_process_limit="-1"
>>
>> I also added net.inet.ip.intr_queue_maxlen=4096 to /etc/sysctl.conf
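(A minimal sketch of applying that without a reboot; the drop counter is the standard FreeBSD one, not something quoted in this thread:)

    # raise the IP input queue length immediately
    sysctl net.inet.ip.intr_queue_maxlen=4096
    # watch whether the input queue is still dropping packets
    sysctl net.inet.ip.intr_queue_drops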
>>
>> On two of the machines the Ierrs in netstat -i disappeared. But on one,
>> the one with the highest packet rate, they are still there. They grow
>> much more slowly now, but they do grow: roughly 3500 over 5 hours.
>> That is not much, of course, but it is still not good.
>>
>> Here is the current picture:
>>
>> netstat -i -Iem1
>> Name  Mtu Network       Address              Ipkts Ierrs     Opkts Oerrs  Coll
>> em1  9216 <Link#2>      00:04:23:de:b0:1c 281537845  3586 147376712     0     0
> over what time period is this data?
>
>>
>>
>> Mar 19 16:49:45 softsearch1 kernel: em1: Excessive collisions = 0
>> Mar 19 16:49:45 softsearch1 kernel: em1: Sequence errors = 0
>> Mar 19 16:49:45 softsearch1 kernel: em1: Defer count = 0
>> Mar 19 16:49:45 softsearch1 kernel: em1: Missed Packets = 3586
>> Mar 19 16:49:45 softsearch1 kernel: em1: Receive No Buffers = 1428
>> Mar 19 16:49:45 softsearch1 kernel: em1: Receive Length Errors = 0
>> Mar 19 16:49:45 softsearch1 kernel: em1: Receive errors = 0
>> Mar 19 16:49:45 softsearch1 kernel: em1: Crc errors = 0
>> Mar 19 16:49:45 softsearch1 kernel: em1: Alignment errors = 0
>> Mar 19 16:49:45 softsearch1 kernel: em1: Collision/Carrier extension errors = 0
>> Mar 19 16:49:45 softsearch1 kernel: em1: RX overruns = 9
>> Mar 19 16:49:45 softsearch1 kernel: em1: watchdog timeouts = 0
>> Mar 19 16:49:45 softsearch1 kernel: em1: RX MSIX IRQ = 0 TX MSIX IRQ = 0 LINK MSIX IRQ = 0
>> Mar 19 16:49:45 softsearch1 kernel: em1: XON Rcvd = 0
>> Mar 19 16:49:45 softsearch1 kernel: em1: XON Xmtd = 0
>> Mar 19 16:49:45 softsearch1 kernel: em1: XOFF Rcvd = 0
>> Mar 19 16:49:45 softsearch1 kernel: em1: XOFF Xmtd = 0
>> Mar 19 16:49:45 softsearch1 kernel: em1: Good Packets Rcvd = 285783391
>> Mar 19 16:49:45 softsearch1 kernel: em1: Good Packets Xmtd = 149632906
>> Mar 19 16:49:45 softsearch1 kernel: em1: TSO Contexts Xmtd = 0
>> Mar 19 16:49:45 softsearch1 kernel: em1: TSO Contexts Failed = 0
>>
>> This is data for roughly 5 hours.
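(A minimal sketch of one way to keep watching how fast the Ierrs on em1 climb; the 60-second interval is an arbitrary choice, not something from the thread:)

    #!/bin/sh
    # print a timestamp and the Ierrs column (field 6 of netstat -i) once a minute
    while true; do
        date
        netstat -i -I em1 | awk 'NR == 2 { print "Ierrs:", $6 }'
        sleep 60
    done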
>>
>> Maybe it makes sense to lower dev.em.1.rx_abs_int_delay? Right now it is 66.
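(A minimal sketch of trying that; 33 is just an illustrative lower value, and the em(4) absolute RX delay is adjustable at runtime through the same dev.em.N sysctls already used in the sysctl.conf quoted above:)

    # check the current value
    sysctl dev.em.1.rx_abs_int_delay
    # try a lower value on the fly; persist it in /etc/sysctl.conf only if Ierrs improve
    sysctl dev.em.1.rx_abs_int_delay=33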
>>
>> --
>> Regards,
>> Монашёв Михаил, SoftSearch.ru
>> mailto:postmaster at softsearch.ru
>> ICQ# 166233339
>> http://michael.mindmix.ru/
>> Going through life without a backup.
>>
>>
>
>